##// END OF EJS Templates
merge with stable
Matt Mackall -
r15159:85322c19 merge default
parent child Browse files
Show More
@@ -1,282 +1,280 b''
1 # httpconnection.py - urllib2 handler for new http support
1 # httpconnection.py - urllib2 handler for new http support
2 #
2 #
3 # Copyright 2005, 2006, 2007, 2008 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006, 2007, 2008 Matt Mackall <mpm@selenic.com>
4 # Copyright 2006, 2007 Alexis S. L. Carvalho <alexis@cecm.usp.br>
4 # Copyright 2006, 2007 Alexis S. L. Carvalho <alexis@cecm.usp.br>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 # Copyright 2011 Google, Inc.
6 # Copyright 2011 Google, Inc.
7 #
7 #
8 # This software may be used and distributed according to the terms of the
8 # This software may be used and distributed according to the terms of the
9 # GNU General Public License version 2 or any later version.
9 # GNU General Public License version 2 or any later version.
10 import logging
10 import logging
11 import socket
11 import socket
12 import urllib
12 import urllib
13 import urllib2
13 import urllib2
14 import os
14 import os
15
15
16 from mercurial import httpclient
16 from mercurial import httpclient
17 from mercurial import sslutil
17 from mercurial import sslutil
18 from mercurial import util
18 from mercurial import util
19 from mercurial.i18n import _
19 from mercurial.i18n import _
20
20
21 # moved here from url.py to avoid a cycle
21 # moved here from url.py to avoid a cycle
22 class httpsendfile(object):
22 class httpsendfile(object):
23 """This is a wrapper around the objects returned by python's "open".
23 """This is a wrapper around the objects returned by python's "open".
24
24
25 Its purpose is to send file-like objects via HTTP and, to do so, it
25 Its purpose is to send file-like objects via HTTP.
26 defines a __len__ attribute to feed the Content-Length header.
26 It do however not define a __len__ attribute because the length
27 might be more than Py_ssize_t can handle.
27 """
28 """
28
29
29 def __init__(self, ui, *args, **kwargs):
30 def __init__(self, ui, *args, **kwargs):
30 # We can't just "self._data = open(*args, **kwargs)" here because there
31 # We can't just "self._data = open(*args, **kwargs)" here because there
31 # is an "open" function defined in this module that shadows the global
32 # is an "open" function defined in this module that shadows the global
32 # one
33 # one
33 self.ui = ui
34 self.ui = ui
34 self._data = open(*args, **kwargs)
35 self._data = open(*args, **kwargs)
35 self.seek = self._data.seek
36 self.seek = self._data.seek
36 self.close = self._data.close
37 self.close = self._data.close
37 self.write = self._data.write
38 self.write = self._data.write
38 self._len = os.fstat(self._data.fileno()).st_size
39 self.length = os.fstat(self._data.fileno()).st_size
39 self._pos = 0
40 self._pos = 0
40 self._total = self._len / 1024 * 2
41 self._total = self.length / 1024 * 2
41
42
42 def read(self, *args, **kwargs):
43 def read(self, *args, **kwargs):
43 try:
44 try:
44 ret = self._data.read(*args, **kwargs)
45 ret = self._data.read(*args, **kwargs)
45 except EOFError:
46 except EOFError:
46 self.ui.progress(_('sending'), None)
47 self.ui.progress(_('sending'), None)
47 self._pos += len(ret)
48 self._pos += len(ret)
48 # We pass double the max for total because we currently have
49 # We pass double the max for total because we currently have
49 # to send the bundle twice in the case of a server that
50 # to send the bundle twice in the case of a server that
50 # requires authentication. Since we can't know until we try
51 # requires authentication. Since we can't know until we try
51 # once whether authentication will be required, just lie to
52 # once whether authentication will be required, just lie to
52 # the user and maybe the push succeeds suddenly at 50%.
53 # the user and maybe the push succeeds suddenly at 50%.
53 self.ui.progress(_('sending'), self._pos / 1024,
54 self.ui.progress(_('sending'), self._pos / 1024,
54 unit=_('kb'), total=self._total)
55 unit=_('kb'), total=self._total)
55 return ret
56 return ret
56
57
57 def __len__(self):
58 return self._len
59
60 # moved here from url.py to avoid a cycle
58 # moved here from url.py to avoid a cycle
61 def readauthforuri(ui, uri, user):
59 def readauthforuri(ui, uri, user):
62 # Read configuration
60 # Read configuration
63 config = dict()
61 config = dict()
64 for key, val in ui.configitems('auth'):
62 for key, val in ui.configitems('auth'):
65 if '.' not in key:
63 if '.' not in key:
66 ui.warn(_("ignoring invalid [auth] key '%s'\n") % key)
64 ui.warn(_("ignoring invalid [auth] key '%s'\n") % key)
67 continue
65 continue
68 group, setting = key.rsplit('.', 1)
66 group, setting = key.rsplit('.', 1)
69 gdict = config.setdefault(group, dict())
67 gdict = config.setdefault(group, dict())
70 if setting in ('username', 'cert', 'key'):
68 if setting in ('username', 'cert', 'key'):
71 val = util.expandpath(val)
69 val = util.expandpath(val)
72 gdict[setting] = val
70 gdict[setting] = val
73
71
74 # Find the best match
72 # Find the best match
75 scheme, hostpath = uri.split('://', 1)
73 scheme, hostpath = uri.split('://', 1)
76 bestuser = None
74 bestuser = None
77 bestlen = 0
75 bestlen = 0
78 bestauth = None
76 bestauth = None
79 for group, auth in config.iteritems():
77 for group, auth in config.iteritems():
80 if user and user != auth.get('username', user):
78 if user and user != auth.get('username', user):
81 # If a username was set in the URI, the entry username
79 # If a username was set in the URI, the entry username
82 # must either match it or be unset
80 # must either match it or be unset
83 continue
81 continue
84 prefix = auth.get('prefix')
82 prefix = auth.get('prefix')
85 if not prefix:
83 if not prefix:
86 continue
84 continue
87 p = prefix.split('://', 1)
85 p = prefix.split('://', 1)
88 if len(p) > 1:
86 if len(p) > 1:
89 schemes, prefix = [p[0]], p[1]
87 schemes, prefix = [p[0]], p[1]
90 else:
88 else:
91 schemes = (auth.get('schemes') or 'https').split()
89 schemes = (auth.get('schemes') or 'https').split()
92 if (prefix == '*' or hostpath.startswith(prefix)) and \
90 if (prefix == '*' or hostpath.startswith(prefix)) and \
93 (len(prefix) > bestlen or (len(prefix) == bestlen and \
91 (len(prefix) > bestlen or (len(prefix) == bestlen and \
94 not bestuser and 'username' in auth)) \
92 not bestuser and 'username' in auth)) \
95 and scheme in schemes:
93 and scheme in schemes:
96 bestlen = len(prefix)
94 bestlen = len(prefix)
97 bestauth = group, auth
95 bestauth = group, auth
98 bestuser = auth.get('username')
96 bestuser = auth.get('username')
99 if user and not bestuser:
97 if user and not bestuser:
100 auth['username'] = user
98 auth['username'] = user
101 return bestauth
99 return bestauth
102
100
103 # Mercurial (at least until we can remove the old codepath) requires
101 # Mercurial (at least until we can remove the old codepath) requires
104 # that the http response object be sufficiently file-like, so we
102 # that the http response object be sufficiently file-like, so we
105 # provide a close() method here.
103 # provide a close() method here.
106 class HTTPResponse(httpclient.HTTPResponse):
104 class HTTPResponse(httpclient.HTTPResponse):
107 def close(self):
105 def close(self):
108 pass
106 pass
109
107
110 class HTTPConnection(httpclient.HTTPConnection):
108 class HTTPConnection(httpclient.HTTPConnection):
111 response_class = HTTPResponse
109 response_class = HTTPResponse
112 def request(self, method, uri, body=None, headers={}):
110 def request(self, method, uri, body=None, headers={}):
113 if isinstance(body, httpsendfile):
111 if isinstance(body, httpsendfile):
114 body.seek(0)
112 body.seek(0)
115 httpclient.HTTPConnection.request(self, method, uri, body=body,
113 httpclient.HTTPConnection.request(self, method, uri, body=body,
116 headers=headers)
114 headers=headers)
117
115
118
116
119 _configuredlogging = False
117 _configuredlogging = False
120 LOGFMT = '%(levelname)s:%(name)s:%(lineno)d:%(message)s'
118 LOGFMT = '%(levelname)s:%(name)s:%(lineno)d:%(message)s'
121 # Subclass BOTH of these because otherwise urllib2 "helpfully"
119 # Subclass BOTH of these because otherwise urllib2 "helpfully"
122 # reinserts them since it notices we don't include any subclasses of
120 # reinserts them since it notices we don't include any subclasses of
123 # them.
121 # them.
124 class http2handler(urllib2.HTTPHandler, urllib2.HTTPSHandler):
122 class http2handler(urllib2.HTTPHandler, urllib2.HTTPSHandler):
125 def __init__(self, ui, pwmgr):
123 def __init__(self, ui, pwmgr):
126 global _configuredlogging
124 global _configuredlogging
127 urllib2.AbstractHTTPHandler.__init__(self)
125 urllib2.AbstractHTTPHandler.__init__(self)
128 self.ui = ui
126 self.ui = ui
129 self.pwmgr = pwmgr
127 self.pwmgr = pwmgr
130 self._connections = {}
128 self._connections = {}
131 loglevel = ui.config('ui', 'http2debuglevel', default=None)
129 loglevel = ui.config('ui', 'http2debuglevel', default=None)
132 if loglevel and not _configuredlogging:
130 if loglevel and not _configuredlogging:
133 _configuredlogging = True
131 _configuredlogging = True
134 logger = logging.getLogger('mercurial.httpclient')
132 logger = logging.getLogger('mercurial.httpclient')
135 logger.setLevel(getattr(logging, loglevel.upper()))
133 logger.setLevel(getattr(logging, loglevel.upper()))
136 handler = logging.StreamHandler()
134 handler = logging.StreamHandler()
137 handler.setFormatter(logging.Formatter(LOGFMT))
135 handler.setFormatter(logging.Formatter(LOGFMT))
138 logger.addHandler(handler)
136 logger.addHandler(handler)
139
137
140 def close_all(self):
138 def close_all(self):
141 """Close and remove all connection objects being kept for reuse."""
139 """Close and remove all connection objects being kept for reuse."""
142 for openconns in self._connections.values():
140 for openconns in self._connections.values():
143 for conn in openconns:
141 for conn in openconns:
144 conn.close()
142 conn.close()
145 self._connections = {}
143 self._connections = {}
146
144
147 # shamelessly borrowed from urllib2.AbstractHTTPHandler
145 # shamelessly borrowed from urllib2.AbstractHTTPHandler
148 def do_open(self, http_class, req, use_ssl):
146 def do_open(self, http_class, req, use_ssl):
149 """Return an addinfourl object for the request, using http_class.
147 """Return an addinfourl object for the request, using http_class.
150
148
151 http_class must implement the HTTPConnection API from httplib.
149 http_class must implement the HTTPConnection API from httplib.
152 The addinfourl return value is a file-like object. It also
150 The addinfourl return value is a file-like object. It also
153 has methods and attributes including:
151 has methods and attributes including:
154 - info(): return a mimetools.Message object for the headers
152 - info(): return a mimetools.Message object for the headers
155 - geturl(): return the original request URL
153 - geturl(): return the original request URL
156 - code: HTTP status code
154 - code: HTTP status code
157 """
155 """
158 # If using a proxy, the host returned by get_host() is
156 # If using a proxy, the host returned by get_host() is
159 # actually the proxy. On Python 2.6.1, the real destination
157 # actually the proxy. On Python 2.6.1, the real destination
160 # hostname is encoded in the URI in the urllib2 request
158 # hostname is encoded in the URI in the urllib2 request
161 # object. On Python 2.6.5, it's stored in the _tunnel_host
159 # object. On Python 2.6.5, it's stored in the _tunnel_host
162 # attribute which has no accessor.
160 # attribute which has no accessor.
163 tunhost = getattr(req, '_tunnel_host', None)
161 tunhost = getattr(req, '_tunnel_host', None)
164 host = req.get_host()
162 host = req.get_host()
165 if tunhost:
163 if tunhost:
166 proxyhost = host
164 proxyhost = host
167 host = tunhost
165 host = tunhost
168 elif req.has_proxy():
166 elif req.has_proxy():
169 proxyhost = req.get_host()
167 proxyhost = req.get_host()
170 host = req.get_selector().split('://', 1)[1].split('/', 1)[0]
168 host = req.get_selector().split('://', 1)[1].split('/', 1)[0]
171 else:
169 else:
172 proxyhost = None
170 proxyhost = None
173
171
174 if proxyhost:
172 if proxyhost:
175 if ':' in proxyhost:
173 if ':' in proxyhost:
176 # Note: this means we'll explode if we try and use an
174 # Note: this means we'll explode if we try and use an
177 # IPv6 http proxy. This isn't a regression, so we
175 # IPv6 http proxy. This isn't a regression, so we
178 # won't worry about it for now.
176 # won't worry about it for now.
179 proxyhost, proxyport = proxyhost.rsplit(':', 1)
177 proxyhost, proxyport = proxyhost.rsplit(':', 1)
180 else:
178 else:
181 proxyport = 3128 # squid default
179 proxyport = 3128 # squid default
182 proxy = (proxyhost, proxyport)
180 proxy = (proxyhost, proxyport)
183 else:
181 else:
184 proxy = None
182 proxy = None
185
183
186 if not host:
184 if not host:
187 raise urllib2.URLError('no host given')
185 raise urllib2.URLError('no host given')
188
186
189 connkey = use_ssl, host, proxy
187 connkey = use_ssl, host, proxy
190 allconns = self._connections.get(connkey, [])
188 allconns = self._connections.get(connkey, [])
191 conns = [c for c in allconns if not c.busy()]
189 conns = [c for c in allconns if not c.busy()]
192 if conns:
190 if conns:
193 h = conns[0]
191 h = conns[0]
194 else:
192 else:
195 if allconns:
193 if allconns:
196 self.ui.debug('all connections for %s busy, making a new '
194 self.ui.debug('all connections for %s busy, making a new '
197 'one\n' % host)
195 'one\n' % host)
198 timeout = None
196 timeout = None
199 if req.timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
197 if req.timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
200 timeout = req.timeout
198 timeout = req.timeout
201 h = http_class(host, timeout=timeout, proxy_hostport=proxy)
199 h = http_class(host, timeout=timeout, proxy_hostport=proxy)
202 self._connections.setdefault(connkey, []).append(h)
200 self._connections.setdefault(connkey, []).append(h)
203
201
204 headers = dict(req.headers)
202 headers = dict(req.headers)
205 headers.update(req.unredirected_hdrs)
203 headers.update(req.unredirected_hdrs)
206 headers = dict(
204 headers = dict(
207 (name.title(), val) for name, val in headers.items())
205 (name.title(), val) for name, val in headers.items())
208 try:
206 try:
209 path = req.get_selector()
207 path = req.get_selector()
210 if '://' in path:
208 if '://' in path:
211 path = path.split('://', 1)[1].split('/', 1)[1]
209 path = path.split('://', 1)[1].split('/', 1)[1]
212 if path[0] != '/':
210 if path[0] != '/':
213 path = '/' + path
211 path = '/' + path
214 h.request(req.get_method(), path, req.data, headers)
212 h.request(req.get_method(), path, req.data, headers)
215 r = h.getresponse()
213 r = h.getresponse()
216 except socket.error, err: # XXX what error?
214 except socket.error, err: # XXX what error?
217 raise urllib2.URLError(err)
215 raise urllib2.URLError(err)
218
216
219 # Pick apart the HTTPResponse object to get the addinfourl
217 # Pick apart the HTTPResponse object to get the addinfourl
220 # object initialized properly.
218 # object initialized properly.
221 r.recv = r.read
219 r.recv = r.read
222
220
223 resp = urllib.addinfourl(r, r.headers, req.get_full_url())
221 resp = urllib.addinfourl(r, r.headers, req.get_full_url())
224 resp.code = r.status
222 resp.code = r.status
225 resp.msg = r.reason
223 resp.msg = r.reason
226 return resp
224 return resp
227
225
228 # httplib always uses the given host/port as the socket connect
226 # httplib always uses the given host/port as the socket connect
229 # target, and then allows full URIs in the request path, which it
227 # target, and then allows full URIs in the request path, which it
230 # then observes and treats as a signal to do proxying instead.
228 # then observes and treats as a signal to do proxying instead.
231 def http_open(self, req):
229 def http_open(self, req):
232 if req.get_full_url().startswith('https'):
230 if req.get_full_url().startswith('https'):
233 return self.https_open(req)
231 return self.https_open(req)
234 return self.do_open(HTTPConnection, req, False)
232 return self.do_open(HTTPConnection, req, False)
235
233
236 def https_open(self, req):
234 def https_open(self, req):
237 # req.get_full_url() does not contain credentials and we may
235 # req.get_full_url() does not contain credentials and we may
238 # need them to match the certificates.
236 # need them to match the certificates.
239 url = req.get_full_url()
237 url = req.get_full_url()
240 user, password = self.pwmgr.find_stored_password(url)
238 user, password = self.pwmgr.find_stored_password(url)
241 res = readauthforuri(self.ui, url, user)
239 res = readauthforuri(self.ui, url, user)
242 if res:
240 if res:
243 group, auth = res
241 group, auth = res
244 self.auth = auth
242 self.auth = auth
245 self.ui.debug("using auth.%s.* for authentication\n" % group)
243 self.ui.debug("using auth.%s.* for authentication\n" % group)
246 else:
244 else:
247 self.auth = None
245 self.auth = None
248 return self.do_open(self._makesslconnection, req, True)
246 return self.do_open(self._makesslconnection, req, True)
249
247
250 def _makesslconnection(self, host, port=443, *args, **kwargs):
248 def _makesslconnection(self, host, port=443, *args, **kwargs):
251 keyfile = None
249 keyfile = None
252 certfile = None
250 certfile = None
253
251
254 if args: # key_file
252 if args: # key_file
255 keyfile = args.pop(0)
253 keyfile = args.pop(0)
256 if args: # cert_file
254 if args: # cert_file
257 certfile = args.pop(0)
255 certfile = args.pop(0)
258
256
259 # if the user has specified different key/cert files in
257 # if the user has specified different key/cert files in
260 # hgrc, we prefer these
258 # hgrc, we prefer these
261 if self.auth and 'key' in self.auth and 'cert' in self.auth:
259 if self.auth and 'key' in self.auth and 'cert' in self.auth:
262 keyfile = self.auth['key']
260 keyfile = self.auth['key']
263 certfile = self.auth['cert']
261 certfile = self.auth['cert']
264
262
265 # let host port take precedence
263 # let host port take precedence
266 if ':' in host and '[' not in host or ']:' in host:
264 if ':' in host and '[' not in host or ']:' in host:
267 host, port = host.rsplit(':', 1)
265 host, port = host.rsplit(':', 1)
268 port = int(port)
266 port = int(port)
269 if '[' in host:
267 if '[' in host:
270 host = host[1:-1]
268 host = host[1:-1]
271
269
272 if keyfile:
270 if keyfile:
273 kwargs['keyfile'] = keyfile
271 kwargs['keyfile'] = keyfile
274 if certfile:
272 if certfile:
275 kwargs['certfile'] = certfile
273 kwargs['certfile'] = certfile
276
274
277 kwargs.update(sslutil.sslkwargs(self.ui, host))
275 kwargs.update(sslutil.sslkwargs(self.ui, host))
278
276
279 con = HTTPConnection(host, port, use_ssl=True,
277 con = HTTPConnection(host, port, use_ssl=True,
280 ssl_validator=sslutil.validator(self.ui, host),
278 ssl_validator=sslutil.validator(self.ui, host),
281 **kwargs)
279 **kwargs)
282 return con
280 return con
@@ -1,243 +1,245 b''
1 # httprepo.py - HTTP repository proxy classes for mercurial
1 # httprepo.py - HTTP repository proxy classes for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from node import nullid
9 from node import nullid
10 from i18n import _
10 from i18n import _
11 import changegroup, statichttprepo, error, httpconnection, url, util, wireproto
11 import changegroup, statichttprepo, error, httpconnection, url, util, wireproto
12 import os, urllib, urllib2, zlib, httplib
12 import os, urllib, urllib2, zlib, httplib
13 import errno, socket
13 import errno, socket
14
14
15 def zgenerator(f):
15 def zgenerator(f):
16 zd = zlib.decompressobj()
16 zd = zlib.decompressobj()
17 try:
17 try:
18 for chunk in util.filechunkiter(f):
18 for chunk in util.filechunkiter(f):
19 while chunk:
19 while chunk:
20 yield zd.decompress(chunk, 2**18)
20 yield zd.decompress(chunk, 2**18)
21 chunk = zd.unconsumed_tail
21 chunk = zd.unconsumed_tail
22 except httplib.HTTPException:
22 except httplib.HTTPException:
23 raise IOError(None, _('connection ended unexpectedly'))
23 raise IOError(None, _('connection ended unexpectedly'))
24 yield zd.flush()
24 yield zd.flush()
25
25
26 class httprepository(wireproto.wirerepository):
26 class httprepository(wireproto.wirerepository):
27 def __init__(self, ui, path):
27 def __init__(self, ui, path):
28 self.path = path
28 self.path = path
29 self.caps = None
29 self.caps = None
30 self.handler = None
30 self.handler = None
31 u = util.url(path)
31 u = util.url(path)
32 if u.query or u.fragment:
32 if u.query or u.fragment:
33 raise util.Abort(_('unsupported URL component: "%s"') %
33 raise util.Abort(_('unsupported URL component: "%s"') %
34 (u.query or u.fragment))
34 (u.query or u.fragment))
35
35
36 # urllib cannot handle URLs with embedded user or passwd
36 # urllib cannot handle URLs with embedded user or passwd
37 self._url, authinfo = u.authinfo()
37 self._url, authinfo = u.authinfo()
38
38
39 self.ui = ui
39 self.ui = ui
40 self.ui.debug('using %s\n' % self._url)
40 self.ui.debug('using %s\n' % self._url)
41
41
42 self.urlopener = url.opener(ui, authinfo)
42 self.urlopener = url.opener(ui, authinfo)
43
43
44 def __del__(self):
44 def __del__(self):
45 for h in self.urlopener.handlers:
45 for h in self.urlopener.handlers:
46 h.close()
46 h.close()
47 getattr(h, "close_all", lambda : None)()
47 getattr(h, "close_all", lambda : None)()
48
48
49 def url(self):
49 def url(self):
50 return self.path
50 return self.path
51
51
52 # look up capabilities only when needed
52 # look up capabilities only when needed
53
53
54 def _fetchcaps(self):
54 def _fetchcaps(self):
55 self.caps = set(self._call('capabilities').split())
55 self.caps = set(self._call('capabilities').split())
56
56
57 def get_caps(self):
57 def get_caps(self):
58 if self.caps is None:
58 if self.caps is None:
59 try:
59 try:
60 self._fetchcaps()
60 self._fetchcaps()
61 except error.RepoError:
61 except error.RepoError:
62 self.caps = set()
62 self.caps = set()
63 self.ui.debug('capabilities: %s\n' %
63 self.ui.debug('capabilities: %s\n' %
64 (' '.join(self.caps or ['none'])))
64 (' '.join(self.caps or ['none'])))
65 return self.caps
65 return self.caps
66
66
67 capabilities = property(get_caps)
67 capabilities = property(get_caps)
68
68
69 def lock(self):
69 def lock(self):
70 raise util.Abort(_('operation not supported over http'))
70 raise util.Abort(_('operation not supported over http'))
71
71
72 def _callstream(self, cmd, **args):
72 def _callstream(self, cmd, **args):
73 if cmd == 'pushkey':
73 if cmd == 'pushkey':
74 args['data'] = ''
74 args['data'] = ''
75 data = args.pop('data', None)
75 data = args.pop('data', None)
76 size = 0
77 if util.safehasattr(data, 'length'):
78 size = data.length
79 elif data is not None:
80 size = len(data)
76 headers = args.pop('headers', {})
81 headers = args.pop('headers', {})
77
82
78 if data and self.ui.configbool('ui', 'usehttp2', False):
83 if size and self.ui.configbool('ui', 'usehttp2', False):
79 headers['Expect'] = '100-Continue'
84 headers['Expect'] = '100-Continue'
80 headers['X-HgHttp2'] = '1'
85 headers['X-HgHttp2'] = '1'
81
86
82 self.ui.debug("sending %s command\n" % cmd)
87 self.ui.debug("sending %s command\n" % cmd)
83 q = [('cmd', cmd)]
88 q = [('cmd', cmd)]
84 headersize = 0
89 headersize = 0
85 if len(args) > 0:
90 if len(args) > 0:
86 httpheader = self.capable('httpheader')
91 httpheader = self.capable('httpheader')
87 if httpheader:
92 if httpheader:
88 headersize = int(httpheader.split(',')[0])
93 headersize = int(httpheader.split(',')[0])
89 if headersize > 0:
94 if headersize > 0:
90 # The headers can typically carry more data than the URL.
95 # The headers can typically carry more data than the URL.
91 encargs = urllib.urlencode(sorted(args.items()))
96 encargs = urllib.urlencode(sorted(args.items()))
92 headerfmt = 'X-HgArg-%s'
97 headerfmt = 'X-HgArg-%s'
93 contentlen = headersize - len(headerfmt % '000' + ': \r\n')
98 contentlen = headersize - len(headerfmt % '000' + ': \r\n')
94 headernum = 0
99 headernum = 0
95 for i in xrange(0, len(encargs), contentlen):
100 for i in xrange(0, len(encargs), contentlen):
96 headernum += 1
101 headernum += 1
97 header = headerfmt % str(headernum)
102 header = headerfmt % str(headernum)
98 headers[header] = encargs[i:i + contentlen]
103 headers[header] = encargs[i:i + contentlen]
99 varyheaders = [headerfmt % str(h) for h in range(1, headernum + 1)]
104 varyheaders = [headerfmt % str(h) for h in range(1, headernum + 1)]
100 headers['Vary'] = ','.join(varyheaders)
105 headers['Vary'] = ','.join(varyheaders)
101 else:
106 else:
102 q += sorted(args.items())
107 q += sorted(args.items())
103 qs = '?%s' % urllib.urlencode(q)
108 qs = '?%s' % urllib.urlencode(q)
104 cu = "%s%s" % (self._url, qs)
109 cu = "%s%s" % (self._url, qs)
105 req = urllib2.Request(cu, data, headers)
110 req = urllib2.Request(cu, data, headers)
106 if data is not None:
111 if data is not None:
107 # len(data) is broken if data doesn't fit into Py_ssize_t
108 # add the header ourself to avoid OverflowError
109 size = data.__len__()
110 self.ui.debug("sending %s bytes\n" % size)
112 self.ui.debug("sending %s bytes\n" % size)
111 req.add_unredirected_header('Content-Length', '%d' % size)
113 req.add_unredirected_header('Content-Length', '%d' % size)
112 try:
114 try:
113 resp = self.urlopener.open(req)
115 resp = self.urlopener.open(req)
114 except urllib2.HTTPError, inst:
116 except urllib2.HTTPError, inst:
115 if inst.code == 401:
117 if inst.code == 401:
116 raise util.Abort(_('authorization failed'))
118 raise util.Abort(_('authorization failed'))
117 raise
119 raise
118 except httplib.HTTPException, inst:
120 except httplib.HTTPException, inst:
119 self.ui.debug('http error while sending %s command\n' % cmd)
121 self.ui.debug('http error while sending %s command\n' % cmd)
120 self.ui.traceback()
122 self.ui.traceback()
121 raise IOError(None, inst)
123 raise IOError(None, inst)
122 except IndexError:
124 except IndexError:
123 # this only happens with Python 2.3, later versions raise URLError
125 # this only happens with Python 2.3, later versions raise URLError
124 raise util.Abort(_('http error, possibly caused by proxy setting'))
126 raise util.Abort(_('http error, possibly caused by proxy setting'))
125 # record the url we got redirected to
127 # record the url we got redirected to
126 resp_url = resp.geturl()
128 resp_url = resp.geturl()
127 if resp_url.endswith(qs):
129 if resp_url.endswith(qs):
128 resp_url = resp_url[:-len(qs)]
130 resp_url = resp_url[:-len(qs)]
129 if self._url.rstrip('/') != resp_url.rstrip('/'):
131 if self._url.rstrip('/') != resp_url.rstrip('/'):
130 if not self.ui.quiet:
132 if not self.ui.quiet:
131 self.ui.warn(_('real URL is %s\n') % resp_url)
133 self.ui.warn(_('real URL is %s\n') % resp_url)
132 self._url = resp_url
134 self._url = resp_url
133 try:
135 try:
134 proto = resp.getheader('content-type')
136 proto = resp.getheader('content-type')
135 except AttributeError:
137 except AttributeError:
136 proto = resp.headers.get('content-type', '')
138 proto = resp.headers.get('content-type', '')
137
139
138 safeurl = util.hidepassword(self._url)
140 safeurl = util.hidepassword(self._url)
139 if proto.startswith('application/hg-error'):
141 if proto.startswith('application/hg-error'):
140 raise error.OutOfBandError(resp.read())
142 raise error.OutOfBandError(resp.read())
141 # accept old "text/plain" and "application/hg-changegroup" for now
143 # accept old "text/plain" and "application/hg-changegroup" for now
142 if not (proto.startswith('application/mercurial-') or
144 if not (proto.startswith('application/mercurial-') or
143 proto.startswith('text/plain') or
145 proto.startswith('text/plain') or
144 proto.startswith('application/hg-changegroup')):
146 proto.startswith('application/hg-changegroup')):
145 self.ui.debug("requested URL: '%s'\n" % util.hidepassword(cu))
147 self.ui.debug("requested URL: '%s'\n" % util.hidepassword(cu))
146 raise error.RepoError(
148 raise error.RepoError(
147 _("'%s' does not appear to be an hg repository:\n"
149 _("'%s' does not appear to be an hg repository:\n"
148 "---%%<--- (%s)\n%s\n---%%<---\n")
150 "---%%<--- (%s)\n%s\n---%%<---\n")
149 % (safeurl, proto or 'no content-type', resp.read()))
151 % (safeurl, proto or 'no content-type', resp.read()))
150
152
151 if proto.startswith('application/mercurial-'):
153 if proto.startswith('application/mercurial-'):
152 try:
154 try:
153 version = proto.split('-', 1)[1]
155 version = proto.split('-', 1)[1]
154 version_info = tuple([int(n) for n in version.split('.')])
156 version_info = tuple([int(n) for n in version.split('.')])
155 except ValueError:
157 except ValueError:
156 raise error.RepoError(_("'%s' sent a broken Content-Type "
158 raise error.RepoError(_("'%s' sent a broken Content-Type "
157 "header (%s)") % (safeurl, proto))
159 "header (%s)") % (safeurl, proto))
158 if version_info > (0, 1):
160 if version_info > (0, 1):
159 raise error.RepoError(_("'%s' uses newer protocol %s") %
161 raise error.RepoError(_("'%s' uses newer protocol %s") %
160 (safeurl, version))
162 (safeurl, version))
161
163
162 return resp
164 return resp
163
165
164 def _call(self, cmd, **args):
166 def _call(self, cmd, **args):
165 fp = self._callstream(cmd, **args)
167 fp = self._callstream(cmd, **args)
166 try:
168 try:
167 return fp.read()
169 return fp.read()
168 finally:
170 finally:
169 # if using keepalive, allow connection to be reused
171 # if using keepalive, allow connection to be reused
170 fp.close()
172 fp.close()
171
173
172 def _callpush(self, cmd, cg, **args):
174 def _callpush(self, cmd, cg, **args):
173 # have to stream bundle to a temp file because we do not have
175 # have to stream bundle to a temp file because we do not have
174 # http 1.1 chunked transfer.
176 # http 1.1 chunked transfer.
175
177
176 types = self.capable('unbundle')
178 types = self.capable('unbundle')
177 try:
179 try:
178 types = types.split(',')
180 types = types.split(',')
179 except AttributeError:
181 except AttributeError:
180 # servers older than d1b16a746db6 will send 'unbundle' as a
182 # servers older than d1b16a746db6 will send 'unbundle' as a
181 # boolean capability. They only support headerless/uncompressed
183 # boolean capability. They only support headerless/uncompressed
182 # bundles.
184 # bundles.
183 types = [""]
185 types = [""]
184 for x in types:
186 for x in types:
185 if x in changegroup.bundletypes:
187 if x in changegroup.bundletypes:
186 type = x
188 type = x
187 break
189 break
188
190
189 tempname = changegroup.writebundle(cg, None, type)
191 tempname = changegroup.writebundle(cg, None, type)
190 fp = httpconnection.httpsendfile(self.ui, tempname, "rb")
192 fp = httpconnection.httpsendfile(self.ui, tempname, "rb")
191 headers = {'Content-Type': 'application/mercurial-0.1'}
193 headers = {'Content-Type': 'application/mercurial-0.1'}
192
194
193 try:
195 try:
194 try:
196 try:
195 r = self._call(cmd, data=fp, headers=headers, **args)
197 r = self._call(cmd, data=fp, headers=headers, **args)
196 vals = r.split('\n', 1)
198 vals = r.split('\n', 1)
197 if len(vals) < 2:
199 if len(vals) < 2:
198 raise error.ResponseError(_("unexpected response:"), r)
200 raise error.ResponseError(_("unexpected response:"), r)
199 return vals
201 return vals
200 except socket.error, err:
202 except socket.error, err:
201 if err.args[0] in (errno.ECONNRESET, errno.EPIPE):
203 if err.args[0] in (errno.ECONNRESET, errno.EPIPE):
202 raise util.Abort(_('push failed: %s') % err.args[1])
204 raise util.Abort(_('push failed: %s') % err.args[1])
203 raise util.Abort(err.args[1])
205 raise util.Abort(err.args[1])
204 finally:
206 finally:
205 fp.close()
207 fp.close()
206 os.unlink(tempname)
208 os.unlink(tempname)
207
209
208 def _abort(self, exception):
210 def _abort(self, exception):
209 raise exception
211 raise exception
210
212
211 def _decompress(self, stream):
213 def _decompress(self, stream):
212 return util.chunkbuffer(zgenerator(stream))
214 return util.chunkbuffer(zgenerator(stream))
213
215
214 class httpsrepository(httprepository):
216 class httpsrepository(httprepository):
215 def __init__(self, ui, path):
217 def __init__(self, ui, path):
216 if not url.has_https:
218 if not url.has_https:
217 raise util.Abort(_('Python support for SSL and HTTPS '
219 raise util.Abort(_('Python support for SSL and HTTPS '
218 'is not installed'))
220 'is not installed'))
219 httprepository.__init__(self, ui, path)
221 httprepository.__init__(self, ui, path)
220
222
221 def instance(ui, path, create):
223 def instance(ui, path, create):
222 if create:
224 if create:
223 raise util.Abort(_('cannot create new http repository'))
225 raise util.Abort(_('cannot create new http repository'))
224 try:
226 try:
225 if path.startswith('https:'):
227 if path.startswith('https:'):
226 inst = httpsrepository(ui, path)
228 inst = httpsrepository(ui, path)
227 else:
229 else:
228 inst = httprepository(ui, path)
230 inst = httprepository(ui, path)
229 try:
231 try:
230 # Try to do useful work when checking compatibility.
232 # Try to do useful work when checking compatibility.
231 # Usually saves a roundtrip since we want the caps anyway.
233 # Usually saves a roundtrip since we want the caps anyway.
232 inst._fetchcaps()
234 inst._fetchcaps()
233 except error.RepoError:
235 except error.RepoError:
234 # No luck, try older compatibility check.
236 # No luck, try older compatibility check.
235 inst.between([(nullid, nullid)])
237 inst.between([(nullid, nullid)])
236 return inst
238 return inst
237 except error.RepoError, httpexception:
239 except error.RepoError, httpexception:
238 try:
240 try:
239 r = statichttprepo.instance(ui, "static-" + path, create)
241 r = statichttprepo.instance(ui, "static-" + path, create)
240 ui.note('(falling back to static-http)\n')
242 ui.note('(falling back to static-http)\n')
241 return r
243 return r
242 except error.RepoError:
244 except error.RepoError:
243 raise httpexception # use the original http RepoError instead
245 raise httpexception # use the original http RepoError instead
@@ -1,1859 +1,1859 b''
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 import cStringIO, email.Parser, os, errno, re
9 import cStringIO, email.Parser, os, errno, re
10 import tempfile, zlib, shutil
10 import tempfile, zlib, shutil
11
11
12 from i18n import _
12 from i18n import _
13 from node import hex, nullid, short
13 from node import hex, nullid, short
14 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
14 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
15 import context
15 import context
16
16
17 gitre = re.compile('diff --git a/(.*) b/(.*)')
17 gitre = re.compile('diff --git a/(.*) b/(.*)')
18
18
19 class PatchError(Exception):
19 class PatchError(Exception):
20 pass
20 pass
21
21
22
22
23 # public functions
23 # public functions
24
24
25 def split(stream):
25 def split(stream):
26 '''return an iterator of individual patches from a stream'''
26 '''return an iterator of individual patches from a stream'''
27 def isheader(line, inheader):
27 def isheader(line, inheader):
28 if inheader and line[0] in (' ', '\t'):
28 if inheader and line[0] in (' ', '\t'):
29 # continuation
29 # continuation
30 return True
30 return True
31 if line[0] in (' ', '-', '+'):
31 if line[0] in (' ', '-', '+'):
32 # diff line - don't check for header pattern in there
32 # diff line - don't check for header pattern in there
33 return False
33 return False
34 l = line.split(': ', 1)
34 l = line.split(': ', 1)
35 return len(l) == 2 and ' ' not in l[0]
35 return len(l) == 2 and ' ' not in l[0]
36
36
37 def chunk(lines):
37 def chunk(lines):
38 return cStringIO.StringIO(''.join(lines))
38 return cStringIO.StringIO(''.join(lines))
39
39
40 def hgsplit(stream, cur):
40 def hgsplit(stream, cur):
41 inheader = True
41 inheader = True
42
42
43 for line in stream:
43 for line in stream:
44 if not line.strip():
44 if not line.strip():
45 inheader = False
45 inheader = False
46 if not inheader and line.startswith('# HG changeset patch'):
46 if not inheader and line.startswith('# HG changeset patch'):
47 yield chunk(cur)
47 yield chunk(cur)
48 cur = []
48 cur = []
49 inheader = True
49 inheader = True
50
50
51 cur.append(line)
51 cur.append(line)
52
52
53 if cur:
53 if cur:
54 yield chunk(cur)
54 yield chunk(cur)
55
55
56 def mboxsplit(stream, cur):
56 def mboxsplit(stream, cur):
57 for line in stream:
57 for line in stream:
58 if line.startswith('From '):
58 if line.startswith('From '):
59 for c in split(chunk(cur[1:])):
59 for c in split(chunk(cur[1:])):
60 yield c
60 yield c
61 cur = []
61 cur = []
62
62
63 cur.append(line)
63 cur.append(line)
64
64
65 if cur:
65 if cur:
66 for c in split(chunk(cur[1:])):
66 for c in split(chunk(cur[1:])):
67 yield c
67 yield c
68
68
69 def mimesplit(stream, cur):
69 def mimesplit(stream, cur):
70 def msgfp(m):
70 def msgfp(m):
71 fp = cStringIO.StringIO()
71 fp = cStringIO.StringIO()
72 g = email.Generator.Generator(fp, mangle_from_=False)
72 g = email.Generator.Generator(fp, mangle_from_=False)
73 g.flatten(m)
73 g.flatten(m)
74 fp.seek(0)
74 fp.seek(0)
75 return fp
75 return fp
76
76
77 for line in stream:
77 for line in stream:
78 cur.append(line)
78 cur.append(line)
79 c = chunk(cur)
79 c = chunk(cur)
80
80
81 m = email.Parser.Parser().parse(c)
81 m = email.Parser.Parser().parse(c)
82 if not m.is_multipart():
82 if not m.is_multipart():
83 yield msgfp(m)
83 yield msgfp(m)
84 else:
84 else:
85 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
85 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
86 for part in m.walk():
86 for part in m.walk():
87 ct = part.get_content_type()
87 ct = part.get_content_type()
88 if ct not in ok_types:
88 if ct not in ok_types:
89 continue
89 continue
90 yield msgfp(part)
90 yield msgfp(part)
91
91
92 def headersplit(stream, cur):
92 def headersplit(stream, cur):
93 inheader = False
93 inheader = False
94
94
95 for line in stream:
95 for line in stream:
96 if not inheader and isheader(line, inheader):
96 if not inheader and isheader(line, inheader):
97 yield chunk(cur)
97 yield chunk(cur)
98 cur = []
98 cur = []
99 inheader = True
99 inheader = True
100 if inheader and not isheader(line, inheader):
100 if inheader and not isheader(line, inheader):
101 inheader = False
101 inheader = False
102
102
103 cur.append(line)
103 cur.append(line)
104
104
105 if cur:
105 if cur:
106 yield chunk(cur)
106 yield chunk(cur)
107
107
108 def remainder(cur):
108 def remainder(cur):
109 yield chunk(cur)
109 yield chunk(cur)
110
110
111 class fiter(object):
111 class fiter(object):
112 def __init__(self, fp):
112 def __init__(self, fp):
113 self.fp = fp
113 self.fp = fp
114
114
115 def __iter__(self):
115 def __iter__(self):
116 return self
116 return self
117
117
118 def next(self):
118 def next(self):
119 l = self.fp.readline()
119 l = self.fp.readline()
120 if not l:
120 if not l:
121 raise StopIteration
121 raise StopIteration
122 return l
122 return l
123
123
124 inheader = False
124 inheader = False
125 cur = []
125 cur = []
126
126
127 mimeheaders = ['content-type']
127 mimeheaders = ['content-type']
128
128
129 if not util.safehasattr(stream, 'next'):
129 if not util.safehasattr(stream, 'next'):
130 # http responses, for example, have readline but not next
130 # http responses, for example, have readline but not next
131 stream = fiter(stream)
131 stream = fiter(stream)
132
132
133 for line in stream:
133 for line in stream:
134 cur.append(line)
134 cur.append(line)
135 if line.startswith('# HG changeset patch'):
135 if line.startswith('# HG changeset patch'):
136 return hgsplit(stream, cur)
136 return hgsplit(stream, cur)
137 elif line.startswith('From '):
137 elif line.startswith('From '):
138 return mboxsplit(stream, cur)
138 return mboxsplit(stream, cur)
139 elif isheader(line, inheader):
139 elif isheader(line, inheader):
140 inheader = True
140 inheader = True
141 if line.split(':', 1)[0].lower() in mimeheaders:
141 if line.split(':', 1)[0].lower() in mimeheaders:
142 # let email parser handle this
142 # let email parser handle this
143 return mimesplit(stream, cur)
143 return mimesplit(stream, cur)
144 elif line.startswith('--- ') and inheader:
144 elif line.startswith('--- ') and inheader:
145 # No evil headers seen by diff start, split by hand
145 # No evil headers seen by diff start, split by hand
146 return headersplit(stream, cur)
146 return headersplit(stream, cur)
147 # Not enough info, keep reading
147 # Not enough info, keep reading
148
148
149 # if we are here, we have a very plain patch
149 # if we are here, we have a very plain patch
150 return remainder(cur)
150 return remainder(cur)
151
151
152 def extract(ui, fileobj):
152 def extract(ui, fileobj):
153 '''extract patch from data read from fileobj.
153 '''extract patch from data read from fileobj.
154
154
155 patch can be a normal patch or contained in an email message.
155 patch can be a normal patch or contained in an email message.
156
156
157 return tuple (filename, message, user, date, branch, node, p1, p2).
157 return tuple (filename, message, user, date, branch, node, p1, p2).
158 Any item in the returned tuple can be None. If filename is None,
158 Any item in the returned tuple can be None. If filename is None,
159 fileobj did not contain a patch. Caller must unlink filename when done.'''
159 fileobj did not contain a patch. Caller must unlink filename when done.'''
160
160
161 # attempt to detect the start of a patch
161 # attempt to detect the start of a patch
162 # (this heuristic is borrowed from quilt)
162 # (this heuristic is borrowed from quilt)
163 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
163 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
164 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
164 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
165 r'---[ \t].*?^\+\+\+[ \t]|'
165 r'---[ \t].*?^\+\+\+[ \t]|'
166 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
166 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
167
167
168 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
168 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
169 tmpfp = os.fdopen(fd, 'w')
169 tmpfp = os.fdopen(fd, 'w')
170 try:
170 try:
171 msg = email.Parser.Parser().parse(fileobj)
171 msg = email.Parser.Parser().parse(fileobj)
172
172
173 subject = msg['Subject']
173 subject = msg['Subject']
174 user = msg['From']
174 user = msg['From']
175 if not subject and not user:
175 if not subject and not user:
176 # Not an email, restore parsed headers if any
176 # Not an email, restore parsed headers if any
177 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
177 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
178
178
179 gitsendmail = 'git-send-email' in msg.get('X-Mailer', '')
179 gitsendmail = 'git-send-email' in msg.get('X-Mailer', '')
180 # should try to parse msg['Date']
180 # should try to parse msg['Date']
181 date = None
181 date = None
182 nodeid = None
182 nodeid = None
183 branch = None
183 branch = None
184 parents = []
184 parents = []
185
185
186 if subject:
186 if subject:
187 if subject.startswith('[PATCH'):
187 if subject.startswith('[PATCH'):
188 pend = subject.find(']')
188 pend = subject.find(']')
189 if pend >= 0:
189 if pend >= 0:
190 subject = subject[pend + 1:].lstrip()
190 subject = subject[pend + 1:].lstrip()
191 subject = subject.replace('\n\t', ' ')
191 subject = re.sub(r'\n[ \t]+', ' ', subject)
192 ui.debug('Subject: %s\n' % subject)
192 ui.debug('Subject: %s\n' % subject)
193 if user:
193 if user:
194 ui.debug('From: %s\n' % user)
194 ui.debug('From: %s\n' % user)
195 diffs_seen = 0
195 diffs_seen = 0
196 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
196 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
197 message = ''
197 message = ''
198 for part in msg.walk():
198 for part in msg.walk():
199 content_type = part.get_content_type()
199 content_type = part.get_content_type()
200 ui.debug('Content-Type: %s\n' % content_type)
200 ui.debug('Content-Type: %s\n' % content_type)
201 if content_type not in ok_types:
201 if content_type not in ok_types:
202 continue
202 continue
203 payload = part.get_payload(decode=True)
203 payload = part.get_payload(decode=True)
204 m = diffre.search(payload)
204 m = diffre.search(payload)
205 if m:
205 if m:
206 hgpatch = False
206 hgpatch = False
207 hgpatchheader = False
207 hgpatchheader = False
208 ignoretext = False
208 ignoretext = False
209
209
210 ui.debug('found patch at byte %d\n' % m.start(0))
210 ui.debug('found patch at byte %d\n' % m.start(0))
211 diffs_seen += 1
211 diffs_seen += 1
212 cfp = cStringIO.StringIO()
212 cfp = cStringIO.StringIO()
213 for line in payload[:m.start(0)].splitlines():
213 for line in payload[:m.start(0)].splitlines():
214 if line.startswith('# HG changeset patch') and not hgpatch:
214 if line.startswith('# HG changeset patch') and not hgpatch:
215 ui.debug('patch generated by hg export\n')
215 ui.debug('patch generated by hg export\n')
216 hgpatch = True
216 hgpatch = True
217 hgpatchheader = True
217 hgpatchheader = True
218 # drop earlier commit message content
218 # drop earlier commit message content
219 cfp.seek(0)
219 cfp.seek(0)
220 cfp.truncate()
220 cfp.truncate()
221 subject = None
221 subject = None
222 elif hgpatchheader:
222 elif hgpatchheader:
223 if line.startswith('# User '):
223 if line.startswith('# User '):
224 user = line[7:]
224 user = line[7:]
225 ui.debug('From: %s\n' % user)
225 ui.debug('From: %s\n' % user)
226 elif line.startswith("# Date "):
226 elif line.startswith("# Date "):
227 date = line[7:]
227 date = line[7:]
228 elif line.startswith("# Branch "):
228 elif line.startswith("# Branch "):
229 branch = line[9:]
229 branch = line[9:]
230 elif line.startswith("# Node ID "):
230 elif line.startswith("# Node ID "):
231 nodeid = line[10:]
231 nodeid = line[10:]
232 elif line.startswith("# Parent "):
232 elif line.startswith("# Parent "):
233 parents.append(line[10:])
233 parents.append(line[10:])
234 elif not line.startswith("# "):
234 elif not line.startswith("# "):
235 hgpatchheader = False
235 hgpatchheader = False
236 elif line == '---' and gitsendmail:
236 elif line == '---' and gitsendmail:
237 ignoretext = True
237 ignoretext = True
238 if not hgpatchheader and not ignoretext:
238 if not hgpatchheader and not ignoretext:
239 cfp.write(line)
239 cfp.write(line)
240 cfp.write('\n')
240 cfp.write('\n')
241 message = cfp.getvalue()
241 message = cfp.getvalue()
242 if tmpfp:
242 if tmpfp:
243 tmpfp.write(payload)
243 tmpfp.write(payload)
244 if not payload.endswith('\n'):
244 if not payload.endswith('\n'):
245 tmpfp.write('\n')
245 tmpfp.write('\n')
246 elif not diffs_seen and message and content_type == 'text/plain':
246 elif not diffs_seen and message and content_type == 'text/plain':
247 message += '\n' + payload
247 message += '\n' + payload
248 except:
248 except:
249 tmpfp.close()
249 tmpfp.close()
250 os.unlink(tmpname)
250 os.unlink(tmpname)
251 raise
251 raise
252
252
253 if subject and not message.startswith(subject):
253 if subject and not message.startswith(subject):
254 message = '%s\n%s' % (subject, message)
254 message = '%s\n%s' % (subject, message)
255 tmpfp.close()
255 tmpfp.close()
256 if not diffs_seen:
256 if not diffs_seen:
257 os.unlink(tmpname)
257 os.unlink(tmpname)
258 return None, message, user, date, branch, None, None, None
258 return None, message, user, date, branch, None, None, None
259 p1 = parents and parents.pop(0) or None
259 p1 = parents and parents.pop(0) or None
260 p2 = parents and parents.pop(0) or None
260 p2 = parents and parents.pop(0) or None
261 return tmpname, message, user, date, branch, nodeid, p1, p2
261 return tmpname, message, user, date, branch, nodeid, p1, p2
262
262
263 class patchmeta(object):
263 class patchmeta(object):
264 """Patched file metadata
264 """Patched file metadata
265
265
266 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
266 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
267 or COPY. 'path' is patched file path. 'oldpath' is set to the
267 or COPY. 'path' is patched file path. 'oldpath' is set to the
268 origin file when 'op' is either COPY or RENAME, None otherwise. If
268 origin file when 'op' is either COPY or RENAME, None otherwise. If
269 file mode is changed, 'mode' is a tuple (islink, isexec) where
269 file mode is changed, 'mode' is a tuple (islink, isexec) where
270 'islink' is True if the file is a symlink and 'isexec' is True if
270 'islink' is True if the file is a symlink and 'isexec' is True if
271 the file is executable. Otherwise, 'mode' is None.
271 the file is executable. Otherwise, 'mode' is None.
272 """
272 """
273 def __init__(self, path):
273 def __init__(self, path):
274 self.path = path
274 self.path = path
275 self.oldpath = None
275 self.oldpath = None
276 self.mode = None
276 self.mode = None
277 self.op = 'MODIFY'
277 self.op = 'MODIFY'
278 self.binary = False
278 self.binary = False
279
279
280 def setmode(self, mode):
280 def setmode(self, mode):
281 islink = mode & 020000
281 islink = mode & 020000
282 isexec = mode & 0100
282 isexec = mode & 0100
283 self.mode = (islink, isexec)
283 self.mode = (islink, isexec)
284
284
285 def copy(self):
285 def copy(self):
286 other = patchmeta(self.path)
286 other = patchmeta(self.path)
287 other.oldpath = self.oldpath
287 other.oldpath = self.oldpath
288 other.mode = self.mode
288 other.mode = self.mode
289 other.op = self.op
289 other.op = self.op
290 other.binary = self.binary
290 other.binary = self.binary
291 return other
291 return other
292
292
293 def __repr__(self):
293 def __repr__(self):
294 return "<patchmeta %s %r>" % (self.op, self.path)
294 return "<patchmeta %s %r>" % (self.op, self.path)
295
295
296 def readgitpatch(lr):
296 def readgitpatch(lr):
297 """extract git-style metadata about patches from <patchname>"""
297 """extract git-style metadata about patches from <patchname>"""
298
298
299 # Filter patch for git information
299 # Filter patch for git information
300 gp = None
300 gp = None
301 gitpatches = []
301 gitpatches = []
302 for line in lr:
302 for line in lr:
303 line = line.rstrip(' \r\n')
303 line = line.rstrip(' \r\n')
304 if line.startswith('diff --git'):
304 if line.startswith('diff --git'):
305 m = gitre.match(line)
305 m = gitre.match(line)
306 if m:
306 if m:
307 if gp:
307 if gp:
308 gitpatches.append(gp)
308 gitpatches.append(gp)
309 dst = m.group(2)
309 dst = m.group(2)
310 gp = patchmeta(dst)
310 gp = patchmeta(dst)
311 elif gp:
311 elif gp:
312 if line.startswith('--- '):
312 if line.startswith('--- '):
313 gitpatches.append(gp)
313 gitpatches.append(gp)
314 gp = None
314 gp = None
315 continue
315 continue
316 if line.startswith('rename from '):
316 if line.startswith('rename from '):
317 gp.op = 'RENAME'
317 gp.op = 'RENAME'
318 gp.oldpath = line[12:]
318 gp.oldpath = line[12:]
319 elif line.startswith('rename to '):
319 elif line.startswith('rename to '):
320 gp.path = line[10:]
320 gp.path = line[10:]
321 elif line.startswith('copy from '):
321 elif line.startswith('copy from '):
322 gp.op = 'COPY'
322 gp.op = 'COPY'
323 gp.oldpath = line[10:]
323 gp.oldpath = line[10:]
324 elif line.startswith('copy to '):
324 elif line.startswith('copy to '):
325 gp.path = line[8:]
325 gp.path = line[8:]
326 elif line.startswith('deleted file'):
326 elif line.startswith('deleted file'):
327 gp.op = 'DELETE'
327 gp.op = 'DELETE'
328 elif line.startswith('new file mode '):
328 elif line.startswith('new file mode '):
329 gp.op = 'ADD'
329 gp.op = 'ADD'
330 gp.setmode(int(line[-6:], 8))
330 gp.setmode(int(line[-6:], 8))
331 elif line.startswith('new mode '):
331 elif line.startswith('new mode '):
332 gp.setmode(int(line[-6:], 8))
332 gp.setmode(int(line[-6:], 8))
333 elif line.startswith('GIT binary patch'):
333 elif line.startswith('GIT binary patch'):
334 gp.binary = True
334 gp.binary = True
335 if gp:
335 if gp:
336 gitpatches.append(gp)
336 gitpatches.append(gp)
337
337
338 return gitpatches
338 return gitpatches
339
339
340 class linereader(object):
340 class linereader(object):
341 # simple class to allow pushing lines back into the input stream
341 # simple class to allow pushing lines back into the input stream
342 def __init__(self, fp):
342 def __init__(self, fp):
343 self.fp = fp
343 self.fp = fp
344 self.buf = []
344 self.buf = []
345
345
346 def push(self, line):
346 def push(self, line):
347 if line is not None:
347 if line is not None:
348 self.buf.append(line)
348 self.buf.append(line)
349
349
350 def readline(self):
350 def readline(self):
351 if self.buf:
351 if self.buf:
352 l = self.buf[0]
352 l = self.buf[0]
353 del self.buf[0]
353 del self.buf[0]
354 return l
354 return l
355 return self.fp.readline()
355 return self.fp.readline()
356
356
357 def __iter__(self):
357 def __iter__(self):
358 while True:
358 while True:
359 l = self.readline()
359 l = self.readline()
360 if not l:
360 if not l:
361 break
361 break
362 yield l
362 yield l
363
363
364 class abstractbackend(object):
364 class abstractbackend(object):
365 def __init__(self, ui):
365 def __init__(self, ui):
366 self.ui = ui
366 self.ui = ui
367
367
368 def getfile(self, fname):
368 def getfile(self, fname):
369 """Return target file data and flags as a (data, (islink,
369 """Return target file data and flags as a (data, (islink,
370 isexec)) tuple.
370 isexec)) tuple.
371 """
371 """
372 raise NotImplementedError
372 raise NotImplementedError
373
373
374 def setfile(self, fname, data, mode, copysource):
374 def setfile(self, fname, data, mode, copysource):
375 """Write data to target file fname and set its mode. mode is a
375 """Write data to target file fname and set its mode. mode is a
376 (islink, isexec) tuple. If data is None, the file content should
376 (islink, isexec) tuple. If data is None, the file content should
377 be left unchanged. If the file is modified after being copied,
377 be left unchanged. If the file is modified after being copied,
378 copysource is set to the original file name.
378 copysource is set to the original file name.
379 """
379 """
380 raise NotImplementedError
380 raise NotImplementedError
381
381
382 def unlink(self, fname):
382 def unlink(self, fname):
383 """Unlink target file."""
383 """Unlink target file."""
384 raise NotImplementedError
384 raise NotImplementedError
385
385
386 def writerej(self, fname, failed, total, lines):
386 def writerej(self, fname, failed, total, lines):
387 """Write rejected lines for fname. total is the number of hunks
387 """Write rejected lines for fname. total is the number of hunks
388 which failed to apply and total the total number of hunks for this
388 which failed to apply and total the total number of hunks for this
389 files.
389 files.
390 """
390 """
391 pass
391 pass
392
392
393 def exists(self, fname):
393 def exists(self, fname):
394 raise NotImplementedError
394 raise NotImplementedError
395
395
396 class fsbackend(abstractbackend):
396 class fsbackend(abstractbackend):
397 def __init__(self, ui, basedir):
397 def __init__(self, ui, basedir):
398 super(fsbackend, self).__init__(ui)
398 super(fsbackend, self).__init__(ui)
399 self.opener = scmutil.opener(basedir)
399 self.opener = scmutil.opener(basedir)
400
400
401 def _join(self, f):
401 def _join(self, f):
402 return os.path.join(self.opener.base, f)
402 return os.path.join(self.opener.base, f)
403
403
404 def getfile(self, fname):
404 def getfile(self, fname):
405 path = self._join(fname)
405 path = self._join(fname)
406 if os.path.islink(path):
406 if os.path.islink(path):
407 return (os.readlink(path), (True, False))
407 return (os.readlink(path), (True, False))
408 isexec = False
408 isexec = False
409 try:
409 try:
410 isexec = os.lstat(path).st_mode & 0100 != 0
410 isexec = os.lstat(path).st_mode & 0100 != 0
411 except OSError, e:
411 except OSError, e:
412 if e.errno != errno.ENOENT:
412 if e.errno != errno.ENOENT:
413 raise
413 raise
414 return (self.opener.read(fname), (False, isexec))
414 return (self.opener.read(fname), (False, isexec))
415
415
416 def setfile(self, fname, data, mode, copysource):
416 def setfile(self, fname, data, mode, copysource):
417 islink, isexec = mode
417 islink, isexec = mode
418 if data is None:
418 if data is None:
419 util.setflags(self._join(fname), islink, isexec)
419 util.setflags(self._join(fname), islink, isexec)
420 return
420 return
421 if islink:
421 if islink:
422 self.opener.symlink(data, fname)
422 self.opener.symlink(data, fname)
423 else:
423 else:
424 self.opener.write(fname, data)
424 self.opener.write(fname, data)
425 if isexec:
425 if isexec:
426 util.setflags(self._join(fname), False, True)
426 util.setflags(self._join(fname), False, True)
427
427
428 def unlink(self, fname):
428 def unlink(self, fname):
429 try:
429 try:
430 util.unlinkpath(self._join(fname))
430 util.unlinkpath(self._join(fname))
431 except OSError, inst:
431 except OSError, inst:
432 if inst.errno != errno.ENOENT:
432 if inst.errno != errno.ENOENT:
433 raise
433 raise
434
434
435 def writerej(self, fname, failed, total, lines):
435 def writerej(self, fname, failed, total, lines):
436 fname = fname + ".rej"
436 fname = fname + ".rej"
437 self.ui.warn(
437 self.ui.warn(
438 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
438 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
439 (failed, total, fname))
439 (failed, total, fname))
440 fp = self.opener(fname, 'w')
440 fp = self.opener(fname, 'w')
441 fp.writelines(lines)
441 fp.writelines(lines)
442 fp.close()
442 fp.close()
443
443
444 def exists(self, fname):
444 def exists(self, fname):
445 return os.path.lexists(self._join(fname))
445 return os.path.lexists(self._join(fname))
446
446
447 class workingbackend(fsbackend):
447 class workingbackend(fsbackend):
448 def __init__(self, ui, repo, similarity):
448 def __init__(self, ui, repo, similarity):
449 super(workingbackend, self).__init__(ui, repo.root)
449 super(workingbackend, self).__init__(ui, repo.root)
450 self.repo = repo
450 self.repo = repo
451 self.similarity = similarity
451 self.similarity = similarity
452 self.removed = set()
452 self.removed = set()
453 self.changed = set()
453 self.changed = set()
454 self.copied = []
454 self.copied = []
455
455
456 def _checkknown(self, fname):
456 def _checkknown(self, fname):
457 if self.repo.dirstate[fname] == '?' and self.exists(fname):
457 if self.repo.dirstate[fname] == '?' and self.exists(fname):
458 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
458 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
459
459
460 def setfile(self, fname, data, mode, copysource):
460 def setfile(self, fname, data, mode, copysource):
461 self._checkknown(fname)
461 self._checkknown(fname)
462 super(workingbackend, self).setfile(fname, data, mode, copysource)
462 super(workingbackend, self).setfile(fname, data, mode, copysource)
463 if copysource is not None:
463 if copysource is not None:
464 self.copied.append((copysource, fname))
464 self.copied.append((copysource, fname))
465 self.changed.add(fname)
465 self.changed.add(fname)
466
466
467 def unlink(self, fname):
467 def unlink(self, fname):
468 self._checkknown(fname)
468 self._checkknown(fname)
469 super(workingbackend, self).unlink(fname)
469 super(workingbackend, self).unlink(fname)
470 self.removed.add(fname)
470 self.removed.add(fname)
471 self.changed.add(fname)
471 self.changed.add(fname)
472
472
473 def close(self):
473 def close(self):
474 wctx = self.repo[None]
474 wctx = self.repo[None]
475 addremoved = set(self.changed)
475 addremoved = set(self.changed)
476 for src, dst in self.copied:
476 for src, dst in self.copied:
477 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
477 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
478 addremoved.discard(src)
478 addremoved.discard(src)
479 if (not self.similarity) and self.removed:
479 if (not self.similarity) and self.removed:
480 wctx.forget(sorted(self.removed))
480 wctx.forget(sorted(self.removed))
481 if addremoved:
481 if addremoved:
482 cwd = self.repo.getcwd()
482 cwd = self.repo.getcwd()
483 if cwd:
483 if cwd:
484 addremoved = [util.pathto(self.repo.root, cwd, f)
484 addremoved = [util.pathto(self.repo.root, cwd, f)
485 for f in addremoved]
485 for f in addremoved]
486 scmutil.addremove(self.repo, addremoved, similarity=self.similarity)
486 scmutil.addremove(self.repo, addremoved, similarity=self.similarity)
487 return sorted(self.changed)
487 return sorted(self.changed)
488
488
489 class filestore(object):
489 class filestore(object):
490 def __init__(self, maxsize=None):
490 def __init__(self, maxsize=None):
491 self.opener = None
491 self.opener = None
492 self.files = {}
492 self.files = {}
493 self.created = 0
493 self.created = 0
494 self.maxsize = maxsize
494 self.maxsize = maxsize
495 if self.maxsize is None:
495 if self.maxsize is None:
496 self.maxsize = 4*(2**20)
496 self.maxsize = 4*(2**20)
497 self.size = 0
497 self.size = 0
498 self.data = {}
498 self.data = {}
499
499
500 def setfile(self, fname, data, mode, copied=None):
500 def setfile(self, fname, data, mode, copied=None):
501 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
501 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
502 self.data[fname] = (data, mode, copied)
502 self.data[fname] = (data, mode, copied)
503 self.size += len(data)
503 self.size += len(data)
504 else:
504 else:
505 if self.opener is None:
505 if self.opener is None:
506 root = tempfile.mkdtemp(prefix='hg-patch-')
506 root = tempfile.mkdtemp(prefix='hg-patch-')
507 self.opener = scmutil.opener(root)
507 self.opener = scmutil.opener(root)
508 # Avoid filename issues with these simple names
508 # Avoid filename issues with these simple names
509 fn = str(self.created)
509 fn = str(self.created)
510 self.opener.write(fn, data)
510 self.opener.write(fn, data)
511 self.created += 1
511 self.created += 1
512 self.files[fname] = (fn, mode, copied)
512 self.files[fname] = (fn, mode, copied)
513
513
514 def getfile(self, fname):
514 def getfile(self, fname):
515 if fname in self.data:
515 if fname in self.data:
516 return self.data[fname]
516 return self.data[fname]
517 if not self.opener or fname not in self.files:
517 if not self.opener or fname not in self.files:
518 raise IOError()
518 raise IOError()
519 fn, mode, copied = self.files[fname]
519 fn, mode, copied = self.files[fname]
520 return self.opener.read(fn), mode, copied
520 return self.opener.read(fn), mode, copied
521
521
522 def close(self):
522 def close(self):
523 if self.opener:
523 if self.opener:
524 shutil.rmtree(self.opener.base)
524 shutil.rmtree(self.opener.base)
525
525
526 class repobackend(abstractbackend):
526 class repobackend(abstractbackend):
527 def __init__(self, ui, repo, ctx, store):
527 def __init__(self, ui, repo, ctx, store):
528 super(repobackend, self).__init__(ui)
528 super(repobackend, self).__init__(ui)
529 self.repo = repo
529 self.repo = repo
530 self.ctx = ctx
530 self.ctx = ctx
531 self.store = store
531 self.store = store
532 self.changed = set()
532 self.changed = set()
533 self.removed = set()
533 self.removed = set()
534 self.copied = {}
534 self.copied = {}
535
535
536 def _checkknown(self, fname):
536 def _checkknown(self, fname):
537 if fname not in self.ctx:
537 if fname not in self.ctx:
538 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
538 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
539
539
540 def getfile(self, fname):
540 def getfile(self, fname):
541 try:
541 try:
542 fctx = self.ctx[fname]
542 fctx = self.ctx[fname]
543 except error.LookupError:
543 except error.LookupError:
544 raise IOError()
544 raise IOError()
545 flags = fctx.flags()
545 flags = fctx.flags()
546 return fctx.data(), ('l' in flags, 'x' in flags)
546 return fctx.data(), ('l' in flags, 'x' in flags)
547
547
548 def setfile(self, fname, data, mode, copysource):
548 def setfile(self, fname, data, mode, copysource):
549 if copysource:
549 if copysource:
550 self._checkknown(copysource)
550 self._checkknown(copysource)
551 if data is None:
551 if data is None:
552 data = self.ctx[fname].data()
552 data = self.ctx[fname].data()
553 self.store.setfile(fname, data, mode, copysource)
553 self.store.setfile(fname, data, mode, copysource)
554 self.changed.add(fname)
554 self.changed.add(fname)
555 if copysource:
555 if copysource:
556 self.copied[fname] = copysource
556 self.copied[fname] = copysource
557
557
558 def unlink(self, fname):
558 def unlink(self, fname):
559 self._checkknown(fname)
559 self._checkknown(fname)
560 self.removed.add(fname)
560 self.removed.add(fname)
561
561
562 def exists(self, fname):
562 def exists(self, fname):
563 return fname in self.ctx
563 return fname in self.ctx
564
564
565 def close(self):
565 def close(self):
566 return self.changed | self.removed
566 return self.changed | self.removed
567
567
568 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
568 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
569 unidesc = re.compile('@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))? @@')
569 unidesc = re.compile('@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))? @@')
570 contextdesc = re.compile('(---|\*\*\*) (\d+)(,(\d+))? (---|\*\*\*)')
570 contextdesc = re.compile('(---|\*\*\*) (\d+)(,(\d+))? (---|\*\*\*)')
571 eolmodes = ['strict', 'crlf', 'lf', 'auto']
571 eolmodes = ['strict', 'crlf', 'lf', 'auto']
572
572
class patchfile(object):
    """Apply the hunks of one parsed patch to a single target file.

    The target content is read through 'backend'/'store' at construction
    time and buffered in self.lines; apply() patches the buffer hunk by
    hunk, tracking cumulative offset and skew and retrying with fuzz, and
    close() flushes the result through the backend and writes any rejects.
    """

    def __init__(self, ui, gp, backend, store, eolmode='strict'):
        self.fname = gp.path
        self.eolmode = eolmode
        self.eol = None
        self.backend = backend
        self.ui = ui
        self.lines = []
        self.exists = False
        self.missing = True
        self.mode = gp.mode
        self.copysource = gp.oldpath
        self.create = gp.op in ('ADD', 'COPY', 'RENAME')
        self.remove = gp.op == 'DELETE'
        try:
            if self.copysource is None:
                data, mode = backend.getfile(self.fname)
                self.exists = True
            else:
                # copies/renames read their content from the store
                data, mode = store.getfile(self.copysource)[:2]
                self.exists = backend.exists(self.fname)
            self.missing = False
            if data:
                self.lines = mdiff.splitnewlines(data)
            if self.mode is None:
                self.mode = mode
            if self.lines:
                # Normalize line endings
                if self.lines[0].endswith('\r\n'):
                    self.eol = '\r\n'
                elif self.lines[0].endswith('\n'):
                    self.eol = '\n'
                if eolmode != 'strict':
                    nlines = []
                    for l in self.lines:
                        if l.endswith('\r\n'):
                            l = l[:-2] + '\n'
                        nlines.append(l)
                    self.lines = nlines
        except IOError:
            # getfile raised: the file does not exist yet; that is fine
            # when the patch creates it
            if self.create:
                self.missing = False
            if self.mode is None:
                self.mode = (False, False)
        if self.missing:
            self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)

        self.hash = {}
        self.dirty = 0
        self.offset = 0
        self.skew = 0
        self.rej = []
        self.fileprinted = False
        self.printfile(False)
        self.hunks = 0

    def writelines(self, fname, lines, mode):
        """Write 'lines' through the backend, restoring the target EOLs."""
        if self.eolmode == 'auto':
            eol = self.eol
        elif self.eolmode == 'crlf':
            eol = '\r\n'
        else:
            eol = '\n'

        if self.eolmode != 'strict' and eol and eol != '\n':
            rawlines = []
            for l in lines:
                if l and l[-1] == '\n':
                    l = l[:-1] + eol
                rawlines.append(l)
            lines = rawlines

        self.backend.setfile(fname, ''.join(lines), mode, self.copysource)

    def printfile(self, warn):
        """Emit "patching file ..." at most once; as a warning when 'warn'."""
        if self.fileprinted:
            return
        if warn or self.ui.verbose:
            self.fileprinted = True
        s = _("patching file %s\n") % self.fname
        if warn:
            self.ui.warn(s)
        else:
            self.ui.note(s)

    def findlines(self, l, linenum):
        # looks through the hash and finds candidate lines. The
        # result is a list of line numbers sorted based on distance
        # from linenum
        cand = self.hash.get(l, [])
        if len(cand) > 1:
            # resort our list of potentials forward then back.
            cand.sort(key=lambda x: abs(x - linenum))
        return cand

    def write_rej(self):
        # our rejects are a little different from patch(1). This always
        # creates rejects in the same form as the original patch. A file
        # header is inserted so that you can run the reject through patch again
        # without having to type the filename.
        if not self.rej:
            return
        base = os.path.basename(self.fname)
        lines = ["--- %s\n+++ %s\n" % (base, base)]
        for x in self.rej:
            for l in x.hunk:
                lines.append(l)
                if l[-1] != '\n':
                    lines.append("\n\ No newline at end of file\n")
        self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)

    def apply(self, h):
        """Apply a single hunk.

        Returns 0 on a clean apply, the fuzz amount when fuzz was needed,
        and -1 when the hunk was rejected.
        """
        if not h.complete():
            raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
                             (h.number, h.desc, len(h.a), h.lena, len(h.b),
                              h.lenb))

        self.hunks += 1

        if self.missing:
            self.rej.append(h)
            return -1

        if self.exists and self.create:
            if self.copysource:
                # Fix: '%' used to be applied inside _(), i.e.
                # _("... exists\n" % self.fname), so the formatted string
                # could never match a translation catalog entry.
                self.ui.warn(_("cannot create %s: destination already "
                               "exists\n") % self.fname)
            else:
                self.ui.warn(_("file %s already exists\n") % self.fname)
            self.rej.append(h)
            return -1

        if isinstance(h, binhunk):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[:] = h.new()
                self.offset += len(h.new())
                self.dirty = True
            return 0

        horig = h
        if (self.eolmode in ('crlf', 'lf')
            or self.eolmode == 'auto' and self.eol):
            # If new eols are going to be normalized, then normalize
            # hunk data before patching. Otherwise, preserve input
            # line-endings.
            h = h.getnormalized()

        # fast case first, no offsets, no fuzz
        old = h.old()
        # patch starts counting at 1 unless we are adding the file
        if h.starta == 0:
            start = 0
        else:
            start = h.starta + self.offset - 1
        orig_start = start
        # if there's skew we want to emit the "(offset %d lines)" even
        # when the hunk cleanly applies at start + skew, so skip the
        # fast case code
        if self.skew == 0 and diffhelpers.testhunk(old, self.lines, start) == 0:
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[start : start + h.lena] = h.new()
                self.offset += h.lenb - h.lena
                self.dirty = True
            return 0

        # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
        self.hash = {}
        for x, s in enumerate(self.lines):
            self.hash.setdefault(s, []).append(x)
        if h.hunk[-1][0] != ' ':
            # if the hunk tried to put something at the bottom of the file
            # override the start line and use eof here
            search_start = len(self.lines)
        else:
            search_start = orig_start + self.skew

        for fuzzlen in xrange(3):
            for toponly in [True, False]:
                old = h.old(fuzzlen, toponly)

                cand = self.findlines(old[0][1:], search_start)
                for l in cand:
                    if diffhelpers.testhunk(old, self.lines, l) == 0:
                        newlines = h.new(fuzzlen, toponly)
                        self.lines[l : l + len(old)] = newlines
                        self.offset += len(newlines) - len(old)
                        self.skew = l - orig_start
                        self.dirty = True
                        offset = l - orig_start - fuzzlen
                        if fuzzlen:
                            msg = _("Hunk #%d succeeded at %d "
                                    "with fuzz %d "
                                    "(offset %d lines).\n")
                            self.printfile(True)
                            self.ui.warn(msg %
                                         (h.number, l + 1, fuzzlen, offset))
                        else:
                            msg = _("Hunk #%d succeeded at %d "
                                    "(offset %d lines).\n")
                            self.ui.note(msg % (h.number, l + 1, offset))
                        return fuzzlen
        self.printfile(True)
        self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        self.rej.append(horig)
        return -1

    def close(self):
        """Flush patched content and rejects; return the reject count."""
        if self.dirty:
            self.writelines(self.fname, self.lines, self.mode)
        self.write_rej()
        return len(self.rej)
790
790
class hunk(object):
    """One text hunk of a patch, in unified or context format.

    Parses the hunk from 'lr' (a line reader) at construction time into
    self.a (old lines, prefixed) and self.b (new lines, unprefixed), plus
    the raw self.hunk lines and the -start,len/+start,len coordinates.
    """

    def __init__(self, desc, num, lr, context):
        # desc is the "@@ ..." (or context "***") header line;
        # context selects context-diff vs unified-diff parsing.
        self.number = num
        self.desc = desc
        self.hunk = [desc]
        self.a = []
        self.b = []
        self.starta = self.lena = None
        self.startb = self.lenb = None
        if lr is not None:
            if context:
                self.read_context_hunk(lr)
            else:
                self.read_unified_hunk(lr)

    def getnormalized(self):
        """Return a copy with line endings normalized to LF."""

        def normalize(lines):
            nlines = []
            for line in lines:
                if line.endswith('\r\n'):
                    line = line[:-2] + '\n'
                nlines.append(line)
            return nlines

        # Dummy object, it is rebuilt manually
        nh = hunk(self.desc, self.number, None, None)
        nh.number = self.number
        nh.desc = self.desc
        nh.hunk = self.hunk
        nh.a = normalize(self.a)
        nh.b = normalize(self.b)
        nh.starta = self.starta
        nh.startb = self.startb
        nh.lena = self.lena
        nh.lenb = self.lenb
        return nh

    def read_unified_hunk(self, lr):
        """Parse a unified-format hunk body following the @@ header."""
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, foo, self.lena, self.startb, foo2, self.lenb = m.groups()
        # an omitted length ("@@ -3 +4 @@") means 1
        if self.lena is None:
            self.lena = 1
        else:
            self.lena = int(self.lena)
        if self.lenb is None:
            self.lenb = 1
        else:
            self.lenb = int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a, self.b)
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length. Lets try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1
        self._fixnewline(lr)

    def read_context_hunk(self, lr):
        """Parse a context-format hunk and rebuild it in unified form."""
        self.desc = lr.readline()
        m = contextdesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        foo, self.starta, foo2, aend, foo3 = m.groups()
        self.starta = int(self.starta)
        if aend is None:
            aend = self.starta
        self.lena = int(aend) - self.starta
        if self.starta:
            self.lena += 1
        # old-side lines: '- '/'! ' become '-', '  ' becomes ' '
        for x in xrange(self.lena):
            l = lr.readline()
            if l.startswith('---'):
                # lines addition, old block is empty
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('- ') or l.startswith('! '):
                u = '-' + s
            elif l.startswith('  '):
                u = ' ' + s
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.a.append(u)
            self.hunk.append(u)

        l = lr.readline()
        if l.startswith('\ '):
            # "\ No newline at end of file": trim the trailing newline
            s = self.a[-1][:-1]
            self.a[-1] = s
            self.hunk[-1] = s
            l = lr.readline()
        m = contextdesc.match(l)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        foo, self.startb, foo2, bend, foo3 = m.groups()
        self.startb = int(self.startb)
        if bend is None:
            bend = self.startb
        self.lenb = int(bend) - self.startb
        if self.startb:
            self.lenb += 1
        hunki = 1
        # new-side lines: '+ '/'! ' become '+', '  ' becomes ' '; they are
        # interleaved back into self.hunk at the right positions
        for x in xrange(self.lenb):
            l = lr.readline()
            if l.startswith('\ '):
                # XXX: the only way to hit this is with an invalid line range.
                # The no-eol marker is not counted in the line range, but I
                # guess there are diff(1) out there which behave differently.
                s = self.b[-1][:-1]
                self.b[-1] = s
                self.hunk[hunki - 1] = s
                continue
            if not l:
                # line deletions, new block is empty and we hit EOF
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('+ ') or l.startswith('! '):
                u = '+' + s
            elif l.startswith('  '):
                u = ' ' + s
            elif len(self.b) == 0:
                # line deletions, new block is empty
                lr.push(l)
                break
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.b.append(s)
            # advance through self.hunk to find where this new line goes,
            # skipping '-' lines; insert it when no matching slot exists
            while True:
                if hunki >= len(self.hunk):
                    h = ""
                else:
                    h = self.hunk[hunki]
                hunki += 1
                if h == u:
                    break
                elif h.startswith('-'):
                    continue
                else:
                    self.hunk.insert(hunki - 1, u)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith('-') or x.startswith(' '):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith('+') or x.startswith(' '):
                    self.b.append(x[1:])
        # @@ -start,len +start,len @@
        self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
                                               self.startb, self.lenb)
        self.hunk[0] = self.desc
        self._fixnewline(lr)

    def _fixnewline(self, lr):
        # consume a trailing "\ No newline at end of file" marker, if any
        l = lr.readline()
        if l.startswith('\ '):
            diffhelpers.fix_newline(self.hunk, self.a, self.b)
        else:
            lr.push(l)

    def complete(self):
        """True when parsed line counts match the header's declared lengths."""
        return len(self.a) == self.lena and len(self.b) == self.lenb

    def fuzzit(self, l, fuzz, toponly):
        # this removes context lines from the top and bottom of list 'l'. It
        # checks the hunk to make sure only context lines are removed, and then
        # returns a new shortened list of lines.
        fuzz = min(fuzz, len(l)-1)
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            for x in xrange(hlen - 1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x + 1][0] == ' ':
                    top += 1
                else:
                    break
            if not toponly:
                for x in xrange(hlen - 1):
                    if self.hunk[hlen - bot - 1][0] == ' ':
                        bot += 1
                    else:
                        break

            # top and bot now count context in the hunk
            # adjust them if either one is short
            context = max(top, bot, 3)
            if bot < context:
                bot = max(0, fuzz - (context - bot))
            else:
                bot = min(fuzz, bot)
            if top < context:
                top = max(0, fuzz - (context - top))
            else:
                top = min(fuzz, top)

            return l[top:len(l)-bot]
        return l

    def old(self, fuzz=0, toponly=False):
        """Old-side lines, optionally trimmed by fuzz context removal."""
        return self.fuzzit(self.a, fuzz, toponly)

    def new(self, fuzz=0, toponly=False):
        """New-side lines, optionally trimmed by fuzz context removal."""
        return self.fuzzit(self.b, fuzz, toponly)
1011
1011
class binhunk(object):
    'A binary patch file. Only understands literals so far.'

    def __init__(self, lr):
        self.text = None
        self.hunk = ['GIT binary patch\n']
        self._read(lr)

    def complete(self):
        """True once a literal body has been successfully decoded."""
        return self.text is not None

    def new(self):
        """Return the decoded file content as a one-element line list."""
        return [self.text]

    def _read(self, lr):
        """Decode a git 'literal N' base85 section from line reader 'lr'.

        Raises PatchError when no literal section is found or when the
        decoded payload does not match the advertised size.
        """
        line = lr.readline()
        self.hunk.append(line)
        while line and not line.startswith('literal '):
            line = lr.readline()
            self.hunk.append(line)
        if not line:
            raise PatchError(_('could not extract binary patch'))
        size = int(line[8:].rstrip())
        dec = []
        line = lr.readline()
        self.hunk.append(line)
        while len(line) > 1:
            # first char encodes this line's payload length:
            # 'A'-'Z' -> 1..26, otherwise 'a'-'z' -> 27..52
            l = line[0]
            if l <= 'Z' and l >= 'A':
                l = ord(l) - ord('A') + 1
            else:
                l = ord(l) - ord('a') + 27
            dec.append(base85.b85decode(line[1:-1])[:l])
            line = lr.readline()
            self.hunk.append(line)
        text = zlib.decompress(''.join(dec))
        if len(text) != size:
            # Fix: both values belong in the '%' tuple. Previously the
            # format got only len(text) ("not enough arguments" TypeError)
            # and 'size' was passed as a stray extra PatchError argument.
            raise PatchError(_('binary patch is %d bytes, not %d') %
                             (len(text), size))
        self.text = text
1051
1051
def parsefilename(str):
    """Extract the file name from a '--- name\\tstuff' diff header line."""
    # drop the 4-char '--- '/'+++ ' marker and any trailing EOL chars
    name = str[4:].rstrip('\r\n')
    # the name ends at the first tab if any, else the first space,
    # else it runs to the end of the line
    for sep in ('\t', ' '):
        cut = name.find(sep)
        if cut >= 0:
            return name[:cut]
    return name
1061
1061
def pathstrip(path, strip):
    """Strip 'strip' leading components from 'path'.

    Returns a (stripped-prefix, remainder) pair; raises PatchError when
    the path does not have enough components.
    """
    if strip == 0:
        return '', path.rstrip()
    pathlen = len(path)
    i = 0
    remaining = strip
    while remaining:
        i = path.find('/', i)
        if i == -1:
            raise PatchError(_("unable to strip away %d of %d dirs from %s") %
                             (remaining, strip, path))
        i += 1
        # consume '//' in the path
        while i < pathlen - 1 and path[i] == '/':
            i += 1
        remaining -= 1
    return path[:i].lstrip(), path[i:].rstrip()
1079
1079
def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip):
    """Build a patchmeta describing the target of a plain (non-git) hunk.

    Strips 'strip' components from both header paths, decides which of
    afile/bfile is the file to patch, and tags the operation as ADD or
    DELETE when the hunk creates or removes the file.  Raises PatchError
    when neither side names a usable file.
    """
    nulla = afile_orig == "/dev/null"
    nullb = bfile_orig == "/dev/null"
    # a hunk with empty old (resp. new) side creates (resp. removes) the file
    create = nulla and hunk.starta == 0 and hunk.lena == 0
    remove = nullb and hunk.startb == 0 and hunk.lenb == 0
    abase, afile = pathstrip(afile_orig, strip)
    # gooda/goodb: the stripped path actually exists in the backend
    gooda = not nulla and backend.exists(afile)
    bbase, bfile = pathstrip(bfile_orig, strip)
    if afile == bfile:
        goodb = gooda
    else:
        goodb = not nullb and backend.exists(bfile)
    missing = not goodb and not gooda and not create

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[:afile.rfind('/') + 1]
    bbasedir = bfile[:bfile.rfind('/') + 1]
    if (missing and abasedir == bbasedir and afile.startswith(bfile)
        and hunk.starta == 0 and hunk.lena == 0):
        create = True
        missing = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    fname = None
    if not missing:
        if gooda and goodb:
            fname = isbackup and afile or bfile
        elif gooda:
            fname = afile

    if not fname:
        # fall back to whichever side is not /dev/null
        if not nullb:
            fname = isbackup and afile or bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    gp = patchmeta(fname)
    if create:
        gp.op = 'ADD'
    elif remove:
        gp.op = 'DELETE'
    return gp
1128
1128
def scangitpatch(lr, firstline):
    """
    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    We cannot apply this sequence as-is, the renamed 'a' could not be
    found for it would have been renamed already. And we cannot copy
    from 'b' instead because 'b' would have been changed already. So
    we scan the git patch for copy and rename commands so we can
    perform the copies ahead of time.
    """
    pos = 0
    try:
        pos = lr.fp.tell()
        fp = lr.fp
    except IOError:
        # stream is not seekable: slurp it into memory so we can rescan
        fp = cStringIO.StringIO(lr.fp.read())
    gitlr = linereader(fp)
    gitlr.push(firstline)
    gitpatches = readgitpatch(gitlr)
    # rewind so the caller can re-read the patch body from the start
    fp.seek(pos)
    return gitpatches
1154
1154
def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    emitfile = newfile = False
    # stack of (a-path, b-path, patchmeta) built from the pre-scan of a
    # git patch; entries are popped as their headers are encountered
    gitpatches = None

    # our states
    BFILE = 1
    # None: diff flavor unknown yet, False: unified, True: context diff
    context = None
    lr = linereader(fp)

    while True:
        x = lr.readline()
        if not x:
            break
        # a hunk header is only meaningful once a file has been selected
        if state == BFILE and (
            (not context and x[0] == '@')
            or (context is not False and x.startswith('***************'))
            or x.startswith('GIT binary patch')):
            gp = None
            if (gitpatches and
                (gitpatches[-1][0] == afile or gitpatches[-1][1] == bfile)):
                gp = gitpatches.pop()[2]
            if x.startswith('GIT binary patch'):
                h = binhunk(lr)
            else:
                if context is None and x.startswith('***************'):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                # first hunk of a new file: announce the file first
                emitfile = False
                yield 'file', (afile, bfile, h, gp and gp.copy() or None)
            yield 'hunk', h
        elif x.startswith('diff --git'):
            m = gitre.match(x)
            if not m:
                continue
            if not gitpatches:
                # scan whole input for git metadata
                gitpatches = [('a/' + gp.path, 'b/' + gp.path, gp) for gp
                              in scangitpatch(lr, x)]
                yield 'git', [g[2].copy() for g in gitpatches
                              if g[2].op in ('COPY', 'RENAME')]
                gitpatches.reverse()
            afile = 'a/' + m.group(1)
            bfile = 'b/' + m.group(2)
            # flush hunk-less entries (pure renames, mode changes...)
            # appearing before the current header
            while afile != gitpatches[-1][0] and bfile != gitpatches[-1][1]:
                gp = gitpatches.pop()[2]
                yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
            gp = gitpatches[-1][2]
            # copy/rename + modify should modify target, not source
            if gp.op in ('COPY', 'DELETE', 'RENAME', 'ADD') or gp.mode:
                afile = bfile
            newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    # remaining git entries never saw a hunk (renames, mode changes...)
    while gitpatches:
        gp = gitpatches.pop()[2]
        yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1254
1254
def applydiff(ui, fp, backend, store, strip=1, eolmode='strict'):
    """Read a patch from fp and try to apply it.

    Returns 0 for a clean patch, -1 if any rejects were found and 1 if
    there was any fuzz.

    If 'eolmode' is 'strict', the patch content and patched file are
    read in binary mode. Otherwise, line endings are ignored when
    patching then normalized according to 'eolmode'.
    """
    # delegate to the generic driver, using the default per-file
    # patcher implementation (patchfile)
    return _applydiff(ui, fp, patchfile, backend, store,
                      strip=strip, eolmode=eolmode)
1267
1267
def _applydiff(ui, fp, patcher, backend, store, strip=1,
               eolmode='strict'):
    # Drive iterhunks() events: open one patcher per 'file' event, feed
    # it 'hunk' events, and stash pre-images for copies on 'git' events.
    # Returns 0 on success, 1 on fuzz, -1 if any hunk was rejected.

    def pstrip(p):
        # strip leading path components according to -p<strip>
        return pathstrip(p, strip - 1)[1]

    rejects = 0
    err = 0
    current_file = None

    for state, values in iterhunks(fp):
        if state == 'hunk':
            if not current_file:
                # file was rejected earlier; skip its hunks
                continue
            ret = current_file.apply(values)
            if ret > 0:
                err = 1
        elif state == 'file':
            if current_file:
                rejects += current_file.close()
            current_file = None
            afile, bfile, first_hunk, gp = values
            if gp:
                # NOTE(review): 'path' appears unused here; gp.path is
                # stripped again on the next line — looks like a dead store
                path = pstrip(gp.path)
                gp.path = pstrip(gp.path)
                if gp.oldpath:
                    gp.oldpath = pstrip(gp.oldpath)
            else:
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip)
            if gp.op == 'RENAME':
                backend.unlink(gp.oldpath)
            if not first_hunk:
                # metadata-only change: no hunks to apply
                if gp.op == 'DELETE':
                    backend.unlink(gp.path)
                    continue
                data, mode = None, None
                if gp.op in ('RENAME', 'COPY'):
                    data, mode = store.getfile(gp.oldpath)[:2]
                if gp.mode:
                    mode = gp.mode
                    if gp.op == 'ADD':
                        # Added files without content have no hunk and
                        # must be created
                        data = ''
                if data or mode:
                    if (gp.op in ('ADD', 'RENAME', 'COPY')
                        and backend.exists(gp.path)):
                        raise PatchError(_("cannot create %s: destination "
                                           "already exists") % gp.path)
                    backend.setfile(gp.path, data, mode, gp.oldpath)
                continue
            try:
                current_file = patcher(ui, gp, backend, store,
                                       eolmode=eolmode)
            except PatchError, inst:
                ui.warn(str(inst) + '\n')
                current_file = None
                rejects += 1
                continue
        elif state == 'git':
            # remember copy/rename sources before they are modified
            for gp in values:
                path = pstrip(gp.oldpath)
                data, mode = backend.getfile(path)
                store.setfile(path, data, mode)
        else:
            raise util.Abort(_('unsupported parser state: %s') % state)

    if current_file:
        rejects += current_file.close()

    if rejects:
        return -1
    return err
1341
1341
def _externalpatch(ui, repo, patcher, patchname, strip, files,
                   similarity):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor."""

    fuzz = False
    args = []
    cwd = repo.root
    if cwd:
        args.append('-d %s' % util.shellquote(cwd))
    fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
                                         util.shellquote(patchname)))
    # FIX: pf/printed_file used to be first assigned inside the
    # 'patching file ' branch; a 'with fuzz'/'FAILED' line emitted
    # before it (e.g. from a malformed patch) raised NameError.
    pf = patchname
    printed_file = False
    try:
        for line in fp:
            line = line.rstrip()
            ui.note(line + '\n')
            if line.startswith('patching file '):
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.add(pf)
            elif line.find('with fuzz') >= 0:
                fuzz = True
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
            elif line.find('saving rejects to file') >= 0:
                ui.warn(line + '\n')
            elif line.find('FAILED') >= 0:
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
    finally:
        if files:
            cfiles = list(files)
            cwd = repo.getcwd()
            if cwd:
                cfiles = [util.pathto(repo.root, cwd, f)
                          for f in cfiles]
            scmutil.addremove(repo, cfiles, similarity=similarity)
    code = fp.close()
    if code:
        raise PatchError(_("patch command failed: %s") %
                         util.explainexit(code)[0])
    return fuzz
1388
1388
def patchbackend(ui, backend, patchobj, strip, files=None, eolmode='strict'):
    """Apply patchobj through backend, adding touched paths to files.

    patchobj may be a file path (opened in binary mode) or an already
    open file-like object. Raises PatchError if any hunk is rejected;
    otherwise returns True when the patch applied with fuzz.
    """
    if files is None:
        files = set()
    if eolmode is None:
        eolmode = ui.config('patch', 'eol', 'strict')
    # validate before normalizing so the error shows the user's spelling
    if eolmode.lower() not in eolmodes:
        raise util.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = eolmode.lower()

    store = filestore()
    try:
        patchfp = open(patchobj, 'rb')
    except TypeError:
        # not a path: assume an open file-like object
        patchfp = patchobj
    try:
        ret = applydiff(ui, patchfp, backend, store,
                        strip=strip, eolmode=eolmode)
    finally:
        if patchfp != patchobj:
            patchfp.close()
        files.update(backend.close())
        store.close()
    if ret < 0:
        raise PatchError(_('patch failed to apply'))
    return ret > 0
1414
1414
def internalpatch(ui, repo, patchobj, strip, files=None, eolmode='strict',
                  similarity=0):
    """use builtin patch to apply <patchobj> to the working directory.
    returns whether patch was applied with fuzz factor."""
    # write results straight into the working directory, running
    # addremove with the given similarity afterwards
    wdbackend = workingbackend(ui, repo, similarity)
    return patchbackend(ui, wdbackend, patchobj, strip, files, eolmode)
1421
1421
def patchrepo(ui, repo, ctx, store, patchobj, strip, files=None,
              eolmode='strict'):
    """Apply patchobj against changectx ctx, writing results into store."""
    return patchbackend(ui, repobackend(ui, repo, ctx, store),
                        patchobj, strip, files, eolmode)
1426
1426
def makememctx(repo, parents, text, user, date, branch, files, store,
               editor=None):
    """Build an in-memory changectx whose file contents come from store.

    If editor is given, it is invoked to (re)write the commit text.
    """
    def getfilectx(repo, memctx, path):
        # pull data and flags for each file out of the backing store
        filedata, (islink, isexec), copied = store.getfile(path)
        return context.memfilectx(path, filedata, islink=islink,
                                  isexec=isexec, copied=copied)

    extra = {'branch': encoding.fromlocal(branch)} if branch else {}
    ctx = context.memctx(repo, parents, text, files, getfilectx, user,
                         date, extra)
    if editor:
        ctx._text = editor(repo, ctx, [])
    return ctx
1441
1441
1442 def patch(ui, repo, patchname, strip=1, files=None, eolmode='strict',
1442 def patch(ui, repo, patchname, strip=1, files=None, eolmode='strict',
1443 similarity=0):
1443 similarity=0):
1444 """Apply <patchname> to the working directory.
1444 """Apply <patchname> to the working directory.
1445
1445
1446 'eolmode' specifies how end of lines should be handled. It can be:
1446 'eolmode' specifies how end of lines should be handled. It can be:
1447 - 'strict': inputs are read in binary mode, EOLs are preserved
1447 - 'strict': inputs are read in binary mode, EOLs are preserved
1448 - 'crlf': EOLs are ignored when patching and reset to CRLF
1448 - 'crlf': EOLs are ignored when patching and reset to CRLF
1449 - 'lf': EOLs are ignored when patching and reset to LF
1449 - 'lf': EOLs are ignored when patching and reset to LF
1450 - None: get it from user settings, default to 'strict'
1450 - None: get it from user settings, default to 'strict'
1451 'eolmode' is ignored when using an external patcher program.
1451 'eolmode' is ignored when using an external patcher program.
1452
1452
1453 Returns whether patch was applied with fuzz factor.
1453 Returns whether patch was applied with fuzz factor.
1454 """
1454 """
1455 patcher = ui.config('ui', 'patch')
1455 patcher = ui.config('ui', 'patch')
1456 if files is None:
1456 if files is None:
1457 files = set()
1457 files = set()
1458 try:
1458 try:
1459 if patcher:
1459 if patcher:
1460 return _externalpatch(ui, repo, patcher, patchname, strip,
1460 return _externalpatch(ui, repo, patcher, patchname, strip,
1461 files, similarity)
1461 files, similarity)
1462 return internalpatch(ui, repo, patchname, strip, files, eolmode,
1462 return internalpatch(ui, repo, patchname, strip, files, eolmode,
1463 similarity)
1463 similarity)
1464 except PatchError, err:
1464 except PatchError, err:
1465 raise util.Abort(str(err))
1465 raise util.Abort(str(err))
1466
1466
def changedfiles(ui, repo, patchpath, strip=1):
    """Return the set of repository paths that the patch at patchpath
    would modify, create, delete or rename."""
    backend = fsbackend(ui, repo.root)
    fp = open(patchpath, 'rb')
    try:
        changed = set()
        for state, values in iterhunks(fp):
            # hunk contents and git pre-scan events carry no new paths
            if state in ('hunk', 'git'):
                continue
            if state != 'file':
                raise util.Abort(_('unsupported parser state: %s') % state)
            afile, bfile, first_hunk, gp = values
            if gp:
                gp.path = pathstrip(gp.path, strip - 1)[1]
                if gp.oldpath:
                    gp.oldpath = pathstrip(gp.oldpath, strip - 1)[1]
            else:
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip)
            changed.add(gp.path)
            if gp.op == 'RENAME':
                # a rename touches its source as well
                changed.add(gp.oldpath)
        return changed
    finally:
        fp.close()
1489
1489
def b85diff(to, tn):
    '''print base85-encoded binary diff'''
    def gitindex(text):
        # git blob id: SHA-1 over "blob <len>\0" header plus content
        if not text:
            return hex(nullid)
        s = util.sha1('blob %d\0' % len(text))
        s.update(text)
        return s.hexdigest()

    def fmtline(line):
        # leading byte encodes chunk length: 1..26 -> A..Z, 27.. -> a..
        n = len(line)
        if n <= 26:
            n = chr(ord('A') + n - 1)
        else:
            n = chr(n - 26 + ord('a') - 1)
        return '%c%s\n' % (n, base85.b85encode(line, True))

    def chunk(text, csize=52):
        for start in xrange(0, len(text), csize):
            yield text[start:start + csize]

    tohash = gitindex(to)
    tnhash = gitindex(tn)
    if tohash == tnhash:
        # identical content: nothing to emit
        return ""

    # TODO: deltas
    ret = ['index %s..%s\nGIT binary patch\nliteral %s\n' %
           (tohash, tnhash, len(tn))]
    for piece in chunk(zlib.compress(tn)):
        ret.append(fmtline(piece))
    ret.append('\n')
    return ''.join(ret)
1527
1527
class GitDiffRequired(Exception):
    """Raised when a change can only be represented in git diff format."""
    pass
1530
1530
def diffopts(ui, opts=None, untrusted=False):
    """Build mdiff.diffopts from explicit opts, falling back to the
    [diff] section of the ui configuration."""
    def lookup(key, name=None, getter=ui.configbool):
        # an explicit command option wins over the configuration value
        return ((opts and opts.get(key)) or
                getter('diff', name or key, None, untrusted=untrusted))
    return mdiff.diffopts(
        text=opts and opts.get('text'),
        git=lookup('git'),
        nodates=lookup('nodates'),
        showfunc=lookup('show_function', 'showfunc'),
        ignorews=lookup('ignore_all_space', 'ignorews'),
        ignorewsamount=lookup('ignore_space_change', 'ignorewsamount'),
        ignoreblanklines=lookup('ignore_blank_lines', 'ignoreblanklines'),
        context=lookup('unified', getter=ui.config))
1544
1544
def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
         losedatafn=None, prefix=''):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwarg) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    prefix is a filename prefix that is prepended to all filenames on
    display (used for subrepos).
    '''

    if opts is None:
        opts = mdiff.defaultopts

    if not node1 and not node2:
        node1 = repo.dirstate.p1()

    def lrugetfilectx():
        # small LRU (20 entries) of filelogs, keyed by file name, so
        # repeated lookups of the same file avoid re-opening its filelog
        cache = {}
        order = []
        def getfilectx(f, ctx):
            fctx = ctx.filectx(f, filelog=cache.get(f))
            if f not in cache:
                if len(cache) > 20:
                    del cache[order.pop(0)]
                cache[f] = fctx.filelog()
            else:
                order.remove(f)
            order.append(f)
            return fctx
        return getfilectx
    getfilectx = lrugetfilectx()

    ctx1 = repo[node1]
    ctx2 = repo[node2]

    if not changes:
        changes = repo.status(ctx1, ctx2, match=match)
    modified, added, removed = changes[:3]

    if not modified and not added and not removed:
        return []

    revs = None
    if not repo.ui.quiet:
        hexfunc = repo.ui.debugflag and hex or short
        revs = [hexfunc(node) for node in [node1, node2] if node]

    copy = {}
    if opts.git or opts.upgrade:
        copy = copies.copies(repo, ctx1, ctx2, repo[nullid])[0]

    difffn = lambda opts, losedata: trydiff(repo, revs, ctx1, ctx2,
                modified, added, removed, copy, getfilectx, opts, losedata,
                prefix)
    if opts.upgrade and not opts.git:
        try:
            def losedata(fn):
                # abort the plain diff as soon as data loss is detected
                # and the caller did not accept it
                if not losedatafn or not losedatafn(fn=fn):
                    raise GitDiffRequired()
            # Buffer the whole output until we are sure it can be generated
            return list(difffn(opts.copy(git=False), losedata))
        except GitDiffRequired:
            return difffn(opts.copy(git=True), None)
    else:
        return difffn(opts, None)
1619
1619
def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()'''
    # prefix -> label, checked in order; first match wins
    labels = [('diff', 'diff.diffline'),
              ('copy', 'diff.extended'),
              ('rename', 'diff.extended'),
              ('old', 'diff.extended'),
              ('new', 'diff.extended'),
              ('deleted', 'diff.extended'),
              ('---', 'diff.file_a'),
              ('+++', 'diff.file_b'),
              ('@@', 'diff.hunk'),
              ('-', 'diff.deleted'),
              ('+', 'diff.inserted')]

    for chunk in func(*args, **kw):
        for i, line in enumerate(chunk.split('\n')):
            if i:
                yield ('\n', '')
            body = line
            if line and line[0] in '+-':
                # highlight trailing whitespace, but only in changed lines
                body = line.rstrip()
            for token, label in labels:
                if body.startswith(token):
                    yield (body, label)
                    break
            else:
                yield (line, '')
            if line != body:
                yield (line[len(body):], 'diff.trailingwhitespace')
1651
1651
def diffui(*args, **kw):
    '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
    # same arguments as diff(); output is routed through difflabel()
    return difflabel(diff, *args, **kw)
1655
1655
1656
1656
1657 def _addmodehdr(header, omode, nmode):
1657 def _addmodehdr(header, omode, nmode):
1658 if omode != nmode:
1658 if omode != nmode:
1659 header.append('old mode %s\n' % omode)
1659 header.append('old mode %s\n' % omode)
1660 header.append('new mode %s\n' % nmode)
1660 header.append('new mode %s\n' % nmode)
1661
1661
1662 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1662 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1663 copy, getfilectx, opts, losedatafn, prefix):
1663 copy, getfilectx, opts, losedatafn, prefix):
1664
1664
1665 def join(f):
1665 def join(f):
1666 return os.path.join(prefix, f)
1666 return os.path.join(prefix, f)
1667
1667
1668 date1 = util.datestr(ctx1.date())
1668 date1 = util.datestr(ctx1.date())
1669 man1 = ctx1.manifest()
1669 man1 = ctx1.manifest()
1670
1670
1671 gone = set()
1671 gone = set()
1672 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
1672 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
1673
1673
1674 copyto = dict([(v, k) for k, v in copy.items()])
1674 copyto = dict([(v, k) for k, v in copy.items()])
1675
1675
1676 if opts.git:
1676 if opts.git:
1677 revs = None
1677 revs = None
1678
1678
1679 for f in sorted(modified + added + removed):
1679 for f in sorted(modified + added + removed):
1680 to = None
1680 to = None
1681 tn = None
1681 tn = None
1682 dodiff = True
1682 dodiff = True
1683 header = []
1683 header = []
1684 if f in man1:
1684 if f in man1:
1685 to = getfilectx(f, ctx1).data()
1685 to = getfilectx(f, ctx1).data()
1686 if f not in removed:
1686 if f not in removed:
1687 tn = getfilectx(f, ctx2).data()
1687 tn = getfilectx(f, ctx2).data()
1688 a, b = f, f
1688 a, b = f, f
1689 if opts.git or losedatafn:
1689 if opts.git or losedatafn:
1690 if f in added:
1690 if f in added:
1691 mode = gitmode[ctx2.flags(f)]
1691 mode = gitmode[ctx2.flags(f)]
1692 if f in copy or f in copyto:
1692 if f in copy or f in copyto:
1693 if opts.git:
1693 if opts.git:
1694 if f in copy:
1694 if f in copy:
1695 a = copy[f]
1695 a = copy[f]
1696 else:
1696 else:
1697 a = copyto[f]
1697 a = copyto[f]
1698 omode = gitmode[man1.flags(a)]
1698 omode = gitmode[man1.flags(a)]
1699 _addmodehdr(header, omode, mode)
1699 _addmodehdr(header, omode, mode)
1700 if a in removed and a not in gone:
1700 if a in removed and a not in gone:
1701 op = 'rename'
1701 op = 'rename'
1702 gone.add(a)
1702 gone.add(a)
1703 else:
1703 else:
1704 op = 'copy'
1704 op = 'copy'
1705 header.append('%s from %s\n' % (op, join(a)))
1705 header.append('%s from %s\n' % (op, join(a)))
1706 header.append('%s to %s\n' % (op, join(f)))
1706 header.append('%s to %s\n' % (op, join(f)))
1707 to = getfilectx(a, ctx1).data()
1707 to = getfilectx(a, ctx1).data()
1708 else:
1708 else:
1709 losedatafn(f)
1709 losedatafn(f)
1710 else:
1710 else:
1711 if opts.git:
1711 if opts.git:
1712 header.append('new file mode %s\n' % mode)
1712 header.append('new file mode %s\n' % mode)
1713 elif ctx2.flags(f):
1713 elif ctx2.flags(f):
1714 losedatafn(f)
1714 losedatafn(f)
1715 # In theory, if tn was copied or renamed we should check
1715 # In theory, if tn was copied or renamed we should check
1716 # if the source is binary too but the copy record already
1716 # if the source is binary too but the copy record already
1717 # forces git mode.
1717 # forces git mode.
1718 if util.binary(tn):
1718 if util.binary(tn):
1719 if opts.git:
1719 if opts.git:
1720 dodiff = 'binary'
1720 dodiff = 'binary'
1721 else:
1721 else:
1722 losedatafn(f)
1722 losedatafn(f)
1723 if not opts.git and not tn:
1723 if not opts.git and not tn:
1724 # regular diffs cannot represent new empty file
1724 # regular diffs cannot represent new empty file
1725 losedatafn(f)
1725 losedatafn(f)
1726 elif f in removed:
1726 elif f in removed:
1727 if opts.git:
1727 if opts.git:
1728 # have we already reported a copy above?
1728 # have we already reported a copy above?
1729 if ((f in copy and copy[f] in added
1729 if ((f in copy and copy[f] in added
1730 and copyto[copy[f]] == f) or
1730 and copyto[copy[f]] == f) or
1731 (f in copyto and copyto[f] in added
1731 (f in copyto and copyto[f] in added
1732 and copy[copyto[f]] == f)):
1732 and copy[copyto[f]] == f)):
1733 dodiff = False
1733 dodiff = False
1734 else:
1734 else:
1735 header.append('deleted file mode %s\n' %
1735 header.append('deleted file mode %s\n' %
1736 gitmode[man1.flags(f)])
1736 gitmode[man1.flags(f)])
1737 elif not to or util.binary(to):
1737 elif not to or util.binary(to):
1738 # regular diffs cannot represent empty file deletion
1738 # regular diffs cannot represent empty file deletion
1739 losedatafn(f)
1739 losedatafn(f)
1740 else:
1740 else:
1741 oflag = man1.flags(f)
1741 oflag = man1.flags(f)
1742 nflag = ctx2.flags(f)
1742 nflag = ctx2.flags(f)
1743 binary = util.binary(to) or util.binary(tn)
1743 binary = util.binary(to) or util.binary(tn)
1744 if opts.git:
1744 if opts.git:
1745 _addmodehdr(header, gitmode[oflag], gitmode[nflag])
1745 _addmodehdr(header, gitmode[oflag], gitmode[nflag])
1746 if binary:
1746 if binary:
1747 dodiff = 'binary'
1747 dodiff = 'binary'
1748 elif binary or nflag != oflag:
1748 elif binary or nflag != oflag:
1749 losedatafn(f)
1749 losedatafn(f)
1750 if opts.git:
1750 if opts.git:
1751 header.insert(0, mdiff.diffline(revs, join(a), join(b), opts))
1751 header.insert(0, mdiff.diffline(revs, join(a), join(b), opts))
1752
1752
1753 if dodiff:
1753 if dodiff:
1754 if dodiff == 'binary':
1754 if dodiff == 'binary':
1755 text = b85diff(to, tn)
1755 text = b85diff(to, tn)
1756 else:
1756 else:
1757 text = mdiff.unidiff(to, date1,
1757 text = mdiff.unidiff(to, date1,
1758 # ctx2 date may be dynamic
1758 # ctx2 date may be dynamic
1759 tn, util.datestr(ctx2.date()),
1759 tn, util.datestr(ctx2.date()),
1760 join(a), join(b), revs, opts=opts)
1760 join(a), join(b), revs, opts=opts)
1761 if header and (text or len(header) > 1):
1761 if header and (text or len(header) > 1):
1762 yield ''.join(header)
1762 yield ''.join(header)
1763 if text:
1763 if text:
1764 yield text
1764 yield text
1765
1765
1766 def diffstatsum(stats):
1766 def diffstatsum(stats):
1767 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
1767 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
1768 for f, a, r, b in stats:
1768 for f, a, r, b in stats:
1769 maxfile = max(maxfile, encoding.colwidth(f))
1769 maxfile = max(maxfile, encoding.colwidth(f))
1770 maxtotal = max(maxtotal, a + r)
1770 maxtotal = max(maxtotal, a + r)
1771 addtotal += a
1771 addtotal += a
1772 removetotal += r
1772 removetotal += r
1773 binary = binary or b
1773 binary = binary or b
1774
1774
1775 return maxfile, maxtotal, addtotal, removetotal, binary
1775 return maxfile, maxtotal, addtotal, removetotal, binary
1776
1776
1777 def diffstatdata(lines):
1777 def diffstatdata(lines):
1778 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
1778 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
1779
1779
1780 results = []
1780 results = []
1781 filename, adds, removes = None, 0, 0
1781 filename, adds, removes = None, 0, 0
1782
1782
1783 def addresult():
1783 def addresult():
1784 if filename:
1784 if filename:
1785 isbinary = adds == 0 and removes == 0
1785 isbinary = adds == 0 and removes == 0
1786 results.append((filename, adds, removes, isbinary))
1786 results.append((filename, adds, removes, isbinary))
1787
1787
1788 for line in lines:
1788 for line in lines:
1789 if line.startswith('diff'):
1789 if line.startswith('diff'):
1790 addresult()
1790 addresult()
1791 # set numbers to 0 anyway when starting new file
1791 # set numbers to 0 anyway when starting new file
1792 adds, removes = 0, 0
1792 adds, removes = 0, 0
1793 if line.startswith('diff --git'):
1793 if line.startswith('diff --git'):
1794 filename = gitre.search(line).group(1)
1794 filename = gitre.search(line).group(1)
1795 elif line.startswith('diff -r'):
1795 elif line.startswith('diff -r'):
1796 # format: "diff -r ... -r ... filename"
1796 # format: "diff -r ... -r ... filename"
1797 filename = diffre.search(line).group(1)
1797 filename = diffre.search(line).group(1)
1798 elif line.startswith('+') and not line.startswith('+++'):
1798 elif line.startswith('+') and not line.startswith('+++'):
1799 adds += 1
1799 adds += 1
1800 elif line.startswith('-') and not line.startswith('---'):
1800 elif line.startswith('-') and not line.startswith('---'):
1801 removes += 1
1801 removes += 1
1802 addresult()
1802 addresult()
1803 return results
1803 return results
1804
1804
1805 def diffstat(lines, width=80, git=False):
1805 def diffstat(lines, width=80, git=False):
1806 output = []
1806 output = []
1807 stats = diffstatdata(lines)
1807 stats = diffstatdata(lines)
1808 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
1808 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
1809
1809
1810 countwidth = len(str(maxtotal))
1810 countwidth = len(str(maxtotal))
1811 if hasbinary and countwidth < 3:
1811 if hasbinary and countwidth < 3:
1812 countwidth = 3
1812 countwidth = 3
1813 graphwidth = width - countwidth - maxname - 6
1813 graphwidth = width - countwidth - maxname - 6
1814 if graphwidth < 10:
1814 if graphwidth < 10:
1815 graphwidth = 10
1815 graphwidth = 10
1816
1816
1817 def scale(i):
1817 def scale(i):
1818 if maxtotal <= graphwidth:
1818 if maxtotal <= graphwidth:
1819 return i
1819 return i
1820 # If diffstat runs out of room it doesn't print anything,
1820 # If diffstat runs out of room it doesn't print anything,
1821 # which isn't very useful, so always print at least one + or -
1821 # which isn't very useful, so always print at least one + or -
1822 # if there were at least some changes.
1822 # if there were at least some changes.
1823 return max(i * graphwidth // maxtotal, int(bool(i)))
1823 return max(i * graphwidth // maxtotal, int(bool(i)))
1824
1824
1825 for filename, adds, removes, isbinary in stats:
1825 for filename, adds, removes, isbinary in stats:
1826 if git and isbinary:
1826 if git and isbinary:
1827 count = 'Bin'
1827 count = 'Bin'
1828 else:
1828 else:
1829 count = adds + removes
1829 count = adds + removes
1830 pluses = '+' * scale(adds)
1830 pluses = '+' * scale(adds)
1831 minuses = '-' * scale(removes)
1831 minuses = '-' * scale(removes)
1832 output.append(' %s%s | %*s %s%s\n' %
1832 output.append(' %s%s | %*s %s%s\n' %
1833 (filename, ' ' * (maxname - encoding.colwidth(filename)),
1833 (filename, ' ' * (maxname - encoding.colwidth(filename)),
1834 countwidth, count, pluses, minuses))
1834 countwidth, count, pluses, minuses))
1835
1835
1836 if stats:
1836 if stats:
1837 output.append(_(' %d files changed, %d insertions(+), %d deletions(-)\n')
1837 output.append(_(' %d files changed, %d insertions(+), %d deletions(-)\n')
1838 % (len(stats), totaladds, totalremoves))
1838 % (len(stats), totaladds, totalremoves))
1839
1839
1840 return ''.join(output)
1840 return ''.join(output)
1841
1841
1842 def diffstatui(*args, **kw):
1842 def diffstatui(*args, **kw):
1843 '''like diffstat(), but yields 2-tuples of (output, label) for
1843 '''like diffstat(), but yields 2-tuples of (output, label) for
1844 ui.write()
1844 ui.write()
1845 '''
1845 '''
1846
1846
1847 for line in diffstat(*args, **kw).splitlines():
1847 for line in diffstat(*args, **kw).splitlines():
1848 if line and line[-1] in '+-':
1848 if line and line[-1] in '+-':
1849 name, graph = line.rsplit(' ', 1)
1849 name, graph = line.rsplit(' ', 1)
1850 yield (name + ' ', '')
1850 yield (name + ' ', '')
1851 m = re.search(r'\++', graph)
1851 m = re.search(r'\++', graph)
1852 if m:
1852 if m:
1853 yield (m.group(0), 'diffstat.inserted')
1853 yield (m.group(0), 'diffstat.inserted')
1854 m = re.search(r'-+', graph)
1854 m = re.search(r'-+', graph)
1855 if m:
1855 if m:
1856 yield (m.group(0), 'diffstat.deleted')
1856 yield (m.group(0), 'diffstat.deleted')
1857 else:
1857 else:
1858 yield (line, '')
1858 yield (line, '')
1859 yield ('\n', '')
1859 yield ('\n', '')
@@ -1,1725 +1,1730 b''
1 # util.py - Mercurial utility functions and platform specfic implementations
1 # util.py - Mercurial utility functions and platform specfic implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specfic implementations.
10 """Mercurial utility functions and platform specfic implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 from i18n import _
16 from i18n import _
17 import error, osutil, encoding
17 import error, osutil, encoding
18 import errno, re, shutil, sys, tempfile, traceback
18 import errno, re, shutil, sys, tempfile, traceback
19 import os, time, calendar, textwrap, signal
19 import os, time, calendar, textwrap, signal
20 import imp, socket, urllib
20 import imp, socket, urllib
21
21
22 if os.name == 'nt':
22 if os.name == 'nt':
23 import windows as platform
23 import windows as platform
24 else:
24 else:
25 import posix as platform
25 import posix as platform
26
26
27 cachestat = platform.cachestat
27 cachestat = platform.cachestat
28 checkexec = platform.checkexec
28 checkexec = platform.checkexec
29 checklink = platform.checklink
29 checklink = platform.checklink
30 copymode = platform.copymode
30 copymode = platform.copymode
31 executablepath = platform.executablepath
31 executablepath = platform.executablepath
32 expandglobs = platform.expandglobs
32 expandglobs = platform.expandglobs
33 explainexit = platform.explainexit
33 explainexit = platform.explainexit
34 findexe = platform.findexe
34 findexe = platform.findexe
35 gethgcmd = platform.gethgcmd
35 gethgcmd = platform.gethgcmd
36 getuser = platform.getuser
36 getuser = platform.getuser
37 groupmembers = platform.groupmembers
37 groupmembers = platform.groupmembers
38 groupname = platform.groupname
38 groupname = platform.groupname
39 hidewindow = platform.hidewindow
39 hidewindow = platform.hidewindow
40 isexec = platform.isexec
40 isexec = platform.isexec
41 isowner = platform.isowner
41 isowner = platform.isowner
42 localpath = platform.localpath
42 localpath = platform.localpath
43 lookupreg = platform.lookupreg
43 lookupreg = platform.lookupreg
44 makedir = platform.makedir
44 makedir = platform.makedir
45 nlinks = platform.nlinks
45 nlinks = platform.nlinks
46 normpath = platform.normpath
46 normpath = platform.normpath
47 nulldev = platform.nulldev
47 nulldev = platform.nulldev
48 openhardlinks = platform.openhardlinks
48 openhardlinks = platform.openhardlinks
49 oslink = platform.oslink
49 oslink = platform.oslink
50 parsepatchoutput = platform.parsepatchoutput
50 parsepatchoutput = platform.parsepatchoutput
51 pconvert = platform.pconvert
51 pconvert = platform.pconvert
52 popen = platform.popen
52 popen = platform.popen
53 posixfile = platform.posixfile
53 posixfile = platform.posixfile
54 quotecommand = platform.quotecommand
54 quotecommand = platform.quotecommand
55 realpath = platform.realpath
55 realpath = platform.realpath
56 rename = platform.rename
56 rename = platform.rename
57 samedevice = platform.samedevice
57 samedevice = platform.samedevice
58 samefile = platform.samefile
58 samefile = platform.samefile
59 samestat = platform.samestat
59 samestat = platform.samestat
60 setbinary = platform.setbinary
60 setbinary = platform.setbinary
61 setflags = platform.setflags
61 setflags = platform.setflags
62 setsignalhandler = platform.setsignalhandler
62 setsignalhandler = platform.setsignalhandler
63 shellquote = platform.shellquote
63 shellquote = platform.shellquote
64 spawndetached = platform.spawndetached
64 spawndetached = platform.spawndetached
65 sshargs = platform.sshargs
65 sshargs = platform.sshargs
66 statfiles = platform.statfiles
66 statfiles = platform.statfiles
67 termwidth = platform.termwidth
67 termwidth = platform.termwidth
68 testpid = platform.testpid
68 testpid = platform.testpid
69 umask = platform.umask
69 umask = platform.umask
70 unlink = platform.unlink
70 unlink = platform.unlink
71 unlinkpath = platform.unlinkpath
71 unlinkpath = platform.unlinkpath
72 username = platform.username
72 username = platform.username
73
73
74 # Python compatibility
74 # Python compatibility
75
75
76 def sha1(s):
76 def sha1(s):
77 return _fastsha1(s)
77 return _fastsha1(s)
78
78
79 _notset = object()
79 _notset = object()
80 def safehasattr(thing, attr):
80 def safehasattr(thing, attr):
81 return getattr(thing, attr, _notset) is not _notset
81 return getattr(thing, attr, _notset) is not _notset
82
82
83 def _fastsha1(s):
83 def _fastsha1(s):
84 # This function will import sha1 from hashlib or sha (whichever is
84 # This function will import sha1 from hashlib or sha (whichever is
85 # available) and overwrite itself with it on the first call.
85 # available) and overwrite itself with it on the first call.
86 # Subsequent calls will go directly to the imported function.
86 # Subsequent calls will go directly to the imported function.
87 if sys.version_info >= (2, 5):
87 if sys.version_info >= (2, 5):
88 from hashlib import sha1 as _sha1
88 from hashlib import sha1 as _sha1
89 else:
89 else:
90 from sha import sha as _sha1
90 from sha import sha as _sha1
91 global _fastsha1, sha1
91 global _fastsha1, sha1
92 _fastsha1 = sha1 = _sha1
92 _fastsha1 = sha1 = _sha1
93 return _sha1(s)
93 return _sha1(s)
94
94
95 import __builtin__
95 import __builtin__
96
96
97 if sys.version_info[0] < 3:
97 if sys.version_info[0] < 3:
98 def fakebuffer(sliceable, offset=0):
98 def fakebuffer(sliceable, offset=0):
99 return sliceable[offset:]
99 return sliceable[offset:]
100 else:
100 else:
101 def fakebuffer(sliceable, offset=0):
101 def fakebuffer(sliceable, offset=0):
102 return memoryview(sliceable)[offset:]
102 return memoryview(sliceable)[offset:]
103 try:
103 try:
104 buffer
104 buffer
105 except NameError:
105 except NameError:
106 __builtin__.buffer = fakebuffer
106 __builtin__.buffer = fakebuffer
107
107
108 import subprocess
108 import subprocess
109 closefds = os.name == 'posix'
109 closefds = os.name == 'posix'
110
110
111 def popen2(cmd, env=None, newlines=False):
111 def popen2(cmd, env=None, newlines=False):
112 # Setting bufsize to -1 lets the system decide the buffer size.
112 # Setting bufsize to -1 lets the system decide the buffer size.
113 # The default for bufsize is 0, meaning unbuffered. This leads to
113 # The default for bufsize is 0, meaning unbuffered. This leads to
114 # poor performance on Mac OS X: http://bugs.python.org/issue4194
114 # poor performance on Mac OS X: http://bugs.python.org/issue4194
115 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
115 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
116 close_fds=closefds,
116 close_fds=closefds,
117 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
117 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
118 universal_newlines=newlines,
118 universal_newlines=newlines,
119 env=env)
119 env=env)
120 return p.stdin, p.stdout
120 return p.stdin, p.stdout
121
121
122 def popen3(cmd, env=None, newlines=False):
122 def popen3(cmd, env=None, newlines=False):
123 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
123 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
124 close_fds=closefds,
124 close_fds=closefds,
125 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
125 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
126 stderr=subprocess.PIPE,
126 stderr=subprocess.PIPE,
127 universal_newlines=newlines,
127 universal_newlines=newlines,
128 env=env)
128 env=env)
129 return p.stdin, p.stdout, p.stderr
129 return p.stdin, p.stdout, p.stderr
130
130
131 def version():
131 def version():
132 """Return version information if available."""
132 """Return version information if available."""
133 try:
133 try:
134 import __version__
134 import __version__
135 return __version__.version
135 return __version__.version
136 except ImportError:
136 except ImportError:
137 return 'unknown'
137 return 'unknown'
138
138
139 # used by parsedate
139 # used by parsedate
140 defaultdateformats = (
140 defaultdateformats = (
141 '%Y-%m-%d %H:%M:%S',
141 '%Y-%m-%d %H:%M:%S',
142 '%Y-%m-%d %I:%M:%S%p',
142 '%Y-%m-%d %I:%M:%S%p',
143 '%Y-%m-%d %H:%M',
143 '%Y-%m-%d %H:%M',
144 '%Y-%m-%d %I:%M%p',
144 '%Y-%m-%d %I:%M%p',
145 '%Y-%m-%d',
145 '%Y-%m-%d',
146 '%m-%d',
146 '%m-%d',
147 '%m/%d',
147 '%m/%d',
148 '%m/%d/%y',
148 '%m/%d/%y',
149 '%m/%d/%Y',
149 '%m/%d/%Y',
150 '%a %b %d %H:%M:%S %Y',
150 '%a %b %d %H:%M:%S %Y',
151 '%a %b %d %I:%M:%S%p %Y',
151 '%a %b %d %I:%M:%S%p %Y',
152 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
152 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
153 '%b %d %H:%M:%S %Y',
153 '%b %d %H:%M:%S %Y',
154 '%b %d %I:%M:%S%p %Y',
154 '%b %d %I:%M:%S%p %Y',
155 '%b %d %H:%M:%S',
155 '%b %d %H:%M:%S',
156 '%b %d %I:%M:%S%p',
156 '%b %d %I:%M:%S%p',
157 '%b %d %H:%M',
157 '%b %d %H:%M',
158 '%b %d %I:%M%p',
158 '%b %d %I:%M%p',
159 '%b %d %Y',
159 '%b %d %Y',
160 '%b %d',
160 '%b %d',
161 '%H:%M:%S',
161 '%H:%M:%S',
162 '%I:%M:%S%p',
162 '%I:%M:%S%p',
163 '%H:%M',
163 '%H:%M',
164 '%I:%M%p',
164 '%I:%M%p',
165 )
165 )
166
166
167 extendeddateformats = defaultdateformats + (
167 extendeddateformats = defaultdateformats + (
168 "%Y",
168 "%Y",
169 "%Y-%m",
169 "%Y-%m",
170 "%b",
170 "%b",
171 "%b %Y",
171 "%b %Y",
172 )
172 )
173
173
174 def cachefunc(func):
174 def cachefunc(func):
175 '''cache the result of function calls'''
175 '''cache the result of function calls'''
176 # XXX doesn't handle keywords args
176 # XXX doesn't handle keywords args
177 cache = {}
177 cache = {}
178 if func.func_code.co_argcount == 1:
178 if func.func_code.co_argcount == 1:
179 # we gain a small amount of time because
179 # we gain a small amount of time because
180 # we don't need to pack/unpack the list
180 # we don't need to pack/unpack the list
181 def f(arg):
181 def f(arg):
182 if arg not in cache:
182 if arg not in cache:
183 cache[arg] = func(arg)
183 cache[arg] = func(arg)
184 return cache[arg]
184 return cache[arg]
185 else:
185 else:
186 def f(*args):
186 def f(*args):
187 if args not in cache:
187 if args not in cache:
188 cache[args] = func(*args)
188 cache[args] = func(*args)
189 return cache[args]
189 return cache[args]
190
190
191 return f
191 return f
192
192
193 def lrucachefunc(func):
193 def lrucachefunc(func):
194 '''cache most recent results of function calls'''
194 '''cache most recent results of function calls'''
195 cache = {}
195 cache = {}
196 order = []
196 order = []
197 if func.func_code.co_argcount == 1:
197 if func.func_code.co_argcount == 1:
198 def f(arg):
198 def f(arg):
199 if arg not in cache:
199 if arg not in cache:
200 if len(cache) > 20:
200 if len(cache) > 20:
201 del cache[order.pop(0)]
201 del cache[order.pop(0)]
202 cache[arg] = func(arg)
202 cache[arg] = func(arg)
203 else:
203 else:
204 order.remove(arg)
204 order.remove(arg)
205 order.append(arg)
205 order.append(arg)
206 return cache[arg]
206 return cache[arg]
207 else:
207 else:
208 def f(*args):
208 def f(*args):
209 if args not in cache:
209 if args not in cache:
210 if len(cache) > 20:
210 if len(cache) > 20:
211 del cache[order.pop(0)]
211 del cache[order.pop(0)]
212 cache[args] = func(*args)
212 cache[args] = func(*args)
213 else:
213 else:
214 order.remove(args)
214 order.remove(args)
215 order.append(args)
215 order.append(args)
216 return cache[args]
216 return cache[args]
217
217
218 return f
218 return f
219
219
220 class propertycache(object):
220 class propertycache(object):
221 def __init__(self, func):
221 def __init__(self, func):
222 self.func = func
222 self.func = func
223 self.name = func.__name__
223 self.name = func.__name__
224 def __get__(self, obj, type=None):
224 def __get__(self, obj, type=None):
225 result = self.func(obj)
225 result = self.func(obj)
226 setattr(obj, self.name, result)
226 setattr(obj, self.name, result)
227 return result
227 return result
228
228
229 def pipefilter(s, cmd):
229 def pipefilter(s, cmd):
230 '''filter string S through command CMD, returning its output'''
230 '''filter string S through command CMD, returning its output'''
231 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
231 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
232 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
232 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
233 pout, perr = p.communicate(s)
233 pout, perr = p.communicate(s)
234 return pout
234 return pout
235
235
236 def tempfilter(s, cmd):
236 def tempfilter(s, cmd):
237 '''filter string S through a pair of temporary files with CMD.
237 '''filter string S through a pair of temporary files with CMD.
238 CMD is used as a template to create the real command to be run,
238 CMD is used as a template to create the real command to be run,
239 with the strings INFILE and OUTFILE replaced by the real names of
239 with the strings INFILE and OUTFILE replaced by the real names of
240 the temporary files generated.'''
240 the temporary files generated.'''
241 inname, outname = None, None
241 inname, outname = None, None
242 try:
242 try:
243 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
243 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
244 fp = os.fdopen(infd, 'wb')
244 fp = os.fdopen(infd, 'wb')
245 fp.write(s)
245 fp.write(s)
246 fp.close()
246 fp.close()
247 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
247 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
248 os.close(outfd)
248 os.close(outfd)
249 cmd = cmd.replace('INFILE', inname)
249 cmd = cmd.replace('INFILE', inname)
250 cmd = cmd.replace('OUTFILE', outname)
250 cmd = cmd.replace('OUTFILE', outname)
251 code = os.system(cmd)
251 code = os.system(cmd)
252 if sys.platform == 'OpenVMS' and code & 1:
252 if sys.platform == 'OpenVMS' and code & 1:
253 code = 0
253 code = 0
254 if code:
254 if code:
255 raise Abort(_("command '%s' failed: %s") %
255 raise Abort(_("command '%s' failed: %s") %
256 (cmd, explainexit(code)))
256 (cmd, explainexit(code)))
257 fp = open(outname, 'rb')
257 fp = open(outname, 'rb')
258 r = fp.read()
258 r = fp.read()
259 fp.close()
259 fp.close()
260 return r
260 return r
261 finally:
261 finally:
262 try:
262 try:
263 if inname:
263 if inname:
264 os.unlink(inname)
264 os.unlink(inname)
265 except OSError:
265 except OSError:
266 pass
266 pass
267 try:
267 try:
268 if outname:
268 if outname:
269 os.unlink(outname)
269 os.unlink(outname)
270 except OSError:
270 except OSError:
271 pass
271 pass
272
272
273 filtertable = {
273 filtertable = {
274 'tempfile:': tempfilter,
274 'tempfile:': tempfilter,
275 'pipe:': pipefilter,
275 'pipe:': pipefilter,
276 }
276 }
277
277
278 def filter(s, cmd):
278 def filter(s, cmd):
279 "filter a string through a command that transforms its input to its output"
279 "filter a string through a command that transforms its input to its output"
280 for name, fn in filtertable.iteritems():
280 for name, fn in filtertable.iteritems():
281 if cmd.startswith(name):
281 if cmd.startswith(name):
282 return fn(s, cmd[len(name):].lstrip())
282 return fn(s, cmd[len(name):].lstrip())
283 return pipefilter(s, cmd)
283 return pipefilter(s, cmd)
284
284
285 def binary(s):
285 def binary(s):
286 """return true if a string is binary data"""
286 """return true if a string is binary data"""
287 return bool(s and '\0' in s)
287 return bool(s and '\0' in s)
288
288
289 def increasingchunks(source, min=1024, max=65536):
289 def increasingchunks(source, min=1024, max=65536):
290 '''return no less than min bytes per chunk while data remains,
290 '''return no less than min bytes per chunk while data remains,
291 doubling min after each chunk until it reaches max'''
291 doubling min after each chunk until it reaches max'''
292 def log2(x):
292 def log2(x):
293 if not x:
293 if not x:
294 return 0
294 return 0
295 i = 0
295 i = 0
296 while x:
296 while x:
297 x >>= 1
297 x >>= 1
298 i += 1
298 i += 1
299 return i - 1
299 return i - 1
300
300
301 buf = []
301 buf = []
302 blen = 0
302 blen = 0
303 for chunk in source:
303 for chunk in source:
304 buf.append(chunk)
304 buf.append(chunk)
305 blen += len(chunk)
305 blen += len(chunk)
306 if blen >= min:
306 if blen >= min:
307 if min < max:
307 if min < max:
308 min = min << 1
308 min = min << 1
309 nmin = 1 << log2(blen)
309 nmin = 1 << log2(blen)
310 if nmin > min:
310 if nmin > min:
311 min = nmin
311 min = nmin
312 if min > max:
312 if min > max:
313 min = max
313 min = max
314 yield ''.join(buf)
314 yield ''.join(buf)
315 blen = 0
315 blen = 0
316 buf = []
316 buf = []
317 if buf:
317 if buf:
318 yield ''.join(buf)
318 yield ''.join(buf)
319
319
320 Abort = error.Abort
320 Abort = error.Abort
321
321
322 def always(fn):
322 def always(fn):
323 return True
323 return True
324
324
325 def never(fn):
325 def never(fn):
326 return False
326 return False
327
327
328 def pathto(root, n1, n2):
328 def pathto(root, n1, n2):
329 '''return the relative path from one place to another.
329 '''return the relative path from one place to another.
330 root should use os.sep to separate directories
330 root should use os.sep to separate directories
331 n1 should use os.sep to separate directories
331 n1 should use os.sep to separate directories
332 n2 should use "/" to separate directories
332 n2 should use "/" to separate directories
333 returns an os.sep-separated path.
333 returns an os.sep-separated path.
334
334
335 If n1 is a relative path, it's assumed it's
335 If n1 is a relative path, it's assumed it's
336 relative to root.
336 relative to root.
337 n2 should always be relative to root.
337 n2 should always be relative to root.
338 '''
338 '''
339 if not n1:
339 if not n1:
340 return localpath(n2)
340 return localpath(n2)
341 if os.path.isabs(n1):
341 if os.path.isabs(n1):
342 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
342 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
343 return os.path.join(root, localpath(n2))
343 return os.path.join(root, localpath(n2))
344 n2 = '/'.join((pconvert(root), n2))
344 n2 = '/'.join((pconvert(root), n2))
345 a, b = splitpath(n1), n2.split('/')
345 a, b = splitpath(n1), n2.split('/')
346 a.reverse()
346 a.reverse()
347 b.reverse()
347 b.reverse()
348 while a and b and a[-1] == b[-1]:
348 while a and b and a[-1] == b[-1]:
349 a.pop()
349 a.pop()
350 b.pop()
350 b.pop()
351 b.reverse()
351 b.reverse()
352 return os.sep.join((['..'] * len(a)) + b) or '.'
352 return os.sep.join((['..'] * len(a)) + b) or '.'
353
353
354 _hgexecutable = None
354 _hgexecutable = None
355
355
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    if safehasattr(sys, "frozen"):    # new py2exe
        return True
    if safehasattr(sys, "importers"): # old py2exe
        return True
    return imp.is_frozen("__main__")  # tools/freeze
365
365
def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    if _hgexecutable is None:
        # resolve once, in decreasing order of explicitness, and cache
        envhg = os.environ.get('HG')
        mainmod = sys.modules['__main__']
        if envhg:
            _sethgexecutable(envhg)
        elif mainfrozen():
            # frozen binaries: the interpreter *is* hg
            _sethgexecutable(sys.executable)
        elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
            _sethgexecutable(mainmod.__file__)
        else:
            _sethgexecutable(findexe('hg') or os.path.basename(sys.argv[0]))
    return _hgexecutable
384
384
def _sethgexecutable(path):
    """set location of the 'hg' executable"""
    # module-level cache consumed by hgexecutable()
    global _hgexecutable
    _hgexecutable = path
389
389
def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status. if ui object,
    print error message and return status, else raise onerr object as
    exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    # flush our own stdout first so the child's output is not interleaved
    # with data still sitting in our buffer
    try:
        sys.stdout.flush()
    except Exception:
        pass
    def py2shell(val):
        'convert python object into string that is useful to shell'
        # booleans/None become '0'/'1' so they are usable in shell tests
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    origcmd = cmd
    cmd = quotecommand(cmd)
    # child environment: our environment, overridden by caller-supplied
    # values, plus HG pointing back at this executable (used by hooks)
    env = dict(os.environ)
    env.update((k, py2shell(v)) for k, v in environ.iteritems())
    env['HG'] = hgexecutable()
    if out is None or out == sys.__stdout__:
        # child can inherit our stdout/stderr directly
        rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                             env=env, cwd=cwd)
    else:
        # capture combined stdout+stderr and copy it to 'out'
        proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                env=env, cwd=cwd, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        for line in proc.stdout:
            out.write(line)
        proc.wait()
        rc = proc.returncode
    # on OpenVMS, bit 0 set in the status means success
    if sys.platform == 'OpenVMS' and rc & 1:
        rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explainexit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        try:
            # ui-like object: warn and fall through to returning rc
            onerr.warn(errmsg + '\n')
        except AttributeError:
            # otherwise onerr is an exception class to raise
            raise onerr(errmsg)
    return rc
439
439
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # only translate the TypeError when it was raised by the call
            # itself (traceback depth of exactly one frame), not by code
            # deeper inside func
            frames = traceback.extract_tb(sys.exc_info()[2])
            if len(frames) == 1:
                raise error.SignatureError
            raise

    return check
451
451
def copyfile(src, dest):
    "copy a file, preserving mode (note: atime/mtime are NOT preserved)"
    # a symlink is recreated as a fresh link to the same target rather
    # than copying the file it points to
    if os.path.islink(src):
        try:
            # remove any existing destination; ignore if it is missing
            os.unlink(dest)
        except OSError:
            pass
        os.symlink(os.readlink(src), dest)
    else:
        try:
            shutil.copyfile(src, dest)
            shutil.copymode(src, dest)
        except shutil.Error, inst:
            # surface shutil failures as a mercurial Abort
            raise Abort(str(inst))
466
466
def copyfiles(src, dst, hardlink=None):
    """Copy a directory tree using hardlinks if possible"""

    if hardlink is None:
        # hardlinks only work within one filesystem, so default to them
        # exactly when source and destination share a device
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)

    copied = 0
    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            hardlink, n = copyfiles(os.path.join(src, name),
                                    os.path.join(dst, name), hardlink)
            copied += n
    else:
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # linking failed (e.g. cross-device); fall back to a real
                # copy for this file and all remaining ones
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        copied += 1

    return hardlink, copied
494
494
# Filenames (sans extension) that Windows reserves for devices, and
# characters it forbids in any path component; used by checkwinfilename().
_winreservednames = '''con prn aux nul
com1 com2 com3 com4 com5 com6 com7 com8 com9
lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    '''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    '''
    # examine each path component separately; both separators count
    for part in path.replace('\\', '/').split('/'):
        if not part:
            continue
        for c in part:
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            if ord(c) <= 31:
                # control characters are never valid
                return _("filename contains %r, which is invalid "
                         "on Windows") % c
        # device names are reserved regardless of extension (con.xml)
        base = part.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        last = part[-1]
        if last in '. ':
            # Windows silently strips trailing dots/spaces
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % last
536
536
# checkosfilename is the platform-appropriate filename validity check:
# Windows needs the reserved-name/character rules above, other platforms
# use the check supplied by the platform module.
if os.name == 'nt':
    checkosfilename = checkwinfilename
else:
    checkosfilename = platform.checkosfilename
541
541
def makelock(info, pathname):
    """Create a lock at pathname whose content is info.

    Preferred implementation is a symlink (atomic, and readable without
    opening the file); if the platform has no symlink support, fall back
    to an exclusively-created regular file.
    """
    try:
        return os.symlink(info, pathname)
    except OSError, why:
        if why.errno == errno.EEXIST:
            raise
        # NOTE(review): any other OSError is swallowed here and we retry
        # with the regular-file fallback below
    except AttributeError: # no symlink in os
        pass

    # O_EXCL makes creation fail if the lock already exists
    ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(ld, info)
    os.close(ld)
554
554
def readlock(pathname):
    """Return the content of the lock at pathname.

    Reads the symlink target when the lock is a symlink, otherwise the
    file's content (the regular-file fallback of makelock).
    """
    try:
        return os.readlink(pathname)
    except OSError, why:
        # EINVAL: not a symlink; ENOSYS: symlinks unsupported --
        # fall through and read the file contents instead
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname)
    r = fp.read()
    fp.close()
    return r
567
567
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    # prefer the real file descriptor; fall back to stat-by-name for
    # wrappers that expose only a .name attribute
    fileno = getattr(fp, 'fileno', None)
    if fileno is not None:
        return os.fstat(fileno())
    return os.stat(fp.name)
574
574
575 # File system features
575 # File system features
576
576
def checkcase(path):
    """
    Check whether the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    st1 = os.stat(path)
    head, tail = os.path.split(path)
    # build a case-folded sibling name for the final component
    folded = os.path.join(head, tail.upper())
    if folded == path:
        folded = os.path.join(head, tail.lower())
    try:
        st2 = os.stat(folded)
    except OSError:
        # the folded variant does not exist: case-sensitive
        return True
    # identical stat -> the filesystem folded case for us -> insensitive
    return st2 != st1
596
596
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name is either relative to root, or it is an absolute path starting
    with root. Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    Returns None if the path does not exist.
    '''
    # If name is absolute, make it relative
    if name.lower().startswith(root.lower()):
        l = len(root)
        # guard the index: name may be exactly root
        if l < len(name) and (name[l] == os.sep or name[l] == os.altsep):
            l = l + 1
        name = name[l:]

    if not os.path.lexists(os.path.join(root, name)):
        return None

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes. This gets silly very quickly.
    # (BUGFIX: the replace() result was previously discarded -- strings
    # are immutable -- leaving an un-escaped backslash in the character
    # class and corrupting the pattern on Windows.)
    seps = seps.replace('\\', '\\\\')
    # alternately match runs of non-separator and separator characters
    pattern = re.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normcase(os.path.normpath(root))
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # separators are passed through unchanged
            result.append(sep)
            continue

        # cache directory listings: this function is called repeatedly
        # and os.listdir is the expensive part
        if dir not in _fspathcache:
            _fspathcache[dir] = os.listdir(dir)
        contents = _fspathcache[dir]

        # find the entry matching this component case-insensitively and
        # emit it in the case the filesystem stores
        lpart = part.lower()
        lenp = len(part)
        for n in contents:
            if lenp == len(n) and n.lower() == lpart:
                result.append(n)
                break
        else:
            # Cannot happen, as the file exists!
            result.append(part)
        dir = os.path.join(dir, lpart)

    return ''.join(result)
644
644
def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1 = testfile + ".hgtmp1"
    if os.path.lexists(f1):
        return False
    try:
        posixfile(f1, 'w').close()
    except IOError:
        # cannot even create the probe file; assume links don't work
        return False

    f2 = testfile + ".hgtmp2"
    fd = None
    try:
        try:
            oslink(f1, f2)
        except OSError:
            # hardlinks are unsupported here
            return False

        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fd = posixfile(f2)
        return nlinks(f2) > 1
    finally:
        # always clean up the probe files, open handle first
        if fd is not None:
            fd.close()
        for f in (f1, f2):
            try:
                os.unlink(f)
            except OSError:
                pass

    # not reached: kept for safety
    return False
680
680
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    if path.endswith(os.sep):
        return True
    # altsep may be None; the 'and' preserves the original falsy return
    return os.altsep and path.endswith(os.altsep)
684
684
def splitpath(path):
    '''Split path by os.sep.

    os.altsep is deliberately ignored: this helper is just a named
    alternative to the plain "path.split(os.sep)". Run
    os.path.normpath() on the path first if normalization is needed.'''
    return path.split(os.sep)
692
692
def gui():
    '''Are we running in a GUI?'''
    if sys.platform != 'darwin':
        # Windows always has a GUI; elsewhere require a display
        return os.name == "nt" or os.environ.get("DISPLAY")
    if 'SSH_CONNECTION' in os.environ:
        # handle SSH access to a box where the user is logged in
        return False
    if getattr(osutil, 'isgui', None):
        # check if a CoreGraphics session is available
        return osutil.isgui()
    # pure build; use a safe default
    return True
707
707
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    # create the temp file next to the original so a later rename stays
    # on the same filesystem (and can therefore be atomic)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError, inst:
            if inst.errno == errno.ENOENT:
                # nothing to copy: caller gets the empty temp file
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except:
        # best effort: remove the temp file before re-raising
        try: os.unlink(temp)
        except: pass
        raise
    return temp
746
746
class atomictempfile(object):
    '''writeable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.
    '''
    def __init__(self, name, mode='w+b', createmode=None):
        self.__name = name # permanent name
        # writes go to a same-directory temp copy until close()
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)

        # delegated methods
        self.write = self._fp.write
        self.fileno = self._fp.fileno

    def close(self):
        # publish: rename() over the original makes the update atomic
        if not self._fp.closed:
            self._fp.close()
            rename(self._tempname, localpath(self.__name))

    def discard(self):
        # throw away all pending writes: delete the temp copy, if any
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()
782
782
def makedirs(name, mode=None):
    """recursive directory creation with parent mode inheritance"""
    try:
        os.mkdir(name)
    except OSError, err:
        if err.errno == errno.EEXIST:
            # already there: nothing to do (and no chmod)
            return
        if err.errno != errno.ENOENT or not name:
            raise
        # missing parent: create it recursively, then retry
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            # reached the filesystem root without success
            raise
        makedirs(parent, mode)
        os.mkdir(name)
    if mode is not None:
        os.chmod(name, mode)
799
799
def readfile(path):
    """Return the entire content of the file at path (binary mode)."""
    f = open(path, 'rb')
    try:
        return f.read()
    finally:
        # guarantee the handle is released even if read() fails
        f.close()
806
806
def writefile(path, text):
    """Replace the content of the file at path with text (binary mode)."""
    f = open(path, 'wb')
    try:
        f.write(text)
    finally:
        # guarantee the handle is released even if write() fails
        f.close()
813
813
def appendfile(path, text):
    """Append text to the file at path (binary mode), creating it if needed."""
    f = open(path, 'ab')
    try:
        f.write(text)
    finally:
        # guarantee the handle is released even if write() fails
        f.close()
820
820
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        def splitbig(chunks):
            # re-yield the input, cutting any chunk over 1M into 256k
            # pieces so no single queue entry is huge
            for piece in chunks:
                if len(piece) > 2**20:
                    start = 0
                    while start < len(piece):
                        yield piece[start:start + 2**18]
                        start += 2**18
                else:
                    yield piece
        self.iter = splitbig(in_iter)
        self._queue = []

    def read(self, l):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry."""
        remaining = l
        buf = ''
        queue = self._queue
        while remaining > 0:
            if not queue:
                # refill: pull roughly 256k worth of chunks at a time
                budget = 2**18
                for piece in self.iter:
                    queue.append(piece)
                    budget -= len(piece)
                    if budget <= 0:
                        break
                if not queue:
                    # source exhausted; return what we have (short read)
                    break

            piece = queue.pop(0)
            remaining -= len(piece)
            if remaining < 0:
                # piece larger than needed: push the surplus back
                queue.insert(0, piece[remaining:])
                buf += piece[:remaining]
            else:
                buf += piece

        return buf
868
868
def filechunkiter(f, size=65536, limit=None):
    """Create a generator that produces the data in the file size
    (default 65536) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        # never read more than the remaining limit allows
        nbytes = size
        if limit is not None and limit < nbytes:
            nbytes = limit
        if not nbytes:
            # budget (or requested size) exhausted: stop without reading
            break
        chunk = f.read(nbytes)
        if not chunk:
            break
        if limit:
            limit -= len(chunk)
        yield chunk
889
889
def makedate():
    '''Return the current time as a (unixtime, tzoffset) tuple.

    tzoffset is the local timezone's offset from UTC in seconds.'''
    now = time.localtime()
    # use the DST-aware offset when DST is currently in effect
    if now[8] == 1 and time.daylight:
        offset = time.altzone
    else:
        offset = time.timezone
    stamp = time.mktime(now)
    if stamp < 0:
        hint = _("check your clock")
        raise Abort(_("negative timestamp: %d") % stamp, hint=hint)
    return stamp, offset
901
901
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC. if timezone is false, do not
    append time zone to string."""
    t, tz = date or makedate()
    if t < 0:
        t = 0 # time.gmtime(lt) fails on Windows for lt < -43200
        tz = 0
    if "%1" in format or "%2" in format:
        # expand %1/%2 into the +HH and MM parts of the UTC offset
        if tz > 0:
            sign = "-"
        else:
            sign = "+"
        minutes = abs(tz) // 60
        format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
        format = format.replace("%2", "%02d" % (minutes % 60))
    try:
        t = time.gmtime(float(t) - tz)
    except ValueError:
        # time was out of range
        t = time.gmtime(sys.maxint)
    return time.strftime(format, t)
918
923
def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into an ISO 8601 date (YYYY-MM-DD)."""
    return datestr(date, format='%Y-%m-%d')
922
927
def strdate(string, format, defaults=[]):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised."""
    def timezone(text):
        # recognize a trailing "+HHMM"/"-HHMM", "GMT" or "UTC" token and
        # convert it to an offset in seconds (None if unrecognized)
        tail = text.split()[-1]
        if tail[0] in "+-" and len(tail) == 5 and tail[1:].isdigit():
            if tail[0] == "+":
                sign = 1
            else:
                sign = -1
            hours = int(tail[1:3])
            minutes = int(tail[3:5])
            return -sign * (hours * 60 + minutes) * 60
        if tail in ("GMT", "UTC"):
            return 0
        return None

    # NOTE: unixtime = localunixtime + offset
    offset = timezone(string)
    date = string
    if offset is not None:
        date = " ".join(string.split()[:-1])

    # add missing elements from defaults
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        present = [True for p in part if ("%" + p) in format]
        if present:
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True
        else:
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
963
968
def parsedate(date, formats=None, bias={}):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    formats defaults to defaultdateformats. bias maps format-part keys
    (e.g. "d", "mb") to replacement strings for parts absent from the
    input. Raises Abort when no format matches or the result is out of
    range.
    """
    # NOTE: bias is only read (bias.get), never mutated, so the shared
    # mutable {} default is safe here.
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        # already parsed
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()
    try:
        # fast path: internal "unixtime offset" representation
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                if part[0] in "HMS":
                    b = "00"
                else:
                    b = "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0])

            defaults[part] = (b, n)

        # try each candidate format until one parses
        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if abs(when) > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if when < 0:
        raise Abort(_('negative date value: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1017
1022
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lower(date):
        # earliest timestamp the (possibly partial) date could mean
        d = dict(mb="1", d="1")
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        # latest timestamp the (possibly partial) date could mean
        d = dict(mb="12", HI="23", M="59", S="59")
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            # was a bare 'except:', which also swallowed SystemExit and
            # KeyboardInterrupt; narrow to Exception (parse failures for
            # months without that many days)
            except Exception:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    elif date[0] == "<":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        # "-N": within the last N days
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_("%s must be nonnegative (see 'hg help dates')")
                % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        # inclusive range "A to B"
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        # single date: match anywhere within its span of accuracy
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop
1093
1098
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # drop everything from the '@' on (the domain part)
    idx = user.find('@')
    if idx != -1:
        user = user[:idx]
    # keep only what follows a '<' (e.g. "Real Name <login")
    idx = user.find('<')
    if idx != -1:
        user = user[idx + 1:]
    # truncate at the first space, then at the first dot
    for sep in (' ', '.'):
        idx = user.find(sep)
        if idx != -1:
            user = user[:idx]
    return user
1109
1114
def email(author):
    '''get email of author.'''
    # slice between '<' and '>'; degrade gracefully when either is missing
    end = author.find('>')
    if end == -1:
        end = None
    start = author.find('<') + 1
    return author[start:end]
1116
1121
1117 def _ellipsis(text, maxlength):
1122 def _ellipsis(text, maxlength):
1118 if len(text) <= maxlength:
1123 if len(text) <= maxlength:
1119 return text, False
1124 return text, False
1120 else:
1125 else:
1121 return "%s..." % (text[:maxlength - 3]), True
1126 return "%s..." % (text[:maxlength - 3]), True
1122
1127
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) characters."""
    try:
        # work on unicode so we never cut a multi-byte sequence in half
        utext, truncated = _ellipsis(text.decode(encoding.encoding),
                                     maxlength)
        if truncated:
            return utext.encode(encoding.encoding)
        return text
    except (UnicodeDecodeError, UnicodeEncodeError):
        # undecodable input: fall back to byte-wise truncation
        return _ellipsis(text, maxlength)[0]
1134
1139
def bytecount(nbytes):
    '''return byte count formatted as readable string, with units'''

    # (significant-digit multiplier, unit size, format), checked largest
    # first; the multiplier selects how many decimals the format shows
    units = (
        (100, 1 << 30, _('%.0f GB')),
        (10, 1 << 30, _('%.1f GB')),
        (1, 1 << 30, _('%.2f GB')),
        (100, 1 << 20, _('%.0f MB')),
        (10, 1 << 20, _('%.1f MB')),
        (1, 1 << 20, _('%.2f MB')),
        (100, 1 << 10, _('%.0f KB')),
        (10, 1 << 10, _('%.1f KB')),
        (1, 1 << 10, _('%.2f KB')),
        (1, 1, _('%.0f bytes')),
        )

    for factor, size, fmt in units:
        if nbytes >= size * factor:
            return fmt % (nbytes / float(size))
    # smaller than every threshold: plain byte count
    return units[-1][2] % nbytes
1155
1160
def uirepr(s):
    """repr() variant that keeps Windows path backslashes single."""
    text = repr(s)
    # Avoid double backslash in Windows path repr()
    return text.replace('\\\\', '\\')
1159
1164
# delay import of textwrap
def MBTextWrapper(**kwargs):
    """Factory for a display-width-aware textwrap.TextWrapper subclass.

    Defines the subclass lazily on first call, caches it in this module
    global, and returns an instance constructed with kwargs.
    """
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in east asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def __init__(self, **kwargs):
            textwrap.TextWrapper.__init__(self, **kwargs)

            # for compatibility between 2.4 and 2.6
            if getattr(self, 'drop_whitespace', None) is None:
                self.drop_whitespace = kwargs.get('drop_whitespace', True)

        def _cutdown(self, ucstr, space_left):
            # split ucstr into (head, rest) so that head's display width
            # (per encoding.ucolwidth) does not exceed space_left
            l = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chucks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (ie. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == ''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + ''.join(cur_line))

            return lines

    # cache the class so later calls skip the class definition entirely
    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
1270
1275
def wrap(line, width, initindent='', hangindent=''):
    """Wrap a byte string to the given display width.

    line, initindent and hangindent are decoded with encoding.encoding
    before wrapping and the result is re-encoded, so wrapping happens on
    characters (display columns) rather than raw bytes.
    """
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    line = line.decode(encoding.encoding, encoding.encodingmode)
    initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
    hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
    wrapper = MBTextWrapper(width=width,
                            initial_indent=initindent,
                            subsequent_indent=hangindent)
    return wrapper.fill(line).encode(encoding.encoding)
1283
1288
def iterlines(iterator):
    """Yield every line of every chunk produced by iterator."""
    for block in iterator:
        for text in block.splitlines():
            yield text
1288
1293
def expandpath(path):
    """Expand environment variables, then '~' constructs, in path."""
    expanded = os.path.expandvars(path)
    return os.path.expanduser(expanded)
1291
1296
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if mainfrozen():
        # frozen build: the interpreter binary itself is the command
        return [sys.executable]
    return gethgcmd()
1302
1307
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()  # (pid, status) pairs collected by the handler
    def handler(signum, frame):
        terminated.add(os.wait())
    prevhandler = None
    # SIGCHLD is absent on some platforms (e.g. Windows); skip the handler
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            # NOTE(review): condfn() is re-checked after detecting child
            # death — presumably to catch the condition becoming true just
            # before the child exited; confirm before changing this.
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        # prevhandler is only set when SIGCHLD exists, so this attribute
        # access is safe here
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)
1337
1342
# use the real built-in any()/all() when present, otherwise (NameError on
# interpreters that lack them) fall back to pure-Python equivalents
try:
    any, all = any, all
except NameError:
    def any(iterable):
        # True as soon as one element is truthy
        for i in iterable:
            if i:
                return True
        return False

    def all(iterable):
        # False as soon as one element is falsy
        for i in iterable:
            if not i:
                return False
        return True
1352
1357
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.
    """
    if not fn:
        fn = lambda s: s
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        # let a doubled prefix stand for a literal prefix character
        patterns += '|' + prefix
        if len(prefix) > 1:
            prefix_char = prefix[1:]
        else:
            prefix_char = prefix
        mapping[prefix_char] = prefix_char
    matcher = re.compile(r'%s(%s)' % (prefix, patterns))
    def replace(match):
        # strip the one-character prefix, look up, then post-process
        return fn(mapping[match.group()[1:]])
    return matcher.sub(replace, s)
1377
1382
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, util.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        # not numeric; fall back to a service-name lookup
        try:
            return socket.getservbyname(port)
        except socket.error:
            raise Abort(
                _("no port number associated with service '%s'") % port)
1394
1399
# recognized spellings of boolean configuration values
_booleans = {
    '1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
    '0': False, 'no': False, 'false': False, 'off': False, 'never': False,
    }

def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    return _booleans.get(s.lower())
1405
1410
1406 _hexdig = '0123456789ABCDEFabcdef'
1411 _hexdig = '0123456789ABCDEFabcdef'
1407 _hextochr = dict((a + b, chr(int(a + b, 16)))
1412 _hextochr = dict((a + b, chr(int(a + b, 16)))
1408 for a in _hexdig for b in _hexdig)
1413 for a in _hexdig for b in _hexdig)
1409
1414
1410 def _urlunquote(s):
1415 def _urlunquote(s):
1411 """unquote('abc%20def') -> 'abc def'."""
1416 """unquote('abc%20def') -> 'abc def'."""
1412 res = s.split('%')
1417 res = s.split('%')
1413 # fastpath
1418 # fastpath
1414 if len(res) == 1:
1419 if len(res) == 1:
1415 return s
1420 return s
1416 s = res[0]
1421 s = res[0]
1417 for item in res[1:]:
1422 for item in res[1:]:
1418 try:
1423 try:
1419 s += _hextochr[item[:2]] + item[2:]
1424 s += _hextochr[item[:2]] + item[2:]
1420 except KeyError:
1425 except KeyError:
1421 s += '%' + item
1426 s += '%' + item
1422 except UnicodeDecodeError:
1427 except UnicodeDecodeError:
1423 s += unichr(int(item[:2], 16)) + item[2:]
1428 s += unichr(int(item[:2], 16)) + item[2:]
1424 return s
1429 return s
1425
1430
class url(object):
    r"""Reliable URL parser.

    This parses URLs and provides attributes for the following
    components:

    <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>

    Missing components are set to None. The only exception is
    fragment, which is set to '' if present but empty.

    If parsefragment is False, fragment is included in query. If
    parsequery is False, query is included in path. If both are
    False, both fragment and query are included in path.

    See http://www.ietf.org/rfc/rfc2396.txt for more information.

    Note that for backward compatibility reasons, bundle URLs do not
    take host names. That means 'bundle://../' has a path of '../'.

    Examples:

    >>> url('http://www.ietf.org/rfc/rfc2396.txt')
    <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
    >>> url('ssh://[::1]:2200//home/joe/repo')
    <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
    >>> url('file:///home/joe/repo')
    <url scheme: 'file', path: '/home/joe/repo'>
    >>> url('file:///c:/temp/foo/')
    <url scheme: 'file', path: 'c:/temp/foo/'>
    >>> url('bundle:foo')
    <url scheme: 'bundle', path: 'foo'>
    >>> url('bundle://../foo')
    <url scheme: 'bundle', path: '../foo'>
    >>> url(r'c:\foo\bar')
    <url path: 'c:\\foo\\bar'>
    >>> url(r'\\blah\blah\blah')
    <url path: '\\\\blah\\blah\\blah'>
    >>> url(r'\\blah\blah\blah#baz')
    <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>

    Authentication credentials:

    >>> url('ssh://joe:xyz@x/repo')
    <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
    >>> url('ssh://joe@x/repo')
    <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>

    Query strings and fragments:

    >>> url('http://host/a?b#c')
    <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
    >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
    <url scheme: 'http', host: 'host', path: 'a?b#c'>
    """

    # characters urllib.quote must leave unescaped in user/passwd components
    _safechars = "!~*'()+"
    # same set plus '/' for path and fragment components
    _safepchars = "/!~*'()+"
    # matches a leading "<scheme>:" prefix (RFC 2396 scheme alphabet)
    _matchscheme = re.compile(r'^[a-zA-Z0-9+.\-]+:').match

    def __init__(self, path, parsequery=True, parsefragment=True):
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        # True while the input still looks like a plain local path
        self._localpath = True
        self._hostport = ''
        self._origpath = path

        # split the fragment off first; an empty fragment stays '' (not None)
        if parsefragment and '#' in path:
            path, self.fragment = path.split('#', 1)
            if not path:
                path = None

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith(r'\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLS
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        # a "scheme:" prefix switches us out of local-path mode
        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                # plain local path: nothing more to parse
                self.path = path
                return

        if parsequery and '?' in path:
            path, self.query = path.split('?', 1)
            if not path:
                path = None
            if not self.query:
                self.query = None

        # // is required to specify a host/authority
        if path and path.startswith('//'):
            parts = path[2:].split('/', 1)
            if len(parts) > 1:
                self.host, path = parts
                path = path
            else:
                self.host = parts[0]
                path = None
            if not self.host:
                self.host = None
                # path of file:///d is /d
                # path of file:///d:/ is d:/, not /d:/
                if path and not hasdriveletter(path):
                    path = '/' + path

        # peel credentials off the authority: user[:passwd]@host
        if self.host and '@' in self.host:
            self.user, self.host = self.host.rsplit('@', 1)
            if ':' in self.user:
                self.user, self.passwd = self.user.split(':', 1)
            if not self.host:
                self.host = None

        # Don't split on colons in IPv6 addresses without ports
        if (self.host and ':' in self.host and
            not (self.host.startswith('[') and self.host.endswith(']'))):
            self._hostport = self.host
            self.host, self.port = self.host.rsplit(':', 1)
            if not self.host:
                self.host = None

        if (self.host and self.scheme == 'file' and
            self.host not in ('localhost', '127.0.0.1', '[::1]')):
            raise Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, _urlunquote(v))

    def __repr__(self):
        """Return '<url attr: value, ...>' listing only the set components."""
        attrs = []
        for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
                  'query', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                attrs.append('%s: %r' % (a, v))
        return '<url %s>' % ', '.join(attrs)

    def __str__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/?foo#bar'))
        'http://user:pw@host:80/?foo#bar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        """
        if self._localpath:
            # local paths round-trip verbatim (plus bundle:/fragment prefix)
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')):
            s += '//'
        if self.user:
            s += urllib.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urllib.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            # bracketed IPv6 hosts must not be percent-escaped
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urllib.quote(self.host)
            else:
                s += self.host
        if self.port:
            s += ':' + urllib.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urllib.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
        return s

    def authinfo(self):
        """Return (url_without_credentials, urllib2 password-manager tuple).

        The second element is None when no user is set.
        """
        user, passwd = self.user, self.passwd
        try:
            # temporarily strip credentials so str(self) omits them
            self.user, self.passwd = None, None
            s = str(self)
        finally:
            self.user, self.passwd = user, passwd
        if not self.user:
            return (s, None)
        # authinfo[1] is passed to urllib2 password manager, and its
        # URIs must not contain credentials. The host is passed in the
        # URIs list because Python < 2.4.3 uses only that to search for
        # a password.
        return (s, (None, (s, self.host),
                    self.user, self.passwd or ''))

    def isabs(self):
        """Report whether this URL/path cannot be joined onto a base."""
        if self.scheme and self.scheme != 'file':
            return True # remote URL
        if hasdriveletter(self.path):
            return True # absolute for our purposes - can't be joined()
        if self.path.startswith(r'\\'):
            return True # Windows UNC path
        if self.path.startswith('/'):
            return True # POSIX-style
        return False

    def localpath(self):
        """Return the local filesystem path for file:/bundle: URLs.

        Any other scheme returns the original, unparsed input.
        """
        if self.scheme == 'file' or self.scheme == 'bundle':
            path = self.path or '/'
            # For Windows, we need to promote hosts containing drive
            # letters to paths with drive letters.
            if hasdriveletter(self._hostport):
                path = self._hostport + '/' + self.path
            elif self.host is not None and self.path:
                path = '/' + path
            return path
        return self._origpath
1698
1703
def hasscheme(path):
    """Report whether path carries a URL scheme prefix."""
    parsed = url(path)
    if parsed.scheme:
        return True
    return False
1701
1706
def hasdriveletter(path):
    """Report whether path begins with a Windows drive letter ('X:')."""
    if len(path) < 2:
        return False
    return path[0].isalpha() and path[1] == ':'
1704
1709
def urllocalpath(path):
    """Return path's local form, parsed without query/fragment splitting."""
    parsed = url(path, parsequery=False, parsefragment=False)
    return parsed.localpath()
1707
1712
def hidepassword(u):
    '''hide user credential in a url string'''
    parsed = url(u)
    if parsed.passwd:
        # keep the user visible; mask only the password
        parsed.passwd = '***'
    return str(parsed)
1714
1719
def removeauth(u):
    '''remove all authentication information from a url string'''
    parsed = url(u)
    parsed.user = None
    parsed.passwd = None
    return str(parsed)
1720
1725
def isatty(fd):
    """Report whether fd is attached to a terminal.

    Objects without an isatty() method are treated as non-ttys.
    """
    try:
        result = fd.isatty()
    except AttributeError:
        return False
    return result
General Comments 0
You need to be logged in to leave comments. Login now