##// END OF EJS Templates
keepalive: track ready state with a bool...
Gregory Szorc -
r41454:1db94ebb default
parent child Browse files
Show More
@@ -1,808 +1,808 b''
1 # This library is free software; you can redistribute it and/or
1 # This library is free software; you can redistribute it and/or
2 # modify it under the terms of the GNU Lesser General Public
2 # modify it under the terms of the GNU Lesser General Public
3 # License as published by the Free Software Foundation; either
3 # License as published by the Free Software Foundation; either
4 # version 2.1 of the License, or (at your option) any later version.
4 # version 2.1 of the License, or (at your option) any later version.
5 #
5 #
6 # This library is distributed in the hope that it will be useful,
6 # This library is distributed in the hope that it will be useful,
7 # but WITHOUT ANY WARRANTY; without even the implied warranty of
7 # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
9 # Lesser General Public License for more details.
9 # Lesser General Public License for more details.
10 #
10 #
11 # You should have received a copy of the GNU Lesser General Public
11 # You should have received a copy of the GNU Lesser General Public
12 # License along with this library; if not, see
12 # License along with this library; if not, see
13 # <http://www.gnu.org/licenses/>.
13 # <http://www.gnu.org/licenses/>.
14
14
15 # This file is part of urlgrabber, a high-level cross-protocol url-grabber
15 # This file is part of urlgrabber, a high-level cross-protocol url-grabber
16 # Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko
16 # Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko
17
17
18 # Modified by Benoit Boissinot:
18 # Modified by Benoit Boissinot:
19 # - fix for digest auth (inspired from urllib2.py @ Python v2.4)
19 # - fix for digest auth (inspired from urllib2.py @ Python v2.4)
20 # Modified by Dirkjan Ochtman:
20 # Modified by Dirkjan Ochtman:
21 # - import md5 function from a local util module
21 # - import md5 function from a local util module
22 # Modified by Augie Fackler:
22 # Modified by Augie Fackler:
23 # - add safesend method and use it to prevent broken pipe errors
23 # - add safesend method and use it to prevent broken pipe errors
24 # on large POST requests
24 # on large POST requests
25
25
26 """An HTTP handler for urllib2 that supports HTTP 1.1 and keepalive.
26 """An HTTP handler for urllib2 that supports HTTP 1.1 and keepalive.
27
27
28 >>> import urllib2
28 >>> import urllib2
29 >>> from keepalive import HTTPHandler
29 >>> from keepalive import HTTPHandler
30 >>> keepalive_handler = HTTPHandler()
30 >>> keepalive_handler = HTTPHandler()
31 >>> opener = urlreq.buildopener(keepalive_handler)
31 >>> opener = urlreq.buildopener(keepalive_handler)
32 >>> urlreq.installopener(opener)
32 >>> urlreq.installopener(opener)
33 >>>
33 >>>
34 >>> fo = urlreq.urlopen('http://www.python.org')
34 >>> fo = urlreq.urlopen('http://www.python.org')
35
35
36 If a connection to a given host is requested, and all of the existing
36 If a connection to a given host is requested, and all of the existing
37 connections are still in use, another connection will be opened. If
37 connections are still in use, another connection will be opened. If
38 the handler tries to use an existing connection but it fails in some
38 the handler tries to use an existing connection but it fails in some
39 way, it will be closed and removed from the pool.
39 way, it will be closed and removed from the pool.
40
40
41 To remove the handler, simply re-run build_opener with no arguments, and
41 To remove the handler, simply re-run build_opener with no arguments, and
42 install that opener.
42 install that opener.
43
43
44 You can explicitly close connections by using the close_connection()
44 You can explicitly close connections by using the close_connection()
45 method of the returned file-like object (described below) or you can
45 method of the returned file-like object (described below) or you can
46 use the handler methods:
46 use the handler methods:
47
47
48 close_connection(host)
48 close_connection(host)
49 close_all()
49 close_all()
50 open_connections()
50 open_connections()
51
51
52 NOTE: using the close_connection and close_all methods of the handler
52 NOTE: using the close_connection and close_all methods of the handler
53 should be done with care when using multiple threads.
53 should be done with care when using multiple threads.
54 * there is nothing that prevents another thread from creating new
54 * there is nothing that prevents another thread from creating new
55 connections immediately after connections are closed
55 connections immediately after connections are closed
56 * no checks are done to prevent in-use connections from being closed
56 * no checks are done to prevent in-use connections from being closed
57
57
58 >>> keepalive_handler.close_all()
58 >>> keepalive_handler.close_all()
59
59
60 EXTRA ATTRIBUTES AND METHODS
60 EXTRA ATTRIBUTES AND METHODS
61
61
62 Upon a status of 200, the object returned has a few additional
62 Upon a status of 200, the object returned has a few additional
63 attributes and methods, which should not be used if you want to
63 attributes and methods, which should not be used if you want to
64 remain consistent with the normal urllib2-returned objects:
64 remain consistent with the normal urllib2-returned objects:
65
65
66 close_connection() - close the connection to the host
66 close_connection() - close the connection to the host
67 readlines() - you know, readlines()
67 readlines() - you know, readlines()
68 status - the return status (i.e. 404)
68 status - the return status (i.e. 404)
69 reason - english translation of status (i.e. 'File not found')
69 reason - english translation of status (i.e. 'File not found')
70
70
71 If you want the best of both worlds, use this inside an
71 If you want the best of both worlds, use this inside an
72 AttributeError-catching try:
72 AttributeError-catching try:
73
73
74 >>> try: status = fo.status
74 >>> try: status = fo.status
75 >>> except AttributeError: status = None
75 >>> except AttributeError: status = None
76
76
77 Unfortunately, these are ONLY there if status == 200, so it's not
77 Unfortunately, these are ONLY there if status == 200, so it's not
78 easy to distinguish between non-200 responses. The reason is that
78 easy to distinguish between non-200 responses. The reason is that
79 urllib2 tries to do clever things with error codes 301, 302, 401,
79 urllib2 tries to do clever things with error codes 301, 302, 401,
80 and 407, and it wraps the object upon return.
80 and 407, and it wraps the object upon return.
81 """
81 """
82
82
83 # $Id: keepalive.py,v 1.14 2006/04/04 21:00:32 mstenner Exp $
83 # $Id: keepalive.py,v 1.14 2006/04/04 21:00:32 mstenner Exp $
84
84
85 from __future__ import absolute_import, print_function
85 from __future__ import absolute_import, print_function
86
86
87 import collections
87 import collections
88 import errno
88 import errno
89 import hashlib
89 import hashlib
90 import socket
90 import socket
91 import sys
91 import sys
92 import threading
92 import threading
93
93
94 from .i18n import _
94 from .i18n import _
95 from . import (
95 from . import (
96 node,
96 node,
97 pycompat,
97 pycompat,
98 urllibcompat,
98 urllibcompat,
99 util,
99 util,
100 )
100 )
101 from .utils import (
101 from .utils import (
102 procutil,
102 procutil,
103 )
103 )
104
104
105 httplib = util.httplib
105 httplib = util.httplib
106 urlerr = util.urlerr
106 urlerr = util.urlerr
107 urlreq = util.urlreq
107 urlreq = util.urlreq
108
108
109 DEBUG = None
109 DEBUG = None
110
110
class ConnectionManager(object):
    """
    The connection manager must be able to:
      * keep track of all existing connections, which host each one
        belongs to, and whether each is ready (idle) or busy
    """
    def __init__(self):
        self._lock = threading.Lock()
        # host -> [connection, ...]
        self._hostmap = collections.defaultdict(list)
        # connection -> host
        self._connmap = {}
        # connection -> ready state (bool)
        self._readymap = {}

    def add(self, host, connection, ready):
        """Register ``connection`` for ``host`` with the given ready state."""
        with self._lock:
            self._hostmap[host].append(connection)
            self._connmap[connection] = host
            self._readymap[connection] = ready

    def remove(self, connection):
        """Forget ``connection`` entirely; a no-op if it is not tracked."""
        with self._lock:
            host = self._connmap.pop(connection, None)
            if host is not None:
                del self._readymap[connection]
                self._hostmap[host].remove(connection)
                # drop empty host buckets so get_all() stays tidy
                if not self._hostmap[host]:
                    del self._hostmap[host]

    def set_ready(self, connection, ready):
        """Mark ``connection`` ready/busy; ignore untracked connections.

        The previous implementation assigned unconditionally inside a
        ``try/except KeyError``, but a dict *assignment* never raises
        KeyError, so a connection that had already been removed was
        silently resurrected in ``_readymap`` (a small leak).  Only
        update connections we still know about.

        NOTE(review): like the original, this intentionally does not
        take ``self._lock`` — presumably to stay cheap on the hot path.
        """
        if connection in self._readymap:
            self._readymap[connection] = ready

    def get_ready_conn(self, host):
        """Return an idle connection to ``host`` (marking it busy),
        or None if every pooled connection is in use."""
        with self._lock:
            for conn in self._hostmap[host]:
                if self._readymap[conn]:
                    self._readymap[conn] = False
                    return conn
        return None

    def get_all(self, host=None):
        """Return a copy of the connection list for ``host``, or, with
        no host, a dict snapshot of host -> [connection, ...]."""
        if host:
            return list(self._hostmap[host])
        else:
            return dict(self._hostmap)
171
171
class KeepAliveHandler(object):
    """urllib2-style handler mix-in providing HTTP/1.1 keep-alive.

    Subclasses combine this with a urllib2 handler class (see
    HTTPHandler below).  Idle connections are pooled per host in a
    ConnectionManager and transparently reused for later requests.
    """
    def __init__(self, timeout=None):
        self._cm = ConnectionManager()
        # socket timeout applied to newly created connections
        self._timeout = timeout
        # counters; also mirrored onto self.parent by _start_transaction()
        self.requestscount = 0
        self.sentbytescount = 0

    #### Connection Management
    def open_connections(self):
        """return a list of connected hosts and the number of connections
        to each.  [('foo.com:80', 2), ('bar.org', 1)]"""
        return [(host, len(li)) for (host, li) in self._cm.get_all().items()]

    def close_connection(self, host):
        """close connection(s) to <host>
        host is the host:port spec, as in 'www.cnn.com:8080' as passed in.
        no error occurs if there is no connection to that host."""
        for h in self._cm.get_all(host):
            self._cm.remove(h)
            h.close()

    def close_all(self):
        """close all open connections"""
        # Use .items() (not .iteritems()): get_all() returns a plain
        # dict, and dict.iteritems() does not exist on Python 3, so the
        # previous code raised AttributeError there.
        for host, conns in self._cm.get_all().items():
            for h in conns:
                self._cm.remove(h)
                h.close()

    def _request_closed(self, request, host, connection):
        """tells us that this request is now closed and that the
        connection is ready for another request"""
        self._cm.set_ready(connection, True)

    def _remove_connection(self, host, connection, close=0):
        # ``close=0`` kept as-is for backward compatibility with callers
        if close:
            connection.close()
        self._cm.remove(connection)

    #### Transaction Execution
    def http_open(self, req):
        return self.do_open(HTTPConnection, req)

    def do_open(self, http_class, req):
        """Open ``req``, reusing a pooled connection when possible.

        Returns the response, annotated with extra attributes
        (``_handler``, ``_host``, ``_url``, ``_connection``, ``code``,
        ``headers``).  Raises urlerr.urlerror on network/protocol
        failure.
        """
        host = urllibcompat.gethost(req)
        if not host:
            raise urlerr.urlerror('no host given')

        try:
            h = self._cm.get_ready_conn(host)
            while h:
                r = self._reuse_connection(h, req, host)

                # if this response is non-None, then it worked and we're
                # done.  Break out, skipping the else block.
                if r:
                    break

                # connection is bad - possibly closed by server
                # discard it and ask for the next free connection
                h.close()
                self._cm.remove(h)
                h = self._cm.get_ready_conn(host)
            else:
                # no (working) free connections were found.  Create a new one.
                h = http_class(host, timeout=self._timeout)
                if DEBUG:
                    DEBUG.info("creating new connection to %s (%d)",
                               host, id(h))
                self._cm.add(host, h, False)
                self._start_transaction(h, req)
                r = h.getresponse()
        # The string form of BadStatusLine is the status line. Add some context
        # to make the error message slightly more useful.
        except httplib.BadStatusLine as err:
            raise urlerr.urlerror(
                _('bad HTTP status line: %s') % pycompat.sysbytes(err.line))
        except (socket.error, httplib.HTTPException) as err:
            raise urlerr.urlerror(err)

        # If not a persistent connection, don't try to reuse it. Look
        # for this using getattr() since vcr doesn't define this
        # attribute, and in that case always close the connection.
        if getattr(r, r'will_close', True):
            self._cm.remove(h)

        if DEBUG:
            DEBUG.info("STATUS: %s, %s", r.status, r.reason)
        r._handler = self
        r._host = host
        r._url = req.get_full_url()
        r._connection = h
        r.code = r.status
        r.headers = r.msg
        r.msg = r.reason

        return r

    def _reuse_connection(self, h, req, host):
        """start the transaction with a re-used connection
        return a response object (r) upon success or None on failure.
        This DOES not close or remove bad connections in cases where
        it returns.  However, if an unexpected exception occurs, it
        will close and remove the connection before re-raising.
        """
        try:
            self._start_transaction(h, req)
            r = h.getresponse()
            # note: just because we got something back doesn't mean it
            # worked.  We'll check the version below, too.
        except (socket.error, httplib.HTTPException):
            r = None
        except: # re-raises
            # adding this block just in case we've missed
            # something we will still raise the exception, but
            # lets try and close the connection and remove it
            # first.  We previously got into a nasty loop
            # where an exception was uncaught, and so the
            # connection stayed open.  On the next try, the
            # same exception was raised, etc.  The trade-off is
            # that it's now possible this call will raise
            # a DIFFERENT exception
            if DEBUG:
                DEBUG.error("unexpected exception - closing "
                            "connection to %s (%d)", host, id(h))
            self._cm.remove(h)
            h.close()
            raise

        if r is None or r.version == 9:
            # httplib falls back to assuming HTTP 0.9 if it gets a
            # bad header back.  This is most likely to happen if
            # the socket has been closed by the server since we
            # last used the connection.
            if DEBUG:
                DEBUG.info("failed to re-use connection to %s (%d)",
                           host, id(h))
            r = None
        else:
            if DEBUG:
                DEBUG.info("re-using connection to %s (%d)", host, id(h))

        return r

    def _start_transaction(self, h, req):
        """Send ``req`` (request line, headers, body) on connection ``h``."""
        oldbytescount = getattr(h, 'sentbytescount', 0)

        # What follows mostly reimplements HTTPConnection.request()
        # except it adds self.parent.addheaders in the mix and sends headers
        # in a deterministic order (to make testing easier).
        headers = util.sortdict(self.parent.addheaders)
        headers.update(sorted(req.headers.items()))
        headers.update(sorted(req.unredirected_hdrs.items()))
        headers = util.sortdict((n.lower(), v) for n, v in headers.items())
        skipheaders = {}
        for n in (r'host', r'accept-encoding'):
            if n in headers:
                skipheaders[r'skip_' + n.replace(r'-', r'_')] = 1
        try:
            if urllibcompat.hasdata(req):
                data = urllibcompat.getdata(req)
                h.putrequest(
                    req.get_method(), urllibcompat.getselector(req),
                    **skipheaders)
                if r'content-type' not in headers:
                    h.putheader(r'Content-type',
                                r'application/x-www-form-urlencoded')
                if r'content-length' not in headers:
                    h.putheader(r'Content-length', r'%d' % len(data))
            else:
                h.putrequest(
                    req.get_method(), urllibcompat.getselector(req),
                    **skipheaders)
        except socket.error as err:
            raise urlerr.urlerror(err)
        for k, v in headers.items():
            h.putheader(k, v)
        h.endheaders()
        if urllibcompat.hasdata(req):
            h.send(data)

        # This will fail to record events in case of I/O failure. That's OK.
        self.requestscount += 1
        self.sentbytescount += getattr(h, 'sentbytescount', 0) - oldbytescount

        try:
            self.parent.requestscount += 1
            self.parent.sentbytescount += (
                getattr(h, 'sentbytescount', 0) - oldbytescount)
        except AttributeError:
            pass
362
362
class HTTPHandler(KeepAliveHandler, urlreq.httphandler):
    """Concrete urllib2 HTTP handler with keep-alive support."""
365
365
366 class HTTPResponse(httplib.HTTPResponse):
366 class HTTPResponse(httplib.HTTPResponse):
367 # we need to subclass HTTPResponse in order to
367 # we need to subclass HTTPResponse in order to
368 # 1) add readline(), readlines(), and readinto() methods
368 # 1) add readline(), readlines(), and readinto() methods
369 # 2) add close_connection() methods
369 # 2) add close_connection() methods
370 # 3) add info() and geturl() methods
370 # 3) add info() and geturl() methods
371
371
372 # in order to add readline(), read must be modified to deal with a
372 # in order to add readline(), read must be modified to deal with a
373 # buffer. example: readline must read a buffer and then spit back
373 # buffer. example: readline must read a buffer and then spit back
374 # one line at a time. The only real alternative is to read one
374 # one line at a time. The only real alternative is to read one
375 # BYTE at a time (ick). Once something has been read, it can't be
375 # BYTE at a time (ick). Once something has been read, it can't be
376 # put back (ok, maybe it can, but that's even uglier than this),
376 # put back (ok, maybe it can, but that's even uglier than this),
377 # so if you THEN do a normal read, you must first take stuff from
377 # so if you THEN do a normal read, you must first take stuff from
378 # the buffer.
378 # the buffer.
379
379
380 # the read method wraps the original to accommodate buffering,
380 # the read method wraps the original to accommodate buffering,
381 # although read() never adds to the buffer.
381 # although read() never adds to the buffer.
382 # Both readline and readlines have been stolen with almost no
382 # Both readline and readlines have been stolen with almost no
383 # modification from socket.py
383 # modification from socket.py
384
384
385
385
386 def __init__(self, sock, debuglevel=0, strict=0, method=None):
386 def __init__(self, sock, debuglevel=0, strict=0, method=None):
387 extrakw = {}
387 extrakw = {}
388 if not pycompat.ispy3:
388 if not pycompat.ispy3:
389 extrakw[r'strict'] = True
389 extrakw[r'strict'] = True
390 extrakw[r'buffering'] = True
390 extrakw[r'buffering'] = True
391 httplib.HTTPResponse.__init__(self, sock, debuglevel=debuglevel,
391 httplib.HTTPResponse.__init__(self, sock, debuglevel=debuglevel,
392 method=method, **extrakw)
392 method=method, **extrakw)
393 self.fileno = sock.fileno
393 self.fileno = sock.fileno
394 self.code = None
394 self.code = None
395 self.receivedbytescount = 0
395 self.receivedbytescount = 0
396 self._rbuf = ''
396 self._rbuf = ''
397 self._rbufsize = 8096
397 self._rbufsize = 8096
398 self._handler = None # inserted by the handler later
398 self._handler = None # inserted by the handler later
399 self._host = None # (same)
399 self._host = None # (same)
400 self._url = None # (same)
400 self._url = None # (same)
401 self._connection = None # (same)
401 self._connection = None # (same)
402
402
403 _raw_read = httplib.HTTPResponse.read
403 _raw_read = httplib.HTTPResponse.read
404 _raw_readinto = getattr(httplib.HTTPResponse, 'readinto', None)
404 _raw_readinto = getattr(httplib.HTTPResponse, 'readinto', None)
405
405
406 def close(self):
406 def close(self):
407 if self.fp:
407 if self.fp:
408 self.fp.close()
408 self.fp.close()
409 self.fp = None
409 self.fp = None
410 if self._handler:
410 if self._handler:
411 self._handler._request_closed(self, self._host,
411 self._handler._request_closed(self, self._host,
412 self._connection)
412 self._connection)
413
413
414 def close_connection(self):
414 def close_connection(self):
415 self._handler._remove_connection(self._host, self._connection, close=1)
415 self._handler._remove_connection(self._host, self._connection, close=1)
416 self.close()
416 self.close()
417
417
418 def info(self):
418 def info(self):
419 return self.headers
419 return self.headers
420
420
421 def geturl(self):
421 def geturl(self):
422 return self._url
422 return self._url
423
423
424 def read(self, amt=None):
424 def read(self, amt=None):
425 # the _rbuf test is only in this first if for speed. It's not
425 # the _rbuf test is only in this first if for speed. It's not
426 # logically necessary
426 # logically necessary
427 if self._rbuf and amt is not None:
427 if self._rbuf and amt is not None:
428 L = len(self._rbuf)
428 L = len(self._rbuf)
429 if amt > L:
429 if amt > L:
430 amt -= L
430 amt -= L
431 else:
431 else:
432 s = self._rbuf[:amt]
432 s = self._rbuf[:amt]
433 self._rbuf = self._rbuf[amt:]
433 self._rbuf = self._rbuf[amt:]
434 return s
434 return s
435 # Careful! http.client.HTTPResponse.read() on Python 3 is
435 # Careful! http.client.HTTPResponse.read() on Python 3 is
436 # implemented using readinto(), which can duplicate self._rbuf
436 # implemented using readinto(), which can duplicate self._rbuf
437 # if it's not empty.
437 # if it's not empty.
438 s = self._rbuf
438 s = self._rbuf
439 self._rbuf = ''
439 self._rbuf = ''
440 data = self._raw_read(amt)
440 data = self._raw_read(amt)
441
441
442 self.receivedbytescount += len(data)
442 self.receivedbytescount += len(data)
443 try:
443 try:
444 self._connection.receivedbytescount += len(data)
444 self._connection.receivedbytescount += len(data)
445 except AttributeError:
445 except AttributeError:
446 pass
446 pass
447 try:
447 try:
448 self._handler.parent.receivedbytescount += len(data)
448 self._handler.parent.receivedbytescount += len(data)
449 except AttributeError:
449 except AttributeError:
450 pass
450 pass
451
451
452 s += data
452 s += data
453 return s
453 return s
454
454
    # stolen from Python SVN #68532 to fix issue1088
    def _read_chunked(self, amt):
        """Read from a chunked transfer-encoded body.

        ``amt`` is the maximum number of bytes wanted, or None for the
        whole body.  Loops over chunk-size lines and chunk payloads,
        tracking the unread remainder of the current chunk in
        ``self.chunk_left`` across calls.
        """
        chunk_left = self.chunk_left
        parts = []

        while True:
            # chunk_left is None when we sit at a chunk boundary and
            # must parse the next "<hex-size>[;extensions]\r\n" line.
            if chunk_left is None:
                line = self.fp.readline()
                i = line.find(';')
                if i >= 0:
                    line = line[:i] # strip chunk-extensions
                try:
                    chunk_left = int(line, 16)
                except ValueError:
                    # close the connection as protocol synchronization is
                    # probably lost
                    self.close()
                    raise httplib.IncompleteRead(''.join(parts))
                # a zero-length chunk terminates the body
                if chunk_left == 0:
                    break
            if amt is None:
                # unbounded read: take the whole chunk and continue
                parts.append(self._safe_read(chunk_left))
            elif amt < chunk_left:
                # request satisfied mid-chunk: remember the remainder
                # for the next call and return early
                parts.append(self._safe_read(amt))
                self.chunk_left = chunk_left - amt
                return ''.join(parts)
            elif amt == chunk_left:
                # request ends exactly at the chunk boundary
                parts.append(self._safe_read(amt))
                self._safe_read(2)  # toss the CRLF at the end of the chunk
                self.chunk_left = None
                return ''.join(parts)
            else:
                # request spans chunks: consume this one and keep going
                parts.append(self._safe_read(chunk_left))
                amt -= chunk_left

            # we read the whole chunk, get another
            self._safe_read(2)      # toss the CRLF at the end of the chunk
            chunk_left = None

        # read and discard trailer up to the CRLF terminator
        ### note: we shouldn't have any trailers!
        while True:
            line = self.fp.readline()
            if not line:
                # a vanishingly small number of sites EOF without
                # sending the trailer
                break
            if line == '\r\n':
                break

        # we read everything; close the "file"
        self.close()

        return ''.join(parts)
509
509
def readline(self):
    """Return the next line from the response, pulling raw data as needed."""
    # Serve straight from the local buffer when it already holds a newline.
    head, sep, tail = self._rbuf.partition('\n')
    if sep:
        self._rbuf = tail
        return head + sep

    # Buffered data has no newline; keep reading from the raw stream
    # until a newline shows up or the stream is exhausted.
    pieces = [self._rbuf]
    nlindex = -1
    want = self._rbufsize
    while True:
        data = self._raw_read(want)
        if not data:
            break

        # Maintain received-bytes accounting at every layer that tracks it.
        self.receivedbytescount += len(data)
        self._connection.receivedbytescount += len(data)
        try:
            self._handler.parent.receivedbytescount += len(data)
        except AttributeError:
            pass

        pieces.append(data)
        nlindex = data.find('\n')
        if nlindex != -1:
            break

    # Either the stream is exhausted or pieces[-1] contains a newline.
    if nlindex == -1:
        # EOF without a newline: hand back everything we collected.
        self._rbuf = ''
        return ''.join(pieces)

    # Split the final piece just past the newline; the remainder stays
    # buffered for the next read.
    cut = nlindex + 1
    self._rbuf = pieces[-1][cut:]
    pieces[-1] = pieces[-1][:cut]
    return ''.join(pieces)
551
551
def readlines(self, sizehint=0):
    """Read lines until EOF.

    If ``sizehint`` is non-zero, stop once at least that many bytes
    have been collected (the line that crosses the limit is included).
    """
    lines = []
    consumed = 0
    line = self.readline()
    while line:
        lines.append(line)
        consumed += len(line)
        if sizehint and consumed >= sizehint:
            break
        line = self.readline()
    return lines
564
564
def readinto(self, dest):
    """Read up to ``len(dest)`` bytes into the writable buffer ``dest``.

    Returns the number of bytes stored (0 on EOF). Falls back to
    ``read()`` plus a copy when the underlying file object has no
    ``readinto`` of its own.
    """
    if self._raw_readinto is None:
        # No raw readinto available: emulate it via read() and a copy.
        res = self.read(len(dest))
        if not res:
            return 0
        dest[0:len(res)] = res
        return len(res)
    total = len(dest)
    have = len(self._rbuf)
    if have >= total:
        # The local buffer alone satisfies the request.
        dest[0:total] = self._rbuf[:total]
        self._rbuf = self._rbuf[total:]
        return total
    # Fill the tail of dest directly (zero-copy) past the buffered prefix.
    mv = memoryview(dest)
    got = self._raw_readinto(mv[have:total])

    self.receivedbytescount += got
    self._connection.receivedbytescount += got
    try:
        # Fix: credit the opener (handler.parent) like readline()/read()
        # do; the handler itself has no receivedbytescount, so the old
        # `self._handler.receivedbytescount` always raised AttributeError
        # and opener-level accounting was silently dropped.
        self._handler.parent.receivedbytescount += got
    except AttributeError:
        pass

    dest[0:have] = self._rbuf
    got += len(self._rbuf)
    self._rbuf = ''
    return got
592
592
def safesend(self, str):
    """Send `str' to the server.

    Shamelessly ripped off from httplib to patch a bad behavior.
    """
    # _broken_pipe_resp is set when the peer closed the socket while we
    # were sending but still delivered a response: pretend the rest of
    # the payload was sent and let the wrapped getresponse() return the
    # stashed reply before a new socket is opened.
    if getattr(self, '_broken_pipe_resp', None) is not None:
        return

    if self.sock is None:
        if not self.auto_open:
            raise httplib.NotConnected
        self.connect()

    # Push the data out. On a broken pipe we close the socket so the
    # next send reconnects, but the error still propagates (unless a
    # response was salvaged) so the caller can decide whether to retry.
    if self.debuglevel > 0:
        print("send:", repr(str))
    try:
        blocksize = 8192
        read = getattr(str, 'read', None)
        if read is None:
            # Plain byte string: one sendall covers it.
            self.sock.sendall(str)
            self.sentbytescount += len(str)
        else:
            # File-like payload: stream it out in fixed-size blocks.
            if self.debuglevel > 0:
                print("sending a read()able")
            data = read(blocksize)
            while data:
                self.sock.sendall(data)
                self.sentbytescount += len(data)
                data = read(blocksize)
    except socket.error as v:
        reraise = True
        if v.args[0] == errno.EPIPE:  # Broken pipe
            if self._HTTPConnection__state == httplib._CS_REQ_SENT:
                self._broken_pipe_resp = None
                self._broken_pipe_resp = self.getresponse()
                reraise = False
            self.close()
        if reraise:
            raise
645
645
def wrapgetresponse(cls):
    """Wraps getresponse in cls with a broken-pipe sane version.
    """
    def safegetresponse(self):
        # safesend() may have stashed a response in _broken_pipe_resp
        # when the server hung up mid-request. In that case the socket
        # is already closed and the stashed response is the one to hand
        # back; otherwise defer to the normal implementation.
        cached = getattr(self, '_broken_pipe_resp', None)
        if cached is None:
            return cls.getresponse(self)
        return cached
    safegetresponse.__doc__ = cls.getresponse.__doc__
    return safegetresponse
660
660
class HTTPConnection(httplib.HTTPConnection):
    # url.httpsconnection inherits from this. So when adding/removing
    # attributes, be sure to audit httpsconnection() for unintended
    # consequences.

    # Swap in the keepalive-aware response class and the patched
    # send/getresponse implementations defined above.
    response_class = HTTPResponse
    send = safesend
    getresponse = wrapgetresponse(httplib.HTTPConnection)

    def __init__(self, *args, **kwargs):
        httplib.HTTPConnection.__init__(self, *args, **kwargs)
        # Running totals of traffic over this connection, consumed by
        # the response objects and handler for bandwidth accounting.
        self.sentbytescount = 0
        self.receivedbytescount = 0
675
675
676 #########################################################################
676 #########################################################################
677 ##### TEST FUNCTIONS
677 ##### TEST FUNCTIONS
678 #########################################################################
678 #########################################################################
679
679
680
680
def continuity(url):
    """Fetch ``url`` three different ways and print an md5 per method so
    the results can be eyeballed for corruption."""
    fmt = '%25s: %s'

    def report(label, payload):
        # Print the md5 of payload so the fetch paths can be compared.
        print(fmt % (label, node.hex(hashlib.md5(payload).digest())))

    # first fetch the file with the normal http handler
    urlreq.installopener(urlreq.buildopener())
    fo = urlreq.urlopen(url)
    data = fo.read()
    fo.close()
    report('normal urllib', data)

    # now install the keepalive handler and try again
    urlreq.installopener(urlreq.buildopener(HTTPHandler()))
    fo = urlreq.urlopen(url)
    data = fo.read()
    fo.close()
    report('keepalive read', data)

    # same fetch again, but assembled line by line via readline()
    fo = urlreq.urlopen(url)
    data = ''
    line = fo.readline()
    while line:
        data = data + line
        line = fo.readline()
    fo.close()
    report('keepalive readline', data)
715
715
def comp(N, url):
    """Time N fetches of ``url`` with the stock urllib handlers versus
    the keepalive handler and print the speedup."""
    print(' making %i connections to:\n %s' % (N, url))

    procutil.stdout.write(' first using the normal urllib handlers')
    # first use normal opener
    urlreq.installopener(urlreq.buildopener())
    t1 = fetch(N, url)
    print(' TIME: %.3f s' % t1)

    procutil.stdout.write(' now using the keepalive handler ')
    # now install the keepalive handler and try again
    urlreq.installopener(urlreq.buildopener(HTTPHandler()))
    t2 = fetch(N, url)
    print(' TIME: %.3f s' % t2)
    print(' improvement factor: %.2f' % (t1 / t2))
733
733
def fetch(N, url, delay=0):
    """Fetch ``url`` N times (sleeping ``delay`` seconds between requests
    when given) and return the total elapsed time in seconds."""
    import time
    lens = []
    starttime = time.time()
    for i in range(N):
        if delay and i > 0:
            time.sleep(delay)
        fo = urlreq.urlopen(url)
        body = fo.read()
        fo.close()
        lens.append(len(body))
    diff = time.time() - starttime

    # every fetch should have returned the same amount of data
    for j, length in enumerate(lens[1:], 1):
        if length != lens[0]:
            print("WARNING: inconsistent length on read %i: %i" % (j, length))

    return diff
754
754
def test_timeout(url):
    """Check that a connection dropped by an idle server is transparently
    re-opened and yields identical data."""
    global DEBUG
    dbbackup = DEBUG

    class FakeLogger(object):
        # Route every log level to stdout so the reconnect is visible.
        def debug(self, msg, *args):
            print(msg % args)
        info = warning = error = debug

    DEBUG = FakeLogger()
    print(" fetching the file to establish a connection")
    fo = urlreq.urlopen(url)
    data1 = fo.read()
    fo.close()

    waitfor = 20
    print(" waiting %i seconds for the server to close the connection" % waitfor)
    for remaining in range(waitfor, 0, -1):
        procutil.stdout.write('\r %2i' % remaining)
        procutil.stdout.flush()
        time.sleep(1)
    procutil.stderr.write('\r')

    print(" fetching the file a second time")
    fo = urlreq.urlopen(url)
    data2 = fo.read()
    fo.close()

    if data1 == data2:
        print(' data are identical')
    else:
        print(' ERROR: DATA DIFFER')

    DEBUG = dbbackup
788
788
789
789
def test(url, N=10):
    """Run the full self-test suite against ``url``: continuity, speed
    comparison, then the dropped-connection check."""
    print("performing continuity test (making sure stuff isn't corrupted)")
    continuity(url)
    print('')
    print("performing speed comparison")
    comp(N, url)
    print('')
    print("performing dropped-connection check")
    test_timeout(url)
799
799
if __name__ == '__main__':
    # CLI entry point: "<integer> <url>" runs the full self-test suite.
    # test_timeout() relies on the module-global `time` bound here.
    import time
    try:
        count, target = int(sys.argv[1]), sys.argv[2]
    except (IndexError, ValueError):
        print("%s <integer> <url>" % sys.argv[0])
    else:
        test(target, count)
General Comments 0
You need to be logged in to leave comments. Login now