keepalive: send HTTP request headers in a deterministic order...
Gregory Szorc
r31999:aa836f56 default
@@ -1,708 +1,709 @@
1 1 # This library is free software; you can redistribute it and/or
2 2 # modify it under the terms of the GNU Lesser General Public
3 3 # License as published by the Free Software Foundation; either
4 4 # version 2.1 of the License, or (at your option) any later version.
5 5 #
6 6 # This library is distributed in the hope that it will be useful,
7 7 # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
9 9 # Lesser General Public License for more details.
10 10 #
11 11 # You should have received a copy of the GNU Lesser General Public
12 12 # License along with this library; if not, see
13 13 # <http://www.gnu.org/licenses/>.
14 14
15 15 # This file is part of urlgrabber, a high-level cross-protocol url-grabber
16 16 # Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko
17 17
18 18 # Modified by Benoit Boissinot:
19 19 # - fix for digest auth (inspired from urllib2.py @ Python v2.4)
20 20 # Modified by Dirkjan Ochtman:
21 21 # - import md5 function from a local util module
22 22 # Modified by Augie Fackler:
23 23 # - add safesend method and use it to prevent broken pipe errors
24 24 # on large POST requests
25 25
26 26 """An HTTP handler for urllib2 that supports HTTP 1.1 and keepalive.
27 27
28 28 >>> import urllib2
29 29 >>> from keepalive import HTTPHandler
30 30 >>> keepalive_handler = HTTPHandler()
31 31 >>> opener = urlreq.buildopener(keepalive_handler)
32 32 >>> urlreq.installopener(opener)
33 33 >>>
34 34 >>> fo = urlreq.urlopen('http://www.python.org')
35 35
36 36 If a connection to a given host is requested, and all of the existing
37 37 connections are still in use, another connection will be opened. If
38 38 the handler tries to use an existing connection but it fails in some
39 39 way, it will be closed and removed from the pool.
40 40
41 41 To remove the handler, simply re-run build_opener with no arguments, and
42 42 install that opener.
43 43
44 44 You can explicitly close connections by using the close_connection()
45 45 method of the returned file-like object (described below) or you can
46 46 use the handler methods:
47 47
48 48 close_connection(host)
49 49 close_all()
50 50 open_connections()
51 51
52 52 NOTE: using the close_connection and close_all methods of the handler
53 53 should be done with care when using multiple threads.
54 54 * there is nothing that prevents another thread from creating new
55 55 connections immediately after connections are closed
56 56 * no checks are done to prevent in-use connections from being closed
57 57
58 58 >>> keepalive_handler.close_all()
59 59
60 60 EXTRA ATTRIBUTES AND METHODS
61 61
62 62 Upon a status of 200, the object returned has a few additional
63 63 attributes and methods, which should not be used if you want to
64 64 remain consistent with the normal urllib2-returned objects:
65 65
66 66 close_connection() - close the connection to the host
67 67 readlines() - you know, readlines()
68 68 status - the return status (e.g. 404)
69 69 reason - English translation of status (e.g. 'File not found')
70 70
71 71 If you want the best of both worlds, use this inside an
72 72 AttributeError-catching try:
73 73
74 74 >>> try: status = fo.status
75 75 ... except AttributeError: status = None
76 76
77 77 Unfortunately, these are ONLY there if status == 200, so it's not
78 78 easy to distinguish between non-200 responses. The reason is that
79 79 urllib2 tries to do clever things with error codes 301, 302, 401,
80 80 and 407, and it wraps the object upon return.
81 81 """
82 82
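A minimal sketch of the usage the docstring describes, assuming Python 2 with the standalone keepalive module importable (the URL is illustrative); getattr sidesteps the AttributeError dance for non-200 responses:

    import urllib2
    from keepalive import HTTPHandler

    opener = urllib2.build_opener(HTTPHandler())
    urllib2.install_opener(opener)
    fo = urllib2.urlopen('http://www.python.org')
    status = getattr(fo, 'status', None)  # None when urllib2 wrapped the response
    fo.close()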
83 83 # $Id: keepalive.py,v 1.14 2006/04/04 21:00:32 mstenner Exp $
84 84
85 85 from __future__ import absolute_import, print_function
86 86
87 87 import errno
88 88 import hashlib
89 89 import socket
90 90 import sys
91 91 import threading
92 92
93 93 from . import (
94 94 util,
95 95 )
96 96
97 97 httplib = util.httplib
98 98 urlerr = util.urlerr
99 99 urlreq = util.urlreq
100 100
101 101 DEBUG = None
102 102
103 103 class ConnectionManager(object):
104 104 """
105 105 The connection manager must be able to:
106 106 * keep track of all existing connections
107 107 """
108 108 def __init__(self):
109 109 self._lock = threading.Lock()
110 110 self._hostmap = {} # map hosts to a list of connections
111 111 self._connmap = {} # map connections to host
112 112 self._readymap = {} # map connection to ready state
113 113
114 114 def add(self, host, connection, ready):
115 115 self._lock.acquire()
116 116 try:
117 117 if host not in self._hostmap:
118 118 self._hostmap[host] = []
119 119 self._hostmap[host].append(connection)
120 120 self._connmap[connection] = host
121 121 self._readymap[connection] = ready
122 122 finally:
123 123 self._lock.release()
124 124
125 125 def remove(self, connection):
126 126 self._lock.acquire()
127 127 try:
128 128 try:
129 129 host = self._connmap[connection]
130 130 except KeyError:
131 131 pass
132 132 else:
133 133 del self._connmap[connection]
134 134 del self._readymap[connection]
135 135 self._hostmap[host].remove(connection)
136 136 if not self._hostmap[host]: del self._hostmap[host]
137 137 finally:
138 138 self._lock.release()
139 139
140 140 def set_ready(self, connection, ready):
141 141 try:
142 142 self._readymap[connection] = ready
143 143 except KeyError:
144 144 pass
145 145
146 146 def get_ready_conn(self, host):
147 147 conn = None
148 148 self._lock.acquire()
149 149 try:
150 150 if host in self._hostmap:
151 151 for c in self._hostmap[host]:
152 152 if self._readymap[c]:
153 153 self._readymap[c] = 0
154 154 conn = c
155 155 break
156 156 finally:
157 157 self._lock.release()
158 158 return conn
159 159
160 160 def get_all(self, host=None):
161 161 if host:
162 162 return list(self._hostmap.get(host, []))
163 163 else:
164 164 return dict(self._hostmap)
165 165
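A hypothetical walkthrough of the pool bookkeeping above (the host string and the object standing in for a connection are illustrative):

    cm = ConnectionManager()
    conn = object()                      # stands in for an HTTPConnection
    cm.add('example.com:80', conn, 1)    # register it as ready
    assert cm.get_ready_conn('example.com:80') is conn  # now marked busy
    assert cm.get_ready_conn('example.com:80') is None  # nothing free left
    cm.set_ready(conn, 1)                # request finished; reusable again
    cm.remove(conn)                      # drop it from the pool entirely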
166 166 class KeepAliveHandler(object):
167 167 def __init__(self):
168 168 self._cm = ConnectionManager()
169 169
170 170 #### Connection Management
171 171 def open_connections(self):
172 172 """return a list of connected hosts and the number of connections
173 173 to each. [('foo.com:80', 2), ('bar.org', 1)]"""
174 174 return [(host, len(li)) for (host, li) in self._cm.get_all().items()]
175 175
176 176 def close_connection(self, host):
177 177 """close connection(s) to <host>
178 178 host is the host:port spec, as in 'www.cnn.com:8080' as passed in.
179 179 no error occurs if there is no connection to that host."""
180 180 for h in self._cm.get_all(host):
181 181 self._cm.remove(h)
182 182 h.close()
183 183
184 184 def close_all(self):
185 185 """close all open connections"""
186 186 for host, conns in self._cm.get_all().iteritems():
187 187 for h in conns:
188 188 self._cm.remove(h)
189 189 h.close()
190 190
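A sketch of the management calls the module docstring mentions, assuming the HTTPHandler subclass defined further down (the host string is illustrative):

    handler = HTTPHandler()
    print(handler.open_connections())          # e.g. [('example.com:80', 2)]
    handler.close_connection('example.com:80')
    handler.close_all()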
191 191 def _request_closed(self, request, host, connection):
192 192 """tells us that this request is now closed and that the
193 193 connection is ready for another request"""
194 194 self._cm.set_ready(connection, 1)
195 195
196 196 def _remove_connection(self, host, connection, close=0):
197 197 if close:
198 198 connection.close()
199 199 self._cm.remove(connection)
200 200
201 201 #### Transaction Execution
202 202 def http_open(self, req):
203 203 return self.do_open(HTTPConnection, req)
204 204
205 205 def do_open(self, http_class, req):
206 206 host = req.get_host()
207 207 if not host:
208 208 raise urlerr.urlerror('no host given')
209 209
210 210 try:
211 211 h = self._cm.get_ready_conn(host)
212 212 while h:
213 213 r = self._reuse_connection(h, req, host)
214 214
215 215 # if this response is non-None, then it worked and we're
216 216 # done. Break out, skipping the else block.
217 217 if r:
218 218 break
219 219
220 220 # connection is bad - possibly closed by server
221 221 # discard it and ask for the next free connection
222 222 h.close()
223 223 self._cm.remove(h)
224 224 h = self._cm.get_ready_conn(host)
225 225 else:
226 226 # no (working) free connections were found. Create a new one.
227 227 h = http_class(host)
228 228 if DEBUG:
229 229 DEBUG.info("creating new connection to %s (%d)",
230 230 host, id(h))
231 231 self._cm.add(host, h, 0)
232 232 self._start_transaction(h, req)
233 233 r = h.getresponse()
234 234 except (socket.error, httplib.HTTPException) as err:
235 235 raise urlerr.urlerror(err)
236 236
237 237 # if not a persistent connection, don't try to reuse it
238 238 if r.will_close:
239 239 self._cm.remove(h)
240 240
241 241 if DEBUG:
242 242 DEBUG.info("STATUS: %s, %s", r.status, r.reason)
243 243 r._handler = self
244 244 r._host = host
245 245 r._url = req.get_full_url()
246 246 r._connection = h
247 247 r.code = r.status
248 248 r.headers = r.msg
249 249 r.msg = r.reason
250 250
251 251 return r
252 252
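do_open leans on Python's while/else: the else clause runs only when the loop condition goes false without a break, i.e. when no pooled connection could be reused. The construct in miniature:

    conn = None                   # as if get_ready_conn() found nothing
    while conn:
        break                     # a successful _reuse_connection would break
    else:
        conn = 'new connection'   # mirrors h = http_class(host) above
    assert conn == 'new connection'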
253 253 def _reuse_connection(self, h, req, host):
254 254 """start the transaction with a re-used connection
255 255 return a response object (r) upon success or None on failure.
256 256 This DOES NOT close or remove bad connections in cases where
257 257 it returns normally. However, if an unexpected exception occurs, it
258 258 will close and remove the connection before re-raising.
259 259 """
260 260 try:
261 261 self._start_transaction(h, req)
262 262 r = h.getresponse()
263 263 # note: just because we got something back doesn't mean it
264 264 # worked. We'll check the version below, too.
265 265 except (socket.error, httplib.HTTPException):
266 266 r = None
267 267 except: # re-raises
268 268 # adding this block just in case we've missed
269 269 # something we will still raise the exception, but
270 270 # lets try and close the connection and remove it
271 271 # first. We previously got into a nasty loop
272 272 # where an exception was uncaught, and so the
273 273 # connection stayed open. On the next try, the
274 274 # same exception was raised, etc. The trade-off is
275 275 # that it's now possible this call will raise
276 276 # a DIFFERENT exception
277 277 if DEBUG:
278 278 DEBUG.error("unexpected exception - closing "
279 279 "connection to %s (%d)", host, id(h))
280 280 self._cm.remove(h)
281 281 h.close()
282 282 raise
283 283
284 284 if r is None or r.version == 9:
285 285 # httplib falls back to assuming HTTP 0.9 if it gets a
286 286 # bad header back. This is most likely to happen if
287 287 # the socket has been closed by the server since we
288 288 # last used the connection.
289 289 if DEBUG:
290 290 DEBUG.info("failed to re-use connection to %s (%d)",
291 291 host, id(h))
292 292 r = None
293 293 else:
294 294 if DEBUG:
295 295 DEBUG.info("re-using connection to %s (%d)", host, id(h))
296 296
297 297 return r
298 298
299 299 def _start_transaction(self, h, req):
300 300 # What follows mostly reimplements HTTPConnection.request()
301 - # except it adds self.parent.addheaders in the mix.
302 - headers = dict(self.parent.addheaders)
303 - headers.update(req.headers)
304 - headers.update(req.unredirected_hdrs)
305 - headers = dict((n.lower(), v) for n, v in headers.items())
301 + # except it adds self.parent.addheaders in the mix and sends headers
302 + # in a deterministic order (to make testing easier).
303 + headers = util.sortdict(self.parent.addheaders)
304 + headers.update(sorted(req.headers.items()))
305 + headers.update(sorted(req.unredirected_hdrs.items()))
306 + headers = util.sortdict((n.lower(), v) for n, v in headers.items())
306 307 skipheaders = {}
307 308 for n in ('host', 'accept-encoding'):
308 309 if n in headers:
309 310 skipheaders['skip_' + n.replace('-', '_')] = 1
310 311 try:
311 312 if req.has_data():
312 313 data = req.get_data()
313 314 h.putrequest(
314 315 req.get_method(), req.get_selector(), **skipheaders)
315 316 if 'content-type' not in headers:
316 317 h.putheader('Content-type',
317 318 'application/x-www-form-urlencoded')
318 319 if 'content-length' not in headers:
319 320 h.putheader('Content-length', '%d' % len(data))
320 321 else:
321 322 h.putrequest(
322 323 req.get_method(), req.get_selector(), **skipheaders)
323 324 except socket.error as err:
324 325 raise urlerr.urlerror(err)
325 326 for k, v in headers.items():
326 327 h.putheader(k, v)
327 328 h.endheaders()
328 329 if req.has_data():
329 330 h.send(data)
330 331
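The effect of the change above, sketched with only the standard library (util.sortdict is Mercurial's own insertion-ordered dict; collections.OrderedDict behaves the same for this purpose, and the header values are illustrative):

    from collections import OrderedDict

    addheaders = [('User-agent', 'mercurial/proto-1.0')]
    reqheaders = {'Host': 'example.com', 'Accept': 'application/mercurial-0.1'}

    headers = OrderedDict(addheaders)
    headers.update(sorted(reqheaders.items()))
    headers = OrderedDict((n.lower(), v) for n, v in headers.items())
    print(list(headers))   # ['user-agent', 'accept', 'host'] on every run

With a plain dict(), iteration order (and hence the header bytes on the wire) could vary between runs on interpreters without ordered dicts, which made test output nondeterministic.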
331 332 class HTTPHandler(KeepAliveHandler, urlreq.httphandler):
332 333 pass
333 334
334 335 class HTTPResponse(httplib.HTTPResponse):
335 336 # we need to subclass HTTPResponse in order to
336 337 # 1) add readline() and readlines() methods
337 338 # 2) add close_connection() methods
338 339 # 3) add info() and geturl() methods
339 340
340 341 # in order to add readline(), read must be modified to deal with a
341 342 # buffer. example: readline must read a buffer and then spit back
342 343 # one line at a time. The only real alternative is to read one
343 344 # BYTE at a time (ick). Once something has been read, it can't be
344 345 # put back (ok, maybe it can, but that's even uglier than this),
345 346 # so if you THEN do a normal read, you must first take stuff from
346 347 # the buffer.
347 348
348 349 # the read method wraps the original to accommodate buffering,
349 350 # although read() never adds to the buffer.
350 351 # Both readline and readlines have been stolen with almost no
351 352 # modification from socket.py
352 353
353 354
354 355 def __init__(self, sock, debuglevel=0, strict=0, method=None):
355 356 httplib.HTTPResponse.__init__(self, sock, debuglevel, method)
356 357 self.fileno = sock.fileno
357 358 self.code = None
358 359 self._rbuf = ''
359 360 self._rbufsize = 8096
360 361 self._handler = None # inserted by the handler later
361 362 self._host = None # (same)
362 363 self._url = None # (same)
363 364 self._connection = None # (same)
364 365
365 366 _raw_read = httplib.HTTPResponse.read
366 367
367 368 def close(self):
368 369 if self.fp:
369 370 self.fp.close()
370 371 self.fp = None
371 372 if self._handler:
372 373 self._handler._request_closed(self, self._host,
373 374 self._connection)
374 375
375 376 def close_connection(self):
376 377 self._handler._remove_connection(self._host, self._connection, close=1)
377 378 self.close()
378 379
379 380 def info(self):
380 381 return self.headers
381 382
382 383 def geturl(self):
383 384 return self._url
384 385
385 386 def read(self, amt=None):
386 387 # the _rbuf test is only in this first if for speed. It's not
387 388 # logically necessary
388 389 if self._rbuf and amt is not None:
389 390 L = len(self._rbuf)
390 391 if amt > L:
391 392 amt -= L
392 393 else:
393 394 s = self._rbuf[:amt]
394 395 self._rbuf = self._rbuf[amt:]
395 396 return s
396 397
397 398 s = self._rbuf + self._raw_read(amt)
398 399 self._rbuf = ''
399 400 return s
400 401
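The buffering contract of read() in miniature, modeled with io.BytesIO standing in for the raw connection (all names are illustrative):

    import io

    raw = io.BytesIO(b'world')
    rbuf = b'hello '                  # leftovers from a prior readline()

    def read(amt=None):
        global rbuf
        if rbuf and amt is not None:  # serve buffered bytes first
            if amt <= len(rbuf):
                s, rbuf = rbuf[:amt], rbuf[amt:]
                return s
            amt -= len(rbuf)
        s, rbuf = rbuf + raw.read(-1 if amt is None else amt), b''
        return s

    assert read(3) == b'hel'
    assert read() == b'lo world'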
401 402 # stolen from Python SVN #68532 to fix issue1088
402 403 def _read_chunked(self, amt):
403 404 chunk_left = self.chunk_left
404 405 parts = []
405 406
406 407 while True:
407 408 if chunk_left is None:
408 409 line = self.fp.readline()
409 410 i = line.find(';')
410 411 if i >= 0:
411 412 line = line[:i] # strip chunk-extensions
412 413 try:
413 414 chunk_left = int(line, 16)
414 415 except ValueError:
415 416 # close the connection as protocol synchronization is
416 417 # probably lost
417 418 self.close()
418 419 raise httplib.IncompleteRead(''.join(parts))
419 420 if chunk_left == 0:
420 421 break
421 422 if amt is None:
422 423 parts.append(self._safe_read(chunk_left))
423 424 elif amt < chunk_left:
424 425 parts.append(self._safe_read(amt))
425 426 self.chunk_left = chunk_left - amt
426 427 return ''.join(parts)
427 428 elif amt == chunk_left:
428 429 parts.append(self._safe_read(amt))
429 430 self._safe_read(2) # toss the CRLF at the end of the chunk
430 431 self.chunk_left = None
431 432 return ''.join(parts)
432 433 else:
433 434 parts.append(self._safe_read(chunk_left))
434 435 amt -= chunk_left
435 436
436 437 # we read the whole chunk, get another
437 438 self._safe_read(2) # toss the CRLF at the end of the chunk
438 439 chunk_left = None
439 440
440 441 # read and discard trailer up to the CRLF terminator
441 442 ### note: we shouldn't have any trailers!
442 443 while True:
443 444 line = self.fp.readline()
444 445 if not line:
445 446 # a vanishingly small number of sites EOF without
446 447 # sending the trailer
447 448 break
448 449 if line == '\r\n':
449 450 break
450 451
451 452 # we read everything; close the "file"
452 453 self.close()
453 454
454 455 return ''.join(parts)
455 456
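For reference, the wire format _read_chunked parses: each chunk is a hex length line (optionally carrying ;extensions), the payload, and a CRLF, terminated by a zero-length chunk. A tiny decoder over the same format, standard library only:

    import io

    wire = io.BytesIO(b'4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n')
    parts = []
    while True:
        size = int(wire.readline().split(b';')[0], 16)  # strip extensions
        if size == 0:
            break                    # zero-length chunk ends the body
        parts.append(wire.read(size))
        wire.readline()              # discard the CRLF after each chunk
    assert b''.join(parts) == b'Wikipedia'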
456 457 def readline(self):
457 458 # Fast path for a line is already available in read buffer.
458 459 i = self._rbuf.find('\n')
459 460 if i >= 0:
460 461 i += 1
461 462 line = self._rbuf[:i]
462 463 self._rbuf = self._rbuf[i:]
463 464 return line
464 465
465 466 # No newline in local buffer. Read until we find one.
466 467 chunks = [self._rbuf]
467 468 i = -1
468 469 readsize = self._rbufsize
469 470 while True:
470 471 new = self._raw_read(readsize)
471 472 if not new:
472 473 break
473 474
474 475 chunks.append(new)
475 476 i = new.find('\n')
476 477 if i >= 0:
477 478 break
478 479
479 480 # We either have exhausted the stream or have a newline in chunks[-1].
480 481
481 482 # EOF
482 483 if i == -1:
483 484 self._rbuf = ''
484 485 return ''.join(chunks)
485 486
486 487 i += 1
487 488 self._rbuf = chunks[-1][i:]
488 489 chunks[-1] = chunks[-1][:i]
489 490 return ''.join(chunks)
490 491
491 492 def readlines(self, sizehint=0):
492 493 total = 0
493 494 list = []
494 495 while True:
495 496 line = self.readline()
496 497 if not line:
497 498 break
498 499 list.append(line)
499 500 total += len(line)
500 501 if sizehint and total >= sizehint:
501 502 break
502 503 return list
503 504
504 505 def safesend(self, str):
505 506 """Send `str' to the server.
506 507
507 508 Shamelessly ripped off from httplib to patch a bad behavior.
508 509 """
509 510 # _broken_pipe_resp is an attribute we set in this function
510 511 # if the socket is closed while we're sending data but
511 512 # the server sent us a response before hanging up.
512 513 # In that case, we want to pretend to send the rest of the
513 514 # outgoing data, and then let the user use getresponse()
514 515 # (which we wrap) to get this last response before
515 516 # opening a new socket.
516 517 if getattr(self, '_broken_pipe_resp', None) is not None:
517 518 return
518 519
519 520 if self.sock is None:
520 521 if self.auto_open:
521 522 self.connect()
522 523 else:
523 524 raise httplib.NotConnected
524 525
525 526 # send the data to the server. if we get a broken pipe, then close
526 527 # the socket. we want to reconnect when somebody tries to send again.
527 528 #
528 529 # NOTE: we DO propagate the error, though, because we cannot simply
529 530 # ignore the error... the caller will know if they can retry.
530 531 if self.debuglevel > 0:
531 532 print("send:", repr(str))
532 533 try:
533 534 blocksize = 8192
534 535 read = getattr(str, 'read', None)
535 536 if read is not None:
536 537 if self.debuglevel > 0:
537 538 print("sending a read()able")
538 539 data = read(blocksize)
539 540 while data:
540 541 self.sock.sendall(data)
541 542 data = read(blocksize)
542 543 else:
543 544 self.sock.sendall(str)
544 545 except socket.error as v:
545 546 reraise = True
546 547 if v[0] == errno.EPIPE: # Broken pipe
547 548 if self._HTTPConnection__state == httplib._CS_REQ_SENT:
548 549 self._broken_pipe_resp = None
549 550 self._broken_pipe_resp = self.getresponse()
550 551 reraise = False
551 552 self.close()
552 553 if reraise:
553 554 raise
554 555
555 556 def wrapgetresponse(cls):
556 557 """Wraps getresponse in cls with a broken-pipe sane version.
557 558 """
558 559 def safegetresponse(self):
559 560 # In safesend() we might set the _broken_pipe_resp
560 561 # attribute, in which case the socket has already
561 562 # been closed and we just need to give them the response
562 563 # back. Otherwise, we use the normal response path.
563 564 r = getattr(self, '_broken_pipe_resp', None)
564 565 if r is not None:
565 566 return r
566 567 return cls.getresponse(self)
567 568 safegetresponse.__doc__ = cls.getresponse.__doc__
568 569 return safegetresponse
569 570
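How safesend and the wrapped getresponse cooperate, in outline: on EPIPE mid-request, safesend stashes the server's early response in _broken_pipe_resp, and safegetresponse hands that back instead of reading the closed socket. A schematic sketch (strings stand in for real HTTPResponse objects):

    class Demo(object):
        def getresponse(self):
            r = getattr(self, '_broken_pipe_resp', None)
            if r is not None:
                return r
            return 'fresh response'          # placeholder for the real read

    d = Demo()
    assert d.getresponse() == 'fresh response'
    d._broken_pipe_resp = 'early response'   # as safesend sets it on EPIPE
    assert d.getresponse() == 'early response'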
570 571 class HTTPConnection(httplib.HTTPConnection):
571 572 # use the modified response class
572 573 response_class = HTTPResponse
573 574 send = safesend
574 575 getresponse = wrapgetresponse(httplib.HTTPConnection)
575 576
576 577
577 578 #########################################################################
578 579 ##### TEST FUNCTIONS
579 580 #########################################################################
580 581
581 582
582 583 def continuity(url):
583 584 md5 = hashlib.md5
584 585 format = '%25s: %s'
585 586
586 587 # first fetch the file with the normal http handler
587 588 opener = urlreq.buildopener()
588 589 urlreq.installopener(opener)
589 590 fo = urlreq.urlopen(url)
590 591 foo = fo.read()
591 592 fo.close()
592 593 m = md5(foo)
593 594 print(format % ('normal urllib', m.hexdigest()))
594 595
595 596 # now install the keepalive handler and try again
596 597 opener = urlreq.buildopener(HTTPHandler())
597 598 urlreq.installopener(opener)
598 599
599 600 fo = urlreq.urlopen(url)
600 601 foo = fo.read()
601 602 fo.close()
602 603 m = md5(foo)
603 604 print(format % ('keepalive read', m.hexdigest()))
604 605
605 606 fo = urlreq.urlopen(url)
606 607 foo = ''
607 608 while True:
608 609 f = fo.readline()
609 610 if f:
610 611 foo = foo + f
611 612 else: break
612 613 fo.close()
613 614 m = md5(foo)
614 615 print(format % ('keepalive readline', m.hexdigest()))
615 616
616 617 def comp(N, url):
617 618 print(' making %i connections to:\n %s' % (N, url))
618 619
619 620 util.stdout.write(' first using the normal urllib handlers')
620 621 # first use normal opener
621 622 opener = urlreq.buildopener()
622 623 urlreq.installopener(opener)
623 624 t1 = fetch(N, url)
624 625 print(' TIME: %.3f s' % t1)
625 626
626 627 util.stdout.write(' now using the keepalive handler ')
627 628 # now install the keepalive handler and try again
628 629 opener = urlreq.buildopener(HTTPHandler())
629 630 urlreq.installopener(opener)
630 631 t2 = fetch(N, url)
631 632 print(' TIME: %.3f s' % t2)
632 633 print(' improvement factor: %.2f' % (t1 / t2))
633 634
634 635 def fetch(N, url, delay=0):
635 636 import time
636 637 lens = []
637 638 starttime = time.time()
638 639 for i in range(N):
639 640 if delay and i > 0:
640 641 time.sleep(delay)
641 642 fo = urlreq.urlopen(url)
642 643 foo = fo.read()
643 644 fo.close()
644 645 lens.append(len(foo))
645 646 diff = time.time() - starttime
646 647
647 648 j = 0
648 649 for i in lens[1:]:
649 650 j = j + 1
650 651 if not i == lens[0]:
651 652 print("WARNING: inconsistent length on read %i: %i" % (j, i))
652 653
653 654 return diff
654 655
655 656 def test_timeout(url):
656 657 global DEBUG
657 658 dbbackup = DEBUG
658 659 class FakeLogger(object):
659 660 def debug(self, msg, *args):
660 661 print(msg % args)
661 662 info = warning = error = debug
662 663 DEBUG = FakeLogger()
663 664 print(" fetching the file to establish a connection")
664 665 fo = urlreq.urlopen(url)
665 666 data1 = fo.read()
666 667 fo.close()
667 668
668 669 i = 20
669 670 print(" waiting %i seconds for the server to close the connection" % i)
670 671 while i > 0:
671 672 util.stdout.write('\r %2i' % i)
672 673 util.stdout.flush()
673 674 time.sleep(1)
674 675 i -= 1
675 676 util.stderr.write('\r')
676 677
677 678 print(" fetching the file a second time")
678 679 fo = urlreq.urlopen(url)
679 680 data2 = fo.read()
680 681 fo.close()
681 682
682 683 if data1 == data2:
683 684 print(' data are identical')
684 685 else:
685 686 print(' ERROR: DATA DIFFER')
686 687
687 688 DEBUG = dbbackup
688 689
689 690
690 691 def test(url, N=10):
691 692 print("performing continuity test (making sure stuff isn't corrupted)")
692 693 continuity(url)
693 694 print('')
694 695 print("performing speed comparison")
695 696 comp(N, url)
696 697 print('')
697 698 print("performing dropped-connection check")
698 699 test_timeout(url)
699 700
700 701 if __name__ == '__main__':
701 702 import time
702 703 try:
703 704 N = int(sys.argv[1])
704 705 url = sys.argv[2]
705 706 except (IndexError, ValueError):
706 707 print("%s <integer> <url>" % sys.argv[0])
707 708 else:
708 709 test(url, N)