vcs: streaming now uses 256KB chunk readers for faster throughput
marcink
r3344:6ad08535 stable
@@ -133,6 +133,18 @@ class VcsHttpProxy(object):
         return _maybe_stream_response(response)
 
 
+def read_in_chunks(stream_obj, block_size=1024, chunks=-1):
+    """
+    Read Stream in chunks, default chunk size: 1k.
+    """
+    while chunks:
+        data = stream_obj.read(block_size)
+        if not data:
+            break
+        yield data
+        chunks -= 1
+
+
 def _is_request_chunked(environ):
     stream = environ.get('HTTP_TRANSFER_ENCODING', '') == 'chunked'
     return stream
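To illustrate the helper added above: read_in_chunks is a generator that yields blocks read from any file-like object until .read() returns no data. A minimal sketch follows; the in-memory BytesIO payload and the tiny block size are purely illustrative, not part of the change.

import io

# Illustrative in-memory stream; any object with a .read(size) method works,
# including the WSGI input stream this helper is wired to below.
payload = io.BytesIO(b"abcdefghij")

# With block_size=4 the generator yields b"abcd", b"efgh", b"ij" and then
# stops once .read() returns an empty bytes object.
for chunk in read_in_chunks(payload, block_size=4):
    print(chunk)

# The optional `chunks` argument caps how many blocks are read; the default
# of -1 never reaches zero, so the stream is consumed to the end.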
@@ -144,7 +156,8 @@ def _maybe_stream_request(environ):
     log.debug('handling request `%s` with stream support: %s', path, stream)
 
     if stream:
-        return environ['wsgi.input']
+        # set stream by 256k
+        return read_in_chunks(environ['wsgi.input'], block_size=1024 * 256)
     else:
         return environ['wsgi.input'].read()
 
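A rough sketch of what the request-side change does: instead of handing back the raw input file object, the body is exposed as a generator of blocks no larger than 256KB. The in-memory stand-in for environ['wsgi.input'] is an assumption for illustration; in a real request it is the socket-backed stream the WSGI server provides.

import io

# Stand-in for environ['wsgi.input'] with ~600 KB of dummy body data.
wsgi_input = io.BytesIO(b"x" * (600 * 1024))

# Previously the raw file object was returned; now the body is read lazily
# in 256 KB pieces.
body = read_in_chunks(wsgi_input, block_size=1024 * 256)

for block in body:
    print(len(block))  # prints 262144, 262144, 90112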
@@ -156,7 +169,8 @@ def _maybe_stream_response(response):
     stream = _is_chunked(response)
     log.debug('returning response with stream: %s', stream)
     if stream:
-        return response.raw.read_chunked()
+        # read in 256k Chunks
+        return response.raw.read_chunked(amt=1024 * 256)
     else:
         return [response.content]
 
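On the response side, response.raw is a urllib3 HTTPResponse, and its read_chunked() method takes an amt hint that caps the size of each yielded piece. A hedged usage sketch follows; the URL is a placeholder, and read_chunked() only applies when the upstream server actually replies with Transfer-Encoding: chunked.

import requests

# Placeholder URL; stream=True leaves the body unread so raw.read_chunked()
# can pull it piece by piece.
response = requests.get('https://example.com/large-stream', stream=True)

if response.headers.get('Transfer-Encoding', '') == 'chunked':
    # Mirrors the change above: read the chunked body in pieces of at most
    # 256 KB instead of whatever chunk sizes the server happened to emit.
    for chunk in response.raw.read_chunked(amt=1024 * 256):
        print(len(chunk))
else:
    # Non-chunked replies are still returned whole, as in the else branch above.
    body = [response.content]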