chore(configs): synced gunicorn configs with ce/ee
super-admin
r1203:945790c2 default
@@ -3,24 +3,21 @@ Gunicorn config extension and hooks. Thi
 Gunicorn configuration should be managed by .ini files entries of RhodeCode or VCSServer
 """
 
-import contextlib
-import dataclasses
 import gc
+import os
+import sys
 import math
-import os
+import time
+import threading
+import traceback
 import random
 import socket
-import sys
-import threading
-import time
-import traceback
-
+import dataclasses
 from gunicorn.glogging import Logger
 
 
 def get_workers():
     import multiprocessing
-
     return multiprocessing.cpu_count() * 2 + 1
 
 
@@ -28,10 +25,10 @@ bind = "127.0.0.1:10010"
 
 
 # Error logging output for gunicorn (-) is stdout
-errorlog = "-"
+errorlog = '-'
 
 # Access logging output for gunicorn (-) is stdout
-accesslog = "-"
+accesslog = '-'
 
 
 # SERVER MECHANICS
@@ -41,39 +38,50 @@ worker_tmp_dir = None
 tmp_upload_dir = None
 
 # use re-use port logic
-# reuse_port = True
+#reuse_port = True
 
 # Custom log format
-# access_log_format = (
+#access_log_format = (
 #     '%(t)s %(p)s INFO [GNCRN] %(h)-15s rqt:%(L)s %(s)s %(b)-6s "%(m)s:%(U)s %(q)s" usr:%(u)s "%(f)s" "%(a)s"')
 
 # loki format for easier parsing in grafana
-access_log_format = 'time="%(t)s" pid=%(p)s level="INFO" type="[GNCRN]" ip="%(h)-15s" rqt="%(L)s" response_code="%(s)s" response_bytes="%(b)-6s" uri="%(m)s:%(U)s %(q)s" user=":%(u)s" user_agent="%(a)s"'
-
-
-# Sets the number of process workers. More workers means more concurrent connections
-# RhodeCode can handle at the same time. Each additional worker also it increases
-# memory usage as each has it's own set of caches.
-# Recommended value is (2 * NUMBER_OF_CPUS + 1), eg 2CPU = 5 workers, but no more
-# than 8-10 unless for huge deployments .e.g 700-1000 users.
-# `instance_id = *` must be set in the [app:main] section below (which is the default)
-# when using more than 1 worker.
-workers = 6
+access_log_format = (
+    'time="%(t)s" pid=%(p)s level="INFO" type="[GNCRN]" ip="%(h)-15s" rqt="%(L)s" response_code="%(s)s" response_bytes="%(b)-6s" uri="%(m)s:%(U)s %(q)s" user=":%(u)s" user_agent="%(a)s"')
 
 # self adjust workers based on CPU count, to use maximum of CPU and not overquota the resources
 # workers = get_workers()
 
 # Gunicorn access log level
-loglevel = "info"
+loglevel = 'info'
 
 # Process name visible in a process list
 proc_name = "rhodecode_vcsserver"
 
-# Type of worker class, one of `sync`, `gevent`
-# currently `sync` is the only option allowed.
-worker_class = "sync"
+# Type of worker class, one of `sync`, `gevent` or `gthread`
+# currently `sync` is the only option allowed for vcsserver and for rhodecode all of 3 are allowed
+# gevent:
+#   In this case, the maximum number of concurrent requests is (N workers * X worker_connections)
+#   e.g. workers =3 worker_connections=10 = 3*10, 30 concurrent requests can be handled
+# gthread:
+#   In this case, the maximum number of concurrent requests is (N workers * X threads)
+#   e.g. workers = 3 threads=3 = 3*3, 9 concurrent requests can be handled
+worker_class = 'sync'
+
+# Sets the number of process workers. More workers means more concurrent connections
+# RhodeCode can handle at the same time. Each additional worker also it increases
+# memory usage as each has its own set of caches.
+# The Recommended value is (2 * NUMBER_OF_CPUS + 1), eg 2CPU = 5 workers, but no more
+# than 8-10 unless for huge deployments .e.g 700-1000 users.
+# `instance_id = *` must be set in the [app:main] section below (which is the default)
+# when using more than 1 worker.
+workers = 2
+
+# Threads numbers for worker class gthread
+threads = 1
 
 # The maximum number of simultaneous clients. Valid only for gevent
+# In this case, the maximum number of concurrent requests is (N workers * X worker_connections)
+# e.g workers =3 worker_connections=10 = 3*10, 30 concurrent requests can be handled
 worker_connections = 10
 
 # Max number of requests that worker will handle before being gracefully restarted.
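Note: the new comments spell out the capacity math per worker class: gevent handles workers * worker_connections concurrent requests, gthread handles workers * threads, and sync handles one request per worker. A small sketch of that arithmetic (the helper name is illustrative, not part of the config):

def max_concurrent_requests(worker_class, workers, threads=1, worker_connections=10):
    # capacity rules described in the config comments above
    if worker_class == 'gevent':
        return workers * worker_connections
    if worker_class == 'gthread':
        return workers * threads
    return workers  # 'sync' handles one request per worker

print(max_concurrent_requests('sync', 2))                           # 2 (values shipped here)
print(max_concurrent_requests('gevent', 3, worker_connections=10))  # 30
print(max_concurrent_requests('gthread', 3, threads=3))             # 9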
@@ -138,27 +146,26 @@ class MemoryCheckConfig:
 
 
 def _get_process_rss(pid=None):
-    with contextlib.suppress(Exception):
+    try:
         import psutil
-
         if pid:
             proc = psutil.Process(pid)
         else:
             proc = psutil.Process()
         return proc.memory_info().rss
-
-    return None
+    except Exception:
+        return None
 
 
 def _get_config(ini_path):
     import configparser
 
-    with contextlib.suppress(Exception):
+    try:
         config = configparser.RawConfigParser()
         config.read(ini_path)
         return config
-
-    return None
+    except Exception:
+        return None
 
 
 def get_memory_usage_params(config=None):
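Note: swapping contextlib.suppress(Exception) for a plain try/except does not change behaviour here; both variants return None when psutil is missing or the process lookup fails. A self-contained sketch of the synced helper, assuming psutil is installed:

def get_process_rss(pid=None):
    # resident set size in bytes, or None if psutil is unavailable / lookup fails
    try:
        import psutil
        proc = psutil.Process(pid) if pid else psutil.Process()
        return proc.memory_info().rss
    except Exception:
        return None

print(get_process_rss())  # e.g. 54321152 for the current process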
@@ -171,30 +178,30 @@ def get_memory_usage_params(config=None)
     ini_path = os.path.abspath(config)
     conf = _get_config(ini_path)
 
-    section = "server:main"
+    section = 'server:main'
     if conf and conf.has_section(section):
-        if conf.has_option(section, "memory_max_usage"):
-            _memory_max_usage = conf.getint(section, "memory_max_usage")
 
-        if conf.has_option(section, "memory_usage_check_interval"):
-            _memory_usage_check_interval = conf.getint(section, "memory_usage_check_interval")
+        if conf.has_option(section, 'memory_max_usage'):
+            _memory_max_usage = conf.getint(section, 'memory_max_usage')
+
+        if conf.has_option(section, 'memory_usage_check_interval'):
+            _memory_usage_check_interval = conf.getint(section, 'memory_usage_check_interval')
 
-        if conf.has_option(section, "memory_usage_recovery_threshold"):
-            _memory_usage_recovery_threshold = conf.getfloat(section, "memory_usage_recovery_threshold")
+        if conf.has_option(section, 'memory_usage_recovery_threshold'):
+            _memory_usage_recovery_threshold = conf.getfloat(section, 'memory_usage_recovery_threshold')
 
-    _memory_max_usage = int(os.environ.get("RC_GUNICORN_MEMORY_MAX_USAGE", "") or _memory_max_usage)
-    _memory_usage_check_interval = int(
-        os.environ.get("RC_GUNICORN_MEMORY_USAGE_CHECK_INTERVAL", "") or _memory_usage_check_interval
-    )
-    _memory_usage_recovery_threshold = float(
-        os.environ.get("RC_GUNICORN_MEMORY_USAGE_RECOVERY_THRESHOLD", "") or _memory_usage_recovery_threshold
-    )
+    _memory_max_usage = int(os.environ.get('RC_GUNICORN_MEMORY_MAX_USAGE', '')
+                            or _memory_max_usage)
+    _memory_usage_check_interval = int(os.environ.get('RC_GUNICORN_MEMORY_USAGE_CHECK_INTERVAL', '')
+                                       or _memory_usage_check_interval)
+    _memory_usage_recovery_threshold = float(os.environ.get('RC_GUNICORN_MEMORY_USAGE_RECOVERY_THRESHOLD', '')
+                                             or _memory_usage_recovery_threshold)
 
     return MemoryCheckConfig(_memory_max_usage, _memory_usage_check_interval, _memory_usage_recovery_threshold)
 
 
 def _time_with_offset(check_interval):
-    return time.time() - random.randint(0, check_interval / 2.0)
+    return time.time() - random.randint(0, check_interval/2.0)
 
 
 def pre_fork(server, worker):
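Note: the RC_GUNICORN_* environment variables still take precedence over the .ini values; only the wrapping and quoting of these lines changed. A hedged sketch of the override pattern (resolve_memory_max_usage is an illustrative name, not a function in this file):

import os

def resolve_memory_max_usage(ini_value=0):
    # an empty or missing env var falls back to the .ini value, mirroring
    # int(os.environ.get('RC_GUNICORN_MEMORY_MAX_USAGE', '') or ini_value)
    return int(os.environ.get('RC_GUNICORN_MEMORY_MAX_USAGE', '') or ini_value)

os.environ['RC_GUNICORN_MEMORY_MAX_USAGE'] = '1073741824'  # 1 GiB
print(resolve_memory_max_usage(ini_value=0))  # 1073741824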
@@ -202,27 +209,26 @@ def pre_fork(server, worker):
 
 
 def post_fork(server, worker):
+
     memory_conf = get_memory_usage_params()
     _memory_max_usage = memory_conf.max_usage
     _memory_usage_check_interval = memory_conf.check_interval
     _memory_usage_recovery_threshold = memory_conf.recovery_threshold
 
-    worker._memory_max_usage = int(os.environ.get("RC_GUNICORN_MEMORY_MAX_USAGE", "") or _memory_max_usage)
-    worker._memory_usage_check_interval = int(
-        os.environ.get("RC_GUNICORN_MEMORY_USAGE_CHECK_INTERVAL", "") or _memory_usage_check_interval
-    )
-    worker._memory_usage_recovery_threshold = float(
-        os.environ.get("RC_GUNICORN_MEMORY_USAGE_RECOVERY_THRESHOLD", "") or _memory_usage_recovery_threshold
-    )
+    worker._memory_max_usage = int(os.environ.get('RC_GUNICORN_MEMORY_MAX_USAGE', '')
+                                   or _memory_max_usage)
+    worker._memory_usage_check_interval = int(os.environ.get('RC_GUNICORN_MEMORY_USAGE_CHECK_INTERVAL', '')
+                                              or _memory_usage_check_interval)
+    worker._memory_usage_recovery_threshold = float(os.environ.get('RC_GUNICORN_MEMORY_USAGE_RECOVERY_THRESHOLD', '')
+                                                    or _memory_usage_recovery_threshold)
 
     # register memory last check time, with some random offset so we don't recycle all
     # at once
     worker._last_memory_check_time = _time_with_offset(_memory_usage_check_interval)
 
     if _memory_max_usage:
-        server.log.info(
-            "pid=[%-10s] WORKER spawned with max memory set at %s", worker.pid, _format_data_size(_memory_max_usage)
-        )
+        server.log.info("pid=[%-10s] WORKER spawned with max memory set at %s", worker.pid,
+                        _format_data_size(_memory_max_usage))
     else:
         server.log.info("pid=[%-10s] WORKER spawned", worker.pid)
 
@@ -232,9 +238,9 @@ def pre_exec(server):
 
 
 def on_starting(server):
-    server_lbl = "{} {}".format(server.proc_name, server.address)
+    server_lbl = '{} {}'.format(server.proc_name, server.address)
     server.log.info("Server %s is starting.", server_lbl)
-    server.log.info("Config:")
+    server.log.info('Config:')
     server.log.info(f"\n{server.cfg}")
     server.log.info(get_memory_usage_params())
 
@@ -268,10 +274,10 @@ def _format_data_size(size, unit="B", pr
 
     if not binary:
         base = 1000
-        multiples = ("", "k", "M", "G", "T", "P", "E", "Z", "Y")
+        multiples = ('', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
     else:
         base = 1024
-        multiples = ("", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi", "Yi")
+        multiples = ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi')
 
     sign = ""
     if size > 0:
@@ -286,13 +292,13 @@ def _format_data_size(size, unit="B", pr
         m = 8
 
     if m == 0:
-        precision = "%.0f"
+        precision = '%.0f'
     else:
-        precision = "%%.%df" % precision
+        precision = '%%.%df' % precision
 
     size = precision % (size / math.pow(base, m))
 
-    return "%s%s %s%s" % (sign, size.strip(), multiples[m], unit)
+    return '%s%s %s%s' % (sign, size.strip(), multiples[m], unit)
 
 
 def _check_memory_usage(worker):
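Note: _format_data_size is only re-quoted here; it renders byte counts with decimal or binary multiples for the worker memory log lines. A simplified sketch of the same formatting idea (not the exact helper):

def format_size(size, binary=True, precision=1):
    # pick the largest multiple that keeps the value above 1, like _format_data_size()
    base = 1024 if binary else 1000
    multiples = ('', 'Ki', 'Mi', 'Gi', 'Ti') if binary else ('', 'k', 'M', 'G', 'T')
    m = 0
    while abs(size) >= base and m < len(multiples) - 1:
        size /= base
        m += 1
    return '%.*f %sB' % (precision, size, multiples[m])

print(format_size(2048))                # 2.0 KiB
print(format_size(1073741824))          # 1.0 GiB
print(format_size(1500, binary=False))  # 1.5 kB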
@@ -308,8 +314,8 @@ def _check_memory_usage(worker):
     mem_usage = _get_process_rss()
     if mem_usage and mem_usage > _memory_max_usage:
         worker.log.info(
-            "memory usage %s > %s, forcing gc", _format_data_size(mem_usage), _format_data_size(_memory_max_usage)
-        )
+            "memory usage %s > %s, forcing gc",
+            _format_data_size(mem_usage), _format_data_size(_memory_max_usage))
         # Try to clean it up by forcing a full collection.
         gc.collect()
         mem_usage = _get_process_rss()
@@ -317,9 +323,7 @@ def _check_memory_usage(worker):
             # Didn't clean up enough, we'll have to terminate.
             worker.log.warning(
                 "memory usage %s > %s after gc, quitting",
-                _format_data_size(mem_usage),
-                _format_data_size(_memory_max_usage),
-            )
+                _format_data_size(mem_usage), _format_data_size(_memory_max_usage))
             # This will cause worker to auto-restart itself
             worker.alive = False
     worker._last_memory_check_time = time.time()
@@ -335,7 +339,8 @@ def worker_int(worker):
 
     code = []
     for thread_id, stack in sys._current_frames().items():  # noqa
-        code.append("\n# Thread: %s(%d)" % (get_thread_id(thread_id), thread_id))
+        code.append(
+            "\n# Thread: %s(%d)" % (get_thread_id(thread_id), thread_id))
         for fname, lineno, name, line in traceback.extract_stack(stack):
             code.append('File: "%s", line %d, in %s' % (fname, lineno, name))
             if line:
@@ -357,21 +362,17 @@ def child_exit(server, worker):
 
 def pre_request(worker, req):
     worker.start_time = time.time()
-    worker.log.debug("GNCRN PRE WORKER [cnt:%s]: %s %s", worker.nr, req.method, req.path)
+    worker.log.debug(
+        "GNCRN PRE WORKER [cnt:%s]: %s %s", worker.nr, req.method, req.path)
 
 
 def post_request(worker, req, environ, resp):
     total_time = time.time() - worker.start_time
     # Gunicorn sometimes has problems with reading the status_code
-    status_code = getattr(resp, "status_code", "")
+    status_code = getattr(resp, 'status_code', '')
     worker.log.debug(
         "GNCRN POST WORKER [cnt:%s]: %s %s resp: %s, Load Time: %.4fs",
-        worker.nr,
-        req.method,
-        req.path,
-        status_code,
-        total_time,
-    )
+        worker.nr, req.method, req.path, status_code, total_time)
     _check_memory_usage(worker)
 
 
@@ -384,8 +385,8 @@ def _filter_proxy(ip):
 
     :param ip: ip string from headers
     """
-    if "," in ip:
-        _ips = ip.split(",")
+    if ',' in ip:
+        _ips = ip.split(',')
         _first_ip = _ips[0].strip()
         return _first_ip
     return ip
@@ -401,9 +402,8 @@ def _filter_port(ip):
 
     :param ip:
     """
-
     def is_ipv6(ip_addr):
-        if hasattr(socket, "inet_pton"):
+        if hasattr(socket, 'inet_pton'):
             try:
                 socket.inet_pton(socket.AF_INET6, ip_addr)
             except socket.error:
@@ -412,24 +412,24 @@ def _filter_port(ip):
             return False
         return True
 
-    if ":" not in ip:  # must be ipv4 pure ip
+    if ':' not in ip:  # must be ipv4 pure ip
         return ip
 
-    if "[" in ip and "]" in ip:  # ipv6 with port
-        return ip.split("]")[0][1:].lower()
+    if '[' in ip and ']' in ip:  # ipv6 with port
+        return ip.split(']')[0][1:].lower()
 
     # must be ipv6 or ipv4 with port
     if is_ipv6(ip):
         return ip
     else:
-        ip, _port = ip.split(":")[:2]  # means ipv4+port
+        ip, _port = ip.split(':')[:2]  # means ipv4+port
         return ip
 
 
 def get_ip_addr(environ):
-    proxy_key = "HTTP_X_REAL_IP"
-    proxy_key2 = "HTTP_X_FORWARDED_FOR"
-    def_key = "REMOTE_ADDR"
+    proxy_key = 'HTTP_X_REAL_IP'
+    proxy_key2 = 'HTTP_X_FORWARDED_FOR'
+    def_key = 'REMOTE_ADDR'
 
     def _filters(x):
         return _filter_port(_filter_proxy(x))
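Note: the IP helpers only change quoting, but their combined behaviour is easy to miss: _filter_proxy keeps the first address of a comma-separated X-Forwarded-For chain, and _filter_port strips a trailing port unless the value is a bare IPv6 address. A standalone sketch of that pipeline (simplified, not the exact functions):

import socket

def first_forwarded_ip(value):
    # X-Forwarded-For may carry a proxy chain; keep the original client IP
    return value.split(',')[0].strip() if ',' in value else value

def strip_port(ip):
    if ':' not in ip:            # plain IPv4, nothing to strip
        return ip
    if '[' in ip and ']' in ip:  # bracketed IPv6 with port, e.g. [::1]:8080
        return ip.split(']')[0][1:].lower()
    try:                         # bare IPv6 stays untouched
        socket.inet_pton(socket.AF_INET6, ip)
        return ip
    except OSError:
        return ip.split(':')[0]  # IPv4 with port

print(strip_port(first_forwarded_ip('203.0.113.7:4711, 10.0.0.1')))  # 203.0.113.7
print(strip_port('[2001:db8::1]:443'))                               # 2001:db8::1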
@@ -442,7 +442,7 @@ def get_ip_addr(environ):
     if ip:
         return _filters(ip)
 
-    ip = environ.get(def_key, "0.0.0.0")
+    ip = environ.get(def_key, '0.0.0.0')
     return _filters(ip)
 
 
@@ -457,40 +457,43 @@ class RhodeCodeLogger(Logger):
         Logger.__init__(self, cfg)
 
     def now(self):
-        """return date in RhodeCode Log format"""
+        """ return date in RhodeCode Log format """
         now = time.time()
         msecs = int((now - int(now)) * 1000)
-        return time.strftime(self.datefmt, time.localtime(now)) + ".{0:03d}".format(msecs)
+        return time.strftime(self.datefmt, time.localtime(now)) + '.{0:03d}'.format(msecs)
 
     def atoms(self, resp, req, environ, request_time):
-        """Gets atoms for log formatting."""
+        """ Gets atoms for log formatting.
+        """
         status = resp.status
         if isinstance(status, str):
             status = status.split(None, 1)[0]
         atoms = {
-            "h": get_ip_addr(environ),
-            "l": "-",
-            "u": self._get_user(environ) or "-",
-            "t": self.now(),
-            "r": "%s %s %s" % (environ["REQUEST_METHOD"], environ["RAW_URI"], environ["SERVER_PROTOCOL"]),
-            "s": status,
-            "m": environ.get("REQUEST_METHOD"),
-            "U": environ.get("PATH_INFO"),
-            "q": environ.get("QUERY_STRING"),
-            "H": environ.get("SERVER_PROTOCOL"),
-            "b": getattr(resp, "sent", None) is not None and str(resp.sent) or "-",
-            "B": getattr(resp, "sent", None),
-            "f": environ.get("HTTP_REFERER", "-"),
-            "a": environ.get("HTTP_USER_AGENT", "-"),
-            "T": request_time.seconds,
-            "D": (request_time.seconds * 1000000) + request_time.microseconds,
-            "M": (request_time.seconds * 1000) + int(request_time.microseconds / 1000),
-            "L": "%d.%06d" % (request_time.seconds, request_time.microseconds),
-            "p": "<%s>" % os.getpid(),
+            'h': get_ip_addr(environ),
+            'l': '-',
+            'u': self._get_user(environ) or '-',
+            't': self.now(),
+            'r': "%s %s %s" % (environ['REQUEST_METHOD'],
+                               environ['RAW_URI'],
+                               environ["SERVER_PROTOCOL"]),
+            's': status,
+            'm': environ.get('REQUEST_METHOD'),
+            'U': environ.get('PATH_INFO'),
+            'q': environ.get('QUERY_STRING'),
+            'H': environ.get('SERVER_PROTOCOL'),
+            'b': getattr(resp, 'sent', None) is not None and str(resp.sent) or '-',
+            'B': getattr(resp, 'sent', None),
+            'f': environ.get('HTTP_REFERER', '-'),
+            'a': environ.get('HTTP_USER_AGENT', '-'),
+            'T': request_time.seconds,
+            'D': (request_time.seconds * 1000000) + request_time.microseconds,
+            'M': (request_time.seconds * 1000) + int(request_time.microseconds/1000),
+            'L': "%d.%06d" % (request_time.seconds, request_time.microseconds),
+            'p': "<%s>" % os.getpid()
         }
 
         # add request headers
-        if hasattr(req, "headers"):
+        if hasattr(req, 'headers'):
             req_headers = req.headers
         else:
             req_headers = req
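Note: atoms() is what feeds the placeholders in the loki-style access_log_format defined earlier: 'h' is the proxy-aware client IP, 'L' the request time in seconds, 'p' the worker PID, and so on. A tiny sketch of how such a dict expands into a log line, using hand-written sample values instead of real request objects:

# hypothetical sample values; gunicorn builds these via RhodeCodeLogger.atoms()
atoms = {
    't': '[10/Jan/2024:10:00:00 +0000]',
    'p': '<1234>',
    'h': '203.0.113.7',
    'L': '0.003214',
    's': '200',
    'b': '512',
    'm': 'GET',
    'U': '/_ping',
    'q': '',
    'u': '-',
    'a': 'git/2.43.0',
}

access_log_format = (
    'time="%(t)s" pid=%(p)s level="INFO" type="[GNCRN]" ip="%(h)-15s" rqt="%(L)s" '
    'response_code="%(s)s" response_bytes="%(b)-6s" uri="%(m)s:%(U)s %(q)s" '
    'user=":%(u)s" user_agent="%(a)s"')

print(access_log_format % atoms)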