##// END OF EJS Templates
gunicorn: print config on startup
super-admin -
r1150:7050b122 default
parent child Browse files
Show More
@@ -1,112 +1,112 b''
# required for pushd to work..
#SHELL = /bin/bash


# set by: PATH_TO_OUTDATED_PACKAGES=/some/path/outdated_packages.py
OUTDATED_PACKAGES = ${PATH_TO_OUTDATED_PACKAGES}

.PHONY: clean
## Cleanup compiled and cache py files
clean:
	make test-clean
	find . -type f \( -iname '*.c' -o -iname '*.pyc' -o -iname '*.so' -o -iname '*.orig' \) -exec rm '{}' ';'
	find . -type d -name "build" -prune -exec rm -rf '{}' ';'


.PHONY: test
## run test-clean and tests
test:
	make test-clean
	make test-only


.PHONY: test-clean
## run test-clean and tests
test-clean:
	rm -rf coverage.xml htmlcov junit.xml pylint.log result
	find . -type d -name "__pycache__" -prune -exec rm -rf '{}' ';'
	find . -type f \( -iname '.coverage.*' \) -exec rm '{}' ';'


.PHONY: test-only
## Run tests only without cleanup
test-only:
	PYTHONHASHSEED=random \
	py.test -x -vv -r xw -p no:sugar \
	--cov-report=term-missing --cov-report=html \
	--cov=vcsserver vcsserver


.PHONY: pip-packages
## Show outdated packages
pip-packages:
	python ${OUTDATED_PACKAGES}


.PHONY: build
## Build sdist/egg
build:
	python -m build


.PHONY: dev-env
## make dev-env based on the requirements files and install develop of packages
dev-env:
	pip install build virtualenv
	pip wheel --wheel-dir=/home/rhodecode/.cache/pip/wheels -r requirements.txt -r requirements_test.txt -r requirements_debug.txt
	pip install --no-index --find-links=/home/rhodecode/.cache/pip/wheels -r requirements.txt -r requirements_test.txt -r requirements_debug.txt
	pip install -e .


.PHONY: dev-srv
## run develop server instance
dev-srv:
	pserve --reload .dev/dev.ini


.PHONY: dev-srv-g
## run gunicorn multi process workers
dev-srv-g:
	gunicorn --workers=4 --paste .dev/dev.ini --bind=0.0.0.0:10010 --config=.dev/gunicorn_config.py

# Default command on calling make
.DEFAULT_GOAL := show-help

.PHONY: show-help
show-help:
	@echo "$$(tput bold)Available rules:$$(tput sgr0)"
	@echo
	@sed -n -e "/^## / { \
		h; \
		s/.*//; \
		:doc" \
		-e "H; \
		n; \
		s/^## //; \
		t doc" \
		-e "s/:.*//; \
		G; \
		s/\\n## /---/; \
		s/\\n/ /g; \
		p; \
	}" ${MAKEFILE_LIST} \
	| LC_ALL='C' sort --ignore-case \
	| awk -F '---' \
		-v ncol=$$(tput cols) \
		-v indent=19 \
		-v col_on="$$(tput setaf 6)" \
		-v col_off="$$(tput sgr0)" \
	'{ \
		printf "%s%*s%s ", col_on, -indent, $$1, col_off; \
		n = split($$2, words, " "); \
		line_length = ncol - indent; \
		for (i = 1; i <= n; i++) { \
			line_length -= length(words[i]) + 1; \
			if (line_length <= 0) { \
				line_length = ncol - indent - length(words[i]) - 1; \
				printf "\n%*s ", -indent, " "; \
			} \
			printf "%s ", words[i]; \
		} \
		printf "\n"; \
	}'
@@ -1,506 +1,510 b''
1 1 """
2 2 Gunicorn config extension and hooks. This config file adds some extra settings and memory management.
3 3 Gunicorn configuration should be managed by .ini files entries of RhodeCode or VCSServer
4 4 """
5 5
6 6 import gc
7 7 import os
8 8 import sys
9 9 import math
10 10 import time
11 11 import threading
12 12 import traceback
13 13 import random
14 14 import socket
15 15 import dataclasses
16 16 from gunicorn.glogging import Logger
17 17
18 18
def get_workers():
    """Suggested gunicorn worker count: ``2 * CPU cores + 1`` (gunicorn's rule of thumb)."""
    import multiprocessing
    n_cpus = multiprocessing.cpu_count()
    return 2 * n_cpus + 1
22 22
23 23
# Address:port the gunicorn master binds to.
bind = "127.0.0.1:10010"


# Error logging output for gunicorn (-) is stdout
errorlog = '-'

# Access logging output for gunicorn (-) is stdout
accesslog = '-'


# SERVER MECHANICS
# None == system temp dir
# worker_tmp_dir is recommended to be set to some tmpfs
worker_tmp_dir = None
tmp_upload_dir = None

# use re-use port logic
#reuse_port = True

# Custom log format
#access_log_format = (
#    '%(t)s %(p)s INFO [GNCRN] %(h)-15s rqt:%(L)s %(s)s %(b)-6s "%(m)s:%(U)s %(q)s" usr:%(u)s "%(f)s" "%(a)s"')

# loki format for easier parsing in grafana
access_log_format = (
    'time="%(t)s" pid=%(p)s level="INFO" type="[GNCRN]" ip="%(h)-15s" rqt="%(L)s" response_code="%(s)s" response_bytes="%(b)-6s" uri="%(m)s:%(U)s %(q)s" user=":%(u)s" user_agent="%(a)s"')


# Sets the number of process workers. More workers means more concurrent connections
# RhodeCode can handle at the same time. Each additional worker also increases
# memory usage, as each one keeps its own set of caches.
# Recommended value is (2 * NUMBER_OF_CPUS + 1), e.g. 2 CPUs = 5 workers, but no more
# than 8-10 unless for huge deployments, e.g. 700-1000 users.
# `instance_id = *` must be set in the [app:main] section below (which is the default)
# when using more than 1 worker.
workers = 6

# self adjust workers based on CPU count, to use maximum of CPU and not overquota the resources
# workers = get_workers()

# Gunicorn access log level
loglevel = 'info'

# Process name visible in process list
proc_name = 'rhodecode_vcsserver'

# Type of worker class, one of `sync`, `gevent`
# currently `sync` is the only option allowed.
worker_class = 'sync'

# The maximum number of simultaneous clients. Valid only for gevent
worker_connections = 10

# Max number of requests that worker will handle before being gracefully restarted.
# Prevents memory leaks; jitter adds variability so not all workers are restarted at once.
max_requests = 2000
max_requests_jitter = 30

# The maximum number of pending connections.
# Exceeding this number results in the client getting an error when attempting to connect.
backlog = 64

# Amount of time a worker can spend with handling a request before it
# gets killed and restarted. By default set to 21600 (6hrs)
# Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
timeout = 21600

# The maximum size of HTTP request line in bytes.
# 0 for unlimited
limit_request_line = 0

# Limit the number of HTTP headers fields in a request.
# By default this value is 100 and can't be larger than 32768.
limit_request_fields = 32768

# Limit the allowed size of an HTTP request header field.
# Value is a positive number or 0.
# Setting it to 0 will allow unlimited header field sizes.
limit_request_field_size = 0

# Timeout for graceful workers restart.
# After receiving a restart signal, workers have this much time to finish
# serving requests. Workers still alive after the timeout (starting from the
# receipt of the restart signal) are force killed.
# Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
graceful_timeout = 21600

# The number of seconds to wait for requests on a Keep-Alive connection.
# Generally set in the 1-5 seconds range.
keepalive = 2

# Maximum memory usage that each worker can use before it will receive a
# graceful restart signal; 0 = memory monitoring is disabled
# Examples: 268435456 (256MB), 536870912 (512MB)
# 1073741824 (1GB), 2147483648 (2GB), 4294967296 (4GB)
# Dynamic formula 1024 * 1024 * 256 == 256MBs
memory_max_usage = 0

# How often in seconds to check for memory usage for each gunicorn worker
memory_usage_check_interval = 60

# Threshold value for which we don't recycle worker if GarbageCollection
# frees up enough resources. Before each restart, we try to run GC on worker
# in case we get enough free memory after that, restart will not happen.
memory_usage_recovery_threshold = 0.8
128 129
129 130
@dataclasses.dataclass
class MemoryCheckConfig:
    """Bundle of the three worker memory-monitoring settings."""
    # max RSS in bytes before a worker is recycled; 0 disables monitoring
    max_usage: int
    # seconds between memory checks
    check_interval: int
    # fraction of max_usage under which a post-GC worker counts as recovered
    recovery_threshold: float
135 136
136 137
137 138 def _get_process_rss(pid=None):
138 139 try:
139 140 import psutil
140 141 if pid:
141 142 proc = psutil.Process(pid)
142 143 else:
143 144 proc = psutil.Process()
144 145 return proc.memory_info().rss
145 146 except Exception:
146 147 return None
147 148
148 149
149 150 def _get_config(ini_path):
150 151 import configparser
151 152
152 153 try:
153 154 config = configparser.RawConfigParser()
154 155 config.read(ini_path)
155 156 return config
156 157 except Exception:
157 158 return None
158 159
159 160
def get_memory_usage_params(config=None):
    """Resolve the worker memory-monitoring settings.

    Precedence, lowest to highest: module-level defaults -> values from the
    ``[server:main]`` section of the .ini file given via ``config`` ->
    ``RC_GUNICORN_*`` environment variables.

    Returns a :class:`MemoryCheckConfig`.
    """
    # start from the module-level defaults
    max_usage = memory_max_usage
    check_interval = memory_usage_check_interval
    recovery_threshold = memory_usage_recovery_threshold

    if config:
        conf = _get_config(os.path.abspath(config))
        section = 'server:main'
        if conf and conf.has_section(section):
            if conf.has_option(section, 'memory_max_usage'):
                max_usage = conf.getint(section, 'memory_max_usage')
            if conf.has_option(section, 'memory_usage_check_interval'):
                check_interval = conf.getint(section, 'memory_usage_check_interval')
            if conf.has_option(section, 'memory_usage_recovery_threshold'):
                recovery_threshold = conf.getfloat(section, 'memory_usage_recovery_threshold')

    # environment variables win over both defaults and .ini values
    max_usage = int(os.environ.get('RC_GUNICORN_MEMORY_MAX_USAGE', '')
                    or max_usage)
    check_interval = int(os.environ.get('RC_GUNICORN_MEMORY_USAGE_CHECK_INTERVAL', '')
                         or check_interval)
    recovery_threshold = float(os.environ.get('RC_GUNICORN_MEMORY_USAGE_RECOVERY_THRESHOLD', '')
                               or recovery_threshold)

    return MemoryCheckConfig(max_usage, check_interval, recovery_threshold)
190 191
191 192
192 193 def _time_with_offset(check_interval):
193 194 return time.time() - random.randint(0, check_interval/2.0)
194 195
195 196
def pre_fork(server, worker):
    """Gunicorn hook invoked in the master just before forking a worker; no-op."""
    pass
198 199
199 200
def post_fork(server, worker):
    """Gunicorn hook run in the freshly forked worker process.

    Attaches the memory-monitoring settings to the worker object and seeds
    the last-check timestamp. Settings come from get_memory_usage_params();
    the RC_GUNICORN_* environment variables are then applied once more on
    top (redundant with get_memory_usage_params, kept for safety).
    """
    memory_conf = get_memory_usage_params()

    env = os.environ
    worker._memory_max_usage = int(
        env.get('RC_GUNICORN_MEMORY_MAX_USAGE', '') or memory_conf.max_usage)
    worker._memory_usage_check_interval = int(
        env.get('RC_GUNICORN_MEMORY_USAGE_CHECK_INTERVAL', '') or memory_conf.check_interval)
    worker._memory_usage_recovery_threshold = float(
        env.get('RC_GUNICORN_MEMORY_USAGE_RECOVERY_THRESHOLD', '') or memory_conf.recovery_threshold)

    # register memory last check time, with some random offset so we don't
    # recycle all workers at once
    worker._last_memory_check_time = _time_with_offset(memory_conf.check_interval)

    if memory_conf.max_usage:
        server.log.info("pid=[%-10s] WORKER spawned with max memory set at %s", worker.pid,
                        _format_data_size(memory_conf.max_usage))
    else:
        server.log.info("pid=[%-10s] WORKER spawned", worker.pid)
223 224
224 225
def pre_exec(server):
    """Gunicorn hook run just before the master re-executes itself (e.g. SIGUSR2)."""
    server.log.info("Forked child, re-executing.")
227 228
228 229
def on_starting(server):
    """Gunicorn hook run once in the master before binding sockets.

    Logs the server identity, the full effective gunicorn configuration and
    the resolved memory-monitoring parameters (the "print config on startup"
    behaviour).
    """
    server_lbl = f'{server.proc_name} {server.address}'
    server.log.info("Server %s is starting.", server_lbl)
    server.log.info('Config:')
    server.log.info(f"\n{server.cfg}")
    server.log.info(get_memory_usage_params())
232 236
233 237
def when_ready(server):
    """Gunicorn hook run once the master is ready and about to spawn workers."""
    server.log.info("Server %s is ready. Spawning workers", server)
236 240
237 241
def on_reload(server):
    """Gunicorn hook run on configuration reload (SIGHUP); no-op."""
    pass
240 244
241 245
242 246 def _format_data_size(size, unit="B", precision=1, binary=True):
243 247 """Format a number using SI units (kilo, mega, etc.).
244 248
245 249 ``size``: The number as a float or int.
246 250
247 251 ``unit``: The unit name in plural form. Examples: "bytes", "B".
248 252
249 253 ``precision``: How many digits to the right of the decimal point. Default
250 254 is 1. 0 suppresses the decimal point.
251 255
252 256 ``binary``: If false, use base-10 decimal prefixes (kilo = K = 1000).
253 257 If true, use base-2 binary prefixes (kibi = Ki = 1024).
254 258
255 259 ``full_name``: If false (default), use the prefix abbreviation ("k" or
256 260 "Ki"). If true, use the full prefix ("kilo" or "kibi"). If false,
257 261 use abbreviation ("k" or "Ki").
258 262
259 263 """
260 264
261 265 if not binary:
262 266 base = 1000
263 267 multiples = ('', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
264 268 else:
265 269 base = 1024
266 270 multiples = ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi')
267 271
268 272 sign = ""
269 273 if size > 0:
270 274 m = int(math.log(size, base))
271 275 elif size < 0:
272 276 sign = "-"
273 277 size = -size
274 278 m = int(math.log(size, base))
275 279 else:
276 280 m = 0
277 281 if m > 8:
278 282 m = 8
279 283
280 284 if m == 0:
281 285 precision = '%.0f'
282 286 else:
283 287 precision = '%%.%df' % precision
284 288
285 289 size = precision % (size / math.pow(base, m))
286 290
287 291 return '%s%s %s%s' % (sign, size.strip(), multiples[m], unit)
288 292
289 293
290 294 def _check_memory_usage(worker):
291 295 _memory_max_usage = worker._memory_max_usage
292 296 if not _memory_max_usage:
293 297 return
294 298
295 299 _memory_usage_check_interval = worker._memory_usage_check_interval
296 300 _memory_usage_recovery_threshold = memory_max_usage * worker._memory_usage_recovery_threshold
297 301
298 302 elapsed = time.time() - worker._last_memory_check_time
299 303 if elapsed > _memory_usage_check_interval:
300 304 mem_usage = _get_process_rss()
301 305 if mem_usage and mem_usage > _memory_max_usage:
302 306 worker.log.info(
303 307 "memory usage %s > %s, forcing gc",
304 308 _format_data_size(mem_usage), _format_data_size(_memory_max_usage))
305 309 # Try to clean it up by forcing a full collection.
306 310 gc.collect()
307 311 mem_usage = _get_process_rss()
308 312 if mem_usage > _memory_usage_recovery_threshold:
309 313 # Didn't clean up enough, we'll have to terminate.
310 314 worker.log.warning(
311 315 "memory usage %s > %s after gc, quitting",
312 316 _format_data_size(mem_usage), _format_data_size(_memory_max_usage))
313 317 # This will cause worker to auto-restart itself
314 318 worker.alive = False
315 319 worker._last_memory_check_time = time.time()
316 320
317 321
def worker_int(worker):
    """Gunicorn hook on INT/QUIT: log a stack trace for every live thread.

    Useful post-mortem information when a worker is killed or crashes.
    """
    worker.log.info("pid=[%-10s] worker received INT or QUIT signal", worker.pid)

    # map thread idents to human-readable names for the trace output
    names = {th.ident: th.name for th in threading.enumerate()}

    lines = []
    for thread_id, stack in sys._current_frames().items():  # noqa
        lines.append(
            "\n# Thread: %s(%d)" % (names.get(thread_id, "unknown_thread_id"), thread_id))
        for fname, lineno, name, line in traceback.extract_stack(stack):
            lines.append('File: "%s", line %d, in %s' % (fname, lineno, name))
            if line:
                lines.append(" %s" % (line.strip()))
    worker.log.debug("\n".join(lines))
335 339
336 340
def worker_abort(worker):
    """Gunicorn hook when a worker is force-killed with SIGABRT (e.g. timeout)."""
    worker.log.info("pid=[%-10s] worker received SIGABRT signal", worker.pid)
339 343
340 344
def worker_exit(server, worker):
    """Gunicorn hook run in the worker process as it exits."""
    worker.log.info("pid=[%-10s] worker exit", worker.pid)
343 347
344 348
def child_exit(server, worker):
    """Gunicorn hook run in the master after a worker child has exited."""
    worker.log.info("pid=[%-10s] worker child exit", worker.pid)
347 351
348 352
def pre_request(worker, req):
    """Gunicorn hook before a request is handled: stamp the start time for
    the post_request timing log, and log the incoming request."""
    worker.start_time = time.time()
    worker.log.debug(
        "GNCRN PRE WORKER [cnt:%s]: %s %s", worker.nr, req.method, req.path)
353 357
354 358
def post_request(worker, req, environ, resp):
    """Gunicorn hook after a request: log timing/status and run the memory check."""
    total_time = time.time() - worker.start_time
    # Gunicorn sometimes has problems with reading the status_code
    status_code = getattr(resp, 'status_code', '')
    worker.log.debug(
        "GNCRN POST WORKER [cnt:%s]: %s %s resp: %s, Load Time: %.4fs",
        worker.nr, req.method, req.path, status_code, total_time)
    _check_memory_usage(worker)
363 367
364 368
365 369 def _filter_proxy(ip):
366 370 """
367 371 Passed in IP addresses in HEADERS can be in a special format of multiple
368 372 ips. Those comma separated IPs are passed from various proxies in the
369 373 chain of request processing. The left-most being the original client.
370 374 We only care about the first IP which came from the org. client.
371 375
372 376 :param ip: ip string from headers
373 377 """
374 378 if ',' in ip:
375 379 _ips = ip.split(',')
376 380 _first_ip = _ips[0].strip()
377 381 return _first_ip
378 382 return ip
379 383
380 384
381 385 def _filter_port(ip):
382 386 """
383 387 Removes a port from ip, there are 4 main cases to handle here.
384 388 - ipv4 eg. 127.0.0.1
385 389 - ipv6 eg. ::1
386 390 - ipv4+port eg. 127.0.0.1:8080
387 391 - ipv6+port eg. [::1]:8080
388 392
389 393 :param ip:
390 394 """
391 395 def is_ipv6(ip_addr):
392 396 if hasattr(socket, 'inet_pton'):
393 397 try:
394 398 socket.inet_pton(socket.AF_INET6, ip_addr)
395 399 except socket.error:
396 400 return False
397 401 else:
398 402 return False
399 403 return True
400 404
401 405 if ':' not in ip: # must be ipv4 pure ip
402 406 return ip
403 407
404 408 if '[' in ip and ']' in ip: # ipv6 with port
405 409 return ip.split(']')[0][1:].lower()
406 410
407 411 # must be ipv6 or ipv4 with port
408 412 if is_ipv6(ip):
409 413 return ip
410 414 else:
411 415 ip, _port = ip.split(':')[:2] # means ipv4+port
412 416 return ip
413 417
414 418
def get_ip_addr(environ):
    """Best-effort client IP from a WSGI environ.

    Prefers X-Real-IP, then X-Forwarded-For, then REMOTE_ADDR; proxy chains
    are reduced to the original client and port suffixes are stripped.
    """
    def _clean(raw_ip):
        return _filter_port(_filter_proxy(raw_ip))

    for header_key in ('HTTP_X_REAL_IP', 'HTTP_X_FORWARDED_FOR'):
        ip = environ.get(header_key)
        if ip:
            return _clean(ip)

    return _clean(environ.get('REMOTE_ADDR', '0.0.0.0'))
433 437
434 438
class RhodeCodeLogger(Logger):
    """
    Custom Logger that allows some customization that gunicorn doesn't allow
    """

    # strftime pattern used for the RhodeCode-style timestamp
    datefmt = r"%Y-%m-%d %H:%M:%S"

    def __init__(self, cfg):
        Logger.__init__(self, cfg)

    def now(self):
        """Return the current time formatted in RhodeCode log style (with ms)."""
        ts = time.time()
        millis = int((ts - int(ts)) * 1000)
        return time.strftime(self.datefmt, time.localtime(ts)) + '.{0:03d}'.format(millis)

    def atoms(self, resp, req, environ, request_time):
        """Build the dict of atoms available to the access-log format string."""
        status = resp.status
        if isinstance(status, str):
            # e.g. "200 OK" -> "200"
            status = status.split(None, 1)[0]

        sent = getattr(resp, 'sent', None)
        atoms = {
            'h': get_ip_addr(environ),
            'l': '-',
            'u': self._get_user(environ) or '-',
            't': self.now(),
            'r': "%s %s %s" % (environ['REQUEST_METHOD'],
                               environ['RAW_URI'],
                               environ["SERVER_PROTOCOL"]),
            's': status,
            'm': environ.get('REQUEST_METHOD'),
            'U': environ.get('PATH_INFO'),
            'q': environ.get('QUERY_STRING'),
            'H': environ.get('SERVER_PROTOCOL'),
            'b': str(sent) if sent is not None else '-',
            'B': sent,
            'f': environ.get('HTTP_REFERER', '-'),
            'a': environ.get('HTTP_USER_AGENT', '-'),
            'T': request_time.seconds,
            'D': (request_time.seconds * 1000000) + request_time.microseconds,
            'M': (request_time.seconds * 1000) + int(request_time.microseconds / 1000),
            'L': "%d.%06d" % (request_time.seconds, request_time.microseconds),
            'p': "<%s>" % os.getpid(),
        }

        # merge request headers as "{name}i" atoms
        req_headers = req.headers if hasattr(req, 'headers') else req
        if hasattr(req_headers, "items"):
            req_headers = req_headers.items()
        atoms.update({"{%s}i" % k.lower(): v for k, v in req_headers})

        # merge response headers as "{name}o" atoms
        resp_headers = resp.headers
        if hasattr(resp_headers, "items"):
            resp_headers = resp_headers.items()
        atoms.update({"{%s}o" % k.lower(): v for k, v in resp_headers})

        # merge environ variables as "{name}e" atoms
        atoms.update({"{%s}e" % k.lower(): v for k, v in environ.items()})

        return atoms


# tell gunicorn to use our logger implementation
logger_class = RhodeCodeLogger
General Comments 0
You need to be logged in to leave comments. Login now