##// END OF EJS Templates
chore(configs): synced gunicorn configs with ce/ee
super-admin -
r1203:945790c2 default
parent child Browse files
Show More
@@ -1,517 +1,520 b''
1 1 """
2 2 Gunicorn config extension and hooks. This config file adds some extra settings and memory management.
3 3 Gunicorn configuration should be managed by .ini files entries of RhodeCode or VCSServer
4 4 """
5 5
6 import contextlib
7 import dataclasses
8 6 import gc
7 import os
8 import sys
9 9 import math
10 import os
10 import time
11 import threading
12 import traceback
11 13 import random
12 14 import socket
13 import sys
14 import threading
15 import time
16 import traceback
17
15 import dataclasses
18 16 from gunicorn.glogging import Logger
19 17
20 18
21 19 def get_workers():
22 20 import multiprocessing
23
24 21 return multiprocessing.cpu_count() * 2 + 1
25 22
26 23
27 24 bind = "127.0.0.1:10010"
28 25
29 26
30 27 # Error logging output for gunicorn (-) is stdout
31 errorlog = "-"
28 errorlog = '-'
32 29
33 30 # Access logging output for gunicorn (-) is stdout
34 accesslog = "-"
31 accesslog = '-'
35 32
36 33
37 34 # SERVER MECHANICS
38 35 # None == system temp dir
39 36 # worker_tmp_dir is recommended to be set to some tmpfs
40 37 worker_tmp_dir = None
41 38 tmp_upload_dir = None
42 39
43 40 # use re-use port logic
44 41 # reuse_port = True
45 42
46 43 # Custom log format
47 44 # access_log_format = (
48 45 # '%(t)s %(p)s INFO [GNCRN] %(h)-15s rqt:%(L)s %(s)s %(b)-6s "%(m)s:%(U)s %(q)s" usr:%(u)s "%(f)s" "%(a)s"')
49 46
50 47 # loki format for easier parsing in grafana
51 access_log_format = 'time="%(t)s" pid=%(p)s level="INFO" type="[GNCRN]" ip="%(h)-15s" rqt="%(L)s" response_code="%(s)s" response_bytes="%(b)-6s" uri="%(m)s:%(U)s %(q)s" user=":%(u)s" user_agent="%(a)s"'
52
53
54 # Sets the number of process workers. More workers means more concurrent connections
55 # RhodeCode can handle at the same time. Each additional worker also increases
56 # memory usage as each has its own set of caches.
57 # Recommended value is (2 * NUMBER_OF_CPUS + 1), e.g. 2 CPUs = 5 workers, but no more
58 # than 8-10 unless for huge deployments, e.g. 700-1000 users.
59 # `instance_id = *` must be set in the [app:main] section below (which is the default)
60 # when using more than 1 worker.
61 workers = 6
48 access_log_format = (
49 'time="%(t)s" pid=%(p)s level="INFO" type="[GNCRN]" ip="%(h)-15s" rqt="%(L)s" response_code="%(s)s" response_bytes="%(b)-6s" uri="%(m)s:%(U)s %(q)s" user=":%(u)s" user_agent="%(a)s"')
62 50
63 51 # self adjust workers based on CPU count, to use maximum of CPU and not overquota the resources
64 52 # workers = get_workers()
65 53
66 54 # Gunicorn access log level
67 loglevel = "info"
55 loglevel = 'info'
68 56
69 57 # Process name visible in a process list
70 58 proc_name = "rhodecode_vcsserver"
71 59
72 # Type of worker class, one of `sync`, `gevent`
73 # currently `sync` is the only option allowed.
74 worker_class = "sync"
60 # Type of worker class, one of `sync`, `gevent` or `gthread`
61 # currently `sync` is the only option allowed for vcsserver and for rhodecode all of 3 are allowed
62 # gevent:
63 # In this case, the maximum number of concurrent requests is (N workers * X worker_connections)
64 # e.g. workers = 3, worker_connections = 10: 3 * 10 = 30 concurrent requests can be handled
65 # gthread:
66 # In this case, the maximum number of concurrent requests is (N workers * X threads)
67 # e.g. workers = 3, threads = 3: 3 * 3 = 9 concurrent requests can be handled
68 worker_class = 'sync'
69
70 # Sets the number of process workers. More workers means more concurrent connections
71 # RhodeCode can handle at the same time. Each additional worker also increases
72 # memory usage as each has its own set of caches.
73 # The recommended value is (2 * NUMBER_OF_CPUS + 1), e.g. 2 CPUs = 5 workers, but no more
74 # than 8-10 unless for huge deployments, e.g. 700-1000 users.
75 # `instance_id = *` must be set in the [app:main] section below (which is the default)
76 # when using more than 1 worker.
77 workers = 2
78
79 # Threads numbers for worker class gthread
80 threads = 1
75 81
76 82 # The maximum number of simultaneous clients. Valid only for gevent
83 # In this case, the maximum number of concurrent requests is (N workers * X worker_connections)
84 # e.g. workers = 3, worker_connections = 10: 3 * 10 = 30 concurrent requests can be handled
77 85 worker_connections = 10
78 86
79 87 # Max number of requests that worker will handle before being gracefully restarted.
80 88 # Prevents memory leaks, jitter adds variability so not all workers are restarted at once.
81 89 max_requests = 2000
82 90 max_requests_jitter = int(max_requests * 0.2) # 20% of max_requests
83 91
84 92 # The maximum number of pending connections.
85 93 # Exceeding this number results in the client getting an error when attempting to connect.
86 94 backlog = 64
87 95
88 96 # The amount of time a worker can spend handling a request before it
89 97 # gets killed and restarted. By default, set to 21600 (6hrs)
90 98 # Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
91 99 timeout = 21600
92 100
93 101 # The maximum size of HTTP request line in bytes.
94 102 # 0 for unlimited
95 103 limit_request_line = 0
96 104
97 105 # Limit the number of HTTP headers fields in a request.
98 106 # By default this value is 100 and can't be larger than 32768.
99 107 limit_request_fields = 32768
100 108
101 109 # Limit the allowed size of an HTTP request header field.
102 110 # Value is a positive number or 0.
103 111 # Setting it to 0 will allow unlimited header field sizes.
104 112 limit_request_field_size = 0
105 113
106 114 # Timeout for graceful workers restart.
107 115 # After receiving a restart signal, workers have this much time to finish
108 116 # serving requests. Workers still alive after the timeout (starting from the
109 117 # receipt of the restart signal) are force killed.
110 118 # Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
111 119 graceful_timeout = 21600
112 120
113 121 # The number of seconds to wait for requests on a Keep-Alive connection.
114 122 # Generally set in the 1-5 seconds range.
115 123 keepalive = 2
116 124
117 125 # Maximum memory usage that each worker can use before it will receive a
118 126 # graceful restart signal. 0 = memory monitoring is disabled
119 127 # Examples: 268435456 (256MB), 536870912 (512MB)
120 128 # 1073741824 (1GB), 2147483648 (2GB), 4294967296 (4GB)
121 129 # Dynamic formula 1024 * 1024 * 256 == 256MBs
122 130 memory_max_usage = 0
123 131
124 132 # How often in seconds to check for memory usage for each gunicorn worker
125 133 memory_usage_check_interval = 60
126 134
127 135 # Threshold value for which we don't recycle worker if GarbageCollection
128 136 # frees up enough resources. Before each restart, we try to run GC on the worker;
129 137 # if enough memory is freed after that, the restart will not happen.
130 138 memory_usage_recovery_threshold = 0.8
131 139
132 140
133 141 @dataclasses.dataclass
134 142 class MemoryCheckConfig:
135 143 max_usage: int
136 144 check_interval: int
137 145 recovery_threshold: float
138 146
139 147
140 148 def _get_process_rss(pid=None):
141 with contextlib.suppress(Exception):
149 try:
142 150 import psutil
143
144 151 if pid:
145 152 proc = psutil.Process(pid)
146 153 else:
147 154 proc = psutil.Process()
148 155 return proc.memory_info().rss
149
156 except Exception:
150 157 return None
151 158
152 159
153 160 def _get_config(ini_path):
154 161 import configparser
155 162
156 with contextlib.suppress(Exception):
163 try:
157 164 config = configparser.RawConfigParser()
158 165 config.read(ini_path)
159 166 return config
160
167 except Exception:
161 168 return None
162 169
163 170
164 171 def get_memory_usage_params(config=None):
165 172 # memory spec defaults
166 173 _memory_max_usage = memory_max_usage
167 174 _memory_usage_check_interval = memory_usage_check_interval
168 175 _memory_usage_recovery_threshold = memory_usage_recovery_threshold
169 176
170 177 if config:
171 178 ini_path = os.path.abspath(config)
172 179 conf = _get_config(ini_path)
173 180
174 section = "server:main"
181 section = 'server:main'
175 182 if conf and conf.has_section(section):
176 if conf.has_option(section, "memory_max_usage"):
177 _memory_max_usage = conf.getint(section, "memory_max_usage")
178 183
179 if conf.has_option(section, "memory_usage_check_interval"):
180 _memory_usage_check_interval = conf.getint(section, "memory_usage_check_interval")
184 if conf.has_option(section, 'memory_max_usage'):
185 _memory_max_usage = conf.getint(section, 'memory_max_usage')
186
187 if conf.has_option(section, 'memory_usage_check_interval'):
188 _memory_usage_check_interval = conf.getint(section, 'memory_usage_check_interval')
181 189
182 if conf.has_option(section, "memory_usage_recovery_threshold"):
183 _memory_usage_recovery_threshold = conf.getfloat(section, "memory_usage_recovery_threshold")
190 if conf.has_option(section, 'memory_usage_recovery_threshold'):
191 _memory_usage_recovery_threshold = conf.getfloat(section, 'memory_usage_recovery_threshold')
184 192
185 _memory_max_usage = int(os.environ.get("RC_GUNICORN_MEMORY_MAX_USAGE", "") or _memory_max_usage)
186 _memory_usage_check_interval = int(
187 os.environ.get("RC_GUNICORN_MEMORY_USAGE_CHECK_INTERVAL", "") or _memory_usage_check_interval
188 )
189 _memory_usage_recovery_threshold = float(
190 os.environ.get("RC_GUNICORN_MEMORY_USAGE_RECOVERY_THRESHOLD", "") or _memory_usage_recovery_threshold
191 )
193 _memory_max_usage = int(os.environ.get('RC_GUNICORN_MEMORY_MAX_USAGE', '')
194 or _memory_max_usage)
195 _memory_usage_check_interval = int(os.environ.get('RC_GUNICORN_MEMORY_USAGE_CHECK_INTERVAL', '')
196 or _memory_usage_check_interval)
197 _memory_usage_recovery_threshold = float(os.environ.get('RC_GUNICORN_MEMORY_USAGE_RECOVERY_THRESHOLD', '')
198 or _memory_usage_recovery_threshold)
192 199
193 200 return MemoryCheckConfig(_memory_max_usage, _memory_usage_check_interval, _memory_usage_recovery_threshold)
194 201
195 202
196 203 def _time_with_offset(check_interval):
197 204 return time.time() - random.randint(0, check_interval / 2.0)
198 205
199 206
200 207 def pre_fork(server, worker):
201 208 pass
202 209
203 210
204 211 def post_fork(server, worker):
212
205 213 memory_conf = get_memory_usage_params()
206 214 _memory_max_usage = memory_conf.max_usage
207 215 _memory_usage_check_interval = memory_conf.check_interval
208 216 _memory_usage_recovery_threshold = memory_conf.recovery_threshold
209 217
210 worker._memory_max_usage = int(os.environ.get("RC_GUNICORN_MEMORY_MAX_USAGE", "") or _memory_max_usage)
211 worker._memory_usage_check_interval = int(
212 os.environ.get("RC_GUNICORN_MEMORY_USAGE_CHECK_INTERVAL", "") or _memory_usage_check_interval
213 )
214 worker._memory_usage_recovery_threshold = float(
215 os.environ.get("RC_GUNICORN_MEMORY_USAGE_RECOVERY_THRESHOLD", "") or _memory_usage_recovery_threshold
216 )
218 worker._memory_max_usage = int(os.environ.get('RC_GUNICORN_MEMORY_MAX_USAGE', '')
219 or _memory_max_usage)
220 worker._memory_usage_check_interval = int(os.environ.get('RC_GUNICORN_MEMORY_USAGE_CHECK_INTERVAL', '')
221 or _memory_usage_check_interval)
222 worker._memory_usage_recovery_threshold = float(os.environ.get('RC_GUNICORN_MEMORY_USAGE_RECOVERY_THRESHOLD', '')
223 or _memory_usage_recovery_threshold)
217 224
218 225 # register memory last check time, with some random offset so we don't recycle all
219 226 # at once
220 227 worker._last_memory_check_time = _time_with_offset(_memory_usage_check_interval)
221 228
222 229 if _memory_max_usage:
223 server.log.info(
224 "pid=[%-10s] WORKER spawned with max memory set at %s", worker.pid, _format_data_size(_memory_max_usage)
225 )
230 server.log.info("pid=[%-10s] WORKER spawned with max memory set at %s", worker.pid,
231 _format_data_size(_memory_max_usage))
226 232 else:
227 233 server.log.info("pid=[%-10s] WORKER spawned", worker.pid)
228 234
229 235
230 236 def pre_exec(server):
231 237 server.log.info("Forked child, re-executing.")
232 238
233 239
234 240 def on_starting(server):
235 server_lbl = "{} {}".format(server.proc_name, server.address)
241 server_lbl = '{} {}'.format(server.proc_name, server.address)
236 242 server.log.info("Server %s is starting.", server_lbl)
237 server.log.info("Config:")
243 server.log.info('Config:')
238 244 server.log.info(f"\n{server.cfg}")
239 245 server.log.info(get_memory_usage_params())
240 246
241 247
242 248 def when_ready(server):
243 249 server.log.info("Server %s is ready. Spawning workers", server)
244 250
245 251
246 252 def on_reload(server):
247 253 pass
248 254
249 255
250 256 def _format_data_size(size, unit="B", precision=1, binary=True):
251 257 """Format a number using SI units (kilo, mega, etc.).
252 258
253 259 ``size``: The number as a float or int.
254 260
255 261 ``unit``: The unit name in plural form. Examples: "bytes", "B".
256 262
257 263 ``precision``: How many digits to the right of the decimal point. Default
258 264 is 1. 0 suppresses the decimal point.
259 265
260 266 ``binary``: If false, use base-10 decimal prefixes (kilo = K = 1000).
261 267 If true, use base-2 binary prefixes (kibi = Ki = 1024).
262 268
263 269 ``full_name``: If false (default), use the prefix abbreviation ("k" or
264 270 "Ki"). If true, use the full prefix ("kilo" or "kibi"). If false,
265 271 use abbreviation ("k" or "Ki").
266 272
267 273 """
268 274
269 275 if not binary:
270 276 base = 1000
271 multiples = ("", "k", "M", "G", "T", "P", "E", "Z", "Y")
277 multiples = ('', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
272 278 else:
273 279 base = 1024
274 multiples = ("", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi", "Yi")
280 multiples = ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi')
275 281
276 282 sign = ""
277 283 if size > 0:
278 284 m = int(math.log(size, base))
279 285 elif size < 0:
280 286 sign = "-"
281 287 size = -size
282 288 m = int(math.log(size, base))
283 289 else:
284 290 m = 0
285 291 if m > 8:
286 292 m = 8
287 293
288 294 if m == 0:
289 precision = "%.0f"
295 precision = '%.0f'
290 296 else:
291 precision = "%%.%df" % precision
297 precision = '%%.%df' % precision
292 298
293 299 size = precision % (size / math.pow(base, m))
294 300
295 return "%s%s %s%s" % (sign, size.strip(), multiples[m], unit)
301 return '%s%s %s%s' % (sign, size.strip(), multiples[m], unit)
296 302
297 303
298 304 def _check_memory_usage(worker):
299 305 _memory_max_usage = worker._memory_max_usage
300 306 if not _memory_max_usage:
301 307 return
302 308
303 309 _memory_usage_check_interval = worker._memory_usage_check_interval
304 310 _memory_usage_recovery_threshold = memory_max_usage * worker._memory_usage_recovery_threshold
305 311
306 312 elapsed = time.time() - worker._last_memory_check_time
307 313 if elapsed > _memory_usage_check_interval:
308 314 mem_usage = _get_process_rss()
309 315 if mem_usage and mem_usage > _memory_max_usage:
310 316 worker.log.info(
311 "memory usage %s > %s, forcing gc", _format_data_size(mem_usage), _format_data_size(_memory_max_usage)
312 )
317 "memory usage %s > %s, forcing gc",
318 _format_data_size(mem_usage), _format_data_size(_memory_max_usage))
313 319 # Try to clean it up by forcing a full collection.
314 320 gc.collect()
315 321 mem_usage = _get_process_rss()
316 322 if mem_usage > _memory_usage_recovery_threshold:
317 323 # Didn't clean up enough, we'll have to terminate.
318 324 worker.log.warning(
319 325 "memory usage %s > %s after gc, quitting",
320 _format_data_size(mem_usage),
321 _format_data_size(_memory_max_usage),
322 )
326 _format_data_size(mem_usage), _format_data_size(_memory_max_usage))
323 327 # This will cause worker to auto-restart itself
324 328 worker.alive = False
325 329 worker._last_memory_check_time = time.time()
326 330
327 331
328 332 def worker_int(worker):
329 333 worker.log.info("pid=[%-10s] worker received INT or QUIT signal", worker.pid)
330 334
331 335 # get traceback info, when a worker crashes
332 336 def get_thread_id(t_id):
333 337 id2name = dict([(th.ident, th.name) for th in threading.enumerate()])
334 338 return id2name.get(t_id, "unknown_thread_id")
335 339
336 340 code = []
337 341 for thread_id, stack in sys._current_frames().items(): # noqa
338 code.append("\n# Thread: %s(%d)" % (get_thread_id(thread_id), thread_id))
342 code.append(
343 "\n# Thread: %s(%d)" % (get_thread_id(thread_id), thread_id))
339 344 for fname, lineno, name, line in traceback.extract_stack(stack):
340 345 code.append('File: "%s", line %d, in %s' % (fname, lineno, name))
341 346 if line:
342 347 code.append(" %s" % (line.strip()))
343 348 worker.log.debug("\n".join(code))
344 349
345 350
346 351 def worker_abort(worker):
347 352 worker.log.info("pid=[%-10s] worker received SIGABRT signal", worker.pid)
348 353
349 354
350 355 def worker_exit(server, worker):
351 356 worker.log.info("pid=[%-10s] worker exit", worker.pid)
352 357
353 358
354 359 def child_exit(server, worker):
355 360 worker.log.info("pid=[%-10s] worker child exit", worker.pid)
356 361
357 362
358 363 def pre_request(worker, req):
359 364 worker.start_time = time.time()
360 worker.log.debug("GNCRN PRE WORKER [cnt:%s]: %s %s", worker.nr, req.method, req.path)
365 worker.log.debug(
366 "GNCRN PRE WORKER [cnt:%s]: %s %s", worker.nr, req.method, req.path)
361 367
362 368
363 369 def post_request(worker, req, environ, resp):
364 370 total_time = time.time() - worker.start_time
365 371 # Gunicorn sometimes has problems with reading the status_code
366 status_code = getattr(resp, "status_code", "")
372 status_code = getattr(resp, 'status_code', '')
367 373 worker.log.debug(
368 374 "GNCRN POST WORKER [cnt:%s]: %s %s resp: %s, Load Time: %.4fs",
369 worker.nr,
370 req.method,
371 req.path,
372 status_code,
373 total_time,
374 )
375 worker.nr, req.method, req.path, status_code, total_time)
375 376 _check_memory_usage(worker)
376 377
377 378
378 379 def _filter_proxy(ip):
379 380 """
380 381 Passed in IP addresses in HEADERS can be in a special format of multiple
381 382 ips. Those comma separated IPs are passed from various proxies in the
382 383 chain of request processing. The left-most being the original client.
383 384 We only care about the first IP which came from the org. client.
384 385
385 386 :param ip: ip string from headers
386 387 """
387 if "," in ip:
388 _ips = ip.split(",")
388 if ',' in ip:
389 _ips = ip.split(',')
389 390 _first_ip = _ips[0].strip()
390 391 return _first_ip
391 392 return ip
392 393
393 394
394 395 def _filter_port(ip):
395 396 """
396 397 Removes a port from ip, there are 4 main cases to handle here.
397 398 - ipv4 eg. 127.0.0.1
398 399 - ipv6 eg. ::1
399 400 - ipv4+port eg. 127.0.0.1:8080
400 401 - ipv6+port eg. [::1]:8080
401 402
402 403 :param ip:
403 404 """
404
405 405 def is_ipv6(ip_addr):
406 if hasattr(socket, "inet_pton"):
406 if hasattr(socket, 'inet_pton'):
407 407 try:
408 408 socket.inet_pton(socket.AF_INET6, ip_addr)
409 409 except socket.error:
410 410 return False
411 411 else:
412 412 return False
413 413 return True
414 414
415 if ":" not in ip: # must be ipv4 pure ip
415 if ':' not in ip: # must be ipv4 pure ip
416 416 return ip
417 417
418 if "[" in ip and "]" in ip: # ipv6 with port
419 return ip.split("]")[0][1:].lower()
418 if '[' in ip and ']' in ip: # ipv6 with port
419 return ip.split(']')[0][1:].lower()
420 420
421 421 # must be ipv6 or ipv4 with port
422 422 if is_ipv6(ip):
423 423 return ip
424 424 else:
425 ip, _port = ip.split(":")[:2] # means ipv4+port
425 ip, _port = ip.split(':')[:2] # means ipv4+port
426 426 return ip
427 427
428 428
429 429 def get_ip_addr(environ):
430 proxy_key = "HTTP_X_REAL_IP"
431 proxy_key2 = "HTTP_X_FORWARDED_FOR"
432 def_key = "REMOTE_ADDR"
430 proxy_key = 'HTTP_X_REAL_IP'
431 proxy_key2 = 'HTTP_X_FORWARDED_FOR'
432 def_key = 'REMOTE_ADDR'
433 433
434 434 def _filters(x):
435 435 return _filter_port(_filter_proxy(x))
436 436
437 437 ip = environ.get(proxy_key)
438 438 if ip:
439 439 return _filters(ip)
440 440
441 441 ip = environ.get(proxy_key2)
442 442 if ip:
443 443 return _filters(ip)
444 444
445 ip = environ.get(def_key, "0.0.0.0")
445 ip = environ.get(def_key, '0.0.0.0')
446 446 return _filters(ip)
447 447
448 448
449 449 class RhodeCodeLogger(Logger):
450 450 """
451 451 Custom Logger that allows some customization that gunicorn doesn't allow
452 452 """
453 453
454 454 datefmt = r"%Y-%m-%d %H:%M:%S"
455 455
456 456 def __init__(self, cfg):
457 457 Logger.__init__(self, cfg)
458 458
459 459 def now(self):
460 460 """return date in RhodeCode Log format"""
461 461 now = time.time()
462 462 msecs = int((now - int(now)) * 1000)
463 return time.strftime(self.datefmt, time.localtime(now)) + ".{0:03d}".format(msecs)
463 return time.strftime(self.datefmt, time.localtime(now)) + '.{0:03d}'.format(msecs)
464 464
465 465 def atoms(self, resp, req, environ, request_time):
466 """Gets atoms for log formatting."""
466 """ Gets atoms for log formatting.
467 """
467 468 status = resp.status
468 469 if isinstance(status, str):
469 470 status = status.split(None, 1)[0]
470 471 atoms = {
471 "h": get_ip_addr(environ),
472 "l": "-",
473 "u": self._get_user(environ) or "-",
474 "t": self.now(),
475 "r": "%s %s %s" % (environ["REQUEST_METHOD"], environ["RAW_URI"], environ["SERVER_PROTOCOL"]),
476 "s": status,
477 "m": environ.get("REQUEST_METHOD"),
478 "U": environ.get("PATH_INFO"),
479 "q": environ.get("QUERY_STRING"),
480 "H": environ.get("SERVER_PROTOCOL"),
481 "b": getattr(resp, "sent", None) is not None and str(resp.sent) or "-",
482 "B": getattr(resp, "sent", None),
483 "f": environ.get("HTTP_REFERER", "-"),
484 "a": environ.get("HTTP_USER_AGENT", "-"),
485 "T": request_time.seconds,
486 "D": (request_time.seconds * 1000000) + request_time.microseconds,
487 "M": (request_time.seconds * 1000) + int(request_time.microseconds / 1000),
488 "L": "%d.%06d" % (request_time.seconds, request_time.microseconds),
489 "p": "<%s>" % os.getpid(),
472 'h': get_ip_addr(environ),
473 'l': '-',
474 'u': self._get_user(environ) or '-',
475 't': self.now(),
476 'r': "%s %s %s" % (environ['REQUEST_METHOD'],
477 environ['RAW_URI'],
478 environ["SERVER_PROTOCOL"]),
479 's': status,
480 'm': environ.get('REQUEST_METHOD'),
481 'U': environ.get('PATH_INFO'),
482 'q': environ.get('QUERY_STRING'),
483 'H': environ.get('SERVER_PROTOCOL'),
484 'b': getattr(resp, 'sent', None) is not None and str(resp.sent) or '-',
485 'B': getattr(resp, 'sent', None),
486 'f': environ.get('HTTP_REFERER', '-'),
487 'a': environ.get('HTTP_USER_AGENT', '-'),
488 'T': request_time.seconds,
489 'D': (request_time.seconds * 1000000) + request_time.microseconds,
490 'M': (request_time.seconds * 1000) + int(request_time.microseconds/1000),
491 'L': "%d.%06d" % (request_time.seconds, request_time.microseconds),
492 'p': "<%s>" % os.getpid()
490 493 }
491 494
492 495 # add request headers
493 if hasattr(req, "headers"):
496 if hasattr(req, 'headers'):
494 497 req_headers = req.headers
495 498 else:
496 499 req_headers = req
497 500
498 501 if hasattr(req_headers, "items"):
499 502 req_headers = req_headers.items()
500 503
501 504 atoms.update({"{%s}i" % k.lower(): v for k, v in req_headers})
502 505
503 506 resp_headers = resp.headers
504 507 if hasattr(resp_headers, "items"):
505 508 resp_headers = resp_headers.items()
506 509
507 510 # add response headers
508 511 atoms.update({"{%s}o" % k.lower(): v for k, v in resp_headers})
509 512
510 513 # add environ variables
511 514 environ_variables = environ.items()
512 515 atoms.update({"{%s}e" % k.lower(): v for k, v in environ_variables})
513 516
514 517 return atoms
515 518
516 519
517 520 logger_class = RhodeCodeLogger
General Comments 0
You need to be logged in to leave comments. Login now