configs: moved most of configuration back to .ini files instead of gunicorn file
marcink
r801:ecde08b9 default
@@ -1,91 +1,175 b''
1 1 ################################################################################
2 2 # RhodeCode VCSServer with HTTP Backend - configuration #
3 3 ################################################################################
4 4
5 5
6 6 [server:main]
7 7 ## COMMON ##
8 8 host = 0.0.0.0
9 9 port = 9900
10 10
11 ###########################################################
12 ## WAITRESS WSGI SERVER - Recommended for Development ####
13 ###########################################################
14
11 15 use = egg:waitress#main
16 ## number of worker threads
17 threads = 5
18 ## MAX BODY SIZE 100GB
19 max_request_body_size = 107374182400
20 ## Use poll instead of select; fixes file descriptor limit problems.
21 ## May not work on old Windows systems.
22 asyncore_use_poll = true
23
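For reference, the 100GB request-body cap set above is plain byte arithmetic; a quick sanity check in Python (illustrative only, not part of the committed config):

    # 100 GiB expressed in bytes, matching max_request_body_size above
    assert 100 * 1024 * 1024 * 1024 == 107374182400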
24
25 ##########################
26 ## GUNICORN WSGI SERVER ##
27 ##########################
28 ## run with gunicorn --log-config vcsserver.ini --paste vcsserver.ini
29
30 #use = egg:gunicorn#main
31 ## Sets the number of process workers. More workers means more concurrent connections
32 ## RhodeCode can handle at the same time. Each additional worker also increases
33 ## memory usage, as each has its own set of caches.
34 ## Recommended value is (2 * NUMBER_OF_CPUS + 1), e.g. 2 CPUs = 5 workers, but no more
35 ## than 8-10 except for really big deployments, e.g. 700-1000 users.
36 ## `instance_id = *` must be set in the [app:main] section below (which is the default)
37 ## when using more than 1 worker.
38 #workers = 2
39
40 ## Gunicorn access log level
41 #loglevel = info
42
43 ## process name visible in process list
44 #proc_name = rhodecode_vcsserver
45
46 ## type of worker class; currently `sync` is the only option allowed.
47 #worker_class = sync
48
49 ## The maximum number of simultaneous clients. Valid only for Gevent
50 #worker_connections = 10
51
52 ## max number of requests that a worker will handle before being gracefully
53 ## restarted; this can help prevent memory leaks
54 #max_requests = 1000
55 #max_requests_jitter = 30
56
57 ## amount of time a worker can spend handling a request before it
58 ## gets killed and restarted. Set to 6 hrs (21600 seconds)
59 #timeout = 21600
60
61 ## The maximum size of HTTP request line in bytes.
62 ## 0 for unlimited
63 #limit_request_line = 0
64
65 ## Limit the number of HTTP header fields in a request.
66 ## By default this value is 100 and can't be larger than 32768.
67 #limit_request_fields = 32768
68
69 ## Limit the allowed size of an HTTP request header field.
70 ## Value is a positive number or 0.
71 ## Setting it to 0 will allow unlimited header field sizes.
72 #limit_request_field_size = 0
73
74 ## Timeout for graceful workers restart.
75 ## After receiving a restart signal, workers have this much time to finish
76 ## serving requests. Workers still alive after the timeout (starting from the
77 ## receipt of the restart signal) are force killed.
78 #graceful_timeout = 3600
79
80 # The number of seconds to wait for requests on a Keep-Alive connection.
81 # Generally set in the 1-5 seconds range.
82 #keepalive = 2
83
84 ## Maximum amount of memory a worker may use before it will receive a
85 ## graceful restart signal, e.g. 10MB = 10485760 (10 * 1024 * 1024)
86 # 0 = memory monitoring is disabled
87 #memory_max_usage = 0
88
89 ## How often in seconds to check for memory usage for each gunicorn worker
90 #memory_usage_check_interval = 60
91
92 ## Threshold (fraction of memory_max_usage) below which a worker is not recycled.
93 ## Before each restart we try to run GC on the worker; if that frees enough
94 ## memory to drop below the threshold, the restart will not happen.
95 #memory_usage_recovery_threshold = 0.8
12 96
13 97
14 98 [app:main]
15 99 use = egg:rhodecode-vcsserver
16 100
17 101 pyramid.default_locale_name = en
18 102 pyramid.includes =
19 103
20 104 ## default locale used by VCS systems
21 105 locale = en_US.UTF-8
22 106
23 107
24 108 ## path to binaries for vcsserver; it should be set by the installer
25 109 ## at installation time, e.g. /home/user/vcsserver-1/profile/bin
26 110 core.binary_dir = ""
27 111
28 112 ## Custom exception store path, defaults to TMPDIR
29 113 ## This is used to store exceptions from RhodeCode in a shared directory
30 114 #exception_tracker.store_path =
31 115
32 116 ## Default cache dir for caches. Putting this into a ramdisk
33 117 ## can boost performance, e.g. /tmpfs/data_ramdisk; however, this directory might require
34 118 ## a large amount of space
35 119 cache_dir = %(here)s/rcdev/data
36 120
37 121 ## cache region for storing repo_objects cache
38 122 rc_cache.repo_object.backend = dogpile.cache.rc.file_namespace
39 123 ## cache auto-expires after N seconds (2592000 == 30 days)
40 124 rc_cache.repo_object.expiration_time = 2592000
41 125
42 126 ## cache file store path; if empty, it is set automatically to the tmp dir location
43 127 #rc_cache.repo_object.arguments.filename = /tmp/vcsserver_cache.db
44 128
45 129 ## max size of the LRU cache; old values will be discarded once the cache reaches max_size
46 130 rc_cache.repo_object.max_size = 100
47 131
48 132
49 133 ################################
50 134 ### LOGGING CONFIGURATION ####
51 135 ################################
52 136 [loggers]
53 137 keys = root, vcsserver
54 138
55 139 [handlers]
56 140 keys = console
57 141
58 142 [formatters]
59 143 keys = generic
60 144
61 145 #############
62 146 ## LOGGERS ##
63 147 #############
64 148 [logger_root]
65 149 level = NOTSET
66 150 handlers = console
67 151
68 152 [logger_vcsserver]
69 153 level = DEBUG
70 154 handlers =
71 155 qualname = vcsserver
72 156 propagate = 1
73 157
74 158
75 159 ##############
76 160 ## HANDLERS ##
77 161 ##############
78 162
79 163 [handler_console]
80 164 class = StreamHandler
81 165 args = (sys.stderr,)
82 166 level = DEBUG
83 167 formatter = generic
84 168
85 169 ################
86 170 ## FORMATTERS ##
87 171 ################
88 172
89 173 [formatter_generic]
90 174 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
91 175 datefmt = %Y-%m-%d %H:%M:%S
@@ -1,259 +1,270 b''
1 1 """
2 2 gunicorn config extension and hooks. Sets additional configuration that is
3 3 applied on top of the .ini config.
4 4
5 5 - workers = ${cpu_number}
6 6 - threads = 1
7 7 - proc_name = ${gunicorn_proc_name}
8 8 - worker_class = sync
9 9 - worker_connections = 10
10 10 - max_requests = 1000
11 11 - max_requests_jitter = 30
12 12 - timeout = 21600
13 13
14 14 """
15 15
16 import gc
17 import os
18 import sys
16 19 import math
17 import gc
18 import sys
19 20 import time
20 21 import threading
21 22 import traceback
22 23 import random
23 24 from gunicorn.glogging import Logger
24 25
25 26
26 27 def get_workers():
27 28 import multiprocessing
28 29 return multiprocessing.cpu_count() * 2 + 1
29 30
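get_workers() implements the same (2 * NUMBER_OF_CPUS + 1) rule the .ini comments recommend, and is only used if the `workers = get_workers()` line further down is uncommented. A minimal illustration of what it evaluates to, assuming a 4-core machine:

    import multiprocessing

    # what get_workers() returns on a 4-core box: 4 * 2 + 1 == 9
    workers = multiprocessing.cpu_count() * 2 + 1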
30 31 # GLOBAL
31 32 errorlog = '-'
32 33 accesslog = '-'
33 loglevel = 'info'
34 34
35 # SECURITY
36
37 # The maximum size of HTTP request line in bytes.
38 # 0 for unlimited
39 limit_request_line = 0
40
41 # Limit the number of HTTP headers fields in a request.
42 # By default this value is 100 and can't be larger than 32768.
43 limit_request_fields = 32768
44
45 # Limit the allowed size of an HTTP request header field.
46 # Value is a positive number or 0.
47 # Setting it to 0 will allow unlimited header field sizes.
48 limit_request_field_size = 0
49
50 # Timeout for graceful workers restart.
51 # After receiving a restart signal, workers have this much time to finish
52 # serving requests. Workers still alive after the timeout (starting from the
53 # receipt of the restart signal) are force killed.
54 graceful_timeout = 60 * 60
55
56 # The number of seconds to wait for requests on a Keep-Alive connection.
57 # Generally set in the 1-5 seconds range.
58 keepalive = 2
59 35
60 36 # SERVER MECHANICS
61 37 # None == system temp dir
62 38 # worker_tmp_dir is recommended to be set to some tmpfs
63 39 worker_tmp_dir = None
64 40 tmp_upload_dir = None
65 41
66 42 # Custom log format
67 43 access_log_format = (
68 44 '%(t)s %(p)s INFO [GNCRN] %(h)-15s rqt:%(L)s %(s)s %(b)-6s "%(m)s:%(U)s %(q)s" usr:%(u)s "%(f)s" "%(a)s"')
69 45
70 46 # self adjust workers based on CPU count
71 47 # workers = get_workers()
72 48
73 # n * 1024 * 0124 == n MBs, 0 = memory monitoring is disabled
74 MAX_MEMORY_USAGE = 0 * 1024 * 1024
75
76 # How often in seconds to check for memory usage
77 MEMORY_USAGE_CHECK_INTERVAL = 30
78
79 # If a gc brings us back below this threshold, we can avoid termination.
80 MEMORY_USAGE_RECOVERY_THRESHOLD = MAX_MEMORY_USAGE * 0.8
81
82 49
83 50 def _get_process_rss(pid=None):
84 51 try:
85 52 import psutil
86 53 if pid:
87 54 proc = psutil.Process(pid)
88 55 else:
89 56 proc = psutil.Process()
90 57 return proc.memory_info().rss
91 58 except Exception:
92 59 return None
93 60
94 61
95 def _time_with_offset():
96 return time.time() - random.randint(0, MEMORY_USAGE_CHECK_INTERVAL/2.0)
62 def _get_config(ini_path):
63
64 try:
65 import configparser
66 except ImportError:
67 import ConfigParser as configparser
68 try:
69 config = configparser.ConfigParser()
70 config.read(ini_path)
71 return config
72 except Exception:
73 return None
74
75
76 def _time_with_offset(memory_usage_check_interval):
77 return time.time() - random.randint(0, memory_usage_check_interval/2.0)
97 78
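The random offset above back-dates each worker's first memory check by up to half the check interval, so workers spawned together do not all hit the recycle check at the same moment. A small sketch of the effect, assuming the default 60-second interval:

    import time
    import random

    memory_usage_check_interval = 60  # default picked up in post_fork() below
    # each worker's "last check" timestamp is skewed back by 0-30 seconds
    last_check = time.time() - random.randint(0, memory_usage_check_interval // 2)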
98 79
99 80 def pre_fork(server, worker):
100 81 pass
101 82
102 83
103 84 def post_fork(server, worker):
104 server.log.info("<%s> WORKER spawned", worker.pid)
85
86 # memory spec defaults
87 _memory_max_usage = 0
88 _memory_usage_check_interval = 60
89 _memory_usage_recovery_threshold = 0.8
90
91 ini_path = os.path.abspath(server.cfg.paste)
92 conf = _get_config(ini_path)
93 if conf and 'server:main' in conf:
94 section = conf['server:main']
95
96 if section.get('memory_max_usage'):
97 _memory_max_usage = int(section.get('memory_max_usage'))
98 if section.get('memory_usage_check_interval'):
99 _memory_usage_check_interval = int(section.get('memory_usage_check_interval'))
100 if section.get('memory_usage_recovery_threshold'):
101 _memory_usage_recovery_threshold = float(section.get('memory_usage_recovery_threshold'))
102
103 worker._memory_max_usage = _memory_max_usage
104 worker._memory_usage_check_interval = _memory_usage_check_interval
105 worker._memory_usage_recovery_threshold = _memory_usage_recovery_threshold
106
105 107 # register memory last check time, with some random offset so we don't recycle all
106 108 # at once
107 worker._last_memory_check_time = _time_with_offset()
109 worker._last_memory_check_time = _time_with_offset(_memory_usage_check_interval)
110
111 if _memory_max_usage:
112 server.log.info("[%-10s] WORKER spawned with max memory set at %s", worker.pid,
113 _format_data_size(_memory_max_usage))
114 else:
115 server.log.info("[%-10s] WORKER spawned", worker.pid)
108 116
109 117
110 118 def pre_exec(server):
111 119 server.log.info("Forked child, re-executing.")
112 120
113 121
114 122 def on_starting(server):
115 123 server_lbl = '{} {}'.format(server.proc_name, server.address)
116 124 server.log.info("Server %s is starting.", server_lbl)
117 125
118 126
119 127 def when_ready(server):
120 128 server.log.info("Server %s is ready. Spawning workers", server)
121 129
122 130
123 131 def on_reload(server):
124 132 pass
125 133
126 134
127 135 def _format_data_size(size, unit="B", precision=1, binary=True):
128 136 """Format a number using SI units (kilo, mega, etc.).
129 137
130 138 ``size``: The number as a float or int.
131 139
132 140 ``unit``: The unit name in plural form. Examples: "bytes", "B".
133 141
134 142 ``precision``: How many digits to the right of the decimal point. Default
135 143 is 1. 0 suppresses the decimal point.
136 144
137 145 ``binary``: If false, use base-10 decimal prefixes (kilo = K = 1000).
138 146 If true, use base-2 binary prefixes (kibi = Ki = 1024).
139 147
140 148 ``full_name``: If false (default), use the prefix abbreviation
141 149 ("k" or "Ki"). If true, use the full prefix ("kilo" or "kibi")
142 150 instead of the abbreviation.
143 151
144 152 """
145 153
146 154 if not binary:
147 155 base = 1000
148 156 multiples = ('', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
149 157 else:
150 158 base = 1024
151 159 multiples = ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi')
152 160
153 161 sign = ""
154 162 if size > 0:
155 163 m = int(math.log(size, base))
156 164 elif size < 0:
157 165 sign = "-"
158 166 size = -size
159 167 m = int(math.log(size, base))
160 168 else:
161 169 m = 0
162 170 if m > 8:
163 171 m = 8
164 172
165 173 if m == 0:
166 174 precision = '%.0f'
167 175 else:
168 176 precision = '%%.%df' % precision
169 177
170 178 size = precision % (size / math.pow(base, m))
171 179
172 180 return '%s%s %s%s' % (sign, size.strip(), multiples[m], unit)
173 181
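A couple of illustrative calls to _format_data_size (values chosen purely for demonstration):

    _format_data_size(10 * 1024 * 1024)    # -> '10.0 MiB' (binary prefixes by default)
    _format_data_size(1500, binary=False)  # -> '1.5 kB'  (decimal prefixes)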
174 182
175 183 def _check_memory_usage(worker):
184 memory_max_usage = worker._memory_max_usage
185 if not memory_max_usage:
186 return
176 187
177 if not MAX_MEMORY_USAGE:
178 return
188 memory_usage_check_interval = worker._memory_usage_check_interval
189 memory_usage_recovery_threshold = memory_max_usage * worker._memory_usage_recovery_threshold
179 190
180 191 elapsed = time.time() - worker._last_memory_check_time
181 if elapsed > MEMORY_USAGE_CHECK_INTERVAL:
192 if elapsed > memory_usage_check_interval:
182 193 mem_usage = _get_process_rss()
183 if mem_usage and mem_usage > MAX_MEMORY_USAGE:
194 if mem_usage and mem_usage > memory_max_usage:
184 195 worker.log.info(
185 196 "memory usage %s > %s, forcing gc",
186 _format_data_size(mem_usage), _format_data_size(MAX_MEMORY_USAGE))
197 _format_data_size(mem_usage), _format_data_size(memory_max_usage))
187 198 # Try to clean it up by forcing a full collection.
188 199 gc.collect()
189 200 mem_usage = _get_process_rss()
190 if mem_usage > MEMORY_USAGE_RECOVERY_THRESHOLD:
201 if mem_usage > memory_usage_recovery_threshold:
191 202 # Didn't clean up enough, we'll have to terminate.
192 203 worker.log.warning(
193 204 "memory usage %s > %s after gc, quitting",
194 _format_data_size(mem_usage), _format_data_size(MAX_MEMORY_USAGE))
205 _format_data_size(mem_usage), _format_data_size(memory_max_usage))
195 206 # This will cause worker to auto-restart itself
196 207 worker.alive = False
197 208 worker._last_memory_check_time = time.time()
198 209
199 210
200 211 def worker_int(worker):
201 worker.log.info("[<%-10s>] worker received INT or QUIT signal", worker.pid)
212 worker.log.info("[%-10s] worker received INT or QUIT signal", worker.pid)
202 213
203 214 # get traceback info, on worker crash
204 215 id2name = dict([(th.ident, th.name) for th in threading.enumerate()])
205 216 code = []
206 217 for thread_id, stack in sys._current_frames().items():
207 218 code.append(
208 219 "\n# Thread: %s(%d)" % (id2name.get(thread_id, ""), thread_id))
209 220 for fname, lineno, name, line in traceback.extract_stack(stack):
210 221 code.append('File: "%s", line %d, in %s' % (fname, lineno, name))
211 222 if line:
212 223 code.append(" %s" % (line.strip()))
213 224 worker.log.debug("\n".join(code))
214 225
215 226
216 227 def worker_abort(worker):
217 worker.log.info("[<%-10s>] worker received SIGABRT signal", worker.pid)
228 worker.log.info("[%-10s] worker received SIGABRT signal", worker.pid)
218 229
219 230
220 231 def worker_exit(server, worker):
221 worker.log.info("[<%-10s>] worker exit", worker.pid)
232 worker.log.info("[%-10s] worker exit", worker.pid)
222 233
223 234
224 235 def child_exit(server, worker):
225 worker.log.info("[<%-10s>] worker child exit", worker.pid)
236 worker.log.info("[%-10s] worker child exit", worker.pid)
226 237
227 238
228 239 def pre_request(worker, req):
229 240 worker.start_time = time.time()
230 241 worker.log.debug(
231 242 "GNCRN PRE WORKER [cnt:%s]: %s %s", worker.nr, req.method, req.path)
232 243
233 244
234 245 def post_request(worker, req, environ, resp):
235 246 total_time = time.time() - worker.start_time
236 247 worker.log.debug(
237 248 "GNCRN POST WORKER [cnt:%s]: %s %s resp: %s, Load Time: %.4fs",
238 249 worker.nr, req.method, req.path, resp.status_code, total_time)
239 250 _check_memory_usage(worker)
240 251
241 252
242 253 class RhodeCodeLogger(Logger):
243 254 """
244 255 Custom Logger that allows some customization that gunicorn doesn't provide
245 256 """
246 257
247 258 datefmt = r"%Y-%m-%d %H:%M:%S"
248 259
249 260 def __init__(self, cfg):
250 261 Logger.__init__(self, cfg)
251 262
252 263 def now(self):
253 264 """ return date in RhodeCode Log format """
254 265 now = time.time()
255 266 msecs = int((now - long(now)) * 1000)
256 267 return time.strftime(self.datefmt, time.localtime(now)) + '.{0:03d}'.format(msecs)
257 268
258 269
259 270 logger_class = RhodeCodeLogger
@@ -1,112 +1,175 b''
1 1 ################################################################################
2 2 # RhodeCode VCSServer with HTTP Backend - configuration #
3 3 ################################################################################
4 4
5 5
6 6 [server:main]
7 7 ## COMMON ##
8 8 host = 127.0.0.1
9 9 port = 9900
10 10
11 ###########################################################
12 ## WAITRESS WSGI SERVER - Recommended for Development ####
13 ###########################################################
14
15 #use = egg:waitress#main
16 ## number of worker threads
17 #threads = 5
18 ## MAX BODY SIZE 100GB
19 #max_request_body_size = 107374182400
20 ## Use poll instead of select; fixes file descriptor limit problems.
21 ## May not work on old Windows systems.
22 #asyncore_use_poll = true
23
11 24
12 25 ##########################
13 26 ## GUNICORN WSGI SERVER ##
14 27 ##########################
15 28 ## run with gunicorn --log-config vcsserver.ini --paste vcsserver.ini
29
16 30 use = egg:gunicorn#main
17 ## Sets the number of process workers. Recommended
18 ## value is (2 * NUMBER_OF_CPUS + 1), eg 2CPU = 5 workers
31 ## Sets the number of process workers. More workers means more concurrent connections
32 ## RhodeCode can handle at the same time. Each additional worker also increases
33 ## memory usage, as each has its own set of caches.
34 ## Recommended value is (2 * NUMBER_OF_CPUS + 1), e.g. 2 CPUs = 5 workers, but no more
35 ## than 8-10 except for really big deployments, e.g. 700-1000 users.
36 ## `instance_id = *` must be set in the [app:main] section below (which is the default)
37 ## when using more than 1 worker.
19 38 workers = 2
20 ## process name
39
40 ## Gunicorn access log level
41 loglevel = info
42
43 ## process name visible in process list
21 44 proc_name = rhodecode_vcsserver
45
22 46 ## type of worker class; currently `sync` is the only option allowed.
23 47 worker_class = sync
48
24 49 ## The maximum number of simultaneous clients. Valid only for Gevent
25 #worker_connections = 10
50 worker_connections = 10
51
26 52 ## max number of requests that a worker will handle before being gracefully
27 53 ## restarted; this can help prevent memory leaks
28 54 max_requests = 1000
29 55 max_requests_jitter = 30
56
30 57 ## amount of time a worker can spend handling a request before it
31 58 ## gets killed and restarted. Set to 6 hrs (21600 seconds)
32 59 timeout = 21600
33 60
61 ## The maximum size of HTTP request line in bytes.
62 ## 0 for unlimited
63 limit_request_line = 0
64
65 ## Limit the number of HTTP header fields in a request.
66 ## By default this value is 100 and can't be larger than 32768.
67 limit_request_fields = 32768
68
69 ## Limit the allowed size of an HTTP request header field.
70 ## Value is a positive number or 0.
71 ## Setting it to 0 will allow unlimited header field sizes.
72 limit_request_field_size = 0
73
74 ## Timeout for graceful workers restart.
75 ## After receiving a restart signal, workers have this much time to finish
76 ## serving requests. Workers still alive after the timeout (starting from the
77 ## receipt of the restart signal) are force killed.
78 graceful_timeout = 3600
79
80 # The number of seconds to wait for requests on a Keep-Alive connection.
81 # Generally set in the 1-5 seconds range.
82 keepalive = 2
83
84 ## Maximum amount of memory a worker may use before it will receive a
85 ## graceful restart signal, e.g. 10MB = 10485760 (10 * 1024 * 1024)
86 # 0 = memory monitoring is disabled
87 memory_max_usage = 0
88
89 ## How often in seconds to check for memory usage for each gunicorn worker
90 memory_usage_check_interval = 60
91
92 ## Threshold (fraction of memory_max_usage) below which a worker is not recycled.
93 ## Before each restart we try to run GC on the worker; if that frees enough
94 ## memory to drop below the threshold, the restart will not happen.
95 memory_usage_recovery_threshold = 0.8
96
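To make the three settings above concrete, here is a hedged sketch (assumed values, not the shipped defaults) of the decision the gunicorn post_request/_check_memory_usage hooks make; with memory_max_usage left at 0, as above, monitoring stays disabled:

    memory_max_usage = 1024 * 1024 * 1024              # assume a 1 GiB cap per worker
    memory_usage_recovery_threshold = 0.8
    recovery_limit = memory_max_usage * memory_usage_recovery_threshold  # ~0.8 GiB

    rss = 1200 * 1024 * 1024                           # pretend the worker grew to 1200 MiB
    if rss > memory_max_usage:
        # the hook runs gc.collect() here and re-reads RSS; only if RSS is still
        # above recovery_limit is worker.alive set to False (graceful restart)
        restart_needed = rss > recovery_limit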
34 97
35 98 [app:main]
36 99 use = egg:rhodecode-vcsserver
37 100
38 101 pyramid.default_locale_name = en
39 102 pyramid.includes =
40 103
41 104 ## default locale used by VCS systems
42 105 locale = en_US.UTF-8
43 106
44 107
45 108 ## path to binaries for vcsserver; it should be set by the installer
46 109 ## at installation time, e.g. /home/user/vcsserver-1/profile/bin
47 110 core.binary_dir = ""
48 111
49 112 ## Custom exception store path, defaults to TMPDIR
50 113 ## This is used to store exceptions from RhodeCode in a shared directory
51 114 #exception_tracker.store_path =
52 115
53 116 ## Default cache dir for caches. Putting this into a ramdisk
54 117 ## can boost performance, e.g. /tmpfs/data_ramdisk; however, this directory might require
55 118 ## a large amount of space
56 119 cache_dir = %(here)s/rcdev/data
57 120
58 121 ## cache region for storing repo_objects cache
59 122 rc_cache.repo_object.backend = dogpile.cache.rc.file_namespace
60 123 ## cache auto-expires after N seconds (2592000 == 30 days)
61 124 rc_cache.repo_object.expiration_time = 2592000
62 125
63 126 ## cache file store path; if empty, it is set automatically to the tmp dir location
64 127 #rc_cache.repo_object.arguments.filename = /tmp/vcsserver_cache.db
65 128
66 129 ## max size of the LRU cache; old values will be discarded once the cache reaches max_size
67 130 rc_cache.repo_object.max_size = 100
68 131
69 132
70 133 ################################
71 134 ### LOGGING CONFIGURATION ####
72 135 ################################
73 136 [loggers]
74 137 keys = root, vcsserver
75 138
76 139 [handlers]
77 140 keys = console
78 141
79 142 [formatters]
80 143 keys = generic
81 144
82 145 #############
83 146 ## LOGGERS ##
84 147 #############
85 148 [logger_root]
86 149 level = NOTSET
87 150 handlers = console
88 151
89 152 [logger_vcsserver]
90 153 level = DEBUG
91 154 handlers =
92 155 qualname = vcsserver
93 156 propagate = 1
94 157
95 158
96 159 ##############
97 160 ## HANDLERS ##
98 161 ##############
99 162
100 163 [handler_console]
101 164 class = StreamHandler
102 165 args = (sys.stderr,)
103 166 level = DEBUG
104 167 formatter = generic
105 168
106 169 ################
107 170 ## FORMATTERS ##
108 171 ################
109 172
110 173 [formatter_generic]
111 174 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
112 175 datefmt = %Y-%m-%d %H:%M:%S