configs: moved most of configuration back to .ini files instead of gunicorn file
marcink -
r801:ecde08b9 default
@@ -1,91 +1,175 b''
1 ################################################################################
1 ################################################################################
2 # RhodeCode VCSServer with HTTP Backend - configuration #
2 # RhodeCode VCSServer with HTTP Backend - configuration #
3 ################################################################################
3 ################################################################################
4
4
5
5
6 [server:main]
6 [server:main]
7 ## COMMON ##
7 ## COMMON ##
8 host = 0.0.0.0
8 host = 0.0.0.0
9 port = 9900
9 port = 9900
10
10
11 ###########################################################
12 ## WAITRESS WSGI SERVER - Recommended for Development ####
13 ###########################################################
14
11 use = egg:waitress#main
15 use = egg:waitress#main
16 ## number of worker threads
17 threads = 5
18 ## MAX BODY SIZE 100GB
19 max_request_body_size = 107374182400
20 ## Use poll instead of select, fixes file descriptors limits problems.
21 ## May not work on old windows systems.
22 asyncore_use_poll = true
23
24
25 ##########################
26 ## GUNICORN WSGI SERVER ##
27 ##########################
28 ## run with gunicorn --log-config vcsserver.ini --paste vcsserver.ini
29
30 #use = egg:gunicorn#main
31 ## Sets the number of process workers. More workers means more concurrent connections
32 ## RhodeCode can handle at the same time. Each additional worker also increases
33 ## memory usage, as each has its own set of caches.
34 ## Recommended value is (2 * NUMBER_OF_CPUS + 1), e.g. 2 CPUs = 5 workers, but no more
35 ## than 8-10 unless for really big deployments, e.g. 700-1000 users.
36 ## `instance_id = *` must be set in the [app:main] section below (which is the default)
37 ## when using more than 1 worker.
38 #workers = 2
39
40 ## Gunicorn access log level
41 #loglevel = info
42
43 ## process name visible in process list
44 #proc_name = rhodecode_vcsserver
45
46 ## type of worker class, currently `sync` is the only option allowed.
47 #worker_class = sync
48
49 ## The maximum number of simultaneous clients. Valid only for Gevent
50 #worker_connections = 10
51
52 ## max number of requests that worker will handle before being gracefully
53 ## restarted, could prevent memory leaks
54 #max_requests = 1000
55 #max_requests_jitter = 30
56
57 ## amount of time a worker can spend with handling a request before it
58 ## gets killed and restarted. Set to 6hrs
59 #timeout = 21600
60
61 ## The maximum size of HTTP request line in bytes.
62 ## 0 for unlimited
63 #limit_request_line = 0
64
65 ## Limit the number of HTTP headers fields in a request.
66 ## By default this value is 100 and can't be larger than 32768.
67 #limit_request_fields = 32768
68
69 ## Limit the allowed size of an HTTP request header field.
70 ## Value is a positive number or 0.
71 ## Setting it to 0 will allow unlimited header field sizes.
72 #limit_request_field_size = 0
73
74 ## Timeout for graceful workers restart.
75 ## After receiving a restart signal, workers have this much time to finish
76 ## serving requests. Workers still alive after the timeout (starting from the
77 ## receipt of the restart signal) are force killed.
78 #graceful_timeout = 3600
79
80 # The number of seconds to wait for requests on a Keep-Alive connection.
81 # Generally set in the 1-5 seconds range.
82 #keepalive = 2
83
84 ## Maximum memory usage that each worker can use before it will receive a
85 ## graceful restart signal, e.g 10MB = 10485760 (10 * 1024 * 1024)
86 # 0 = memory monitoring is disabled
87 #memory_max_usage = 0
88
89 ## How often in seconds to check for memory usage for each gunicorn worker
90 #memory_usage_check_interval = 60
91
92 ## Threshold below which a worker is not recycled if GarbageCollection
93 ## frees up enough memory. Before each restart we try to run GC on the worker;
94 ## if that frees enough memory, the restart does not happen.
95 #memory_usage_recovery_threshold = 0.8
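
The three memory settings above are consumed together: a worker is only force-restarted if, after a forced GC, its RSS is still above memory_max_usage * memory_usage_recovery_threshold (see _check_memory_usage in gunicorn_config.py below). A minimal sketch with example values, not the shipped defaults, which leave monitoring off:

# illustrative values only -- the default memory_max_usage of 0 disables monitoring
memory_max_usage = 1024 * 1024 * 1024        # cap each worker at 1 GiB
memory_usage_check_interval = 60             # look at RSS at most once a minute
memory_usage_recovery_threshold = 0.8        # keep the worker if GC gets it under 80%

recovery_limit = memory_max_usage * memory_usage_recovery_threshold
print(int(recovery_limit))                   # 858993459 -> worker survives if RSS drops below ~819 MiB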
12
96
13
97
14 [app:main]
98 [app:main]
15 use = egg:rhodecode-vcsserver
99 use = egg:rhodecode-vcsserver
16
100
17 pyramid.default_locale_name = en
101 pyramid.default_locale_name = en
18 pyramid.includes =
102 pyramid.includes =
19
103
20 ## default locale used by VCS systems
104 ## default locale used by VCS systems
21 locale = en_US.UTF-8
105 locale = en_US.UTF-8
22
106
23
107
24 ## path to binaries for vcsserver, it should be set by the installer
108 ## path to binaries for vcsserver, it should be set by the installer
25 ## at installation time, e.g /home/user/vcsserver-1/profile/bin
109 ## at installation time, e.g /home/user/vcsserver-1/profile/bin
26 core.binary_dir = ""
110 core.binary_dir = ""
27
111
28 ## Custom exception store path, defaults to TMPDIR
112 ## Custom exception store path, defaults to TMPDIR
29 ## This is used to store exception from RhodeCode in shared directory
113 ## This is used to store exception from RhodeCode in shared directory
30 #exception_tracker.store_path =
114 #exception_tracker.store_path =
31
115
32 ## Default cache dir for caches. Putting this into a ramdisk
116 ## Default cache dir for caches. Putting this into a ramdisk
33 ## can boost performance, eg. /tmpfs/data_ramdisk, however this directory might require
117 ## can boost performance, eg. /tmpfs/data_ramdisk, however this directory might require
34 ## large amount of space
118 ## large amount of space
35 cache_dir = %(here)s/rcdev/data
119 cache_dir = %(here)s/rcdev/data
36
120
37 ## cache region for storing repo_objects cache
121 ## cache region for storing repo_objects cache
38 rc_cache.repo_object.backend = dogpile.cache.rc.file_namespace
122 rc_cache.repo_object.backend = dogpile.cache.rc.file_namespace
39 ## cache auto-expires after N seconds (2592000 == 30 days)
123 ## cache auto-expires after N seconds (2592000 == 30 days)
40 rc_cache.repo_object.expiration_time = 2592000
124 rc_cache.repo_object.expiration_time = 2592000
41
125
42 ## cache file store path, if empty set automatically to tmp dir location
126 ## cache file store path, if empty set automatically to tmp dir location
43 #rc_cache.repo_object.arguments.filename = /tmp/vcsserver_cache.db
127 #rc_cache.repo_object.arguments.filename = /tmp/vcsserver_cache.db
44
128
45 ## max size of LRU, old values will be discarded if the size of cache reaches max_size
129 ## max size of LRU, old values will be discarded if the size of cache reaches max_size
46 rc_cache.repo_object.max_size = 100
130 rc_cache.repo_object.max_size = 100
47
131
48
132
49 ################################
133 ################################
50 ### LOGGING CONFIGURATION ####
134 ### LOGGING CONFIGURATION ####
51 ################################
135 ################################
52 [loggers]
136 [loggers]
53 keys = root, vcsserver
137 keys = root, vcsserver
54
138
55 [handlers]
139 [handlers]
56 keys = console
140 keys = console
57
141
58 [formatters]
142 [formatters]
59 keys = generic
143 keys = generic
60
144
61 #############
145 #############
62 ## LOGGERS ##
146 ## LOGGERS ##
63 #############
147 #############
64 [logger_root]
148 [logger_root]
65 level = NOTSET
149 level = NOTSET
66 handlers = console
150 handlers = console
67
151
68 [logger_vcsserver]
152 [logger_vcsserver]
69 level = DEBUG
153 level = DEBUG
70 handlers =
154 handlers =
71 qualname = vcsserver
155 qualname = vcsserver
72 propagate = 1
156 propagate = 1
73
157
74
158
75 ##############
159 ##############
76 ## HANDLERS ##
160 ## HANDLERS ##
77 ##############
161 ##############
78
162
79 [handler_console]
163 [handler_console]
80 class = StreamHandler
164 class = StreamHandler
81 args = (sys.stderr,)
165 args = (sys.stderr,)
82 level = DEBUG
166 level = DEBUG
83 formatter = generic
167 formatter = generic
84
168
85 ################
169 ################
86 ## FORMATTERS ##
170 ## FORMATTERS ##
87 ################
171 ################
88
172
89 [formatter_generic]
173 [formatter_generic]
90 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
174 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
91 datefmt = %Y-%m-%d %H:%M:%S
175 datefmt = %Y-%m-%d %H:%M:%S
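
As the comments above note, the usual gunicorn worker count is (2 * NUMBER_OF_CPUS + 1), capped at roughly 8-10 except for very large installs. A small sketch of that rule of thumb (the cap value here is illustrative), equivalent to the get_workers() helper in gunicorn_config.py below:

import multiprocessing

def suggested_workers(cap=10):
    # 2 * CPUs + 1, but never more than the cap suggested for big deployments
    return min(multiprocessing.cpu_count() * 2 + 1, cap)

print(suggested_workers())   # e.g. 5 on a 2-CPU machine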
@@ -1,259 +1,270 b''
1 """
1 """
2 gunicorn config extension and hooks. Sets additional configuration that is
2 gunicorn config extension and hooks. Sets additional configuration that is
3 available post the .ini config.
3 available post the .ini config.
4
4
5 - workers = ${cpu_number}
5 - workers = ${cpu_number}
6 - threads = 1
6 - threads = 1
7 - proc_name = ${gunicorn_proc_name}
7 - proc_name = ${gunicorn_proc_name}
8 - worker_class = sync
8 - worker_class = sync
9 - worker_connections = 10
9 - worker_connections = 10
10 - max_requests = 1000
10 - max_requests = 1000
11 - max_requests_jitter = 30
11 - max_requests_jitter = 30
12 - timeout = 21600
12 - timeout = 21600
13
13
14 """
14 """
15
15
16 import gc
17 import os
18 import sys
16 import math
19 import math
17 import gc
18 import sys
19 import time
20 import time
20 import threading
21 import threading
21 import traceback
22 import traceback
22 import random
23 import random
23 from gunicorn.glogging import Logger
24 from gunicorn.glogging import Logger
24
25
25
26
26 def get_workers():
27 def get_workers():
27 import multiprocessing
28 import multiprocessing
28 return multiprocessing.cpu_count() * 2 + 1
29 return multiprocessing.cpu_count() * 2 + 1
29
30
30 # GLOBAL
31 # GLOBAL
31 errorlog = '-'
32 errorlog = '-'
32 accesslog = '-'
33 accesslog = '-'
33 loglevel = 'info'
34
34
35 # SECURITY
36
37 # The maximum size of HTTP request line in bytes.
38 # 0 for unlimited
39 limit_request_line = 0
40
41 # Limit the number of HTTP headers fields in a request.
42 # By default this value is 100 and can't be larger than 32768.
43 limit_request_fields = 32768
44
45 # Limit the allowed size of an HTTP request header field.
46 # Value is a positive number or 0.
47 # Setting it to 0 will allow unlimited header field sizes.
48 limit_request_field_size = 0
49
50 # Timeout for graceful workers restart.
51 # After receiving a restart signal, workers have this much time to finish
52 # serving requests. Workers still alive after the timeout (starting from the
53 # receipt of the restart signal) are force killed.
54 graceful_timeout = 60 * 60
55
56 # The number of seconds to wait for requests on a Keep-Alive connection.
57 # Generally set in the 1-5 seconds range.
58 keepalive = 2
59
35
60 # SERVER MECHANICS
36 # SERVER MECHANICS
61 # None == system temp dir
37 # None == system temp dir
62 # worker_tmp_dir is recommended to be set to some tmpfs
38 # worker_tmp_dir is recommended to be set to some tmpfs
63 worker_tmp_dir = None
39 worker_tmp_dir = None
64 tmp_upload_dir = None
40 tmp_upload_dir = None
65
41
66 # Custom log format
42 # Custom log format
67 access_log_format = (
43 access_log_format = (
68 '%(t)s %(p)s INFO [GNCRN] %(h)-15s rqt:%(L)s %(s)s %(b)-6s "%(m)s:%(U)s %(q)s" usr:%(u)s "%(f)s" "%(a)s"')
44 '%(t)s %(p)s INFO [GNCRN] %(h)-15s rqt:%(L)s %(s)s %(b)-6s "%(m)s:%(U)s %(q)s" usr:%(u)s "%(f)s" "%(a)s"')
69
45
70 # self adjust workers based on CPU count
46 # self adjust workers based on CPU count
71 # workers = get_workers()
47 # workers = get_workers()
72
48
73 # n * 1024 * 0124 == n MBs, 0 = memory monitoring is disabled
74 MAX_MEMORY_USAGE = 0 * 1024 * 1024
75
76 # How often in seconds to check for memory usage
77 MEMORY_USAGE_CHECK_INTERVAL = 30
78
79 # If a gc brings us back below this threshold, we can avoid termination.
80 MEMORY_USAGE_RECOVERY_THRESHOLD = MAX_MEMORY_USAGE * 0.8
81
82
49
83 def _get_process_rss(pid=None):
50 def _get_process_rss(pid=None):
84 try:
51 try:
85 import psutil
52 import psutil
86 if pid:
53 if pid:
87 proc = psutil.Process(pid)
54 proc = psutil.Process(pid)
88 else:
55 else:
89 proc = psutil.Process()
56 proc = psutil.Process()
90 return proc.memory_info().rss
57 return proc.memory_info().rss
91 except Exception:
58 except Exception:
92 return None
59 return None
93
60
94
61
95 def _time_with_offset():
62 def _get_config(ini_path):
96 return time.time() - random.randint(0, MEMORY_USAGE_CHECK_INTERVAL/2.0)
63
64 try:
65 import configparser
66 except ImportError:
67 import ConfigParser as configparser
68 try:
69 config = configparser.ConfigParser()
70 config.read(ini_path)
71 return config
72 except Exception:
73 return None
74
75
76 def _time_with_offset(memory_usage_check_interval):
77 return time.time() - random.randint(0, memory_usage_check_interval/2.0)
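
The random offset above only staggers the per-worker check times so workers started together do not all recycle at once; a standalone sketch of the effect, assuming the default 60 second interval:

import random
import time

def _time_with_offset(memory_usage_check_interval):
    # pretend the last check happened up to interval/2 seconds ago
    return time.time() - random.randint(0, int(memory_usage_check_interval / 2))

# five workers get "last check" times spread across a 30 second window
offsets = [round(time.time() - _time_with_offset(60)) for _ in range(5)]
print(offsets)   # e.g. [12, 3, 27, 0, 19]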
97
78
98
79
99 def pre_fork(server, worker):
80 def pre_fork(server, worker):
100 pass
81 pass
101
82
102
83
103 def post_fork(server, worker):
84 def post_fork(server, worker):
104 server.log.info("<%s> WORKER spawned", worker.pid)
85
86 # memory spec defaults
87 _memory_max_usage = 0
88 _memory_usage_check_interval = 60
89 _memory_usage_recovery_threshold = 0.8
90
91 ini_path = os.path.abspath(server.cfg.paste)
92 conf = _get_config(ini_path)
93 if conf and 'server:main' in conf:
94 section = conf['server:main']
95
96 if section.get('memory_max_usage'):
97 _memory_max_usage = int(section.get('memory_max_usage'))
98 if section.get('memory_usage_check_interval'):
99 _memory_usage_check_interval = int(section.get('memory_usage_check_interval'))
100 if section.get('memory_usage_recovery_threshold'):
101 _memory_usage_recovery_threshold = float(section.get('memory_usage_recovery_threshold'))
102
103 worker._memory_max_usage = _memory_max_usage
104 worker._memory_usage_check_interval = _memory_usage_check_interval
105 worker._memory_usage_recovery_threshold = _memory_usage_recovery_threshold
106
105 # register memory last check time, with some random offset so we don't recycle all
107 # register memory last check time, with some random offset so we don't recycle all
106 # at once
108 # at once
107 worker._last_memory_check_time = _time_with_offset()
109 worker._last_memory_check_time = _time_with_offset(_memory_usage_check_interval)
110
111 if _memory_max_usage:
112 server.log.info("[%-10s] WORKER spawned with max memory set at %s", worker.pid,
113 _format_data_size(_memory_max_usage))
114 else:
115 server.log.info("[%-10s] WORKER spawned", worker.pid)
108
116
109
117
110 def pre_exec(server):
118 def pre_exec(server):
111 server.log.info("Forked child, re-executing.")
119 server.log.info("Forked child, re-executing.")
112
120
113
121
114 def on_starting(server):
122 def on_starting(server):
115 server_lbl = '{} {}'.format(server.proc_name, server.address)
123 server_lbl = '{} {}'.format(server.proc_name, server.address)
116 server.log.info("Server %s is starting.", server_lbl)
124 server.log.info("Server %s is starting.", server_lbl)
117
125
118
126
119 def when_ready(server):
127 def when_ready(server):
120 server.log.info("Server %s is ready. Spawning workers", server)
128 server.log.info("Server %s is ready. Spawning workers", server)
121
129
122
130
123 def on_reload(server):
131 def on_reload(server):
124 pass
132 pass
125
133
126
134
127 def _format_data_size(size, unit="B", precision=1, binary=True):
135 def _format_data_size(size, unit="B", precision=1, binary=True):
128 """Format a number using SI units (kilo, mega, etc.).
136 """Format a number using SI units (kilo, mega, etc.).
129
137
130 ``size``: The number as a float or int.
138 ``size``: The number as a float or int.
131
139
132 ``unit``: The unit name in plural form. Examples: "bytes", "B".
140 ``unit``: The unit name in plural form. Examples: "bytes", "B".
133
141
134 ``precision``: How many digits to the right of the decimal point. Default
142 ``precision``: How many digits to the right of the decimal point. Default
135 is 1. 0 suppresses the decimal point.
143 is 1. 0 suppresses the decimal point.
136
144
137 ``binary``: If false, use base-10 decimal prefixes (kilo = K = 1000).
145 ``binary``: If false, use base-10 decimal prefixes (kilo = K = 1000).
138 If true, use base-2 binary prefixes (kibi = Ki = 1024).
146 If true, use base-2 binary prefixes (kibi = Ki = 1024).
139
147
140 ``full_name``: If false (default), use the prefix abbreviation ("k" or
148 ``full_name``: If false (default), use the prefix abbreviation ("k" or
141 "Ki"). If true, use the full prefix ("kilo" or "kibi"). If false,
149 "Ki"). If true, use the full prefix ("kilo" or "kibi"). If false,
142 use abbreviation ("k" or "Ki").
150 use abbreviation ("k" or "Ki").
143
151
144 """
152 """
145
153
146 if not binary:
154 if not binary:
147 base = 1000
155 base = 1000
148 multiples = ('', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
156 multiples = ('', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
149 else:
157 else:
150 base = 1024
158 base = 1024
151 multiples = ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi')
159 multiples = ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi')
152
160
153 sign = ""
161 sign = ""
154 if size > 0:
162 if size > 0:
155 m = int(math.log(size, base))
163 m = int(math.log(size, base))
156 elif size < 0:
164 elif size < 0:
157 sign = "-"
165 sign = "-"
158 size = -size
166 size = -size
159 m = int(math.log(size, base))
167 m = int(math.log(size, base))
160 else:
168 else:
161 m = 0
169 m = 0
162 if m > 8:
170 if m > 8:
163 m = 8
171 m = 8
164
172
165 if m == 0:
173 if m == 0:
166 precision = '%.0f'
174 precision = '%.0f'
167 else:
175 else:
168 precision = '%%.%df' % precision
176 precision = '%%.%df' % precision
169
177
170 size = precision % (size / math.pow(base, m))
178 size = precision % (size / math.pow(base, m))
171
179
172 return '%s%s %s%s' % (sign, size.strip(), multiples[m], unit)
180 return '%s%s %s%s' % (sign, size.strip(), multiples[m], unit)
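
A quick usage sketch for the helper above (these calls are not part of the commit; the outputs were worked out by hand and assume the function is defined as shown):

print(_format_data_size(10 * 1024 * 1024))    # '10.0 MiB' -- the 10MB example from the ini comments
print(_format_data_size(536870912))           # '512.0 MiB'
print(_format_data_size(1000, binary=False))  # '1.0 kB'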
173
181
174
182
175 def _check_memory_usage(worker):
183 def _check_memory_usage(worker):
184 memory_max_usage = worker._memory_max_usage
185 if not memory_max_usage:
186 return
176
187
177 if not MAX_MEMORY_USAGE:
188 memory_usage_check_interval = worker._memory_usage_check_interval
178 return
189 memory_usage_recovery_threshold = memory_max_usage * worker._memory_usage_recovery_threshold
179
190
180 elapsed = time.time() - worker._last_memory_check_time
191 elapsed = time.time() - worker._last_memory_check_time
181 if elapsed > MEMORY_USAGE_CHECK_INTERVAL:
192 if elapsed > memory_usage_check_interval:
182 mem_usage = _get_process_rss()
193 mem_usage = _get_process_rss()
183 if mem_usage and mem_usage > MAX_MEMORY_USAGE:
194 if mem_usage and mem_usage > memory_max_usage:
184 worker.log.info(
195 worker.log.info(
185 "memory usage %s > %s, forcing gc",
196 "memory usage %s > %s, forcing gc",
186 _format_data_size(mem_usage), _format_data_size(MAX_MEMORY_USAGE))
197 _format_data_size(mem_usage), _format_data_size(memory_max_usage))
187 # Try to clean it up by forcing a full collection.
198 # Try to clean it up by forcing a full collection.
188 gc.collect()
199 gc.collect()
189 mem_usage = _get_process_rss()
200 mem_usage = _get_process_rss()
190 if mem_usage > MEMORY_USAGE_RECOVERY_THRESHOLD:
201 if mem_usage > memory_usage_recovery_threshold:
191 # Didn't clean up enough, we'll have to terminate.
202 # Didn't clean up enough, we'll have to terminate.
192 worker.log.warning(
203 worker.log.warning(
193 "memory usage %s > %s after gc, quitting",
204 "memory usage %s > %s after gc, quitting",
194 _format_data_size(mem_usage), _format_data_size(MAX_MEMORY_USAGE))
205 _format_data_size(mem_usage), _format_data_size(memory_max_usage))
195 # This will cause worker to auto-restart itself
206 # This will cause worker to auto-restart itself
196 worker.alive = False
207 worker.alive = False
197 worker._last_memory_check_time = time.time()
208 worker._last_memory_check_time = time.time()
198
209
199
210
200 def worker_int(worker):
211 def worker_int(worker):
201 worker.log.info("[<%-10s>] worker received INT or QUIT signal", worker.pid)
212 worker.log.info("[%-10s] worker received INT or QUIT signal", worker.pid)
202
213
203 # get traceback info, on worker crash
214 # get traceback info, on worker crash
204 id2name = dict([(th.ident, th.name) for th in threading.enumerate()])
215 id2name = dict([(th.ident, th.name) for th in threading.enumerate()])
205 code = []
216 code = []
206 for thread_id, stack in sys._current_frames().items():
217 for thread_id, stack in sys._current_frames().items():
207 code.append(
218 code.append(
208 "\n# Thread: %s(%d)" % (id2name.get(thread_id, ""), thread_id))
219 "\n# Thread: %s(%d)" % (id2name.get(thread_id, ""), thread_id))
209 for fname, lineno, name, line in traceback.extract_stack(stack):
220 for fname, lineno, name, line in traceback.extract_stack(stack):
210 code.append('File: "%s", line %d, in %s' % (fname, lineno, name))
221 code.append('File: "%s", line %d, in %s' % (fname, lineno, name))
211 if line:
222 if line:
212 code.append(" %s" % (line.strip()))
223 code.append(" %s" % (line.strip()))
213 worker.log.debug("\n".join(code))
224 worker.log.debug("\n".join(code))
214
225
215
226
216 def worker_abort(worker):
227 def worker_abort(worker):
217 worker.log.info("[<%-10s>] worker received SIGABRT signal", worker.pid)
228 worker.log.info("[%-10s] worker received SIGABRT signal", worker.pid)
218
229
219
230
220 def worker_exit(server, worker):
231 def worker_exit(server, worker):
221 worker.log.info("[<%-10s>] worker exit", worker.pid)
232 worker.log.info("[%-10s] worker exit", worker.pid)
222
233
223
234
224 def child_exit(server, worker):
235 def child_exit(server, worker):
225 worker.log.info("[<%-10s>] worker child exit", worker.pid)
236 worker.log.info("[%-10s] worker child exit", worker.pid)
226
237
227
238
228 def pre_request(worker, req):
239 def pre_request(worker, req):
229 worker.start_time = time.time()
240 worker.start_time = time.time()
230 worker.log.debug(
241 worker.log.debug(
231 "GNCRN PRE WORKER [cnt:%s]: %s %s", worker.nr, req.method, req.path)
242 "GNCRN PRE WORKER [cnt:%s]: %s %s", worker.nr, req.method, req.path)
232
243
233
244
234 def post_request(worker, req, environ, resp):
245 def post_request(worker, req, environ, resp):
235 total_time = time.time() - worker.start_time
246 total_time = time.time() - worker.start_time
236 worker.log.debug(
247 worker.log.debug(
237 "GNCRN POST WORKER [cnt:%s]: %s %s resp: %s, Load Time: %.4fs",
248 "GNCRN POST WORKER [cnt:%s]: %s %s resp: %s, Load Time: %.4fs",
238 worker.nr, req.method, req.path, resp.status_code, total_time)
249 worker.nr, req.method, req.path, resp.status_code, total_time)
239 _check_memory_usage(worker)
250 _check_memory_usage(worker)
240
251
241
252
242 class RhodeCodeLogger(Logger):
253 class RhodeCodeLogger(Logger):
243 """
254 """
244 Custom Logger that allows some customization that gunicorn doesn't allow
255 Custom Logger that allows some customization that gunicorn doesn't allow
245 """
256 """
246
257
247 datefmt = r"%Y-%m-%d %H:%M:%S"
258 datefmt = r"%Y-%m-%d %H:%M:%S"
248
259
249 def __init__(self, cfg):
260 def __init__(self, cfg):
250 Logger.__init__(self, cfg)
261 Logger.__init__(self, cfg)
251
262
252 def now(self):
263 def now(self):
253 """ return date in RhodeCode Log format """
264 """ return date in RhodeCode Log format """
254 now = time.time()
265 now = time.time()
255 msecs = int((now - long(now)) * 1000)
266 msecs = int((now - long(now)) * 1000)
256 return time.strftime(self.datefmt, time.localtime(now)) + '.{0:03d}'.format(msecs)
267 return time.strftime(self.datefmt, time.localtime(now)) + '.{0:03d}'.format(msecs)
257
268
258
269
259 logger_class = RhodeCodeLogger
270 logger_class = RhodeCodeLogger
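
For reference, RhodeCodeLogger.now() above emits the same '%Y-%m-%d %H:%M:%S.mmm' timestamps as the datefmt used in the ini logging sections; a standalone sketch of that formatting, using int() where the Python 2 code uses long():

import time

def rc_now(datefmt=r"%Y-%m-%d %H:%M:%S"):
    # date plus zero-padded milliseconds, as in RhodeCodeLogger.now()
    now = time.time()
    msecs = int((now - int(now)) * 1000)
    return time.strftime(datefmt, time.localtime(now)) + '.{0:03d}'.format(msecs)

print(rc_now())   # e.g. 2020-02-07 11:42:13.057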
@@ -1,112 +1,175 b''
1 ################################################################################
1 ################################################################################
2 # RhodeCode VCSServer with HTTP Backend - configuration #
2 # RhodeCode VCSServer with HTTP Backend - configuration #
3 ################################################################################
3 ################################################################################
4
4
5
5
6 [server:main]
6 [server:main]
7 ## COMMON ##
7 ## COMMON ##
8 host = 127.0.0.1
8 host = 127.0.0.1
9 port = 9900
9 port = 9900
10
10
11 ###########################################################
12 ## WAITRESS WSGI SERVER - Recommended for Development ####
13 ###########################################################
14
15 #use = egg:waitress#main
16 ## number of worker threads
17 #threads = 5
18 ## MAX BODY SIZE 100GB
19 #max_request_body_size = 107374182400
20 ## Use poll instead of select, fixes file descriptors limits problems.
21 ## May not work on old windows systems.
22 #asyncore_use_poll = true
23
11
24
12 ##########################
25 ##########################
13 ## GUNICORN WSGI SERVER ##
26 ## GUNICORN WSGI SERVER ##
14 ##########################
27 ##########################
15 ## run with gunicorn --log-config vcsserver.ini --paste vcsserver.ini
28 ## run with gunicorn --log-config vcsserver.ini --paste vcsserver.ini
29
16 use = egg:gunicorn#main
30 use = egg:gunicorn#main
17 ## Sets the number of process workers. Recommended
31 ## Sets the number of process workers. More workers means more concurrent connections
18 ## value is (2 * NUMBER_OF_CPUS + 1), eg 2CPU = 5 workers
32 ## RhodeCode can handle at the same time. Each additional worker also increases
33 ## memory usage, as each has its own set of caches.
34 ## Recommended value is (2 * NUMBER_OF_CPUS + 1), e.g. 2 CPUs = 5 workers, but no more
35 ## than 8-10 unless for really big deployments, e.g. 700-1000 users.
36 ## `instance_id = *` must be set in the [app:main] section below (which is the default)
37 ## when using more than 1 worker.
19 workers = 2
38 workers = 2
20 ## process name
39
40 ## Gunicorn access log level
41 loglevel = info
42
43 ## process name visible in process list
21 proc_name = rhodecode_vcsserver
44 proc_name = rhodecode_vcsserver
45
22 ## type of worker class, currently `sync` is the only option allowed.
46 ## type of worker class, currently `sync` is the only option allowed.
23 worker_class = sync
47 worker_class = sync
48
24 ## The maximum number of simultaneous clients. Valid only for Gevent
49 ## The maximum number of simultaneous clients. Valid only for Gevent
25 #worker_connections = 10
50 worker_connections = 10
51
26 ## max number of requests that worker will handle before being gracefully
52 ## max number of requests that worker will handle before being gracefully
27 ## restarted, could prevent memory leaks
53 ## restarted, could prevent memory leaks
28 max_requests = 1000
54 max_requests = 1000
29 max_requests_jitter = 30
55 max_requests_jitter = 30
56
30 ## amount of time a worker can spend with handling a request before it
57 ## amount of time a worker can spend with handling a request before it
31 ## gets killed and restarted. Set to 6hrs
58 ## gets killed and restarted. Set to 6hrs
32 timeout = 21600
59 timeout = 21600
33
60
61 ## The maximum size of HTTP request line in bytes.
62 ## 0 for unlimited
63 limit_request_line = 0
64
65 ## Limit the number of HTTP headers fields in a request.
66 ## By default this value is 100 and can't be larger than 32768.
67 limit_request_fields = 32768
68
69 ## Limit the allowed size of an HTTP request header field.
70 ## Value is a positive number or 0.
71 ## Setting it to 0 will allow unlimited header field sizes.
72 limit_request_field_size = 0
73
74 ## Timeout for graceful workers restart.
75 ## After receiving a restart signal, workers have this much time to finish
76 ## serving requests. Workers still alive after the timeout (starting from the
77 ## receipt of the restart signal) are force killed.
78 graceful_timeout = 3600
79
80 # The number of seconds to wait for requests on a Keep-Alive connection.
81 # Generally set in the 1-5 seconds range.
82 keepalive = 2
83
84 ## Maximum memory usage that each worker can use before it will receive a
85 ## graceful restart signal, e.g 10MB = 10485760 (10 * 1024 * 1024)
86 # 0 = memory monitoring is disabled
87 memory_max_usage = 0
88
89 ## How often in seconds to check for memory usage for each gunicorn worker
90 memory_usage_check_interval = 60
91
92 ## Threshold below which a worker is not recycled if GarbageCollection
93 ## frees up enough memory. Before each restart we try to run GC on the worker;
94 ## if that frees enough memory, the restart does not happen.
95 memory_usage_recovery_threshold = 0.8
96
34
97
35 [app:main]
98 [app:main]
36 use = egg:rhodecode-vcsserver
99 use = egg:rhodecode-vcsserver
37
100
38 pyramid.default_locale_name = en
101 pyramid.default_locale_name = en
39 pyramid.includes =
102 pyramid.includes =
40
103
41 ## default locale used by VCS systems
104 ## default locale used by VCS systems
42 locale = en_US.UTF-8
105 locale = en_US.UTF-8
43
106
44
107
45 ## path to binaries for vcsserver, it should be set by the installer
108 ## path to binaries for vcsserver, it should be set by the installer
46 ## at installation time, e.g /home/user/vcsserver-1/profile/bin
109 ## at installation time, e.g /home/user/vcsserver-1/profile/bin
47 core.binary_dir = ""
110 core.binary_dir = ""
48
111
49 ## Custom exception store path, defaults to TMPDIR
112 ## Custom exception store path, defaults to TMPDIR
50 ## This is used to store exception from RhodeCode in shared directory
113 ## This is used to store exception from RhodeCode in shared directory
51 #exception_tracker.store_path =
114 #exception_tracker.store_path =
52
115
53 ## Default cache dir for caches. Putting this into a ramdisk
116 ## Default cache dir for caches. Putting this into a ramdisk
54 ## can boost performance, eg. /tmpfs/data_ramdisk, however this directory might require
117 ## can boost performance, eg. /tmpfs/data_ramdisk, however this directory might require
55 ## large amount of space
118 ## large amount of space
56 cache_dir = %(here)s/rcdev/data
119 cache_dir = %(here)s/rcdev/data
57
120
58 ## cache region for storing repo_objects cache
121 ## cache region for storing repo_objects cache
59 rc_cache.repo_object.backend = dogpile.cache.rc.file_namespace
122 rc_cache.repo_object.backend = dogpile.cache.rc.file_namespace
60 ## cache auto-expires after N seconds (2592000 == 30 days)
123 ## cache auto-expires after N seconds (2592000 == 30 days)
61 rc_cache.repo_object.expiration_time = 2592000
124 rc_cache.repo_object.expiration_time = 2592000
62
125
63 ## cache file store path, if empty set automatically to tmp dir location
126 ## cache file store path, if empty set automatically to tmp dir location
64 #rc_cache.repo_object.arguments.filename = /tmp/vcsserver_cache.db
127 #rc_cache.repo_object.arguments.filename = /tmp/vcsserver_cache.db
65
128
66 ## max size of LRU, old values will be discarded if the size of cache reaches max_size
129 ## max size of LRU, old values will be discarded if the size of cache reaches max_size
67 rc_cache.repo_object.max_size = 100
130 rc_cache.repo_object.max_size = 100
68
131
69
132
70 ################################
133 ################################
71 ### LOGGING CONFIGURATION ####
134 ### LOGGING CONFIGURATION ####
72 ################################
135 ################################
73 [loggers]
136 [loggers]
74 keys = root, vcsserver
137 keys = root, vcsserver
75
138
76 [handlers]
139 [handlers]
77 keys = console
140 keys = console
78
141
79 [formatters]
142 [formatters]
80 keys = generic
143 keys = generic
81
144
82 #############
145 #############
83 ## LOGGERS ##
146 ## LOGGERS ##
84 #############
147 #############
85 [logger_root]
148 [logger_root]
86 level = NOTSET
149 level = NOTSET
87 handlers = console
150 handlers = console
88
151
89 [logger_vcsserver]
152 [logger_vcsserver]
90 level = DEBUG
153 level = DEBUG
91 handlers =
154 handlers =
92 qualname = vcsserver
155 qualname = vcsserver
93 propagate = 1
156 propagate = 1
94
157
95
158
96 ##############
159 ##############
97 ## HANDLERS ##
160 ## HANDLERS ##
98 ##############
161 ##############
99
162
100 [handler_console]
163 [handler_console]
101 class = StreamHandler
164 class = StreamHandler
102 args = (sys.stderr,)
165 args = (sys.stderr,)
103 level = DEBUG
166 level = DEBUG
104 formatter = generic
167 formatter = generic
105
168
106 ################
169 ################
107 ## FORMATTERS ##
170 ## FORMATTERS ##
108 ################
171 ################
109
172
110 [formatter_generic]
173 [formatter_generic]
111 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
174 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
112 datefmt = %Y-%m-%d %H:%M:%S
175 datefmt = %Y-%m-%d %H:%M:%S
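
Since the memory limits now live in [server:main] rather than in the gunicorn python config, the post_fork hook in gunicorn_config.py reads them back through configparser. A minimal standalone sketch of that lookup (the vcsserver.ini path is assumed, and the defaults mirror the hook's fallbacks):

try:
    import configparser                     # Python 3
except ImportError:
    import ConfigParser as configparser     # Python 2, as used by the commit

config = configparser.ConfigParser()
config.read('vcsserver.ini')                # hypothetical path to the file shown above

section = dict(config.items('server:main'))
memory_max_usage = int(section.get('memory_max_usage', 0) or 0)
check_interval = int(section.get('memory_usage_check_interval', 60))
recovery_threshold = float(section.get('memory_usage_recovery_threshold', 0.8))
print(memory_max_usage, check_interval, recovery_threshold)   # 0 60 0.8 with the values above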