feat(redis): added redis prefix support for cache backend
super-admin | r1313:cbb3f19b default
@@ -1,191 +1,195 @@

 ; #################################
 ; RHODECODE VCSSERVER CONFIGURATION
 ; #################################

 [server:main]
 ; COMMON HOST/IP CONFIG, this applies mostly to the development setup,
 ; Host and port for gunicorn are controlled by gunicorn_conf.py
 host = 0.0.0.0
 port = 10010


 ; ###########################
 ; GUNICORN APPLICATION SERVER
 ; ###########################

 ; run with gunicorn --config gunicorn_conf.py --paste vcsserver.ini

 ; Module to use, this setting shouldn't be changed
 use = egg:gunicorn#main

 [app:main]
 ; The %(here)s variable will be replaced with the absolute path of the parent
 ; directory of this file
 ; Each option in app:main can be overridden by an environment variable
 ;
 ; To override an option:
 ;
 ; RC_<KeyName>
 ; Everything should be uppercase, . and - should be replaced by _.
 ; For example, if you have these configuration settings:
 ; rc_cache.repo_object.backend = foo
 ; can be overridden by
 ; export RC_CACHE_REPO_OBJECT_BACKEND=foo

 use = egg:rhodecode-vcsserver


 ; #############
 ; DEBUG OPTIONS
 ; #############

 # During development we want to have the debug toolbar enabled
 pyramid.includes =
     pyramid_debugtoolbar

 debugtoolbar.hosts = 0.0.0.0/0
 debugtoolbar.exclude_prefixes =
     /css
     /fonts
     /images
     /js

 ; #################
 ; END DEBUG OPTIONS
 ; #################

 ; Pyramid default locales, we need this to be set
 #pyramid.default_locale_name = en

 ; default locale used by VCS systems
 #locale = en_US.UTF-8

 ; path to binaries (hg, git, svn) for vcsserver, it should be set by the installer
 ; at installation time, e.g. /home/user/.rccontrol/vcsserver-1/profile/bin
 ; or /usr/local/bin/rhodecode_bin/vcs_bin
 core.binary_dir =

 ; Redis connection settings for svn integrations logic
 ; This connection string needs to be the same on ce and vcsserver
 vcs.svn.redis_conn = redis://redis:6379/0

 ; Custom exception store path, defaults to TMPDIR
 ; This is used to store exceptions from RhodeCode in a shared directory
 #exception_tracker.store_path =

 ; #############
 ; DOGPILE CACHE
 ; #############

 ; Default cache dir for caches. Putting this into a ramdisk can boost performance.
 ; eg. /tmpfs/data_ramdisk, however this directory might require a large amount of space
 #cache_dir = %(here)s/data

 ; ***************************************
 ; `repo_object` cache, default file based
 ; ***************************************

 ; `repo_object` cache settings for vcs methods for repositories
 #rc_cache.repo_object.backend = dogpile.cache.rc.file_namespace

 ; cache auto-expires after N seconds
 ; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30Days), 7776000 (90Days)
 #rc_cache.repo_object.expiration_time = 2592000

 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
 #rc_cache.repo_object.arguments.filename = /tmp/vcsserver_cache_repo_object.db

 ; ***********************************************************
 ; `repo_object` cache with redis backend
 ; recommended for larger instances, and for better performance
 ; ***********************************************************

 ; `repo_object` cache settings for vcs methods for repositories
 #rc_cache.repo_object.backend = dogpile.cache.rc.redis_msgpack

 ; cache auto-expires after N seconds
 ; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30Days), 7776000 (90Days)
 #rc_cache.repo_object.expiration_time = 2592000

 ; redis_expiration_time needs to be greater than expiration_time
 #rc_cache.repo_object.arguments.redis_expiration_time = 3592000

 #rc_cache.repo_object.arguments.host = localhost
 #rc_cache.repo_object.arguments.port = 6379
 #rc_cache.repo_object.arguments.db = 5
 #rc_cache.repo_object.arguments.socket_timeout = 30
 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
 #rc_cache.repo_object.arguments.distributed_lock = true

 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
 #rc_cache.repo_object.arguments.lock_auto_renewal = true

+; prefix for redis keys used for this cache backend, the final key is constructed using {custom-prefix}{key}
+#rc_cache.repo_object.arguments.key_prefix = custom-prefix-
+
+
 ; Statsd client config, this is used to send metrics to statsd
 ; We recommend setting up statsd_exporter and scraping metrics using Prometheus
 #statsd.enabled = false
 #statsd.statsd_host = 0.0.0.0
 #statsd.statsd_port = 8125
 #statsd.statsd_prefix =
 #statsd.statsd_ipv6 = false

 ; configure logging automatically at server startup; set to false
 ; to use the below custom logging config.
 ; RC_LOGGING_FORMATTER
 ; RC_LOGGING_LEVEL
 ; env variables can control the settings for logging in case of autoconfigure

 #logging.autoconfigure = true

 ; specify your own custom logging config file to configure logging
 #logging.logging_conf_file = /path/to/custom_logging.ini

 ; #####################
 ; LOGGING CONFIGURATION
 ; #####################

 [loggers]
 keys = root, vcsserver

 [handlers]
 keys = console

 [formatters]
 keys = generic, json

 ; #######
 ; LOGGERS
 ; #######
 [logger_root]
 level = NOTSET
 handlers = console

 [logger_vcsserver]
 level = DEBUG
 handlers =
 qualname = vcsserver
 propagate = 1

 ; ########
 ; HANDLERS
 ; ########

 [handler_console]
 class = StreamHandler
 args = (sys.stderr, )
 level = DEBUG
 ; To enable JSON formatted logs replace 'generic' with 'json'
 ; This allows sending properly formatted logs to Grafana Loki or Elasticsearch
 formatter = generic

 ; ##########
 ; FORMATTERS
 ; ##########

 [formatter_generic]
 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
 datefmt = %Y-%m-%d %H:%M:%S

 [formatter_json]
 format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
 class = vcsserver.lib._vendor.jsonlogger.JsonFormatter
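The four lines added to this first file (which appears to be the development .ini) are the user-facing half of the commit: when rc_cache.repo_object.arguments.key_prefix is set, every Redis key written by the cache backend gains that prefix, so several instances can share one Redis database without key collisions. A minimal sketch of the resulting key shapes, assuming the redis_msgpack_backend class prefix and the _lock_ pattern from the backend module later in this diff; the namespace and hashed argument key below are invented for illustration:

store_key_prefix = 'custom-prefix-'        # rc_cache.repo_object.arguments.key_prefix
backend_prefix = 'redis_msgpack_backend'   # RedisMsgPackBackend.key_prefix

# BaseRedisBackend.__init__ rebinds key_prefix = f'{store_key_prefix}{key_prefix}',
# so both cache keys and distributed-lock keys carry the custom prefix:
cache_key = f'{store_key_prefix}{backend_prefix}:repo_object:get_refs_1b2c3d'
lock_key = f'{store_key_prefix}_lock_{cache_key}'

print(cache_key)  # custom-prefix-redis_msgpack_backend:repo_object:get_refs_1b2c3d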
@@ -1,171 +1,175 @@

 ; #################################
 ; RHODECODE VCSSERVER CONFIGURATION
 ; #################################

 [server:main]
 ; COMMON HOST/IP CONFIG, this applies mostly to the development setup,
 ; Host and port for gunicorn are controlled by gunicorn_conf.py
 host = 0.0.0.0
 port = 10010


 ; ###########################
 ; GUNICORN APPLICATION SERVER
 ; ###########################

 ; run with gunicorn --config gunicorn_conf.py --paste vcsserver.ini

 ; Module to use, this setting shouldn't be changed
 use = egg:gunicorn#main

 [app:main]
 ; The %(here)s variable will be replaced with the absolute path of the parent
 ; directory of this file
 ; Each option in app:main can be overridden by an environment variable
 ;
 ; To override an option:
 ;
 ; RC_<KeyName>
 ; Everything should be uppercase, . and - should be replaced by _.
 ; For example, if you have these configuration settings:
 ; rc_cache.repo_object.backend = foo
 ; can be overridden by
 ; export RC_CACHE_REPO_OBJECT_BACKEND=foo

 use = egg:rhodecode-vcsserver

 ; Pyramid default locales, we need this to be set
 #pyramid.default_locale_name = en

 ; default locale used by VCS systems
 #locale = en_US.UTF-8

 ; path to binaries (hg, git, svn) for vcsserver, it should be set by the installer
 ; at installation time, e.g. /home/user/.rccontrol/vcsserver-1/profile/bin
 ; or /usr/local/bin/rhodecode_bin/vcs_bin
 core.binary_dir =

 ; Redis connection settings for svn integrations logic
 ; This connection string needs to be the same on ce and vcsserver
 vcs.svn.redis_conn = redis://redis:6379/0

 ; Custom exception store path, defaults to TMPDIR
 ; This is used to store exceptions from RhodeCode in a shared directory
 #exception_tracker.store_path =

 ; #############
 ; DOGPILE CACHE
 ; #############

 ; Default cache dir for caches. Putting this into a ramdisk can boost performance.
 ; eg. /tmpfs/data_ramdisk, however this directory might require a large amount of space
 #cache_dir = %(here)s/data

 ; ***************************************
 ; `repo_object` cache, default file based
 ; ***************************************

 ; `repo_object` cache settings for vcs methods for repositories
 #rc_cache.repo_object.backend = dogpile.cache.rc.file_namespace

 ; cache auto-expires after N seconds
 ; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30Days), 7776000 (90Days)
 #rc_cache.repo_object.expiration_time = 2592000

 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
 #rc_cache.repo_object.arguments.filename = /tmp/vcsserver_cache_repo_object.db

 ; ***********************************************************
 ; `repo_object` cache with redis backend
 ; recommended for larger instances, and for better performance
 ; ***********************************************************

 ; `repo_object` cache settings for vcs methods for repositories
 #rc_cache.repo_object.backend = dogpile.cache.rc.redis_msgpack

 ; cache auto-expires after N seconds
 ; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30Days), 7776000 (90Days)
 #rc_cache.repo_object.expiration_time = 2592000

 ; redis_expiration_time needs to be greater than expiration_time
 #rc_cache.repo_object.arguments.redis_expiration_time = 3592000

 #rc_cache.repo_object.arguments.host = localhost
 #rc_cache.repo_object.arguments.port = 6379
 #rc_cache.repo_object.arguments.db = 5
 #rc_cache.repo_object.arguments.socket_timeout = 30
 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
 #rc_cache.repo_object.arguments.distributed_lock = true

 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
 #rc_cache.repo_object.arguments.lock_auto_renewal = true

+; prefix for redis keys used for this cache backend, the final key is constructed using {custom-prefix}{key}
+#rc_cache.repo_object.arguments.key_prefix = custom-prefix-
+
+
 ; Statsd client config, this is used to send metrics to statsd
 ; We recommend setting up statsd_exporter and scraping metrics using Prometheus
 #statsd.enabled = false
 #statsd.statsd_host = 0.0.0.0
 #statsd.statsd_port = 8125
 #statsd.statsd_prefix =
 #statsd.statsd_ipv6 = false

 ; configure logging automatically at server startup; set to false
 ; to use the below custom logging config.
 ; RC_LOGGING_FORMATTER
 ; RC_LOGGING_LEVEL
 ; env variables can control the settings for logging in case of autoconfigure

 #logging.autoconfigure = true

 ; specify your own custom logging config file to configure logging
 #logging.logging_conf_file = /path/to/custom_logging.ini

 ; #####################
 ; LOGGING CONFIGURATION
 ; #####################

 [loggers]
 keys = root, vcsserver

 [handlers]
 keys = console

 [formatters]
 keys = generic, json

 ; #######
 ; LOGGERS
 ; #######
 [logger_root]
 level = NOTSET
 handlers = console

 [logger_vcsserver]
 level = INFO
 handlers =
 qualname = vcsserver
 propagate = 1

 ; ########
 ; HANDLERS
 ; ########

 [handler_console]
 class = StreamHandler
 args = (sys.stderr, )
 level = INFO
 ; To enable JSON formatted logs replace 'generic' with 'json'
 ; This allows sending properly formatted logs to Grafana Loki or Elasticsearch
 formatter = generic

 ; ##########
 ; FORMATTERS
 ; ##########

 [formatter_generic]
 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
 datefmt = %Y-%m-%d %H:%M:%S

 [formatter_json]
 format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
 class = vcsserver.lib._vendor.jsonlogger.JsonFormatter
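This second file (the same config without the debug toolbar and with INFO logging, i.e. the production-style variant) gains the identical key_prefix lines. Both configs repeat the RC_<KeyName> override rule in their [app:main] comments; a hedged sketch of that rule as a helper, assuming the mapping is exactly the uppercase-plus-underscore substitution the comments' example implies (the function name is hypothetical, not part of this commit):

import os

def rc_env_var(option: str) -> str:
    # uppercase the option name and replace '.' and '-' with '_'
    name = option.upper().replace('.', '_').replace('-', '_')
    # options that don't already start with 'rc_' are assumed to gain an RC_ prefix
    return name if name.startswith('RC_') else f'RC_{name}'

# reproduces the example given in the config comments
assert rc_env_var('rc_cache.repo_object.backend') == 'RC_CACHE_REPO_OBJECT_BACKEND'

# the new option added by this commit could then be overridden per environment:
prefix = os.environ.get(rc_env_var('rc_cache.repo_object.arguments.key_prefix'), '')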
@@ -1,310 +1,313 b''
1 # RhodeCode VCSServer provides access to different vcs backends via network.
1 # RhodeCode VCSServer provides access to different vcs backends via network.
2 # Copyright (C) 2014-2023 RhodeCode GmbH
2 # Copyright (C) 2014-2023 RhodeCode GmbH
3 #
3 #
4 # This program is free software; you can redistribute it and/or modify
4 # This program is free software; you can redistribute it and/or modify
5 # it under the terms of the GNU General Public License as published by
5 # it under the terms of the GNU General Public License as published by
6 # the Free Software Foundation; either version 3 of the License, or
6 # the Free Software Foundation; either version 3 of the License, or
7 # (at your option) any later version.
7 # (at your option) any later version.
8 #
8 #
9 # This program is distributed in the hope that it will be useful,
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU General Public License for more details.
12 # GNU General Public License for more details.
13 #
13 #
14 # You should have received a copy of the GNU General Public License
14 # You should have received a copy of the GNU General Public License
15 # along with this program; if not, write to the Free Software Foundation,
15 # along with this program; if not, write to the Free Software Foundation,
16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17
17
18 #import errno
18 #import errno
19 import fcntl
19 import fcntl
20 import functools
20 import functools
21 import logging
21 import logging
22 import os
22 import os
23 import pickle
23 import pickle
24 #import time
24 #import time
25
25
26 #import gevent
26 #import gevent
27 import msgpack
27 import msgpack
28 import redis
28 import redis
29
29
30 flock_org = fcntl.flock
30 flock_org = fcntl.flock
31 from typing import Union
31 from typing import Union
32
32
33 from dogpile.cache.api import Deserializer, Serializer
33 from dogpile.cache.api import Deserializer, Serializer
34 from dogpile.cache.backends import file as file_backend
34 from dogpile.cache.backends import file as file_backend
35 from dogpile.cache.backends import memory as memory_backend
35 from dogpile.cache.backends import memory as memory_backend
36 from dogpile.cache.backends import redis as redis_backend
36 from dogpile.cache.backends import redis as redis_backend
37 from dogpile.cache.backends.file import FileLock
37 from dogpile.cache.backends.file import FileLock
38 from dogpile.cache.util import memoized_property
38 from dogpile.cache.util import memoized_property
39
39
40 from ...lib.memory_lru_dict import LRUDict, LRUDictDebug
40 from ...lib.memory_lru_dict import LRUDict, LRUDictDebug
41 from ...lib.str_utils import safe_bytes, safe_str
41 from ...lib.str_utils import safe_bytes, safe_str
42 from ...lib.type_utils import str2bool
42 from ...lib.type_utils import str2bool
43
43
44 _default_max_size = 1024
44 _default_max_size = 1024
45
45
46 log = logging.getLogger(__name__)
46 log = logging.getLogger(__name__)
47
47
48
48
49 class LRUMemoryBackend(memory_backend.MemoryBackend):
49 class LRUMemoryBackend(memory_backend.MemoryBackend):
50 key_prefix = 'lru_mem_backend'
50 key_prefix = 'lru_mem_backend'
51 pickle_values = False
51 pickle_values = False
52
52
53 def __init__(self, arguments):
53 def __init__(self, arguments):
54 self.max_size = arguments.pop('max_size', _default_max_size)
54 self.max_size = arguments.pop('max_size', _default_max_size)
55
55
56 LRUDictClass = LRUDict
56 LRUDictClass = LRUDict
57 if arguments.pop('log_key_count', None):
57 if arguments.pop('log_key_count', None):
58 LRUDictClass = LRUDictDebug
58 LRUDictClass = LRUDictDebug
59
59
60 arguments['cache_dict'] = LRUDictClass(self.max_size)
60 arguments['cache_dict'] = LRUDictClass(self.max_size)
61 super().__init__(arguments)
61 super().__init__(arguments)
62
62
63 def __repr__(self):
63 def __repr__(self):
64 return f'{self.__class__}(maxsize=`{self.max_size}`)'
64 return f'{self.__class__}(maxsize=`{self.max_size}`)'
65
65
66 def __str__(self):
66 def __str__(self):
67 return self.__repr__()
67 return self.__repr__()
68
68
69 def delete(self, key):
69 def delete(self, key):
70 try:
70 try:
71 del self._cache[key]
71 del self._cache[key]
72 except KeyError:
72 except KeyError:
73 # we don't care if key isn't there at deletion
73 # we don't care if key isn't there at deletion
74 pass
74 pass
75
75
76 def list_keys(self, prefix):
76 def list_keys(self, prefix):
77 return list(self._cache.keys())
77 return list(self._cache.keys())
78
78
79 def delete_multi(self, keys):
79 def delete_multi(self, keys):
80 for key in keys:
80 for key in keys:
81 self.delete(key)
81 self.delete(key)
82
82
83 def delete_multi_by_prefix(self, prefix):
83 def delete_multi_by_prefix(self, prefix):
84 cache_keys = self.list_keys(prefix=prefix)
84 cache_keys = self.list_keys(prefix=prefix)
85 num_affected_keys = len(cache_keys)
85 num_affected_keys = len(cache_keys)
86 if num_affected_keys:
86 if num_affected_keys:
87 self.delete_multi(cache_keys)
87 self.delete_multi(cache_keys)
88 return num_affected_keys
88 return num_affected_keys
89
89
90
90
91 class PickleSerializer:
91 class PickleSerializer:
92 serializer: None | Serializer = staticmethod( # type: ignore
92 serializer: None | Serializer = staticmethod( # type: ignore
93 functools.partial(pickle.dumps, protocol=pickle.HIGHEST_PROTOCOL)
93 functools.partial(pickle.dumps, protocol=pickle.HIGHEST_PROTOCOL)
94 )
94 )
95 deserializer: None | Deserializer = staticmethod( # type: ignore
95 deserializer: None | Deserializer = staticmethod( # type: ignore
96 functools.partial(pickle.loads)
96 functools.partial(pickle.loads)
97 )
97 )
98
98
99
99
100 class MsgPackSerializer:
100 class MsgPackSerializer:
101 serializer: None | Serializer = staticmethod( # type: ignore
101 serializer: None | Serializer = staticmethod( # type: ignore
102 msgpack.packb
102 msgpack.packb
103 )
103 )
104 deserializer: None | Deserializer = staticmethod( # type: ignore
104 deserializer: None | Deserializer = staticmethod( # type: ignore
105 functools.partial(msgpack.unpackb, use_list=False)
105 functools.partial(msgpack.unpackb, use_list=False)
106 )
106 )
107
107
108
108
109 class CustomLockFactory(FileLock):
109 class CustomLockFactory(FileLock):
110
110
111 pass
111 pass
112
112
113
113
114 class FileNamespaceBackend(PickleSerializer, file_backend.DBMBackend):
114 class FileNamespaceBackend(PickleSerializer, file_backend.DBMBackend):
115 key_prefix = 'file_backend'
115 key_prefix = 'file_backend'
116
116
117 def __init__(self, arguments):
117 def __init__(self, arguments):
118 arguments['lock_factory'] = CustomLockFactory
118 arguments['lock_factory'] = CustomLockFactory
119 db_file = arguments.get('filename')
119 db_file = arguments.get('filename')
120
120
121 log.debug('initialing cache-backend=%s db in %s', self.__class__.__name__, db_file)
121 log.debug('initialing cache-backend=%s db in %s', self.__class__.__name__, db_file)
122 db_file_dir = os.path.dirname(db_file)
122 db_file_dir = os.path.dirname(db_file)
123 if not os.path.isdir(db_file_dir):
123 if not os.path.isdir(db_file_dir):
124 os.makedirs(db_file_dir)
124 os.makedirs(db_file_dir)
125
125
126 try:
126 try:
127 super().__init__(arguments)
127 super().__init__(arguments)
128 except Exception:
128 except Exception:
129 log.exception('Failed to initialize db at: %s', db_file)
129 log.exception('Failed to initialize db at: %s', db_file)
130 raise
130 raise
131
131
132 def __repr__(self):
132 def __repr__(self):
133 return f'{self.__class__}(file=`{self.filename}`)'
133 return f'{self.__class__}(file=`{self.filename}`)'
134
134
135 def __str__(self):
135 def __str__(self):
136 return self.__repr__()
136 return self.__repr__()
137
137
138 def _get_keys_pattern(self, prefix: bytes = b''):
138 def _get_keys_pattern(self, prefix: bytes = b''):
139 return b'%b:%b' % (safe_bytes(self.key_prefix), safe_bytes(prefix))
139 return b'%b:%b' % (safe_bytes(self.key_prefix), safe_bytes(prefix))
140
140
141 def list_keys(self, prefix: bytes = b''):
141 def list_keys(self, prefix: bytes = b''):
142 prefix = self._get_keys_pattern(prefix)
142 prefix = self._get_keys_pattern(prefix)
143
143
144 def cond(dbm_key: bytes):
144 def cond(dbm_key: bytes):
145 if not prefix:
145 if not prefix:
146 return True
146 return True
147
147
148 if dbm_key.startswith(prefix):
148 if dbm_key.startswith(prefix):
149 return True
149 return True
150 return False
150 return False
151
151
152 with self._dbm_file(True) as dbm:
152 with self._dbm_file(True) as dbm:
153 try:
153 try:
154 return list(filter(cond, dbm.keys()))
154 return list(filter(cond, dbm.keys()))
155 except Exception:
155 except Exception:
156 log.error('Failed to fetch DBM keys from DB: %s', self.get_store())
156 log.error('Failed to fetch DBM keys from DB: %s', self.get_store())
157 raise
157 raise
158
158
159 def delete_multi_by_prefix(self, prefix):
159 def delete_multi_by_prefix(self, prefix):
160 cache_keys = self.list_keys(prefix=prefix)
160 cache_keys = self.list_keys(prefix=prefix)
161 num_affected_keys = len(cache_keys)
161 num_affected_keys = len(cache_keys)
162 if num_affected_keys:
162 if num_affected_keys:
163 self.delete_multi(cache_keys)
163 self.delete_multi(cache_keys)
164 return num_affected_keys
164 return num_affected_keys
165
165
166 def get_store(self):
166 def get_store(self):
167 return self.filename
167 return self.filename
168
168
169 def cleanup_store(self):
169 def cleanup_store(self):
170 for ext in ("db", "dat", "pag", "dir"):
170 for ext in ("db", "dat", "pag", "dir"):
171 final_filename = self.filename + os.extsep + ext
171 final_filename = self.filename + os.extsep + ext
172 if os.path.exists(final_filename):
172 if os.path.exists(final_filename):
173 os.remove(final_filename)
173 os.remove(final_filename)
174 log.warning('Removed dbm file %s', final_filename)
174 log.warning('Removed dbm file %s', final_filename)
175
175
176
176
177 class BaseRedisBackend(redis_backend.RedisBackend):
177 class BaseRedisBackend(redis_backend.RedisBackend):
178 key_prefix = ''
178 key_prefix = ''
179
179
180 def __init__(self, arguments):
180 def __init__(self, arguments):
181 self.db_conn = arguments.get('host', '') or arguments.get('url', '') or 'redis-host'
181 self.db_conn = arguments.get('host', '') or arguments.get('url', '') or 'redis-host'
182 super().__init__(arguments)
182 super().__init__(arguments)
183
183
184 self._lock_timeout = self.lock_timeout
184 self._lock_timeout = self.lock_timeout
185 self._lock_auto_renewal = str2bool(arguments.pop("lock_auto_renewal", True))
185 self._lock_auto_renewal = str2bool(arguments.pop("lock_auto_renewal", True))
186
186
187 self._store_key_prefix = arguments.pop('key_prefix', '')
188 self.key_prefix = f'{self._store_key_prefix}{self.key_prefix}'
189
187 if self._lock_auto_renewal and not self._lock_timeout:
190 if self._lock_auto_renewal and not self._lock_timeout:
188 # set default timeout for auto_renewal
191 # set default timeout for auto_renewal
189 self._lock_timeout = 30
192 self._lock_timeout = 30
190
193
191 def __repr__(self):
194 def __repr__(self):
192 return f'{self.__class__}(conn=`{self.db_conn}`)'
195 return f'{self.__class__}(conn=`{self.db_conn}`)'
193
196
194 def __str__(self):
197 def __str__(self):
195 return self.__repr__()
198 return self.__repr__()
196
199
197 def _create_client(self):
200 def _create_client(self):
198 args = {}
201 args = {}
199
202
200 if self.url is not None:
203 if self.url is not None:
201 args.update(url=self.url)
204 args.update(url=self.url)
202
205
203 else:
206 else:
204 args.update(
207 args.update(
205 host=self.host, password=self.password,
208 host=self.host, password=self.password,
206 port=self.port, db=self.db
209 port=self.port, db=self.db
207 )
210 )
208
211
209 connection_pool = redis.ConnectionPool(**args)
212 connection_pool = redis.ConnectionPool(**args)
210 self.writer_client = redis.StrictRedis(
213 self.writer_client = redis.StrictRedis(
211 connection_pool=connection_pool
214 connection_pool=connection_pool
212 )
215 )
213 self.reader_client = self.writer_client
216 self.reader_client = self.writer_client
214
217
215 def _get_keys_pattern(self, prefix: bytes = b''):
218 def _get_keys_pattern(self, prefix: bytes = b''):
216 return b'%b:%b*' % (safe_bytes(self.key_prefix), safe_bytes(prefix))
219 return b'%b:%b*' % (safe_bytes(self.key_prefix), safe_bytes(prefix))
217
220
218 def list_keys(self, prefix: bytes = b''):
221 def list_keys(self, prefix: bytes = b''):
219 prefix = self._get_keys_pattern(prefix)
222 prefix = self._get_keys_pattern(prefix)
220 return self.reader_client.keys(prefix)
223 return self.reader_client.keys(prefix)
221
224
222 def delete_multi_by_prefix(self, prefix, use_lua=False):
225 def delete_multi_by_prefix(self, prefix, use_lua=False):
223 if use_lua:
226 if use_lua:
224 # high efficient LUA script to delete ALL keys by prefix...
227 # high efficient LUA script to delete ALL keys by prefix...
225 lua = """local keys = redis.call('keys', ARGV[1])
228 lua = """local keys = redis.call('keys', ARGV[1])
226 for i=1,#keys,5000 do
229 for i=1,#keys,5000 do
227 redis.call('del', unpack(keys, i, math.min(i+(5000-1), #keys)))
230 redis.call('del', unpack(keys, i, math.min(i+(5000-1), #keys)))
228 end
231 end
229 return #keys"""
232 return #keys"""
230 num_affected_keys = self.writer_client.eval(
233 num_affected_keys = self.writer_client.eval(
231 lua,
234 lua,
232 0,
235 0,
233 f"{prefix}*")
236 f"{prefix}*")
234 else:
237 else:
235 cache_keys = self.list_keys(prefix=prefix)
238 cache_keys = self.list_keys(prefix=prefix)
236 num_affected_keys = len(cache_keys)
239 num_affected_keys = len(cache_keys)
237 if num_affected_keys:
240 if num_affected_keys:
238 self.delete_multi(cache_keys)
241 self.delete_multi(cache_keys)
239 return num_affected_keys
242 return num_affected_keys
240
243
241 def get_store(self):
244 def get_store(self):
242 return self.reader_client.connection_pool
245 return self.reader_client.connection_pool
243
246
244 def get_mutex(self, key):
247 def get_mutex(self, key):
245 if self.distributed_lock:
248 if self.distributed_lock:
246 lock_key = f'_lock_{safe_str(key)}'
249 lock_key = f'{self._store_key_prefix}_lock_{safe_str(key)}'
247 return get_mutex_lock(
250 return get_mutex_lock(
248 self.writer_client, lock_key,
251 self.writer_client, lock_key,
249 self._lock_timeout,
252 self._lock_timeout,
250 auto_renewal=self._lock_auto_renewal
253 auto_renewal=self._lock_auto_renewal
251 )
254 )
252 else:
255 else:
253 return None
256 return None
254
257
255
258
256 class RedisPickleBackend(PickleSerializer, BaseRedisBackend):
259 class RedisPickleBackend(PickleSerializer, BaseRedisBackend):
257 key_prefix = 'redis_pickle_backend'
260 key_prefix = 'redis_pickle_backend'
258 pass
261 pass
259
262
260
263
261 class RedisMsgPackBackend(MsgPackSerializer, BaseRedisBackend):
264 class RedisMsgPackBackend(MsgPackSerializer, BaseRedisBackend):
262 key_prefix = 'redis_msgpack_backend'
265 key_prefix = 'redis_msgpack_backend'
263 pass
266 pass
264
267
265
268
266 def get_mutex_lock(client, lock_key, lock_timeout, auto_renewal=False):
269 def get_mutex_lock(client, lock_key, lock_timeout, auto_renewal=False):
267 from ...lib._vendor import redis_lock
270 from ...lib._vendor import redis_lock
268
271
269 class _RedisLockWrapper:
272 class _RedisLockWrapper:
270 """LockWrapper for redis_lock"""
273 """LockWrapper for redis_lock"""
271
274
272 @classmethod
275 @classmethod
273 def get_lock(cls):
276 def get_lock(cls):
274 return redis_lock.Lock(
277 return redis_lock.Lock(
275 redis_client=client,
278 redis_client=client,
276 name=lock_key,
279 name=lock_key,
277 expire=lock_timeout,
280 expire=lock_timeout,
278 auto_renewal=auto_renewal,
281 auto_renewal=auto_renewal,
279 strict=True,
282 strict=True,
280 )
283 )
281
284
282 def __repr__(self):
285 def __repr__(self):
283 return f"{self.__class__.__name__}:{lock_key}"
286 return f"{self.__class__.__name__}:{lock_key}"
284
287
285 def __str__(self):
288 def __str__(self):
286 return f"{self.__class__.__name__}:{lock_key}"
289 return f"{self.__class__.__name__}:{lock_key}"
287
290
288 def __init__(self):
291 def __init__(self):
289 self.lock = self.get_lock()
292 self.lock = self.get_lock()
290 self.lock_key = lock_key
293 self.lock_key = lock_key
291
294
292 def acquire(self, wait=True):
295 def acquire(self, wait=True):
293 log.debug('Trying to acquire Redis lock for key %s', self.lock_key)
296 log.debug('Trying to acquire Redis lock for key %s', self.lock_key)
294 try:
297 try:
295 acquired = self.lock.acquire(wait)
298 acquired = self.lock.acquire(wait)
296 log.debug('Got lock for key %s, %s', self.lock_key, acquired)
299 log.debug('Got lock for key %s, %s', self.lock_key, acquired)
297 return acquired
300 return acquired
298 except redis_lock.AlreadyAcquired:
301 except redis_lock.AlreadyAcquired:
299 return False
302 return False
300 except redis_lock.AlreadyStarted:
303 except redis_lock.AlreadyStarted:
301 # refresh thread exists, but it also means we acquired the lock
304 # refresh thread exists, but it also means we acquired the lock
302 return True
305 return True
303
306
304 def release(self):
307 def release(self):
305 try:
308 try:
306 self.lock.release()
309 self.lock.release()
307 except redis_lock.NotAcquired:
310 except redis_lock.NotAcquired:
308 pass
311 pass
309
312
310 return _RedisLockWrapper()
313 return _RedisLockWrapper()
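This backend module carries the implementation: BaseRedisBackend pops the new key_prefix argument, folds it into both the class-level key_prefix and the distributed-lock key, so prefix-scoped operations such as delete_multi_by_prefix stay confined to one instance's keys. A hedged usage sketch of what the non-Lua branch amounts to, exercised directly against redis-py; the connection settings mirror the commented-out INI defaults and the key pattern is illustrative:

import redis

# assumed connection settings, matching the sample INI values above
client = redis.StrictRedis(host='localhost', port=6379, db=5)

# BaseRedisBackend._get_keys_pattern builds b'<key_prefix>:<prefix>*'; with this
# commit, key_prefix itself already starts with the configured store prefix.
pattern = b'custom-prefix-redis_msgpack_backend:repo_object*'

keys = client.keys(pattern)   # list matching keys...
if keys:
    client.delete(*keys)      # ...then delete them in one call, as delete_multi does
print(f'removed {len(keys)} keys')

The use_lua branch does the same work server-side, deleting in chunks of 5000, a common idiom to stay under Lua's unpack() stack limit.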
@@ -1,245 +1,243 @@
 # RhodeCode VCSServer provides access to different vcs backends via network.
 # Copyright (C) 2014-2023 RhodeCode GmbH
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
 # the Free Software Foundation; either version 3 of the License, or
 # (at your option) any later version.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with this program; if not, write to the Free Software Foundation,
 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

 import functools
 import logging
 import os
 import threading
 import time

 import decorator
 from dogpile.cache import CacheRegion


 from ...lib.hash_utils import sha1
 from ...lib.str_utils import safe_bytes
 from ...lib.type_utils import str2bool  # noqa :required by imports from .utils

 from . import region_meta

 log = logging.getLogger(__name__)


 class RhodeCodeCacheRegion(CacheRegion):

     def __repr__(self):
         return f'`{self.__class__.__name__}(name={self.name}, backend={self.backend.__class__})`'

     def conditional_cache_on_arguments(
             self, namespace=None,
             expiration_time=None,
             should_cache_fn=None,
             to_str=str,
             function_key_generator=None,
             condition=True):
         """
         Custom conditional decorator that will not touch any dogpile internals if
         the condition isn't met. This works a bit differently from should_cache_fn,
         and it's faster in cases where we never want to compute cached values.
         """
         expiration_time_is_callable = callable(expiration_time)
         if not namespace:
             namespace = getattr(self, '_default_namespace', None)

         if function_key_generator is None:
             function_key_generator = self.function_key_generator

         def get_or_create_for_user_func(func_key_generator, user_func, *arg, **kw):

             if not condition:
                 log.debug('Calling un-cached method:%s', user_func.__name__)
                 start = time.time()
                 result = user_func(*arg, **kw)
                 total = time.time() - start
                 log.debug('un-cached method:%s took %.4fs', user_func.__name__, total)
                 return result

             key = func_key_generator(*arg, **kw)
+            timeout = expiration_time() if expiration_time_is_callable else expiration_time
+            log.debug('Calling cached (timeout=%s) method:`%s`', timeout, user_func.__name__)

-            timeout = expiration_time() if expiration_time_is_callable \
-                else expiration_time
-
-            log.debug('Calling cached method:`%s`', user_func.__name__)
             return self.get_or_create(key, user_func, timeout, should_cache_fn, (arg, kw))

         def cache_decorator(user_func):
             if to_str is str:
                 # backwards compatible
                 key_generator = function_key_generator(namespace, user_func)
             else:
                 key_generator = function_key_generator(namespace, user_func, to_str=to_str)

             def refresh(*arg, **kw):
                 """
                 Like invalidate, but regenerates the value instead
                 """
                 key = key_generator(*arg, **kw)
                 value = user_func(*arg, **kw)
                 self.set(key, value)
                 return value

             def invalidate(*arg, **kw):
                 key = key_generator(*arg, **kw)
                 self.delete(key)

             def set_(value, *arg, **kw):
                 key = key_generator(*arg, **kw)
                 self.set(key, value)

             def get(*arg, **kw):
                 key = key_generator(*arg, **kw)
                 return self.get(key)

             user_func.set = set_
             user_func.invalidate = invalidate
             user_func.get = get
             user_func.refresh = refresh
             user_func.key_generator = key_generator
             user_func.original = user_func

             # Use `decorate` to preserve the signature of :param:`user_func`.
             return decorator.decorate(user_func, functools.partial(
                 get_or_create_for_user_func, key_generator))

         return cache_decorator


 def make_region(*arg, **kw):
     return RhodeCodeCacheRegion(*arg, **kw)


 def get_default_cache_settings(settings, prefixes=None):
     prefixes = prefixes or []
     cache_settings = {}
     for key in settings.keys():
         for prefix in prefixes:
             if key.startswith(prefix):
                 name = key.split(prefix)[1].strip()
                 val = settings[key]
                 if isinstance(val, str):
                     val = val.strip()
                 cache_settings[name] = val
     return cache_settings


 def compute_key_from_params(*args):
     """
     Helper to compute key from given params to be used in cache manager
     """
     return sha1(safe_bytes("_".join(map(str, args))))


 def custom_key_generator(backend, namespace, fn):
     func_name = fn.__name__

     def generate_key(*args):
         backend_pref = getattr(backend, 'key_prefix', None) or 'backend_prefix'
         namespace_pref = namespace or 'default_namespace'
         arg_key = compute_key_from_params(*args)
         final_key = f"{backend_pref}:{namespace_pref}:{func_name}_{arg_key}"

         return final_key

     return generate_key


 def backend_key_generator(backend):
     """
     Special wrapper that also sends over the backend to the key generator
     """
     def wrapper(namespace, fn):
         return custom_key_generator(backend, namespace, fn)
     return wrapper


 def get_or_create_region(region_name, region_namespace: str = None, use_async_runner=False, force=False):
     from .backends import FileNamespaceBackend
     from . import async_creation_runner

     region_obj = region_meta.dogpile_cache_regions.get(region_name)
     if not region_obj:
         reg_keys = list(region_meta.dogpile_cache_regions.keys())
         raise OSError(f'Region `{region_name}` not found in configured regions: {reg_keys}.')

     region_uid_name = f'{region_name}:{region_namespace}'

     # Special case for ONLY the FileNamespaceBackend backend. We register one-file-per-region
     if isinstance(region_obj.actual_backend, FileNamespaceBackend):
         if not region_namespace:
             raise ValueError(f'{FileNamespaceBackend} requires specifying the region_namespace param')

         region_exist = region_meta.dogpile_cache_regions.get(region_namespace)
         if region_exist and not force:
             log.debug('Using already configured region: %s', region_namespace)
             return region_exist

         expiration_time = region_obj.expiration_time

         cache_dir = region_meta.dogpile_config_defaults['cache_dir']
         namespace_cache_dir = cache_dir

         # we default the namespace_cache_dir to our default cache dir.
         # however, if this backend is configured with filename= param, we prioritize that
         # so all caches within that particular region, even those namespaced, end up in the same path
         if region_obj.actual_backend.filename:
             namespace_cache_dir = os.path.dirname(region_obj.actual_backend.filename)

         if not os.path.isdir(namespace_cache_dir):
             os.makedirs(namespace_cache_dir)
         new_region = make_region(
             name=region_uid_name,
             function_key_generator=backend_key_generator(region_obj.actual_backend)
         )

         namespace_filename = os.path.join(
             namespace_cache_dir, f"{region_name}_{region_namespace}.cache_db")
         # special type that allows 1db per namespace
         new_region.configure(
             backend='dogpile.cache.rc.file_namespace',
             expiration_time=expiration_time,
             arguments={"filename": namespace_filename}
         )

         # create and save in region caches
         log.debug('configuring new region: %s', region_uid_name)
         region_obj = region_meta.dogpile_cache_regions[region_namespace] = new_region

     region_obj._default_namespace = region_namespace
     if use_async_runner:
         region_obj.async_creation_runner = async_creation_runner
     return region_obj


 def clear_cache_namespace(cache_region: str | RhodeCodeCacheRegion, cache_namespace_uid: str, method: str) -> int:
     from . import CLEAR_DELETE, CLEAR_INVALIDATE

     if not isinstance(cache_region, RhodeCodeCacheRegion):
         cache_region = get_or_create_region(cache_region, cache_namespace_uid)
     log.debug('clearing cache region: %s [prefix:%s] with method=%s',
               cache_region, cache_namespace_uid, method)

     num_affected_keys = 0

     if method == CLEAR_INVALIDATE:
         # NOTE: The CacheRegion.invalidate() method's default mode of
         # operation is to set a timestamp local to this CacheRegion in this Python process only.
         # It does not impact other Python processes or regions as the timestamp is only stored locally in memory.
         cache_region.invalidate(hard=True)

     if method == CLEAR_DELETE:
         num_affected_keys = cache_region.backend.delete_multi_by_prefix(prefix=cache_namespace_uid)
     return num_affected_keys
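For completeness, a sketch of how these utilities are typically consumed; the namespace value and cached function are hypothetical, and this assumes the 'repo_object' region from the INI files above was already registered in region_meta at startup:

# hypothetical consumer of the utilities above
region = get_or_create_region('repo_object', region_namespace='repo_object.repo1')

@region.conditional_cache_on_arguments(namespace='repo_object.repo1', condition=True)
def compute_branches(repo_id: int) -> list:
    # expensive VCS call; cached under <key_prefix>:<namespace>:compute_branches_<sha1(args)>
    return ['master', 'develop']

compute_branches(1)              # computed once, then stored
compute_branches(1)              # served from the cache
compute_branches.invalidate(1)   # helper attached by cache_decorator

Because custom_key_generator starts every key with the backend's key_prefix, and this commit prepends the configured store prefix to that attribute, the per-instance prefix flows through the decorator, clear_cache_namespace, and the distributed locks with no further changes here.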