merged stable into default
super-admin
r947:4c8695a6 merge default
@@ -0,0 +1,16 @@
1 [DEFAULT]
2 done = false
3
4 [task:bump_version]
5 done = true
6
7 [task:fixes_on_stable]
8 done = true
9
10 [task:pip2nix_generated]
11 done = true
12
13 [release]
14 state = prepared
15 version = 4.25.2
16
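The new file above is a plain INI release-state tracker: each [task:*] section carries a "done" flag and [release] records the prepared version. As a rough illustration only (the file's path is not shown in the diff, so the name below is an assumption), it can be inspected with Python's standard configparser:

    import configparser

    parser = configparser.ConfigParser()
    parser.read('release_state.ini')  # assumed name; the diff does not show the file path

    # Collect [task:*] sections whose "done" flag is still false.
    pending = [s for s in parser.sections()
               if s.startswith('task:') and not parser.getboolean(s, 'done')]
    print(parser.get('release', 'version'), parser.get('release', 'state'), pending)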
@@ -0,0 +1,390 @@
1 import sys
2 import threading
3 import weakref
4 from base64 import b64encode
5 from logging import getLogger
6 from os import urandom
7
8 from redis import StrictRedis
9
10 __version__ = '3.7.0'
11
12 loggers = {
13 k: getLogger("vcsserver." + ".".join((__name__, k)))
14 for k in [
15 "acquire",
16 "refresh.thread.start",
17 "refresh.thread.stop",
18 "refresh.thread.exit",
19 "refresh.start",
20 "refresh.shutdown",
21 "refresh.exit",
22 "release",
23 ]
24 }
25
26 PY3 = sys.version_info[0] == 3
27
28 if PY3:
29 text_type = str
30 binary_type = bytes
31 else:
32 text_type = unicode # noqa
33 binary_type = str
34
35
36 # Check if the id matches. If not, return an error code.
37 UNLOCK_SCRIPT = b"""
38 if redis.call("get", KEYS[1]) ~= ARGV[1] then
39 return 1
40 else
41 redis.call("del", KEYS[2])
42 redis.call("lpush", KEYS[2], 1)
43 redis.call("pexpire", KEYS[2], ARGV[2])
44 redis.call("del", KEYS[1])
45 return 0
46 end
47 """
48
49 # Covers both cases: when the key doesn't exist and when it doesn't equal the lock's id
50 EXTEND_SCRIPT = b"""
51 if redis.call("get", KEYS[1]) ~= ARGV[1] then
52 return 1
53 elseif redis.call("ttl", KEYS[1]) < 0 then
54 return 2
55 else
56 redis.call("expire", KEYS[1], ARGV[2])
57 return 0
58 end
59 """
60
61 RESET_SCRIPT = b"""
62 redis.call('del', KEYS[2])
63 redis.call('lpush', KEYS[2], 1)
64 redis.call('pexpire', KEYS[2], ARGV[2])
65 return redis.call('del', KEYS[1])
66 """
67
68 RESET_ALL_SCRIPT = b"""
69 local locks = redis.call('keys', 'lock:*')
70 local signal
71 for _, lock in pairs(locks) do
72 signal = 'lock-signal:' .. string.sub(lock, 6)
73 redis.call('del', signal)
74 redis.call('lpush', signal, 1)
75 redis.call('expire', signal, 1)
76 redis.call('del', lock)
77 end
78 return #locks
79 """
80
81
82 class AlreadyAcquired(RuntimeError):
83 pass
84
85
86 class NotAcquired(RuntimeError):
87 pass
88
89
90 class AlreadyStarted(RuntimeError):
91 pass
92
93
94 class TimeoutNotUsable(RuntimeError):
95 pass
96
97
98 class InvalidTimeout(RuntimeError):
99 pass
100
101
102 class TimeoutTooLarge(RuntimeError):
103 pass
104
105
106 class NotExpirable(RuntimeError):
107 pass
108
109
110 class Lock(object):
111 """
112 A Lock context manager implemented via redis SETNX/BLPOP.
113 """
114 unlock_script = None
115 extend_script = None
116 reset_script = None
117 reset_all_script = None
118
119 def __init__(self, redis_client, name, expire=None, id=None, auto_renewal=False, strict=True, signal_expire=1000):
120 """
121 :param redis_client:
122 An instance of :class:`~StrictRedis`.
123 :param name:
124 The name (redis key) the lock should have.
125 :param expire:
126 The lock expiry time in seconds. If left at the default (None)
127 the lock will not expire.
128 :param id:
129 The ID (redis value) the lock should have. A random value is
130 generated when left at the default.
131
132 Note that if you specify this then the lock is marked as "held". Acquires
133 won't be possible.
134 :param auto_renewal:
135 If set to ``True``, Lock will automatically renew the lock so that it
136 doesn't expire for as long as the lock is held (acquire() called
137 or running in a context manager).
138
139 Implementation note: Renewal will happen using a daemon thread with
140 an interval of ``expire*2/3``. If wishing to use a different renewal
141 time, subclass Lock, call ``super().__init__()`` then set
142 ``self._lock_renewal_interval`` to your desired interval.
143 :param strict:
144 If set ``True`` then the ``redis_client`` needs to be an instance of ``redis.StrictRedis``.
145 :param signal_expire:
146 Advanced option to override signal list expiration in milliseconds. Increase it for very slow clients. Default: ``1000``.
147 """
148 if strict and not isinstance(redis_client, StrictRedis):
149 raise ValueError("redis_client must be instance of StrictRedis. "
150 "Use strict=False if you know what you're doing.")
151 if auto_renewal and expire is None:
152 raise ValueError("Expire may not be None when auto_renewal is set")
153
154 self._client = redis_client
155
156 if expire:
157 expire = int(expire)
158 if expire < 0:
159 raise ValueError("A negative expire is not acceptable.")
160 else:
161 expire = None
162 self._expire = expire
163
164 self._signal_expire = signal_expire
165 if id is None:
166 self._id = b64encode(urandom(18)).decode('ascii')
167 elif isinstance(id, binary_type):
168 try:
169 self._id = id.decode('ascii')
170 except UnicodeDecodeError:
171 self._id = b64encode(id).decode('ascii')
172 elif isinstance(id, text_type):
173 self._id = id
174 else:
175 raise TypeError("Incorrect type for `id`. Must be bytes/str not %s." % type(id))
176 self._name = 'lock:' + name
177 self._signal = 'lock-signal:' + name
178 self._lock_renewal_interval = (float(expire) * 2 / 3
179 if auto_renewal
180 else None)
181 self._lock_renewal_thread = None
182
183 self.register_scripts(redis_client)
184
185 @classmethod
186 def register_scripts(cls, redis_client):
187 global reset_all_script
188 if reset_all_script is None:
189 reset_all_script = redis_client.register_script(RESET_ALL_SCRIPT)
190 cls.unlock_script = redis_client.register_script(UNLOCK_SCRIPT)
191 cls.extend_script = redis_client.register_script(EXTEND_SCRIPT)
192 cls.reset_script = redis_client.register_script(RESET_SCRIPT)
193 cls.reset_all_script = redis_client.register_script(RESET_ALL_SCRIPT)
194
195 @property
196 def _held(self):
197 return self.id == self.get_owner_id()
198
199 def reset(self):
200 """
201 Forcibly deletes the lock. Use this with care.
202 """
203 self.reset_script(client=self._client, keys=(self._name, self._signal), args=(self.id, self._signal_expire))
204
205 @property
206 def id(self):
207 return self._id
208
209 def get_owner_id(self):
210 owner_id = self._client.get(self._name)
211 if isinstance(owner_id, binary_type):
212 owner_id = owner_id.decode('ascii', 'replace')
213 return owner_id
214
215 def acquire(self, blocking=True, timeout=None):
216 """
217 :param blocking:
218 Boolean value specifying whether lock should be blocking or not.
219 :param timeout:
220 An integer value specifying the maximum number of seconds to block.
221 """
222 logger = loggers["acquire"]
223
224 logger.debug("Getting acquire on %r ...", self._name)
225
226 if self._held:
227 owner_id = self.get_owner_id()
228 raise AlreadyAcquired("Already acquired from this Lock instance. Lock id: {}".format(owner_id))
229
230 if not blocking and timeout is not None:
231 raise TimeoutNotUsable("Timeout cannot be used if blocking=False")
232
233 if timeout:
234 timeout = int(timeout)
235 if timeout < 0:
236 raise InvalidTimeout("Timeout (%d) cannot be less than or equal to 0" % timeout)
237
238 if self._expire and not self._lock_renewal_interval and timeout > self._expire:
239 raise TimeoutTooLarge("Timeout (%d) cannot be greater than expire (%d)" % (timeout, self._expire))
240
241 busy = True
242 blpop_timeout = timeout or self._expire or 0
243 timed_out = False
244 while busy:
245 busy = not self._client.set(self._name, self._id, nx=True, ex=self._expire)
246 if busy:
247 if timed_out:
248 return False
249 elif blocking:
250 timed_out = not self._client.blpop(self._signal, blpop_timeout) and timeout
251 else:
252 logger.warning("Failed to get %r.", self._name)
253 return False
254
255 logger.info("Got lock for %r.", self._name)
256 if self._lock_renewal_interval is not None:
257 self._start_lock_renewer()
258 return True
259
260 def extend(self, expire=None):
261 """Extends expiration time of the lock.
262
263 :param expire:
264 New expiration time. If ``None`` - `expire` provided during
265 lock initialization will be taken.
266 """
267 if expire:
268 expire = int(expire)
269 if expire < 0:
270 raise ValueError("A negative expire is not acceptable.")
271 elif self._expire is not None:
272 expire = self._expire
273 else:
274 raise TypeError(
275 "To extend a lock 'expire' must be provided as an "
276 "argument to extend() method or at initialization time."
277 )
278
279 error = self.extend_script(client=self._client, keys=(self._name, self._signal), args=(self._id, expire))
280 if error == 1:
281 raise NotAcquired("Lock %s is not acquired or it already expired." % self._name)
282 elif error == 2:
283 raise NotExpirable("Lock %s has no assigned expiration time" % self._name)
284 elif error:
285 raise RuntimeError("Unsupported error code %s from EXTEND script" % error)
286
287 @staticmethod
288 def _lock_renewer(lockref, interval, stop):
289 """
290 Renew the lock key in redis every `interval` seconds for as long
291 as `self._lock_renewal_thread.should_exit` is False.
292 """
293 while not stop.wait(timeout=interval):
294 loggers["refresh.thread.start"].debug("Refreshing lock")
295 lock = lockref()
296 if lock is None:
297 loggers["refresh.thread.stop"].debug(
298 "The lock no longer exists, stopping lock refreshing"
299 )
300 break
301 lock.extend(expire=lock._expire)
302 del lock
303 loggers["refresh.thread.exit"].debug("Exit requested, stopping lock refreshing")
304
305 def _start_lock_renewer(self):
306 """
307 Starts the lock refresher thread.
308 """
309 if self._lock_renewal_thread is not None:
310 raise AlreadyStarted("Lock refresh thread already started")
311
312 loggers["refresh.start"].debug(
313 "Starting thread to refresh lock every %s seconds",
314 self._lock_renewal_interval
315 )
316 self._lock_renewal_stop = threading.Event()
317 self._lock_renewal_thread = threading.Thread(
318 group=None,
319 target=self._lock_renewer,
320 kwargs={'lockref': weakref.ref(self),
321 'interval': self._lock_renewal_interval,
322 'stop': self._lock_renewal_stop}
323 )
324 self._lock_renewal_thread.setDaemon(True)
325 self._lock_renewal_thread.start()
326
327 def _stop_lock_renewer(self):
328 """
329 Stop the lock renewer.
330
331 This signals the renewal thread and waits for its exit.
332 """
333 if self._lock_renewal_thread is None or not self._lock_renewal_thread.is_alive():
334 return
335 loggers["refresh.shutdown"].debug("Signalling the lock refresher to stop")
336 self._lock_renewal_stop.set()
337 self._lock_renewal_thread.join()
338 self._lock_renewal_thread = None
339 loggers["refresh.exit"].debug("Lock refresher has stopped")
340
341 def __enter__(self):
342 acquired = self.acquire(blocking=True)
343 assert acquired, "Lock wasn't acquired, but blocking=True"
344 return self
345
346 def __exit__(self, exc_type=None, exc_value=None, traceback=None):
347 self.release()
348
349 def release(self):
350 """Releases the lock, that was acquired with the same object.
351
352 .. note::
353
354 If you want to release a lock that you acquired in a different place you have two choices:
355
356 * Use ``Lock("name", id=id_from_other_place).release()``
357 * Use ``Lock("name").reset()``
358 """
359 if self._lock_renewal_thread is not None:
360 self._stop_lock_renewer()
361 loggers["release"].debug("Releasing %r.", self._name)
362 error = self.unlock_script(client=self._client, keys=(self._name, self._signal), args=(self._id, self._signal_expire))
363 if error == 1:
364 raise NotAcquired("Lock %s is not acquired or it already expired." % self._name)
365 elif error:
366 raise RuntimeError("Unsupported error code %s from EXTEND script." % error)
367
368 def locked(self):
369 """
370 Return true if the lock is acquired.
371
372 Checks whether a lock with the same name already exists. This method returns true
373 even if the lock has a different id.
374 """
375 return self._client.exists(self._name) == 1
376
377
378 reset_all_script = None
379
380
381 def reset_all(redis_client):
382 """
383 Forcibly deletes all locks that remain (e.g. after a crash). Use this with care.
384
385 :param redis_client:
386 An instance of :class:`~StrictRedis`.
387 """
388 Lock.register_scripts(redis_client)
389
390 reset_all_script(client=redis_client) # noqa
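For orientation, a minimal usage sketch of the vendored Lock class above (the Redis connection settings are assumptions; the module is imported as redis_lock, matching the import used further down in this commit):

    from redis import StrictRedis
    import redis_lock  # the vendored module shown above

    client = StrictRedis(host='localhost', port=6379)  # assumed connection settings

    # With auto_renewal=True a daemon thread refreshes the key every expire*2/3
    # seconds, so the lock survives work that takes longer than `expire`.
    with redis_lock.Lock(client, 'example-resource', expire=60, auto_renewal=True):
        pass  # do the work guarded by the lock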
@@ -1,5 +1,6 @@
 [bumpversion]
-current_version = 4.25.0
+current_version = 4.25.2
 message = release: Bump version {current_version} to {new_version}
 
 [bumpversion:file:vcsserver/VERSION]
+
@@ -74,3 +74,6 @@ 179d989bcfe02c6227f9f6aa9236cbbe1c14c400
 383aee8b1652affaa26aefe336a89ee366b2b26d v4.23.2
 bc1a8141cc51fc23c455ebc50c6609c810b46f8d v4.24.0
 530a1c03caabc806ea1ef34605f8f67f18c70e55 v4.24.1
+5908ae65cee1043982e1b26d7b618af5fcfebbb3 v4.25.0
+cce8bcdf75090d5943a1e9706fe5212d7b5d1fa1 v4.25.1
+8610c4bf846c63bbc95d3ddfb53fadaaa9c7aa42 v4.25.2
@@ -784,7 +784,7 @@ self: super: {
     };
   };
   "rhodecode-vcsserver" = super.buildPythonPackage {
-    name = "rhodecode-vcsserver-4.25.0";
+    name = "rhodecode-vcsserver-4.25.2";
     buildInputs = [
       self."pytest"
       self."py"
@@ -1,1 +1,1 @@
-4.25.0
\ No newline at end of file
+4.25.2
\ No newline at end of file
@@ -609,6 +609,7 @@ class HgRemote(RemoteBase):
     @reraise_safe_exceptions
     def lookup(self, wire, revision, both):
         cache_on, context_uid, repo_id = self._cache_on(wire)
+
         @self.region.conditional_cache_on_arguments(condition=cache_on)
         def _lookup(_context_uid, _repo_id, _revision, _both):
 
@@ -67,7 +67,7 @@ from mercurial.url import httpbasicauthh
 def get_ctx(repo, ref):
     try:
         ctx = repo[ref]
-    except ProgrammingError:
+    except (ProgrammingError, TypeError):
         # we're unable to find the rev using a regular lookup, we fallback
         # to slower, but backward compat revsymbol usage
         ctx = revsymbol(repo, ref)
@@ -433,7 +433,7 @@ class HTTPApplication(object):
             should_store_exc = False
 
         if should_store_exc:
-            store_exception(id(exc_info), exc_info)
+            store_exception(id(exc_info), exc_info, request_path=request.path)
 
         tb_info = ''.join(
             traceback.format_exception(exc_type, exc_value, exc_traceback))
@@ -452,6 +452,7 @@ class HTTPApplication(object):
                 'type': type_
             }
         }
+
         try:
             resp['error']['_vcs_kind'] = getattr(e, '_vcs_kind', None)
         except AttributeError:
@@ -68,7 +68,7 @@ def get_exc_store():
     return _exc_store_path
 
 
-def _store_exception(exc_id, exc_info, prefix):
+def _store_exception(exc_id, exc_info, prefix, request_path=''):
    exc_type, exc_value, exc_traceback = exc_info
 
    tb = ''.join(traceback.format_exception(
@@ -101,8 +101,13 @@ def _store_exception(exc_id, exc_info, p
         f.write(exc_data)
     log.debug('Stored generated exception %s as: %s', exc_id, stored_exc_path)
 
+    log.error(
+        'error occurred handling this request.\n'
+        'Path: `%s`, tb: %s',
+        request_path, tb)
 
-def store_exception(exc_id, exc_info, prefix=global_prefix):
+
+def store_exception(exc_id, exc_info, prefix=global_prefix, request_path=''):
     """
     Example usage::
 
@@ -111,7 +116,8 @@ def store_exception(exc_id, exc_info, pr
     """
 
     try:
-        _store_exception(exc_id=exc_id, exc_info=exc_info, prefix=prefix)
+        _store_exception(exc_id=exc_id, exc_info=exc_info, prefix=prefix,
+                         request_path=request_path)
     except Exception:
         log.exception('Failed to store exception `%s` information', exc_id)
         # there's no way this can fail, it will crash server badly if it does.
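A brief, hedged sketch of how the extended store_exception() signature might be used by calling code (the handler and the module path below are illustrative assumptions; only the function signature comes from the diff):

    import sys

    from vcsserver.lib.exc_tracking import store_exception  # module path assumed

    def handle_error(request):
        try:
            raise RuntimeError('boom')  # placeholder for the real failing call
        except Exception:
            exc_info = sys.exc_info()
            # request_path is forwarded to _store_exception(), which now logs it
            # next to the stored traceback.
            store_exception(id(exc_info), exc_info, request_path=request.path)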
@@ -124,7 +124,14 @@ class FileNamespaceBackend(PickleSeriali
 
     def __init__(self, arguments):
         arguments['lock_factory'] = CustomLockFactory
-        super(FileNamespaceBackend, self).__init__(arguments)
+        db_file = arguments.get('filename')
+
+        log.debug('initialing %s DB in %s', self.__class__.__name__, db_file)
+        try:
+            super(FileNamespaceBackend, self).__init__(arguments)
+        except Exception:
+            log.error('Failed to initialize db at: %s', db_file)
+            raise
 
     def __repr__(self):
         return '{} `{}`'.format(self.__class__, self.filename)
@@ -141,13 +148,16 @@ class FileNamespaceBackend(PickleSeriali
             return False
 
         with self._dbm_file(True) as dbm:
-
-            return filter(cond, dbm.keys())
+            try:
+                return filter(cond, dbm.keys())
+            except Exception:
+                log.error('Failed to fetch DBM keys from DB: %s', self.get_store())
+                raise
 
     def get_store(self):
         return self.filename
 
-    def get(self, key):
+    def _dbm_get(self, key):
         with self._dbm_file(False) as dbm:
             if hasattr(dbm, 'get'):
                 value = dbm.get(key, NO_VALUE)
@@ -161,6 +171,13 @@ class FileNamespaceBackend(PickleSeriali
                 value = self._loads(value)
             return value
 
+    def get(self, key):
+        try:
+            return self._dbm_get(key)
+        except Exception:
+            log.error('Failed to fetch DBM key %s from DB: %s', key, self.get_store())
+            raise
+
     def set(self, key, value):
         with self._dbm_file(True) as dbm:
             dbm[key] = self._dumps(value)
@@ -234,11 +251,17 @@ class BaseRedisBackend(redis_backend.Red
             pipe.execute()
 
     def get_mutex(self, key):
-        u = redis_backend.u
         if self.distributed_lock:
-            lock_key = u('_lock_{0}').format(key)
+            lock_key = redis_backend.u('_lock_{0}').format(key)
             log.debug('Trying to acquire Redis lock for key %s', lock_key)
-            return self.client.lock(lock_key, self.lock_timeout, self.lock_sleep)
+
+            auto_renewal = True
+            lock_timeout = self.lock_timeout
+            if auto_renewal and not self.lock_timeout:
+                # set default timeout for auto_renewal
+                lock_timeout = 10
+            return get_mutex_lock(self.client, lock_key, lock_timeout,
+                                  auto_renewal=auto_renewal)
         else:
             return None
 
@@ -251,3 +274,34 @@ class RedisPickleBackend(PickleSerialize
 class RedisMsgPackBackend(MsgPackSerializer, BaseRedisBackend):
     key_prefix = 'redis_msgpack_backend'
     pass
+
+
+def get_mutex_lock(client, lock_key, lock_timeout, auto_renewal=False):
+    import redis_lock
+
+    class _RedisLockWrapper(object):
+        """LockWrapper for redis_lock"""
+
+        def __init__(self):
+            pass
+
+        @property
+        def lock(self):
+            return redis_lock.Lock(
+                redis_client=client,
+                name=lock_key,
+                expire=lock_timeout,
+                auto_renewal=auto_renewal,
+                strict=True,
+            )
+
+        def acquire(self, wait=True):
+            return self.lock.acquire(wait)
+
+        def release(self):
+            try:
+                self.lock.release()
+            except redis_lock.NotAcquired:
+                pass
+
+    return _RedisLockWrapper()
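Finally, a small sketch of the wrapper in isolation: get_mutex_lock() returns an object exposing acquire()/release(), the mutex interface dogpile.cache expects from get_mutex(). The import path and client setup below are assumptions for illustration, not part of the diff:

    from redis import StrictRedis

    from vcsserver.lib.rc_cache.backends import get_mutex_lock  # path assumed

    client = StrictRedis()  # assumed local Redis
    mutex = get_mutex_lock(client, '_lock_mykey', lock_timeout=10, auto_renewal=True)

    if mutex.acquire(wait=True):
        try:
            pass  # work that should run under the distributed lock
        finally:
            mutex.release()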