# HG changeset patch
# User Marcin Kuzminski
# Date 2020-01-08 12:48:45
# Node ID 8ec05ec7408966d47bb281b3132db66ab6051cc6
# Parent  fb0610219174da6fe56eacd7cf4d8c257683887a
# Parent  07f610ae2bd6bd1f536d0de6f1a4c11add414a55
release: Merge default into stable for release preparation

diff --git a/.bumpversion.cfg b/.bumpversion.cfg
--- a/.bumpversion.cfg
+++ b/.bumpversion.cfg
@@ -1,5 +1,5 @@
 [bumpversion]
-current_version = 4.17.4
+current_version = 4.18.0
 message = release: Bump version {current_version} to {new_version}
 
 [bumpversion:file:vcsserver/VERSION]
diff --git a/.release.cfg b/.release.cfg
--- a/.release.cfg
+++ b/.release.cfg
@@ -5,12 +5,10 @@ done = false
 done = true
 
 [task:fixes_on_stable]
-done = true
 
 [task:pip2nix_generated]
-done = true
 
 [release]
-state = prepared
-version = 4.17.4
+state = in_progress
+version = 4.18.0
diff --git a/configs/development.ini b/configs/development.ini
--- a/configs/development.ini
+++ b/configs/development.ini
@@ -1,50 +1,200 @@
-################################################################################
-# RhodeCode VCSServer with HTTP Backend - configuration #
-################################################################################
+## -*- coding: utf-8 -*-
+; #################################
+; RHODECODE VCSSERVER CONFIGURATION
+; #################################
 
 [server:main]
-## COMMON ##
+; COMMON HOST/IP CONFIG
 host = 0.0.0.0
 port = 9900
 
+; ##################################################
+; WAITRESS WSGI SERVER - Recommended for Development
+; ##################################################
+
+; use server type
 use = egg:waitress#main
 
+; number of worker threads
+threads = 5
+
+; MAX BODY SIZE 100GB
+max_request_body_size = 107374182400
+
+; Use poll instead of select, fixes file descriptors limits problems.
+; May not work on old windows systems.
+asyncore_use_poll = true
+
+
+; ###########################
+; GUNICORN APPLICATION SERVER
+; ###########################
+
+; run with gunicorn --log-config rhodecode.ini --paste rhodecode.ini
+
+; Module to use, this setting shouldn't be changed
+#use = egg:gunicorn#main
+
+; Sets the number of process workers. More workers means more concurrent connections
+; RhodeCode can handle at the same time. Each additional worker also increases
+; memory usage, as each has its own set of caches.
+; Recommended value is (2 * NUMBER_OF_CPUS + 1), eg 2CPU = 5 workers, but no more
+; than 8-10 unless for really big deployments, e.g. 700-1000 users.
+; `instance_id = *` must be set in the [app:main] section below (which is the default)
+; when using more than 1 worker.
+#workers = 2
+
+; Gunicorn access log level
+#loglevel = info
+
+; Process name visible in process list
+#proc_name = rhodecode_vcsserver
+
+; Type of worker class, one of `sync`, `gevent`
+; currently `sync` is the only option allowed.
+#worker_class = sync
+
+; The maximum number of simultaneous clients. Valid only for gevent
+#worker_connections = 10
+
+; Max number of requests that worker will handle before being gracefully restarted.
+; Prevents memory leaks, jitter adds variability so not all workers are restarted at once.
+#max_requests = 1000
+#max_requests_jitter = 30
+
+; Amount of time a worker can spend handling a request before it
+; gets killed and restarted. By default set to 21600 (6hrs)
+; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
+#timeout = 21600
+
+; The maximum size of HTTP request line in bytes.
+; 0 for unlimited
+#limit_request_line = 0
+
+; Limit the number of HTTP headers fields in a request.
+; By default this value is 100 and can't be larger than 32768.
+#limit_request_fields = 32768
+
+; Limit the allowed size of an HTTP request header field.
+; Value is a positive number or 0.
+; Setting it to 0 will allow unlimited header field sizes.
+#limit_request_field_size = 0
+
+; Timeout for graceful workers restart.
+; After receiving a restart signal, workers have this much time to finish
+; serving requests. Workers still alive after the timeout (starting from the
+; receipt of the restart signal) are force killed.
+; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
+#graceful_timeout = 3600
+
+# The number of seconds to wait for requests on a Keep-Alive connection.
+# Generally set in the 1-5 seconds range.
+#keepalive = 2
+
+; Maximum memory usage that each worker can use before it will receive a
+; graceful restart signal. 0 = memory monitoring is disabled
+; Examples: 268435456 (256MB), 536870912 (512MB)
+; 1073741824 (1GB), 2147483648 (2GB), 4294967296 (4GB)
+#memory_max_usage = 0
+
+; How often in seconds to check for memory usage for each gunicorn worker
+#memory_usage_check_interval = 60
+
+; Threshold value for which we don't recycle worker if GarbageCollection
+; frees up enough resources. Before each restart we try to run GC on worker
+; in case we get enough free memory after that, restart will not happen.
+#memory_usage_recovery_threshold = 0.8
+
 [app:main]
+; The %(here)s variable will be replaced with the absolute path of parent directory
+; of this file
 use = egg:rhodecode-vcsserver
-pyramid.default_locale_name = en
+
+; #############
+; DEBUG OPTIONS
+; #############
+
+# During development we want to have the debug toolbar enabled
 pyramid.includes =
+    pyramid_debugtoolbar
 
-## default locale used by VCS systems
+debugtoolbar.hosts = 0.0.0.0/0
+debugtoolbar.exclude_prefixes =
+    /css
+    /fonts
+    /images
+    /js
+
+; #################
+; END DEBUG OPTIONS
+; #################
+
+; Pyramid default locales, we need this to be set
+pyramid.default_locale_name = en
+
+; default locale used by VCS systems
 locale = en_US.UTF-8
-
-## path to binaries for vcsserver, it should be set by the installer
-## at installation time, e.g /home/user/vcsserver-1/profile/bin
+; path to binaries for vcsserver, it should be set by the installer
+; at installation time, e.g /home/user/vcsserver-1/profile/bin
+; it can also be a path to nix-build output in case of development
 core.binary_dir = ""
 
-## Custom exception store path, defaults to TMPDIR
-## This is used to store exception from RhodeCode in shared directory
+; Custom exception store path, defaults to TMPDIR
+; This is used to store exceptions from RhodeCode in a shared directory
 #exception_tracker.store_path =
 
-## Default cache dir for caches. Putting this into a ramdisk
-## can boost performance, eg. /tmpfs/data_ramdisk, however this directory might require
-## large amount of space
-cache_dir = %(here)s/rcdev/data
+; #############
+; DOGPILE CACHE
+; #############
+
+; Default cache dir for caches. Putting this into a ramdisk can boost performance.
+; eg. /tmpfs/data_ramdisk, however this directory might require large amount of space
+cache_dir = %(here)s/data
+
+; ***************************************
+; `repo_object` cache, default file based
+; ***************************************
+
+; `repo_object` cache settings for vcs methods for repositories
+rc_cache.repo_object.backend = dogpile.cache.rc.file_namespace
+
+; cache auto-expires after N seconds
+; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
+rc_cache.repo_object.expiration_time = 2592000
+
+; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
+#rc_cache.repo_object.arguments.filename = /tmp/vcsserver_cache.db
 
-## cache region for storing repo_objects cache
-rc_cache.repo_object.backend = dogpile.cache.rc.memory_lru
-## cache auto-expires after N seconds
-rc_cache.repo_object.expiration_time = 300
-## max size of LRU, old values will be discarded if the size of cache reaches max_size
-rc_cache.repo_object.max_size = 100
+; ***********************************************************
+; `repo_object` cache with redis backend
+; recommended for larger instances, and for better performance
+; ***********************************************************
+
+; `repo_object` cache settings for vcs methods for repositories
+#rc_cache.repo_object.backend = dogpile.cache.rc.redis_msgpack
+
+; cache auto-expires after N seconds
+; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
+#rc_cache.repo_object.expiration_time = 2592000
+
+; redis_expiration_time needs to be greater than expiration_time
+#rc_cache.repo_object.arguments.redis_expiration_time = 3592000
+
+#rc_cache.repo_object.arguments.host = localhost
+#rc_cache.repo_object.arguments.port = 6379
+#rc_cache.repo_object.arguments.db = 5
+#rc_cache.repo_object.arguments.socket_timeout = 30
+; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
+#rc_cache.repo_object.arguments.distributed_lock = true
 
-################################
-### LOGGING CONFIGURATION   ####
-################################
+; #####################
+; LOGGING CONFIGURATION
+; #####################
 
 [loggers]
 keys = root, vcsserver
 
@@ -54,9 +204,9 @@ keys = console
 [formatters]
 keys = generic
 
-#############
-## LOGGERS ##
-#############
+; #######
+; LOGGERS
+; #######
 [logger_root]
 level = NOTSET
 handlers = console
 
@@ -68,19 +218,19 @@ qualname = vcsserver
 propagate = 1
 
-##############
-## HANDLERS ##
-##############
+; ########
+; HANDLERS
+; ########
 
 [handler_console]
 class = StreamHandler
-args = (sys.stderr,)
+args = (sys.stderr, )
 level = DEBUG
 formatter = generic
 
-################
-## FORMATTERS ##
-################
+; ##########
+; FORMATTERS
+; ##########
 
 [formatter_generic]
 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
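The `rc_cache.repo_object` settings above map onto a dogpile.cache region. As a rough illustration of what the commented-out redis block configures (not RhodeCode code, and using the stock `dogpile.cache.redis` backend in place of RhodeCode's own msgpack-serializing `dogpile.cache.rc.redis_msgpack`), the equivalent region setup in Python looks like this:

    from dogpile.cache import make_region

    region = make_region().configure(
        'dogpile.cache.redis',
        expiration_time=2592000,  # cache auto-expires after 30 days
        arguments={
            'host': 'localhost',
            'port': 6379,
            'db': 5,
            'socket_timeout': 30,
            'redis_expiration_time': 3592000,  # must stay greater than expiration_time
            'distributed_lock': True,  # coordinate the dogpile lock through redis
        })

    @region.cache_on_arguments()
    def repo_objects(repo_id):
        return {'repo_id': repo_id}  # stand-in for an expensive VCS call

`distributed_lock = true` matters as soon as several gunicorn workers share the region: the dogpile lock must then live in redis rather than in a per-process mutex.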
diff --git a/configs/gunicorn_config.py b/configs/gunicorn_config.py
--- a/configs/gunicorn_config.py
+++ b/configs/gunicorn_config.py
@@ -1,58 +1,26 @@
 """
-gunicorn config extension and hooks. Sets additional configuration that is
-available post the .ini config.
-
-- workers = ${cpu_number}
-- threads = 1
-- proc_name = ${gunicorn_proc_name}
-- worker_class = sync
-- worker_connections = 10
-- max_requests = 1000
-- max_requests_jitter = 30
-- timeout = 21600
-
+Gunicorn config extension and hooks. This config file adds some extra settings and memory management.
+Gunicorn configuration should be managed by .ini file entries of RhodeCode or VCSServer
 """
 
-import multiprocessing
+import gc
+import os
 import sys
+import math
 import time
-import datetime
 import threading
 import traceback
+import random
 from gunicorn.glogging import Logger
 
 
+def get_workers():
+    import multiprocessing
+    return multiprocessing.cpu_count() * 2 + 1
+
+
 # GLOBAL
 errorlog = '-'
 accesslog = '-'
-loglevel = 'debug'
-
-# SECURITY
-
-# The maximum size of HTTP request line in bytes.
-# 0 for unlimited
-limit_request_line = 0
-
-# Limit the number of HTTP headers fields in a request.
-# By default this value is 100 and can't be larger than 32768.
-limit_request_fields = 10240
-
-# Limit the allowed size of an HTTP request header field.
-# Value is a positive number or 0.
-# Setting it to 0 will allow unlimited header field sizes.
-limit_request_field_size = 0
-
-
-# Timeout for graceful workers restart.
-# After receiving a restart signal, workers have this much time to finish
-# serving requests. Workers still alive after the timeout (starting from the
-# receipt of the restart signal) are force killed.
-graceful_timeout = 30
-
-
-# The number of seconds to wait for requests on a Keep-Alive connection.
-# Generally set in the 1-5 seconds range.
-keepalive = 2
 
 
 # SERVER MECHANICS
@@ -63,38 +31,178 @@ tmp_upload_dir = None
 
 # Custom log format
 access_log_format = (
-    '%(t)s [%(p)-8s] GNCRN %(h)-15s rqt:%(L)s %(s)s %(b)-6s "%(m)s:%(U)s %(q)s" usr:%(u)s "%(f)s" "%(a)s"')
+    '%(t)s %(p)s INFO [GNCRN] %(h)-15s rqt:%(L)s %(s)s %(b)-6s "%(m)s:%(U)s %(q)s" usr:%(u)s "%(f)s" "%(a)s"')
 
 # self adjust workers based on CPU count
-# workers = multiprocessing.cpu_count() * 2 + 1
+# workers = get_workers()
+
+
+def _get_process_rss(pid=None):
+    try:
+        import psutil
+        if pid:
+            proc = psutil.Process(pid)
+        else:
+            proc = psutil.Process()
+        return proc.memory_info().rss
+    except Exception:
+        return None
 
 
-def post_fork(server, worker):
-    server.log.info("[<%-10s>] WORKER spawned", worker.pid)
+def _get_config(ini_path):
+
+    try:
+        import configparser
+    except ImportError:
+        import ConfigParser as configparser
+    try:
+        config = configparser.RawConfigParser()
+        config.read(ini_path)
+        return config
+    except Exception:
+        return None
+
+
+def _time_with_offset(memory_usage_check_interval):
+    return time.time() - random.randint(0, memory_usage_check_interval/2.0)
 
 
 def pre_fork(server, worker):
    pass
 
 
+def post_fork(server, worker):
+
+    # memory spec defaults
+    _memory_max_usage = 0
+    _memory_usage_check_interval = 60
+    _memory_usage_recovery_threshold = 0.8
+
+    ini_path = os.path.abspath(server.cfg.paste)
+    conf = _get_config(ini_path)
+
+    section = 'server:main'
+    if conf and conf.has_section(section):
+
+        if conf.has_option(section, 'memory_max_usage'):
+            _memory_max_usage = conf.getint(section, 'memory_max_usage')
+
+        if conf.has_option(section, 'memory_usage_check_interval'):
+            _memory_usage_check_interval = conf.getint(section, 'memory_usage_check_interval')
+
+        if conf.has_option(section, 'memory_usage_recovery_threshold'):
+            _memory_usage_recovery_threshold = conf.getfloat(section, 'memory_usage_recovery_threshold')
+
+    worker._memory_max_usage = _memory_max_usage
+    worker._memory_usage_check_interval = _memory_usage_check_interval
+    worker._memory_usage_recovery_threshold = _memory_usage_recovery_threshold
+
+    # register memory last check time, with some random offset so we don't recycle all
+    # at once
+    worker._last_memory_check_time = _time_with_offset(_memory_usage_check_interval)
+
+    if _memory_max_usage:
+        server.log.info("[%-10s] WORKER spawned with max memory set at %s", worker.pid,
+                        _format_data_size(_memory_max_usage))
+    else:
+        server.log.info("[%-10s] WORKER spawned", worker.pid)
+
 
 def pre_exec(server):
     server.log.info("Forked child, re-executing.")
 
 
 def on_starting(server):
-    server.log.info("Server is starting.")
+    server_lbl = '{} {}'.format(server.proc_name, server.address)
+    server.log.info("Server %s is starting.", server_lbl)
 
 
 def when_ready(server):
-    server.log.info("Server is ready. Spawning workers")
+    server.log.info("Server %s is ready. Spawning workers", server)
 
 
 def on_reload(server):
     pass
 
 
+def _format_data_size(size, unit="B", precision=1, binary=True):
+    """Format a number using SI units (kilo, mega, etc.).
+
+    ``size``: The number as a float or int.
+
+    ``unit``: The unit name in plural form. Examples: "bytes", "B".
+
+    ``precision``: How many digits to the right of the decimal point. Default
+    is 1. 0 suppresses the decimal point.
+
+    ``binary``: If false, use base-10 decimal prefixes (kilo = K = 1000).
+    If true, use base-2 binary prefixes (kibi = Ki = 1024).
+
+    """
+
+    if not binary:
+        base = 1000
+        multiples = ('', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
+    else:
+        base = 1024
+        multiples = ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi')
+
+    sign = ""
+    if size > 0:
+        m = int(math.log(size, base))
+    elif size < 0:
+        sign = "-"
+        size = -size
+        m = int(math.log(size, base))
+    else:
+        m = 0
+    if m > 8:
+        m = 8
+
+    if m == 0:
+        precision = '%.0f'
+    else:
+        precision = '%%.%df' % precision
+
+    size = precision % (size / math.pow(base, m))
+
+    return '%s%s %s%s' % (sign, size.strip(), multiples[m], unit)
+
+
+def _check_memory_usage(worker):
+    memory_max_usage = worker._memory_max_usage
+    if not memory_max_usage:
+        return
+
+    memory_usage_check_interval = worker._memory_usage_check_interval
+    memory_usage_recovery_threshold = memory_max_usage * worker._memory_usage_recovery_threshold
+
+    elapsed = time.time() - worker._last_memory_check_time
+    if elapsed > memory_usage_check_interval:
+        mem_usage = _get_process_rss()
+        if mem_usage and mem_usage > memory_max_usage:
+            worker.log.info(
+                "memory usage %s > %s, forcing gc",
+                _format_data_size(mem_usage), _format_data_size(memory_max_usage))
+            # Try to clean it up by forcing a full collection.
+            gc.collect()
+            mem_usage = _get_process_rss()
+            if mem_usage > memory_usage_recovery_threshold:
+                # Didn't clean up enough, we'll have to terminate.
+                worker.log.warning(
+                    "memory usage %s > %s after gc, quitting",
+                    _format_data_size(mem_usage), _format_data_size(memory_max_usage))
+                # This will cause worker to auto-restart itself
+                worker.alive = False
+        worker._last_memory_check_time = time.time()
+
 
 def worker_int(worker):
-    worker.log.info("[<%-10s>] worker received INT or QUIT signal", worker.pid)
+    worker.log.info("[%-10s] worker received INT or QUIT signal", worker.pid)
 
     # get traceback info, on worker crash
     id2name = dict([(th.ident, th.name) for th in threading.enumerate()])
@@ -110,15 +218,15 @@ def worker_int(worker):
 
 
 def worker_abort(worker):
-    worker.log.info("[<%-10s>] worker received SIGABRT signal", worker.pid)
+    worker.log.info("[%-10s] worker received SIGABRT signal", worker.pid)
 
 
 def worker_exit(server, worker):
-    worker.log.info("[<%-10s>] worker exit", worker.pid)
+    worker.log.info("[%-10s] worker exit", worker.pid)
 
 
 def child_exit(server, worker):
-    worker.log.info("[<%-10s>] worker child exit", worker.pid)
+    worker.log.info("[%-10s] worker child exit", worker.pid)
 
 
 def pre_request(worker, req):
@@ -129,9 +237,12 @@ def pre_request(worker, req):
 def post_request(worker, req, environ, resp):
     total_time = time.time() - worker.start_time
+    # Gunicorn sometimes has problems with reading the status_code
+    status_code = getattr(resp, 'status_code', '')
     worker.log.debug(
-        "GNCRN POST WORKER [cnt:%s]: %s %s resp: %s, Load Time: %.3fs",
-        worker.nr, req.method, req.path, resp.status_code, total_time)
+        "GNCRN POST WORKER [cnt:%s]: %s %s resp: %s, Load Time: %.4fs",
+        worker.nr, req.method, req.path, status_code, total_time)
+    _check_memory_usage(worker)
 
 
 class RhodeCodeLogger(Logger):
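The hooks above implement the memory monitoring that the new ini comments advertise. A worked illustration of the recycle decision in `_check_memory_usage()`, assuming `memory_max_usage = 1073741824` (1GB) and `memory_usage_recovery_threshold = 0.8`:

    memory_max_usage = 1073741824                     # 1GB ceiling from the ini
    recovery_threshold = int(memory_max_usage * 0.8)  # 858993459 bytes, ~819.2 MiB

    rss = 1181116006                # assumed worker RSS (~1.1 GiB) read via psutil
    assert rss > memory_max_usage   # over the ceiling, so gc.collect() is forced

    rss_after_gc = 900000000        # assume gc freed roughly 280 MB
    assert rss_after_gc > recovery_threshold  # still above 80%: worker.alive = False
    # had gc pushed RSS below ~819.2 MiB, the worker would have kept running

    # _format_data_size() renders the values for the log lines, e.g.
    #   _format_data_size(1073741824) -> '1.0 GiB'
    #   _format_data_size(858993459)  -> '819.2 MiB'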
diff --git a/configs/production.ini b/configs/production.ini
--- a/configs/production.ini
+++ b/configs/production.ini
@@ -1,71 +1,163 @@
-################################################################################
-# RhodeCode VCSServer with HTTP Backend - configuration #
-################################################################################
+## -*- coding: utf-8 -*-
+; #################################
+; RHODECODE VCSSERVER CONFIGURATION
+; #################################
 
 [server:main]
-## COMMON ##
 host = 127.0.0.1
 port = 9900
 
-##########################
-## GUNICORN WSGI SERVER ##
-##########################
-## run with gunicorn --log-config vcsserver.ini --paste vcsserver.ini
+; ###########################
+; GUNICORN APPLICATION SERVER
+; ###########################
+
+; run with gunicorn --log-config rhodecode.ini --paste rhodecode.ini
+
+; Module to use, this setting shouldn't be changed
 use = egg:gunicorn#main
-## Sets the number of process workers. Recommended
-## value is (2 * NUMBER_OF_CPUS + 1), eg 2CPU = 5 workers
+
+; Sets the number of process workers. More workers means more concurrent connections
+; RhodeCode can handle at the same time. Each additional worker also increases
+; memory usage, as each has its own set of caches.
+; Recommended value is (2 * NUMBER_OF_CPUS + 1), eg 2CPU = 5 workers, but no more
+; than 8-10 unless for really big deployments, e.g. 700-1000 users.
+; `instance_id = *` must be set in the [app:main] section below (which is the default)
+; when using more than 1 worker.
 workers = 2
-## process name
+
+; Gunicorn access log level
+loglevel = info
+
+; Process name visible in process list
 proc_name = rhodecode_vcsserver
-## type of worker class, currently `sync` is the only option allowed.
+
+; Type of worker class, one of `sync`, `gevent`
+; currently `sync` is the only option allowed.
 worker_class = sync
-## The maximum number of simultaneous clients. Valid only for Gevent
-#worker_connections = 10
-## max number of requests that worker will handle before being gracefully
-## restarted, could prevent memory leaks
+
+; The maximum number of simultaneous clients. Valid only for gevent
+worker_connections = 10
+
+; Max number of requests that worker will handle before being gracefully restarted.
+; Prevents memory leaks, jitter adds variability so not all workers are restarted at once.
 max_requests = 1000
 max_requests_jitter = 30
-## amount of time a worker can spend with handling a request before it
-## gets killed and restarted. Set to 6hrs
+
+; Amount of time a worker can spend handling a request before it
+; gets killed and restarted. By default set to 21600 (6hrs)
+; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
 timeout = 21600
 
+; The maximum size of HTTP request line in bytes.
+; 0 for unlimited
+limit_request_line = 0
+
+; Limit the number of HTTP headers fields in a request.
+; By default this value is 100 and can't be larger than 32768.
+limit_request_fields = 32768
+
+; Limit the allowed size of an HTTP request header field.
+; Value is a positive number or 0.
+; Setting it to 0 will allow unlimited header field sizes.
+limit_request_field_size = 0
+
+; Timeout for graceful workers restart.
+; After receiving a restart signal, workers have this much time to finish
+; serving requests. Workers still alive after the timeout (starting from the
+; receipt of the restart signal) are force killed.
+; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
+graceful_timeout = 3600
+
+# The number of seconds to wait for requests on a Keep-Alive connection.
+# Generally set in the 1-5 seconds range.
+keepalive = 2
+
+; Maximum memory usage that each worker can use before it will receive a
+; graceful restart signal. 0 = memory monitoring is disabled
+; Examples: 268435456 (256MB), 536870912 (512MB)
+; 1073741824 (1GB), 2147483648 (2GB), 4294967296 (4GB)
+memory_max_usage = 0
+
+; How often in seconds to check for memory usage for each gunicorn worker
+memory_usage_check_interval = 60
+
+; Threshold value for which we don't recycle worker if GarbageCollection
+; frees up enough resources. Before each restart we try to run GC on worker
+; in case we get enough free memory after that, restart will not happen.
+memory_usage_recovery_threshold = 0.8
+
 
 [app:main]
+; The %(here)s variable will be replaced with the absolute path of parent directory
+; of this file
 use = egg:rhodecode-vcsserver
 
+; Pyramid default locales, we need this to be set
 pyramid.default_locale_name = en
-pyramid.includes =
 
-## default locale used by VCS systems
+; default locale used by VCS systems
 locale = en_US.UTF-8
-
-## path to binaries for vcsserver, it should be set by the installer
-## at installation time, e.g /home/user/vcsserver-1/profile/bin
+; path to binaries for vcsserver, it should be set by the installer
+; at installation time, e.g /home/user/vcsserver-1/profile/bin
+; it can also be a path to nix-build output in case of development
 core.binary_dir = ""
 
-## Custom exception store path, defaults to TMPDIR
-## This is used to store exception from RhodeCode in shared directory
+; Custom exception store path, defaults to TMPDIR
+; This is used to store exceptions from RhodeCode in a shared directory
 #exception_tracker.store_path =
 
-## Default cache dir for caches. Putting this into a ramdisk
-## can boost performance, eg. /tmpfs/data_ramdisk, however this directory might require
-## large amount of space
-cache_dir = %(here)s/rcdev/data
+; #############
+; DOGPILE CACHE
+; #############
+
+; Default cache dir for caches. Putting this into a ramdisk can boost performance.
+; eg. /tmpfs/data_ramdisk, however this directory might require large amount of space
+cache_dir = %(here)s/data
+
+; ***************************************
+; `repo_object` cache, default file based
+; ***************************************
+
+; `repo_object` cache settings for vcs methods for repositories
+rc_cache.repo_object.backend = dogpile.cache.rc.file_namespace
+
+; cache auto-expires after N seconds
+; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
+rc_cache.repo_object.expiration_time = 2592000
+
+; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
+#rc_cache.repo_object.arguments.filename = /tmp/vcsserver_cache.db
 
-## cache region for storing repo_objects cache
-rc_cache.repo_object.backend = dogpile.cache.rc.memory_lru
-## cache auto-expires after N seconds
-rc_cache.repo_object.expiration_time = 300
-## max size of LRU, old values will be discarded if the size of cache reaches max_size
-rc_cache.repo_object.max_size = 100
+; ***********************************************************
+; `repo_object` cache with redis backend
+; recommended for larger instances, and for better performance
+; ***********************************************************
+
+; `repo_object` cache settings for vcs methods for repositories
+#rc_cache.repo_object.backend = dogpile.cache.rc.redis_msgpack
+
+; cache auto-expires after N seconds
+; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
+#rc_cache.repo_object.expiration_time = 2592000
+
+; redis_expiration_time needs to be greater than expiration_time
+#rc_cache.repo_object.arguments.redis_expiration_time = 3592000
+
+#rc_cache.repo_object.arguments.host = localhost
+#rc_cache.repo_object.arguments.port = 6379
+#rc_cache.repo_object.arguments.db = 5
+#rc_cache.repo_object.arguments.socket_timeout = 30
+; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
+#rc_cache.repo_object.arguments.distributed_lock = true
 
-################################
-### LOGGING CONFIGURATION   ####
-################################
+; #####################
+; LOGGING CONFIGURATION
+; #####################
 
 [loggers]
 keys = root, vcsserver
 
@@ -75,9 +167,9 @@ keys = console
 [formatters]
 keys = generic
 
-#############
-## LOGGERS ##
-#############
+; #######
+; LOGGERS
+; #######
 [logger_root]
 level = NOTSET
 handlers = console
 
@@ -89,19 +181,19 @@ qualname = vcsserver
 propagate = 1
 
-##############
-## HANDLERS ##
-##############
+; ########
+; HANDLERS
+; ########
 
 [handler_console]
 class = StreamHandler
-args = (sys.stderr,)
-level = DEBUG
+args = (sys.stderr, )
+level = INFO
 formatter = generic
 
-################
-## FORMATTERS ##
-################
+; ##########
+; FORMATTERS
+; ##########
 
 [formatter_generic]
 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
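Both ini files get read twice at runtime: PasteDeploy builds the WSGI app from `[app:main]`, while `post_fork()` in gunicorn_config.py re-reads `[server:main]` for the memory settings. A quick pre-deployment sanity check that the file parses and `%(here)s` resolves (a sketch only, with a hypothetical path):

    from paste.deploy import appconfig, loadapp

    config_uri = 'config:/etc/rhodecode/production.ini'  # hypothetical location
    conf = appconfig(config_uri)
    print(conf['cache_dir'])   # %(here)s expanded to the ini file's directory
    app = loadapp(config_uri)  # fails loudly on a broken [app:main] section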
"https://www.kernel.org/pub/software/scm/git/git-2.19.2.tar.xz"; - sha256 = "1scbggzghkzzfqg4ky3qh7h9w87c3zya4ls5disz7dbx56is7sgw"; + url = "https://www.kernel.org/pub/software/scm/git/git-2.24.1.tar.xz"; + sha256 = "0ql5z31vgl7b785gwrf00m129mg7zi9pa65n12ij3mpxx3f28gvj"; }; # patches come from: https://github.com/NixOS/nixpkgs/tree/master/pkgs/applications/version-management/git-and-tools/git @@ -18,6 +19,29 @@ self: super: { }); + libgit2rc = super.lib.overrideDerivation super.libgit2 (oldAttrs: { + name = "libgit2-0.28.2"; + version = "0.28.2"; + + src = self.fetchFromGitHub { + owner = "libgit2"; + repo = "libgit2"; + rev = "v0.28.2"; + sha256 = "0cm8fvs05rj0baigs2133q5a0sm3pa234y8h6hmwhl2bz9xq3k4b"; + }; + + cmakeFlags = [ "-DTHREADSAFE=ON" "-DUSE_HTTPS=no"]; + + buildInputs = [ + super.zlib + super.libssh2 + super.openssl + super.curl + ]; + + + }); + # Override subversion derivation to # - activate python bindings subversion = @@ -29,10 +53,10 @@ self: super: { }; in super.lib.overrideDerivation subversionWithPython (oldAttrs: { - name = "subversion-1.10.2"; + name = "subversion-1.12.2"; src = self.fetchurl { - url = "https://archive.apache.org/dist/subversion/subversion-1.10.2.tar.gz"; - sha256 = "0xv5z2bg0lw7057g913yc13f60nfj257wvmsq22pr33m4syf26sg"; + url = "https://archive.apache.org/dist/subversion/subversion-1.12.2.tar.gz"; + sha256 = "1wr1pklnq67xdzmf237zj6l1hg43yshfkbxvpvd5sv6r0dk7v4pl"; }; ## use internal lz4/utf8proc because it is stable and shipped with SVN @@ -41,7 +65,7 @@ self: super: { " --with-utf8proc=internal" ]; - }); + } diff --git a/pkgs/patches/git/docbook2texi.patch b/pkgs/patches/git/docbook2texi.patch --- a/pkgs/patches/git/docbook2texi.patch +++ b/pkgs/patches/git/docbook2texi.patch @@ -3,9 +3,10 @@ and (2) make sure `gitman.info' isn't pr node names). diff --git a/Documentation/Makefile b/Documentation/Makefile +index 26a2342bea..ceccd67ebb 100644 --- a/Documentation/Makefile +++ b/Documentation/Makefile -@@ -122,7 +122,7 @@ +@@ -132,7 +132,7 @@ HTML_REPO = ../../git-htmldocs MAKEINFO = makeinfo INSTALL_INFO = install-info @@ -14,7 +15,7 @@ diff --git a/Documentation/Makefile b/Do DBLATEX = dblatex ASCIIDOC_DBLATEX_DIR = /etc/asciidoc/dblatex DBLATEX_COMMON = -p $(ASCIIDOC_DBLATEX_DIR)/asciidoc-dblatex.xsl -s $(ASCIIDOC_DBLATEX_DIR)/asciidoc-dblatex.sty -@@ -240,7 +240,7 @@ +@@ -250,7 +250,7 @@ man1: $(DOC_MAN1) man5: $(DOC_MAN5) man7: $(DOC_MAN7) @@ -23,7 +24,7 @@ diff --git a/Documentation/Makefile b/Do pdf: user-manual.pdf -@@ -256,10 +256,9 @@ +@@ -266,10 +266,9 @@ install-man: man install-info: info $(INSTALL) -d -m 755 $(DESTDIR)$(infodir) diff --git a/pkgs/patches/git/git-send-email-honor-PATH.patch b/pkgs/patches/git/git-send-email-honor-PATH.patch --- a/pkgs/patches/git/git-send-email-honor-PATH.patch +++ b/pkgs/patches/git/git-send-email-honor-PATH.patch @@ -1,7 +1,8 @@ diff --git a/Documentation/git-send-email.txt b/Documentation/git-send-email.txt +index 1afe9fc858..05dd7c3a90 100644 --- a/Documentation/git-send-email.txt +++ b/Documentation/git-send-email.txt -@@ -208,8 +208,7 @@ a password is obtained using 'git-credential'. +@@ -215,8 +215,7 @@ a password is obtained using 'git-credential'. specify a full pathname of a sendmail-like program instead; the program must support the `-i` option. 
 	Default value can be specified by the `sendemail.smtpServer` configuration
@@ -12,9 +13,10 @@ diff --git a/Documentation/git-send-emai
 
 	--smtp-server-port=<port>::
 
 diff --git a/git-send-email.perl b/git-send-email.perl
+index 8eb63b5a2f..74a61d8213 100755
 --- a/git-send-email.perl
 +++ b/git-send-email.perl
-@@ -944,8 +944,7 @@ if (defined $reply_to) {
+@@ -956,8 +956,7 @@ sub expand_one_alias {
 	}
 
 	if (!defined $smtp_server) {
diff --git a/pkgs/patches/git/git-sh-i18n.patch b/pkgs/patches/git/git-sh-i18n.patch
--- a/pkgs/patches/git/git-sh-i18n.patch
+++ b/pkgs/patches/git/git-sh-i18n.patch
@@ -1,94 +1,23 @@
+diff --git a/git-sh-i18n.sh b/git-sh-i18n.sh
+index e1d917fd27..e90f8e1414 100644
 --- a/git-sh-i18n.sh
 +++ b/git-sh-i18n.sh
-@@ -15,87 +15,11 @@
- fi
- export TEXTDOMAINDIR
- 
--# First decide what scheme to use...
--GIT_INTERNAL_GETTEXT_SH_SCHEME=fallthrough
--if test -n "$GIT_GETTEXT_POISON"
--then
--	GIT_INTERNAL_GETTEXT_SH_SCHEME=poison
--elif test -n "@@USE_GETTEXT_SCHEME@@"
--then
--	GIT_INTERNAL_GETTEXT_SH_SCHEME="@@USE_GETTEXT_SCHEME@@"
--elif test -n "$GIT_INTERNAL_GETTEXT_TEST_FALLBACKS"
--then
--	: no probing necessary
+@@ -26,7 +26,7 @@ then
+ elif test -n "$GIT_INTERNAL_GETTEXT_TEST_FALLBACKS"
+ then
+ 	: no probing necessary
 -elif type gettext.sh >/dev/null 2>&1
--then
--	# GNU libintl's gettext.sh
--	GIT_INTERNAL_GETTEXT_SH_SCHEME=gnu
--elif test "$(gettext -h 2>&1)" = "-h"
--then
--	# gettext binary exists but no gettext.sh. likely to be a gettext
--	# binary on a Solaris or something that is not GNU libintl and
--	# lack eval_gettext.
--	GIT_INTERNAL_GETTEXT_SH_SCHEME=gettext_without_eval_gettext
--fi
--export GIT_INTERNAL_GETTEXT_SH_SCHEME
--
--# ... and then follow that decision.
--case "$GIT_INTERNAL_GETTEXT_SH_SCHEME" in
--gnu)
--	# Use libintl's gettext.sh, or fall back to English if we can't.
++elif type @gettext@/bin/gettext.sh >/dev/null 2>&1
+ then
+ 	# GNU libintl's gettext.sh
+ 	GIT_INTERNAL_GETTEXT_SH_SCHEME=gnu
+@@ -43,7 +43,8 @@ export GIT_INTERNAL_GETTEXT_SH_SCHEME
+ case "$GIT_INTERNAL_GETTEXT_SH_SCHEME" in
+ gnu)
+ 	# Use libintl's gettext.sh, or fall back to English if we can't.
 -	. gettext.sh
--	;;
--gettext_without_eval_gettext)
--	# Solaris has a gettext(1) but no eval_gettext(1)
--	eval_gettext () {
--		gettext "$1" | (
--			export PATH $(git sh-i18n--envsubst --variables "$1");
--			git sh-i18n--envsubst "$1"
--		)
--	}
--
--	eval_ngettext () {
--		ngettext "$1" "$2" "$3" | (
--			export PATH $(git sh-i18n--envsubst --variables "$2");
--			git sh-i18n--envsubst "$2"
--		)
--	}
--	;;
--poison)
--	# Emit garbage so that tests that incorrectly rely on translatable
--	# strings will fail.
--	gettext () {
--		printf "%s" "# GETTEXT POISON #"
--	}
--
--	eval_gettext () {
--		printf "%s" "# GETTEXT POISON #"
--	}
--
--	eval_ngettext () {
--		printf "%s" "# GETTEXT POISON #"
--	}
--	;;
--*)
--	gettext () {
--		printf "%s" "$1"
--	}
--
--	eval_gettext () {
--		printf "%s" "$1" | (
--			export PATH $(git sh-i18n--envsubst --variables "$1");
--			git sh-i18n--envsubst "$1"
--		)
--	}
-+# GNU gettext
-+export GIT_INTERNAL_GETTEXT_SH_SCHEME=gnu
-+export PATH=@gettext@/bin:$PATH
-
--	eval_ngettext () {
--		(test "$3" = 1 && printf "%s" "$1" || printf "%s" "$2") | (
--			export PATH $(git sh-i18n--envsubst --variables "$2");
--			git sh-i18n--envsubst "$2"
--		)
--	}
--	;;
--esac
-+. @gettext@/bin/gettext.sh
-
- # Git-specific wrapper functions
- gettextln () {
++	. @gettext@/bin/gettext.sh
++	export PATH=@gettext@/bin:$PATH
+ 	;;
+ gettext_without_eval_gettext)
+ 	# Solaris has a gettext(1) but no eval_gettext(1)
diff --git a/pkgs/patches/git/installCheck-path.patch b/pkgs/patches/git/installCheck-path.patch
--- a/pkgs/patches/git/installCheck-path.patch
+++ b/pkgs/patches/git/installCheck-path.patch
@@ -1,12 +1,13 @@
 diff --git a/t/test-lib.sh b/t/test-lib.sh
+index 8665b0a9b6..8bb892b1af 100644
 --- a/t/test-lib.sh
 +++ b/t/test-lib.sh
-@@ -923,7 +923,7 @@
+@@ -1227,7 +1227,7 @@ elif test -n "$GIT_TEST_INSTALLED"
 then
 	GIT_EXEC_PATH=$($GIT_TEST_INSTALLED/git --exec-path)  ||
 	error "Cannot run git from $GIT_TEST_INSTALLED."
--	PATH=$GIT_TEST_INSTALLED:$GIT_BUILD_DIR:$PATH
+-	PATH=$GIT_TEST_INSTALLED:$GIT_BUILD_DIR/t/helper:$PATH
 +	PATH=$GIT_TEST_INSTALLED:$GIT_BUILD_DIR/t/helper:$GIT_BUILD_DIR:$PATH
 	GIT_EXEC_PATH=${GIT_TEST_EXEC_PATH:-$GIT_EXEC_PATH}
 else # normal case, use ../bin-wrappers only unless $with_dashes:
-	git_bin_dir="$GIT_BUILD_DIR/bin-wrappers"
+	if test -n "$no_bin_wrappers"
diff --git a/pkgs/patches/git/ssh-path.patch b/pkgs/patches/git/ssh-path.patch
--- a/pkgs/patches/git/ssh-path.patch
+++ b/pkgs/patches/git/ssh-path.patch
@@ -1,8 +1,8 @@
 diff --git a/connect.c b/connect.c
-index c3a014c5b..fbca3262b 100644
+index 4813f005ab..b3f12f3268 100644
 --- a/connect.c
 +++ b/connect.c
-@@ -1010,7 +1010,7 @@ static void fill_ssh_args(struct child_process *conn, const char *ssh_host,
+@@ -1183,7 +1183,7 @@ static void fill_ssh_args(struct child_process *conn, const char *ssh_host,
 
 	ssh = getenv("GIT_SSH");
 	if (!ssh)
@@ -12,7 +12,7 @@ index c3a014c5b..fbca3262b 100644
 }
 
 diff --git a/git-gui/lib/remote_add.tcl b/git-gui/lib/remote_add.tcl
-index 480a6b30d..781720424 100644
+index 480a6b30d0..7817204241 100644
 --- a/git-gui/lib/remote_add.tcl
 +++ b/git-gui/lib/remote_add.tcl
 @@ -139,7 +139,7 @@ method _add {} {
diff --git a/pkgs/python-packages-overrides.nix b/pkgs/python-packages-overrides.nix
--- a/pkgs/python-packages-overrides.nix
+++ b/pkgs/python-packages-overrides.nix
@@ -15,6 +15,12 @@ in
 self: super: {
 
+  "cffi" = super."cffi".override (attrs: {
+    buildInputs = [
+      pkgs.libffi
+    ];
+  });
+
   "gevent" = super."gevent".override (attrs: {
     propagatedBuildInputs = attrs.propagatedBuildInputs ++ [
       # NOTE: (marcink) odd requirements from gevent aren't set properly,
@@ -52,6 +58,12 @@ self: super: {
     ];
   });
 
+  "pygit2" = super."pygit2".override (attrs: {
+    propagatedBuildInputs = attrs.propagatedBuildInputs ++ [
+      pkgs.libffi
+      pkgs.libgit2rc
+    ];
+  });
 
   # Avoid that base packages screw up the build process
   inherit (basePythonPackages)
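The `cffi` and `pygit2` overrides exist so the newly introduced pygit2 dependency links against the pinned `libgit2rc` (libgit2 0.28.2) defined in overlays.nix. A minimal smoke test for the resulting stack, as an illustration only (pygit2 0.28 API, hypothetical repository path):

    import pygit2

    repo_path = pygit2.discover_repository('/path/to/repo')  # hypothetical path
    repo = pygit2.Repository(repo_path)
    head_commit = repo[repo.head.target]
    print(head_commit.message)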
"https://files.pythonhosted.org/packages/0f/9e/26b1d194aab960063b266170e53c39f73ea0d0d3f5ce23313e0ec8ee9bdf/attrs-18.2.0.tar.gz"; - sha256 = "0s9ydh058wmmf5v391pym877x4ahxg45dw6a0w4c7s5wgpigdjqh"; + url = "https://files.pythonhosted.org/packages/98/c3/2c227e66b5e896e15ccdae2e00bbc69aa46e9a8ce8869cc5fa96310bf612/attrs-19.3.0.tar.gz"; + sha256 = "0wky4h28n7xnr6xv69p9z6kv8bzn50d10c3drmd9ds8gawbcxdzp"; }; meta = { license = [ pkgs.lib.licenses.mit ]; @@ -48,6 +48,20 @@ self: super: { license = [ pkgs.lib.licenses.mit ]; }; }; + "cffi" = super.buildPythonPackage { + name = "cffi-1.12.3"; + doCheck = false; + propagatedBuildInputs = [ + self."pycparser" + ]; + src = fetchurl { + url = "https://files.pythonhosted.org/packages/93/1a/ab8c62b5838722f29f3daffcc8d4bd61844aa9b5f437341cc890ceee483b/cffi-1.12.3.tar.gz"; + sha256 = "0x075521fxwv0mfp4cqzk7lvmw4n94bjw601qkcv314z5s182704"; + }; + meta = { + license = [ pkgs.lib.licenses.mit ]; + }; + }; "configobj" = super.buildPythonPackage { name = "configobj-5.0.6"; doCheck = false; @@ -62,6 +76,28 @@ self: super: { license = [ pkgs.lib.licenses.bsdOriginal ]; }; }; + "configparser" = super.buildPythonPackage { + name = "configparser-4.0.2"; + doCheck = false; + src = fetchurl { + url = "https://files.pythonhosted.org/packages/16/4f/48975536bd488d3a272549eb795ac4a13a5f7fcdc8995def77fbef3532ee/configparser-4.0.2.tar.gz"; + sha256 = "1priacxym85yjcf68hh38w55nqswaxp71ryjyfdk222kg9l85ln7"; + }; + meta = { + license = [ pkgs.lib.licenses.mit ]; + }; + }; + "contextlib2" = super.buildPythonPackage { + name = "contextlib2-0.6.0.post1"; + doCheck = false; + src = fetchurl { + url = "https://files.pythonhosted.org/packages/02/54/669207eb72e3d8ae8b38aa1f0703ee87a0e9f88f30d3c0a47bebdb6de242/contextlib2-0.6.0.post1.tar.gz"; + sha256 = "0bhnr2ac7wy5l85ji909gyljyk85n92w8pdvslmrvc8qih4r1x01"; + }; + meta = { + license = [ pkgs.lib.licenses.psfl ]; + }; + }; "cov-core" = super.buildPythonPackage { name = "cov-core-1.15.0"; doCheck = false; @@ -77,11 +113,11 @@ self: super: { }; }; "coverage" = super.buildPythonPackage { - name = "coverage-4.5.3"; + name = "coverage-4.5.4"; doCheck = false; src = fetchurl { - url = "https://files.pythonhosted.org/packages/82/70/2280b5b29a0352519bb95ab0ef1ea942d40466ca71c53a2085bdeff7b0eb/coverage-4.5.3.tar.gz"; - sha256 = "02f6m073qdispn96rc616hg0rnmw1pgqzw3bgxwiwza4zf9hirlx"; + url = "https://files.pythonhosted.org/packages/85/d5/818d0e603685c4a613d56f065a721013e942088047ff1027a632948bdae6/coverage-4.5.4.tar.gz"; + sha256 = "0p0j4di6h8k6ica7jwwj09azdcg4ycxq60i9qsskmsg94cd9yzg0"; }; meta = { license = [ pkgs.lib.licenses.asl20 ]; @@ -99,14 +135,14 @@ self: super: { }; }; "dogpile.cache" = super.buildPythonPackage { - name = "dogpile.cache-0.7.1"; + name = "dogpile.cache-0.9.0"; doCheck = false; propagatedBuildInputs = [ self."decorator" ]; src = fetchurl { - url = "https://files.pythonhosted.org/packages/84/3e/dbf1cfc5228f1d3dca80ef714db2c5aaec5cd9efaf54d7e3daef6bc48b19/dogpile.cache-0.7.1.tar.gz"; - sha256 = "0caazmrzhnfqb5yrp8myhw61ny637jj69wcngrpbvi31jlcpy6v9"; + url = "https://files.pythonhosted.org/packages/ac/6a/9ac405686a94b7f009a20a50070a5786b0e1aedc707b88d40d0c4b51a82e/dogpile.cache-0.9.0.tar.gz"; + sha256 = "0sr1fn6b4k5bh0cscd9yi8csqxvj4ngzildav58x5p694mc86j5k"; }; meta = { license = [ pkgs.lib.licenses.bsdOriginal ]; @@ -204,11 +240,11 @@ self: super: { }; }; "hg-evolve" = super.buildPythonPackage { - name = "hg-evolve-8.5.1"; + name = "hg-evolve-9.1.0"; doCheck = false; src = fetchurl { - url = 
"https://files.pythonhosted.org/packages/e3/ce/6594aa403e3464831d4daf20e45fd2e3ef553d968ac13d2c7fa791d4eedd/hg-evolve-8.5.1.tar.gz"; - sha256 = "09avqn7c1biz97vb1zw91q6nfzydpcqv43mgpfrj7ywp0fscfgf3"; + url = "https://files.pythonhosted.org/packages/20/36/5a6655975aa0c663be91098d31a0b24841acad44fe896aa2bdee77c6b883/hg-evolve-9.1.0.tar.gz"; + sha256 = "1mna81cmzxxn7s2nwz3g1xgdjlcc1axkvfmwg7gjqghwn3pdraps"; }; meta = { license = [ { fullName = "GPLv2+"; } ]; @@ -230,16 +266,33 @@ self: super: { }; }; "hupper" = super.buildPythonPackage { - name = "hupper-1.6.1"; + name = "hupper-1.9.1"; doCheck = false; src = fetchurl { - url = "https://files.pythonhosted.org/packages/85/d9/e005d357b11249c5d70ddf5b7adab2e4c0da4e8b0531ff146917a04fe6c0/hupper-1.6.1.tar.gz"; - sha256 = "0d3cvkc8ssgwk54wvhbifj56ry97qi10pfzwfk8vwzzcikbfp3zy"; + url = "https://files.pythonhosted.org/packages/09/3a/4f215659f31eeffe364a984dba486bfa3907bfcc54b7013bdfe825cebb5f/hupper-1.9.1.tar.gz"; + sha256 = "0pyg879fv9mbwlnbzw2a3234qqycqs9l97h5mpkmk0bvxhi2471v"; }; meta = { license = [ pkgs.lib.licenses.mit ]; }; }; + "importlib-metadata" = super.buildPythonPackage { + name = "importlib-metadata-0.23"; + doCheck = false; + propagatedBuildInputs = [ + self."zipp" + self."contextlib2" + self."configparser" + self."pathlib2" + ]; + src = fetchurl { + url = "https://files.pythonhosted.org/packages/5d/44/636bcd15697791943e2dedda0dbe098d8530a38d113b202817133e0b06c0/importlib_metadata-0.23.tar.gz"; + sha256 = "09mdqdfv5rdrwz80jh9m379gxmvk2vhjfz0fg53hid00icvxf65a"; + }; + meta = { + license = [ pkgs.lib.licenses.asl20 ]; + }; + }; "ipdb" = super.buildPythonPackage { name = "ipdb-0.12"; doCheck = false; @@ -291,50 +344,54 @@ self: super: { }; }; "mako" = super.buildPythonPackage { - name = "mako-1.0.7"; + name = "mako-1.1.0"; doCheck = false; propagatedBuildInputs = [ self."markupsafe" ]; src = fetchurl { - url = "https://files.pythonhosted.org/packages/eb/f3/67579bb486517c0d49547f9697e36582cd19dafb5df9e687ed8e22de57fa/Mako-1.0.7.tar.gz"; - sha256 = "1bi5gnr8r8dva06qpyx4kgjc6spm2k1y908183nbbaylggjzs0jf"; + url = "https://files.pythonhosted.org/packages/b0/3c/8dcd6883d009f7cae0f3157fb53e9afb05a0d3d33b3db1268ec2e6f4a56b/Mako-1.1.0.tar.gz"; + sha256 = "0jqa3qfpykyn4fmkn0kh6043sfls7br8i2bsdbccazcvk9cijsd3"; }; meta = { license = [ pkgs.lib.licenses.mit ]; }; }; "markupsafe" = super.buildPythonPackage { - name = "markupsafe-1.1.0"; + name = "markupsafe-1.1.1"; doCheck = false; src = fetchurl { - url = "https://files.pythonhosted.org/packages/ac/7e/1b4c2e05809a4414ebce0892fe1e32c14ace86ca7d50c70f00979ca9b3a3/MarkupSafe-1.1.0.tar.gz"; - sha256 = "1lxirjypbdd3l9jl4vliilhfnhy7c7f2vlldqg1b0i74khn375sf"; + url = "https://files.pythonhosted.org/packages/b9/2e/64db92e53b86efccfaea71321f597fa2e1b2bd3853d8ce658568f7a13094/MarkupSafe-1.1.1.tar.gz"; + sha256 = "0sqipg4fk7xbixqd8kq6rlkxj664d157bdwbh93farcphf92x1r9"; }; meta = { - license = [ pkgs.lib.licenses.bsdOriginal ]; + license = [ pkgs.lib.licenses.bsdOriginal pkgs.lib.licenses.bsd3 ]; }; }; "mercurial" = super.buildPythonPackage { - name = "mercurial-4.9.1"; + name = "mercurial-5.1.1"; doCheck = false; src = fetchurl { - url = "https://files.pythonhosted.org/packages/60/58/a1c52d5f5c0b755e231faf7c4f507dc51fe26d979d36346bc9d28f4f8a75/mercurial-4.9.1.tar.gz"; - sha256 = "0iybbkd9add066729zg01kwz5hhc1s6lhp9rrnsmzq6ihyxj3p8v"; + url = "https://files.pythonhosted.org/packages/22/39/e1a95f6048aa0785b82f5faad8281ae7320894a635cb4a57e19479639c92/mercurial-5.1.1.tar.gz"; + sha256 = 
"17z42rfjdkrks4grzgac66nfh285zf1pwxd2zwx1p71pw2jqpz1m"; }; meta = { license = [ pkgs.lib.licenses.gpl1 pkgs.lib.licenses.gpl2Plus ]; }; }; "mock" = super.buildPythonPackage { - name = "mock-1.0.1"; + name = "mock-3.0.5"; doCheck = false; + propagatedBuildInputs = [ + self."six" + self."funcsigs" + ]; src = fetchurl { - url = "https://files.pythonhosted.org/packages/a2/52/7edcd94f0afb721a2d559a5b9aae8af4f8f2c79bc63fdbe8a8a6c9b23bbe/mock-1.0.1.tar.gz"; - sha256 = "0kzlsbki6q0awf89rc287f3aj8x431lrajf160a70z0ikhnxsfdq"; + url = "https://files.pythonhosted.org/packages/2e/ab/4fe657d78b270aa6a32f027849513b829b41b0f28d9d8d7f8c3d29ea559a/mock-3.0.5.tar.gz"; + sha256 = "1hrp6j0yrx2xzylfv02qa8kph661m6yq4p0mc8fnimch9j4psrc3"; }; meta = { - license = [ pkgs.lib.licenses.bsdOriginal ]; + license = [ pkgs.lib.licenses.bsdOriginal { fullName = "OSI Approved :: BSD License"; } ]; }; }; "more-itertools" = super.buildPythonPackage { @@ -362,6 +419,21 @@ self: super: { license = [ pkgs.lib.licenses.asl20 ]; }; }; + "packaging" = super.buildPythonPackage { + name = "packaging-19.2"; + doCheck = false; + propagatedBuildInputs = [ + self."pyparsing" + self."six" + ]; + src = fetchurl { + url = "https://files.pythonhosted.org/packages/5a/2f/449ded84226d0e2fda8da9252e5ee7731bdf14cd338f622dfcd9934e0377/packaging-19.2.tar.gz"; + sha256 = "0izwlz9h0bw171a1chr311g2y7n657zjaf4mq4rgm8pp9lbj9f98"; + }; + meta = { + license = [ pkgs.lib.licenses.bsdOriginal { fullName = "BSD or Apache License, Version 2.0"; } pkgs.lib.licenses.asl20 ]; + }; + }; "pastedeploy" = super.buildPythonPackage { name = "pastedeploy-2.0.1"; doCheck = false; @@ -374,15 +446,15 @@ self: super: { }; }; "pathlib2" = super.buildPythonPackage { - name = "pathlib2-2.3.4"; + name = "pathlib2-2.3.5"; doCheck = false; propagatedBuildInputs = [ self."six" self."scandir" ]; src = fetchurl { - url = "https://files.pythonhosted.org/packages/b5/f4/9c7cc726ece2498b6c8b62d3262aa43f59039b953fe23c9964ac5e18d40b/pathlib2-2.3.4.tar.gz"; - sha256 = "1y0f9rkm1924zrc5dn4bwxlhgdkbml82lkcc28l5rgmr7d918q24"; + url = "https://files.pythonhosted.org/packages/94/d8/65c86584e7e97ef824a1845c72bbe95d79f5b306364fa778a3c3e401b309/pathlib2-2.3.5.tar.gz"; + sha256 = "0s4qa8c082fdkb17izh4mfgwrjd1n5pya18wvrbwqdvvb5xs9nbc"; }; meta = { license = [ pkgs.lib.licenses.mit ]; @@ -446,37 +518,40 @@ self: super: { }; }; "pluggy" = super.buildPythonPackage { - name = "pluggy-0.11.0"; + name = "pluggy-0.13.1"; doCheck = false; + propagatedBuildInputs = [ + self."importlib-metadata" + ]; src = fetchurl { - url = "https://files.pythonhosted.org/packages/0d/a1/862ab336e8128fde20981d2c1aa8506693412daf5083b1911d539412676b/pluggy-0.11.0.tar.gz"; - sha256 = "10511a54dvafw1jrk75mrhml53c7b7w4yaw7241696lc2hfvr895"; + url = "https://files.pythonhosted.org/packages/f8/04/7a8542bed4b16a65c2714bf76cf5a0b026157da7f75e87cc88774aa10b14/pluggy-0.13.1.tar.gz"; + sha256 = "1c35qyhvy27q9ih9n899f3h4sdnpgq027dbiilly2qb5cvgarchm"; }; meta = { license = [ pkgs.lib.licenses.mit ]; }; }; "prompt-toolkit" = super.buildPythonPackage { - name = "prompt-toolkit-1.0.16"; + name = "prompt-toolkit-1.0.18"; doCheck = false; propagatedBuildInputs = [ self."six" self."wcwidth" ]; src = fetchurl { - url = "https://files.pythonhosted.org/packages/f1/03/bb36771dc9fa7553ac4bdc639a9ecdf6fda0ff4176faf940d97e3c16e41d/prompt_toolkit-1.0.16.tar.gz"; - sha256 = "1d65hm6nf0cbq0q0121m60zzy4s1fpg9fn761s1yxf08dridvkn1"; + url = 
"https://files.pythonhosted.org/packages/c5/64/c170e5b1913b540bf0c8ab7676b21fdd1d25b65ddeb10025c6ca43cccd4c/prompt_toolkit-1.0.18.tar.gz"; + sha256 = "09h1153wgr5x2ny7ds0w2m81n3bb9j8hjb8sjfnrg506r01clkyx"; }; meta = { license = [ pkgs.lib.licenses.bsdOriginal ]; }; }; "psutil" = super.buildPythonPackage { - name = "psutil-5.5.1"; + name = "psutil-5.6.5"; doCheck = false; src = fetchurl { - url = "https://files.pythonhosted.org/packages/c7/01/7c30b247cdc5ba29623faa5c8cf1f1bbf7e041783c340414b0ed7e067c64/psutil-5.5.1.tar.gz"; - sha256 = "045qaqvn6k90bj5bcy259yrwcd2afgznaav3sfhphy9b8ambzkkj"; + url = "https://files.pythonhosted.org/packages/03/9a/95c4b3d0424426e5fd94b5302ff74cea44d5d4f53466e1228ac8e73e14b4/psutil-5.6.5.tar.gz"; + sha256 = "0isil5jxwwd8awz54qk28rpgjg43i5l6yl70g40vxwa4r4m56lfh"; }; meta = { license = [ pkgs.lib.licenses.bsdOriginal ]; @@ -494,16 +569,42 @@ self: super: { }; }; "py" = super.buildPythonPackage { - name = "py-1.6.0"; + name = "py-1.8.0"; doCheck = false; src = fetchurl { - url = "https://files.pythonhosted.org/packages/4f/38/5f427d1eedae73063ce4da680d2bae72014995f9fdeaa57809df61c968cd/py-1.6.0.tar.gz"; - sha256 = "1wcs3zv9wl5m5x7p16avqj2gsrviyb23yvc3pr330isqs0sh98q6"; + url = "https://files.pythonhosted.org/packages/f1/5a/87ca5909f400a2de1561f1648883af74345fe96349f34f737cdfc94eba8c/py-1.8.0.tar.gz"; + sha256 = "0lsy1gajva083pzc7csj1cvbmminb7b4l6a0prdzyb3fd829nqyw"; }; meta = { license = [ pkgs.lib.licenses.mit ]; }; }; + "pycparser" = super.buildPythonPackage { + name = "pycparser-2.19"; + doCheck = false; + src = fetchurl { + url = "https://files.pythonhosted.org/packages/68/9e/49196946aee219aead1290e00d1e7fdeab8567783e83e1b9ab5585e6206a/pycparser-2.19.tar.gz"; + sha256 = "1cr5dcj9628lkz1qlwq3fv97c25363qppkmcayqvd05dpy573259"; + }; + meta = { + license = [ pkgs.lib.licenses.bsdOriginal ]; + }; + }; + "pygit2" = super.buildPythonPackage { + name = "pygit2-0.28.2"; + doCheck = false; + propagatedBuildInputs = [ + self."cffi" + self."six" + ]; + src = fetchurl { + url = "https://files.pythonhosted.org/packages/4c/64/88c2a4eb2d22ca1982b364f41ff5da42d61de791d7eb68140e7f8f7eb721/pygit2-0.28.2.tar.gz"; + sha256 = "11kzj5mjkspvplnpdb6bj8dcj6rgmkk986k8hjcklyg5yaxkz32d"; + }; + meta = { + license = [ { fullName = "GPLv2 with linking exception"; } ]; + }; + }; "pygments" = super.buildPythonPackage { name = "pygments-2.4.2"; doCheck = false; @@ -515,6 +616,17 @@ self: super: { license = [ pkgs.lib.licenses.bsdOriginal ]; }; }; + "pyparsing" = super.buildPythonPackage { + name = "pyparsing-2.4.5"; + doCheck = false; + src = fetchurl { + url = "https://files.pythonhosted.org/packages/00/32/8076fa13e832bb4dcff379f18f228e5a53412be0631808b9ca2610c0f566/pyparsing-2.4.5.tar.gz"; + sha256 = "0fk8gsybiw1gm146mkjdjvaajwh20xwvpv4j7syh2zrnpq0j19jc"; + }; + meta = { + license = [ pkgs.lib.licenses.mit ]; + }; + }; "pyramid" = super.buildPythonPackage { name = "pyramid-1.10.4"; doCheck = false; @@ -539,59 +651,61 @@ self: super: { }; }; "pyramid-mako" = super.buildPythonPackage { - name = "pyramid-mako-1.0.2"; + name = "pyramid-mako-1.1.0"; doCheck = false; propagatedBuildInputs = [ self."pyramid" self."mako" ]; src = fetchurl { - url = "https://files.pythonhosted.org/packages/f1/92/7e69bcf09676d286a71cb3bbb887b16595b96f9ba7adbdc239ffdd4b1eb9/pyramid_mako-1.0.2.tar.gz"; - sha256 = "18gk2vliq8z4acblsl6yzgbvnr9rlxjlcqir47km7kvlk1xri83d"; + url = "https://files.pythonhosted.org/packages/63/7b/5e2af68f675071a6bad148c1c393928f0ef5fcd94e95cbf53b89d6471a83/pyramid_mako-1.1.0.tar.gz"; + sha256 = 
"1qj0m091mnii86j2q1d82yir22nha361rvhclvg3s70z8iiwhrh0"; }; meta = { license = [ { fullName = "Repoze Public License"; } { fullName = "BSD-derived (http://www.repoze.org/LICENSE.txt)"; } ]; }; }; "pytest" = super.buildPythonPackage { - name = "pytest-3.8.2"; + name = "pytest-4.6.5"; doCheck = false; propagatedBuildInputs = [ self."py" self."six" - self."setuptools" + self."packaging" self."attrs" - self."more-itertools" self."atomicwrites" self."pluggy" + self."importlib-metadata" + self."wcwidth" self."funcsigs" self."pathlib2" + self."more-itertools" ]; src = fetchurl { - url = "https://files.pythonhosted.org/packages/5f/d2/7f77f406ac505abda02ab4afb50d06ebf304f6ea42fca34f8f37529106b2/pytest-3.8.2.tar.gz"; - sha256 = "18nrwzn61kph2y6gxwfz9ms68rfvr9d4vcffsxng9p7jk9z18clk"; + url = "https://files.pythonhosted.org/packages/2a/c6/1d1f32f6a5009900521b12e6560fb6b7245b0d4bc3fb771acd63d10e30e1/pytest-4.6.5.tar.gz"; + sha256 = "0iykwwfp4h181nd7rsihh2120b0rkawlw7rvbl19sgfspncr3hwg"; }; meta = { license = [ pkgs.lib.licenses.mit ]; }; }; "pytest-cov" = super.buildPythonPackage { - name = "pytest-cov-2.6.0"; + name = "pytest-cov-2.7.1"; doCheck = false; propagatedBuildInputs = [ self."pytest" self."coverage" ]; src = fetchurl { - url = "https://files.pythonhosted.org/packages/d9/e2/58f90a316fbd94dd50bf5c826a23f3f5d079fb3cc448c1e9f0e3c33a3d2a/pytest-cov-2.6.0.tar.gz"; - sha256 = "0qnpp9y3ygx4jk4pf5ad71fh2skbvnr6gl54m7rg5qysnx4g0q73"; + url = "https://files.pythonhosted.org/packages/bb/0f/3db7ff86801883b21d5353b258c994b1b8e2abbc804e2273b8d0fd19004b/pytest-cov-2.7.1.tar.gz"; + sha256 = "0filvmmyqm715azsl09ql8hy2x7h286n6d8z5x42a1wpvvys83p0"; }; meta = { license = [ pkgs.lib.licenses.bsdOriginal pkgs.lib.licenses.mit ]; }; }; "pytest-profiling" = super.buildPythonPackage { - name = "pytest-profiling-1.3.0"; + name = "pytest-profiling-1.7.0"; doCheck = false; propagatedBuildInputs = [ self."six" @@ -599,53 +713,65 @@ self: super: { self."gprof2dot" ]; src = fetchurl { - url = "https://files.pythonhosted.org/packages/f5/34/4626126e041a51ef50a80d0619519b18d20aef249aac25b0d0fdd47e57ee/pytest-profiling-1.3.0.tar.gz"; - sha256 = "08r5afx5z22yvpmsnl91l4amsy1yxn8qsmm61mhp06mz8zjs51kb"; + url = "https://files.pythonhosted.org/packages/39/70/22a4b33739f07f1732a63e33bbfbf68e0fa58cfba9d200e76d01921eddbf/pytest-profiling-1.7.0.tar.gz"; + sha256 = "0abz9gi26jpcfdzgsvwad91555lpgdc8kbymicmms8k2fqa8z4wk"; }; meta = { license = [ pkgs.lib.licenses.mit ]; }; }; "pytest-runner" = super.buildPythonPackage { - name = "pytest-runner-4.2"; + name = "pytest-runner-5.1"; doCheck = false; src = fetchurl { - url = "https://files.pythonhosted.org/packages/9e/b7/fe6e8f87f9a756fd06722216f1b6698ccba4d269eac6329d9f0c441d0f93/pytest-runner-4.2.tar.gz"; - sha256 = "1gkpyphawxz38ni1gdq1fmwyqcg02m7ypzqvv46z06crwdxi2gyj"; + url = "https://files.pythonhosted.org/packages/d9/6d/4b41a74b31720e25abd4799be72d54811da4b4d0233e38b75864dcc1f7ad/pytest-runner-5.1.tar.gz"; + sha256 = "0ykfcnpp8c22winj63qzc07l5axwlc9ikl8vn05sc32gv3417815"; }; meta = { license = [ pkgs.lib.licenses.mit ]; }; }; "pytest-sugar" = super.buildPythonPackage { - name = "pytest-sugar-0.9.1"; + name = "pytest-sugar-0.9.2"; doCheck = false; propagatedBuildInputs = [ self."pytest" self."termcolor" + self."packaging" ]; src = fetchurl { - url = "https://files.pythonhosted.org/packages/3e/6a/a3f909083079d03bde11d06ab23088886bbe25f2c97fbe4bb865e2bf05bc/pytest-sugar-0.9.1.tar.gz"; - sha256 = "0b4av40dv30727m54v211r0nzwjp2ajkjgxix6j484qjmwpw935b"; + url = 
"https://files.pythonhosted.org/packages/55/59/f02f78d1c80f7e03e23177f60624c8106d4f23d124c921df103f65692464/pytest-sugar-0.9.2.tar.gz"; + sha256 = "1asq7yc4g8bx2sn7yy974mhc9ywvaihasjab4inkirdwn9s7mn7w"; }; meta = { license = [ pkgs.lib.licenses.bsdOriginal ]; }; }; "pytest-timeout" = super.buildPythonPackage { - name = "pytest-timeout-1.3.2"; + name = "pytest-timeout-1.3.3"; doCheck = false; propagatedBuildInputs = [ self."pytest" ]; src = fetchurl { - url = "https://files.pythonhosted.org/packages/8c/3e/1b6a319d12ae7baa3acb7c18ff2c8630a09471a0319d43535c683b4d03eb/pytest-timeout-1.3.2.tar.gz"; - sha256 = "09wnmzvnls2mnsdz7x3c3sk2zdp6jl4dryvyj5i8hqz16q2zq5qi"; + url = "https://files.pythonhosted.org/packages/13/48/7a166eaa29c1dca6cc253e3ba5773ff2e4aa4f567c1ea3905808e95ac5c1/pytest-timeout-1.3.3.tar.gz"; + sha256 = "1cczcjhw4xx5sjkhxlhc5c1bkr7x6fcyx12wrnvwfckshdvblc2a"; }; meta = { license = [ pkgs.lib.licenses.mit { fullName = "DFSG approved"; } ]; }; }; + "redis" = super.buildPythonPackage { + name = "redis-3.3.11"; + doCheck = false; + src = fetchurl { + url = "https://files.pythonhosted.org/packages/06/ca/00557c74279d2f256d3c42cabf237631355f3a132e4c74c2000e6647ad98/redis-3.3.11.tar.gz"; + sha256 = "1hicqbi5xl92hhml82awrr2rxl9jar5fp8nbcycj9qgmsdwc43wd"; + }; + meta = { + license = [ pkgs.lib.licenses.mit ]; + }; + }; "repoze.lru" = super.buildPythonPackage { name = "repoze.lru-0.7"; doCheck = false; @@ -658,7 +784,7 @@ self: super: { }; }; "rhodecode-vcsserver" = super.buildPythonPackage { - name = "rhodecode-vcsserver-4.17.4"; + name = "rhodecode-vcsserver-4.18.0"; buildInputs = [ self."pytest" self."py" @@ -691,7 +817,9 @@ self: super: { self."pastedeploy" self."pyramid" self."pyramid-mako" + self."pygit2" self."repoze.lru" + self."redis" self."simplejson" self."subprocess32" self."subvertpy" @@ -748,11 +876,11 @@ self: super: { }; }; "setuptools" = super.buildPythonPackage { - name = "setuptools-41.0.1"; + name = "setuptools-44.0.0"; doCheck = false; src = fetchurl { - url = "https://files.pythonhosted.org/packages/1d/64/a18a487b4391a05b9c7f938b94a16d80305bf0369c6b0b9509e86165e1d3/setuptools-41.0.1.zip"; - sha256 = "04sns22y2hhsrwfy1mha2lgslvpjsjsz8xws7h2rh5a7ylkd28m2"; + url = "https://files.pythonhosted.org/packages/b0/f3/44da7482ac6da3f36f68e253cb04de37365b3dba9036a3c70773b778b485/setuptools-44.0.0.zip"; + sha256 = "025h5cnxcmda1893l6i12hrwdvs1n8r31qs6q4pkif2v7rrggfp5"; }; meta = { license = [ pkgs.lib.licenses.mit ]; @@ -825,7 +953,7 @@ self: super: { }; }; "traitlets" = super.buildPythonPackage { - name = "traitlets-4.3.2"; + name = "traitlets-4.3.3"; doCheck = false; propagatedBuildInputs = [ self."ipython-genutils" @@ -834,8 +962,8 @@ self: super: { self."enum34" ]; src = fetchurl { - url = "https://files.pythonhosted.org/packages/a5/98/7f5ef2fe9e9e071813aaf9cb91d1a732e0a68b6c44a32b38cb8e14c3f069/traitlets-4.3.2.tar.gz"; - sha256 = "0dbq7sx26xqz5ixs711k5nc88p8a0nqyz6162pwks5dpcz9d4jww"; + url = "https://files.pythonhosted.org/packages/75/b0/43deb021bc943f18f07cbe3dac1d681626a48997b7ffa1e7fb14ef922b21/traitlets-4.3.3.tar.gz"; + sha256 = "1xsrwgivpkxlbr4dfndfsi098s29yqgswgjc1qqn69yxklvfw8yh"; }; meta = { license = [ pkgs.lib.licenses.bsdOriginal ]; @@ -864,11 +992,11 @@ self: super: { }; }; "waitress" = super.buildPythonPackage { - name = "waitress-1.3.0"; + name = "waitress-1.3.1"; doCheck = false; src = fetchurl { - url = "https://files.pythonhosted.org/packages/43/50/9890471320d5ad22761ae46661cf745f487b1c8c4ec49352b99e1078b970/waitress-1.3.0.tar.gz"; - sha256 = 
"09j5dzbbcxib7vdskhx39s1qsydlr4n2p2png71d7mjnr9pnwajf"; + url = "https://files.pythonhosted.org/packages/a6/e6/708da7bba65898e5d759ade8391b1077e49d07be0b0223c39f5be04def56/waitress-1.3.1.tar.gz"; + sha256 = "1iysl8ka3l4cdrr0r19fh1cv28q41mwpvgsb81ji7k4shkb0k3i7"; }; meta = { license = [ pkgs.lib.licenses.zpl21 ]; @@ -913,6 +1041,20 @@ self: super: { license = [ pkgs.lib.licenses.mit ]; }; }; + "zipp" = super.buildPythonPackage { + name = "zipp-0.6.0"; + doCheck = false; + propagatedBuildInputs = [ + self."more-itertools" + ]; + src = fetchurl { + url = "https://files.pythonhosted.org/packages/57/dd/585d728479d97d25aeeb9aa470d36a4ad8d0ba5610f84e14770128ce6ff7/zipp-0.6.0.tar.gz"; + sha256 = "13ndkf7vklw978a4gdl1yfvn8hch28429a0iam67sg4nrp5v261p"; + }; + meta = { + license = [ pkgs.lib.licenses.mit ]; + }; + }; "zope.deprecation" = super.buildPythonPackage { name = "zope.deprecation-4.4.0"; doCheck = false; diff --git a/pkgs/shell-generate.nix b/pkgs/shell-generate.nix --- a/pkgs/shell-generate.nix +++ b/pkgs/shell-generate.nix @@ -23,6 +23,7 @@ pkgs.stdenv.mkDerivation { pythonPackages.pip-tools pkgs.apr pkgs.aprutil + pkgs.libffi ]; shellHook = '' diff --git a/requirements.txt b/requirements.txt --- a/requirements.txt +++ b/requirements.txt @@ -3,22 +3,24 @@ # our custom configobj https://code.rhodecode.com/upstream/configobj/artifacts/download/0-012de99a-b1e1-4f64-a5c0-07a98a41b324.tar.gz?md5=6a513f51fe04b2c18cf84c1395a7c626#egg=configobj==5.0.6 -dogpile.cache==0.7.1 +dogpile.cache==0.9.0 dogpile.core==0.4.1 decorator==4.1.2 dulwich==0.13.0 hgsubversion==1.9.3 -hg-evolve==8.5.1 -mako==1.0.7 -markupsafe==1.1.0 -mercurial==4.9.1 +hg-evolve==9.1.0 +mako==1.1.0 +markupsafe==1.1.1 +mercurial==5.1.1 msgpack-python==0.5.6 pastedeploy==2.0.1 pyramid==1.10.4 -pyramid-mako==1.0.2 +pyramid-mako==1.1.0 +pygit2==0.28.2 repoze.lru==0.7 +redis==3.3.11 simplejson==3.16.0 subprocess32==3.5.4 subvertpy==0.10.1 @@ -33,7 +35,7 @@ zope.interface==4.6.0 gevent==1.4.0 greenlet==0.4.15 gunicorn==19.9.0 -waitress==1.3.0 +waitress==1.3.1 ## debug ipdb==0.12.0 @@ -41,3 +43,6 @@ ipython==5.1.0 ## test related requirements -r requirements_test.txt + +## uncomment to add the debug libraries +#-r requirements_debug.txt diff --git a/requirements_debug.txt b/requirements_debug.txt new file mode 100644 --- /dev/null +++ b/requirements_debug.txt @@ -0,0 +1,8 @@ +## special libraries we could extend the requirements.txt file with to add some +## custom libraries useful for debug and memory tracing + +## uncomment inclusion of this file in requirements.txt run make generate-pkgs and nix-shell + +objgraph +memory-profiler +pympler diff --git a/requirements_pinned.txt b/requirements_pinned.txt --- a/requirements_pinned.txt +++ b/requirements_pinned.txt @@ -1,12 +1,18 @@ # contains not directly required libraries we want to pin the version. 
-atomicwrites==1.2.1 -attrs==18.2.0 -hupper==1.6.1 -pathlib2==2.3.4 +atomicwrites==1.3.0 +attrs==19.3.0 +contextlib2==0.6.0.post1 +cffi==1.12.3 +hupper==1.9.1 +importlib-metadata==0.23 +packaging==19.2.0 +pathlib2==2.3.5 pygments==2.4.2 -psutil==5.5.1 -pluggy==0.11.0 +pyparsing==2.4.5 +psutil==5.6.5 +pluggy==0.13.1 scandir==1.10.0 setproctitle==1.1.10 venusian==1.2.0 +wcwidth==0.1.7 diff --git a/requirements_test.txt b/requirements_test.txt --- a/requirements_test.txt +++ b/requirements_test.txt @@ -1,16 +1,16 @@ # test related requirements -pytest==3.8.2 -py==1.6.0 -pytest-cov==2.6.0 -pytest-sugar==0.9.1 -pytest-runner==4.2.0 -pytest-profiling==1.3.0 -pytest-timeout==1.3.2 +pytest==4.6.5 +py==1.8.0 +pytest-cov==2.7.1 +pytest-sugar==0.9.2 +pytest-runner==5.1.0 +pytest-profiling==1.7.0 +pytest-timeout==1.3.3 gprof2dot==2017.9.19 -mock==1.0.1 +mock==3.0.5 cov-core==1.15.0 -coverage==4.5.3 +coverage==4.5.4 webtest==2.0.33 beautifulsoup4==4.6.3 diff --git a/vcsserver/VERSION b/vcsserver/VERSION --- a/vcsserver/VERSION +++ b/vcsserver/VERSION @@ -1,1 +1,1 @@ -4.17.4 \ No newline at end of file +4.18.0 \ No newline at end of file diff --git a/vcsserver/base.py b/vcsserver/base.py --- a/vcsserver/base.py +++ b/vcsserver/base.py @@ -44,25 +44,7 @@ class RepoFactory(object): raise NotImplementedError() def repo(self, wire, create=False): - """ - Get a repository instance for the given path. - - Uses internally the low level beaker API since the decorators introduce - significant overhead. - """ - region = self._cache_region - context = wire.get('context', None) - repo_path = wire.get('path', '') - context_uid = '{}'.format(context) - cache = wire.get('cache', True) - cache_on = context and cache - - @region.conditional_cache_on_arguments(condition=cache_on) - def create_new_repo(_repo_type, _repo_path, _context_uid): - return self._create_repo(wire, create) - - repo = create_new_repo(self.repo_type, repo_path, context_uid) - return repo + raise NotImplementedError() def obfuscate_qs(query_string): diff --git a/vcsserver/git.py b/vcsserver/git.py --- a/vcsserver/git.py +++ b/vcsserver/git.py @@ -14,6 +14,7 @@ # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + import collections import logging import os @@ -26,42 +27,54 @@ import urllib2 from functools import wraps import more_itertools +import pygit2 +from pygit2 import Repository as LibGit2Repo from dulwich import index, objects from dulwich.client import HttpGitClient, LocalGitClient from dulwich.errors import ( NotGitRepository, ChecksumMismatch, WrongObjectException, MissingCommitError, ObjectMissing, HangupException, UnexpectedCommandError) -from dulwich.repo import Repo as DulwichRepo, Tag +from dulwich.repo import Repo as DulwichRepo from dulwich.server import update_server_info from vcsserver import exceptions, settings, subprocessio -from vcsserver.utils import safe_str -from vcsserver.base import RepoFactory, obfuscate_qs, raise_from_original +from vcsserver.utils import safe_str, safe_int +from vcsserver.base import RepoFactory, obfuscate_qs from vcsserver.hgcompat import ( hg_url as url_parser, httpbasicauthhandler, httpdigestauthhandler) from vcsserver.git_lfs.lib import LFSOidStore +from vcsserver.vcs_base import RemoteBase DIR_STAT = stat.S_IFDIR FILE_MODE = stat.S_IFMT GIT_LINK = objects.S_IFGITLINK +PEELED_REF_MARKER = '^{}' + log = logging.getLogger(__name__) +def 
str_to_dulwich(value): + """ + Dulwich 0.10.1a requires `unicode` objects to be passed in. + """ + return value.decode(settings.WIRE_ENCODING) + + def reraise_safe_exceptions(func): """Converts Dulwich exceptions to something neutral.""" + @wraps(func) def wrapper(*args, **kwargs): try: return func(*args, **kwargs) - except (ChecksumMismatch, WrongObjectException, MissingCommitError, - ObjectMissing) as e: - exc = exceptions.LookupException(e) - raise exc(e) + except (ChecksumMismatch, WrongObjectException, MissingCommitError, ObjectMissing,) as e: + exc = exceptions.LookupException(org_exc=e) + raise exc(safe_str(e)) except (HangupException, UnexpectedCommandError) as e: - exc = exceptions.VcsException(e) - raise exc(e) + exc = exceptions.VcsException(org_exc=e) + raise exc(safe_str(e)) except Exception as e: # NOTE(marcink): because of how dulwich handles some exceptions # (KeyError on empty repos), we cannot track this and catch all @@ -80,33 +93,51 @@ class Repo(DulwichRepo): Since dulwich is sometimes keeping .idx file descriptors open, it leads to "Too many open files" error. We need to close all opened file descriptors once the repo object is destroyed. - - TODO: mikhail: please check if we need this wrapper after updating dulwich - to 0.12.0 + """ def __del__(self): if hasattr(self, 'object_store'): self.close() + class Repository(LibGit2Repo): + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.free() + + class GitFactory(RepoFactory): repo_type = 'git' - def _create_repo(self, wire, create): - repo_path = str_to_dulwich(wire['path']) - return Repo(repo_path) + def _create_repo(self, wire, create, use_libgit2=False): + if use_libgit2: + return Repository(wire['path']) + else: + repo_path = str_to_dulwich(wire['path']) + return Repo(repo_path) + + def repo(self, wire, create=False, use_libgit2=False): + """ + Get a repository instance for the given path. 
+ """ + return self._create_repo(wire, create, use_libgit2) + + def repo_libgit2(self, wire): + return self.repo(wire, use_libgit2=True) -class GitRemote(object): +class GitRemote(RemoteBase): def __init__(self, factory): self._factory = factory - self.peeled_ref_marker = '^{}' self._bulk_methods = { - "author": self.commit_attribute, - "date": self.get_object_attrs, - "message": self.commit_attribute, - "parents": self.commit_attribute, + "date": self.date, + "author": self.author, + "branch": self.branch, + "message": self.message, + "parents": self.parents, "_commit": self.revision, } @@ -115,10 +146,6 @@ class GitRemote(object): return dict([(x[0] + '_' + x[1], x[2]) for x in wire['config']]) return {} - def _assign_ref(self, wire, ref, commit_id): - repo = self._factory.repo(wire) - repo[ref] = commit_id - def _remote_conf(self, config): params = [ '-c', 'core.askpass=""', @@ -129,49 +156,75 @@ class GitRemote(object): return params @reraise_safe_exceptions - def is_empty(self, wire): - repo = self._factory.repo(wire) - try: - return not repo.head() - except Exception: - log.exception("failed to read object_store") - return True + def discover_git_version(self): + stdout, _ = self.run_git_command( + {}, ['--version'], _bare=True, _safe=True) + prefix = 'git version' + if stdout.startswith(prefix): + stdout = stdout[len(prefix):] + return stdout.strip() @reraise_safe_exceptions - def add_object(self, wire, content): - repo = self._factory.repo(wire) - blob = objects.Blob() - blob.set_raw_string(content) - repo.object_store.add_object(blob) - return blob.id + def is_empty(self, wire): + repo_init = self._factory.repo_libgit2(wire) + with repo_init as repo: + + try: + has_head = repo.head.name + if has_head: + return False + + # NOTE(marcink): check again using more expensive method + return repo.is_empty + except Exception: + pass + + return True @reraise_safe_exceptions def assert_correct_path(self, wire): - path = wire.get('path') - try: - self._factory.repo(wire) - except NotGitRepository as e: - tb = traceback.format_exc() - log.debug("Invalid Git path `%s`, tb: %s", path, tb) - return False + cache_on, context_uid, repo_id = self._cache_on(wire) + @self.region.conditional_cache_on_arguments(condition=cache_on) + def _assert_correct_path(_context_uid, _repo_id): + try: + repo_init = self._factory.repo_libgit2(wire) + with repo_init as repo: + pass + except pygit2.GitError: + path = wire.get('path') + tb = traceback.format_exc() + log.debug("Invalid Git path `%s`, tb: %s", path, tb) + return False - return True + return True + return _assert_correct_path(context_uid, repo_id) @reraise_safe_exceptions def bare(self, wire): - repo = self._factory.repo(wire) - return repo.bare + repo_init = self._factory.repo_libgit2(wire) + with repo_init as repo: + return repo.is_bare @reraise_safe_exceptions def blob_as_pretty_string(self, wire, sha): - repo = self._factory.repo(wire) - return repo[sha].as_pretty_string() + repo_init = self._factory.repo_libgit2(wire) + with repo_init as repo: + blob_obj = repo[sha] + blob = blob_obj.data + return blob @reraise_safe_exceptions def blob_raw_length(self, wire, sha): - repo = self._factory.repo(wire) - blob = repo[sha] - return blob.raw_length() + cache_on, context_uid, repo_id = self._cache_on(wire) + @self.region.conditional_cache_on_arguments(condition=cache_on) + def _blob_raw_length(_repo_id, _sha): + + repo_init = self._factory.repo_libgit2(wire) + with repo_init as repo: + blob = repo[sha] + return blob.size + + return _blob_raw_length(repo_id, 
sha) def _parse_lfs_pointer(self, raw_content): @@ -191,19 +244,44 @@ class GitRemote(object): return {} @reraise_safe_exceptions - def is_large_file(self, wire, sha): - repo = self._factory.repo(wire) - blob = repo[sha] - return self._parse_lfs_pointer(blob.as_raw_string()) + def is_large_file(self, wire, commit_id): + cache_on, context_uid, repo_id = self._cache_on(wire) + + @self.region.conditional_cache_on_arguments(condition=cache_on) + def _is_large_file(_repo_id, _sha): + repo_init = self._factory.repo_libgit2(wire) + with repo_init as repo: + blob = repo[commit_id] + if blob.is_binary: + return {} + + return self._parse_lfs_pointer(blob.data) + + return _is_large_file(repo_id, commit_id) + + @reraise_safe_exceptions + def is_binary(self, wire, tree_id): + cache_on, context_uid, repo_id = self._cache_on(wire) + + @self.region.conditional_cache_on_arguments(condition=cache_on) + def _is_binary(_repo_id, _tree_id): + repo_init = self._factory.repo_libgit2(wire) + with repo_init as repo: + blob_obj = repo[tree_id] + return blob_obj.is_binary + + return _is_binary(repo_id, tree_id) @reraise_safe_exceptions def in_largefiles_store(self, wire, oid): - repo = self._factory.repo(wire) conf = self._wire_to_config(wire) + repo_init = self._factory.repo_libgit2(wire) + with repo_init as repo: + repo_name = repo.path store_location = conf.get('vcs_git_lfs_store_location') if store_location: - repo_name = repo.path + store = LFSOidStore( oid=oid, repo=repo_name, store_location=store_location) return store.has_oid() @@ -212,12 +290,13 @@ class GitRemote(object): @reraise_safe_exceptions def store_path(self, wire, oid): - repo = self._factory.repo(wire) conf = self._wire_to_config(wire) + repo_init = self._factory.repo_libgit2(wire) + with repo_init as repo: + repo_name = repo.path store_location = conf.get('vcs_git_lfs_store_location') if store_location: - repo_name = repo.path store = LFSOidStore( oid=oid, repo=repo_name, store_location=store_location) return store.oid_path @@ -225,20 +304,21 @@ class GitRemote(object): @reraise_safe_exceptions def bulk_request(self, wire, rev, pre_load): - result = {} - for attr in pre_load: - try: - method = self._bulk_methods[attr] - args = [wire, rev] - if attr == "date": - args.extend(["commit_time", "commit_timezone"]) - elif attr in ["author", "message", "parents"]: - args.append(attr) - result[attr] = method(*args) - except KeyError as e: - raise exceptions.VcsException(e)( - "Unknown bulk attribute: %s" % attr) - return result + cache_on, context_uid, repo_id = self._cache_on(wire) + @self.region.conditional_cache_on_arguments(condition=cache_on) + def _bulk_request(_repo_id, _rev, _pre_load): + result = {} + for attr in pre_load: + try: + method = self._bulk_methods[attr] + args = [wire, rev] + result[attr] = method(*args) + except KeyError as e: + raise exceptions.VcsException(e)( + "Unknown bulk attribute: %s" % attr) + return result + + return _bulk_request(repo_id, rev, sorted(pre_load)) def _build_opener(self, url): handlers = [] @@ -255,6 +335,14 @@ class GitRemote(object): return urllib2.build_opener(*handlers) + def _type_id_to_name(self, type_id): + return { + 1: b'commit', + 2: b'tree', + 3: b'blob', + 4: b'tag' + }[type_id] + @reraise_safe_exceptions def check_url(self, url, config): url_obj = url_parser(url) @@ -317,6 +405,42 @@ class GitRemote(object): index.build_index_from_tree(repo.path, repo.index_path(), repo.object_store, repo["HEAD"].tree) + @reraise_safe_exceptions + def branch(self, wire, commit_id): + cache_on, context_uid, 
repo_id = self._cache_on(wire) + @self.region.conditional_cache_on_arguments(condition=cache_on) + def _branch(_context_uid, _repo_id, _commit_id): + regex = re.compile('^refs/heads') + + def filter_with(ref): + return regex.match(ref[0]) and ref[1] == _commit_id + + branches = filter(filter_with, self.get_refs(wire).items()) + return [x[0].split('refs/heads/')[-1] for x in branches] + + return _branch(context_uid, repo_id, commit_id) + + @reraise_safe_exceptions + def commit_branches(self, wire, commit_id): + cache_on, context_uid, repo_id = self._cache_on(wire) + @self.region.conditional_cache_on_arguments(condition=cache_on) + def _commit_branches(_context_uid, _repo_id, _commit_id): + repo_init = self._factory.repo_libgit2(wire) + with repo_init as repo: + branches = [x for x in repo.branches.with_commit(_commit_id)] + return branches + + return _commit_branches(context_uid, repo_id, commit_id) + + @reraise_safe_exceptions + def add_object(self, wire, content): + repo_init = self._factory.repo_libgit2(wire) + with repo_init as repo: + # write the blob through pygit2; the dulwich objects/object_store + # API used previously does not exist on a libgit2 repository + blob_id = repo.create_blob(content) + return blob_id.hex + # TODO: this is quite complex, check if that can be simplified @reraise_safe_exceptions def commit(self, wire, commit_data, branch, commit_tree, updated, removed): @@ -367,8 +491,7 @@ class GitRemote(object): curtree = newtree parent[reversed_dirnames[-1]] = (DIR_STAT, curtree.id) else: - parent.add( - name=node['node_path'], mode=node['mode'], hexsha=blob.id) + parent.add(name=node['node_path'], mode=node['mode'], hexsha=blob.id) new_trees.append(parent) # Update ancestors @@ -412,6 +535,9 @@ class GitRemote(object): setattr(commit, k, v) object_store.add_object(commit) + self.create_branch(wire, branch, commit.id) + + # dulwich set-ref ref = 'refs/heads/%s' % branch repo.refs[ref] = commit.id @@ -454,7 +580,7 @@ class GitRemote(object): # that contains a tag object, so that we would end up with # a peeled ref at this point. 
for k in remote_refs: - if k.endswith(self.peeled_ref_marker): + if k.endswith(PEELED_REF_MARKER): log.debug("Skipping peeled reference %s", k) continue repo[k] = remote_refs[k] @@ -471,14 +597,19 @@ class GitRemote(object): return remote_refs @reraise_safe_exceptions - def sync_fetch(self, wire, url, refs=None): + def sync_fetch(self, wire, url, refs=None, all_refs=False): repo = self._factory.repo(wire) if refs and not isinstance(refs, (list, tuple)): refs = [refs] + config = self._wire_to_config(wire) # get all remote refs we'll use to fetch later + cmd = ['ls-remote'] + if not all_refs: + cmd += ['--heads', '--tags'] + cmd += [url] output, __ = self.run_git_command( - wire, ['ls-remote', url], fail_on_stderr=False, + wire, cmd, fail_on_stderr=False, _copts=self._remote_conf(config), extra_env={'GIT_TERMINAL_PROMPT': '0'}) @@ -491,7 +622,7 @@ class GitRemote(object): if ref in remote_refs: # duplicate, skip continue - if ref.endswith(self.peeled_ref_marker): + if ref.endswith(PEELED_REF_MARKER): log.debug("Skipping peeled reference %s", ref) continue # don't sync HEAD @@ -506,6 +637,7 @@ class GitRemote(object): elif not refs: fetch_refs.append('{}:{}'.format(ref, ref)) log.debug('Finished obtaining fetch refs, total: %s', len(fetch_refs)) + if fetch_refs: for chunk in more_itertools.chunked(fetch_refs, 1024 * 4): fetch_refs_chunks = list(chunk) @@ -523,7 +655,7 @@ class GitRemote(object): if not self.check_url(url, wire): return config = self._wire_to_config(wire) - repo = self._factory.repo(wire) + self._factory.repo(wire) self.run_git_command( wire, ['push', url, '--mirror'], fail_on_stderr=False, _copts=self._remote_conf(config), @@ -556,48 +688,92 @@ class GitRemote(object): @reraise_safe_exceptions def get_object(self, wire, sha): - repo = self._factory.repo(wire) - obj = repo.get_object(sha) - commit_id = obj.id + cache_on, context_uid, repo_id = self._cache_on(wire) + @self.region.conditional_cache_on_arguments(condition=cache_on) + def _get_object(_context_uid, _repo_id, _sha): + repo_init = self._factory.repo_libgit2(wire) + with repo_init as repo: - if isinstance(obj, Tag): - commit_id = obj.object[1] + missing_commit_err = 'Commit {} does not exist for `{}`'.format(sha, wire['path']) + try: + commit = repo.revparse_single(sha) + except (KeyError, ValueError) as e: + raise exceptions.LookupException(e)(missing_commit_err) + + is_tag = False + if isinstance(commit, pygit2.Tag): + commit = repo.get(commit.target) + is_tag = True + + check_dangling = True + if is_tag: + check_dangling = False - return { - 'id': obj.id, - 'type': obj.type_name, - 'commit_id': commit_id, - 'idx': 0 - } + # we used a reference and it parsed means we're not having a dangling commit + if sha != commit.hex: + check_dangling = False + + if check_dangling: + # check for dangling commit + for branch in repo.branches.with_commit(commit.hex): + if branch: + break + else: + raise exceptions.LookupException(None)(missing_commit_err) - @reraise_safe_exceptions - def get_object_attrs(self, wire, sha, *attrs): - repo = self._factory.repo(wire) - obj = repo.get_object(sha) - return list(getattr(obj, a) for a in attrs) + commit_id = commit.hex + type_id = commit.type + + return { + 'id': commit_id, + 'type': self._type_id_to_name(type_id), + 'commit_id': commit_id, + 'idx': 0 + } + + return _get_object(context_uid, repo_id, sha) @reraise_safe_exceptions def get_refs(self, wire): - repo = self._factory.repo(wire) - result = {} - for ref, sha in repo.refs.as_dict().items(): - peeled_sha = repo.get_peeled(ref) - 
result[ref] = peeled_sha - return result + cache_on, context_uid, repo_id = self._cache_on(wire) + @self.region.conditional_cache_on_arguments(condition=cache_on) + def _get_refs(_context_uid, _repo_id): + + repo_init = self._factory.repo_libgit2(wire) + with repo_init as repo: + regex = re.compile('^refs/(heads|tags)/') + return {x.name: x.target.hex for x in + filter(lambda ref: regex.match(ref.name) ,repo.listall_reference_objects())} + + return _get_refs(context_uid, repo_id) @reraise_safe_exceptions - def get_refs_path(self, wire): - repo = self._factory.repo(wire) - return repo.refs.path + def get_branch_pointers(self, wire): + cache_on, context_uid, repo_id = self._cache_on(wire) + @self.region.conditional_cache_on_arguments(condition=cache_on) + def _get_branch_pointers(_context_uid, _repo_id): + + repo_init = self._factory.repo_libgit2(wire) + regex = re.compile('^refs/heads') + with repo_init as repo: + branches = filter(lambda ref: regex.match(ref.name), repo.listall_reference_objects()) + return {x.target.hex: x.shorthand for x in branches} + + return _get_branch_pointers(context_uid, repo_id) @reraise_safe_exceptions def head(self, wire, show_exc=True): - repo = self._factory.repo(wire) - try: - return repo.head() - except Exception: - if show_exc: - raise + cache_on, context_uid, repo_id = self._cache_on(wire) + @self.region.conditional_cache_on_arguments(condition=cache_on) + def _head(_context_uid, _repo_id, _show_exc): + repo_init = self._factory.repo_libgit2(wire) + with repo_init as repo: + try: + return repo.head.peel().hex + except Exception: + if show_exc: + raise + return _head(context_uid, repo_id, show_exc) @reraise_safe_exceptions def init(self, wire): @@ -611,35 +787,141 @@ class GitRemote(object): @reraise_safe_exceptions def revision(self, wire, rev): - repo = self._factory.repo(wire) - obj = repo[rev] - obj_data = { - 'id': obj.id, - } - try: - obj_data['tree'] = obj.tree - except AttributeError: - pass - return obj_data + + cache_on, context_uid, repo_id = self._cache_on(wire) + @self.region.conditional_cache_on_arguments(condition=cache_on) + def _revision(_context_uid, _repo_id, _rev): + repo_init = self._factory.repo_libgit2(wire) + with repo_init as repo: + commit = repo[rev] + obj_data = { + 'id': commit.id.hex, + } + # tree objects itself don't have tree_id attribute + if hasattr(commit, 'tree_id'): + obj_data['tree'] = commit.tree_id.hex + + return obj_data + return _revision(context_uid, repo_id, rev) + + @reraise_safe_exceptions + def date(self, wire, commit_id): + cache_on, context_uid, repo_id = self._cache_on(wire) + @self.region.conditional_cache_on_arguments(condition=cache_on) + def _date(_repo_id, _commit_id): + repo_init = self._factory.repo_libgit2(wire) + with repo_init as repo: + commit = repo[commit_id] + + if hasattr(commit, 'commit_time'): + commit_time, commit_time_offset = commit.commit_time, commit.commit_time_offset + else: + commit = commit.get_object() + commit_time, commit_time_offset = commit.commit_time, commit.commit_time_offset + + # TODO(marcink): check dulwich difference of offset vs timezone + return [commit_time, commit_time_offset] + return _date(repo_id, commit_id) @reraise_safe_exceptions - def commit_attribute(self, wire, rev, attr): - repo = self._factory.repo(wire) - obj = repo[rev] - return getattr(obj, attr) + def author(self, wire, commit_id): + cache_on, context_uid, repo_id = self._cache_on(wire) + @self.region.conditional_cache_on_arguments(condition=cache_on) + def _author(_repo_id, _commit_id): + repo_init = 
self._factory.repo_libgit2(wire) + with repo_init as repo: + commit = repo[commit_id] + + if hasattr(commit, 'author'): + author = commit.author + else: + author = commit.get_object().author + + if author.email: + return u"{} <{}>".format(author.name, author.email) + + return u"{}".format(author.raw_name) + return _author(repo_id, commit_id) + + @reraise_safe_exceptions + def message(self, wire, commit_id): + cache_on, context_uid, repo_id = self._cache_on(wire) + @self.region.conditional_cache_on_arguments(condition=cache_on) + def _message(_repo_id, _commit_id): + repo_init = self._factory.repo_libgit2(wire) + with repo_init as repo: + commit = repo[commit_id] + return commit.message + return _message(repo_id, commit_id) + + @reraise_safe_exceptions + def parents(self, wire, commit_id): + cache_on, context_uid, repo_id = self._cache_on(wire) + @self.region.conditional_cache_on_arguments(condition=cache_on) + def _parents(_repo_id, _commit_id): + repo_init = self._factory.repo_libgit2(wire) + with repo_init as repo: + commit = repo[commit_id] + if hasattr(commit, 'parent_ids'): + parent_ids = commit.parent_ids + else: + parent_ids = commit.get_object().parent_ids + + return [x.hex for x in parent_ids] + return _parents(repo_id, commit_id) + + @reraise_safe_exceptions + def children(self, wire, commit_id): + cache_on, context_uid, repo_id = self._cache_on(wire) + @self.region.conditional_cache_on_arguments(condition=cache_on) + def _children(_repo_id, _commit_id): + output, __ = self.run_git_command( + wire, ['rev-list', '--all', '--children']) + + child_ids = [] + pat = re.compile(r'^%s' % commit_id) + for l in output.splitlines(): + if pat.match(l): + found_ids = l.split(' ')[1:] + child_ids.extend(found_ids) + + return child_ids + return _children(repo_id, commit_id) @reraise_safe_exceptions def set_refs(self, wire, key, value): - repo = self._factory.repo(wire) - repo.refs[key] = value + repo_init = self._factory.repo_libgit2(wire) + with repo_init as repo: + repo.references.create(key, value, force=True) + + @reraise_safe_exceptions + def create_branch(self, wire, branch_name, commit_id, force=False): + repo_init = self._factory.repo_libgit2(wire) + with repo_init as repo: + commit = repo[commit_id] + + if force: + repo.branches.local.create(branch_name, commit, force=force) + elif not repo.branches.get(branch_name): + # create only if that branch doesn't exist yet + repo.branches.local.create(branch_name, commit, force=force) @reraise_safe_exceptions def remove_ref(self, wire, key): - repo = self._factory.repo(wire) - del repo.refs[key] + repo_init = self._factory.repo_libgit2(wire) + with repo_init as repo: + repo.references.delete(key) + + @reraise_safe_exceptions + def tag_remove(self, wire, tag_name): + repo_init = self._factory.repo_libgit2(wire) + with repo_init as repo: + key = 'refs/tags/{}'.format(tag_name) + repo.references.delete(key) @reraise_safe_exceptions def tree_changes(self, wire, source_id, target_id): + # TODO(marcink): remove this, it seems it's only used by tests repo = self._factory.repo(wire) source = repo[source_id].tree if source_id else None target = repo[target_id].tree @@ -647,21 +929,158 @@ class GitRemote(object): return list(result) @reraise_safe_exceptions + def tree_and_type_for_path(self, wire, commit_id, path): + + cache_on, context_uid, repo_id = self._cache_on(wire) + @self.region.conditional_cache_on_arguments(condition=cache_on) + def _tree_and_type_for_path(_context_uid, _repo_id, _commit_id, _path): + repo_init = self._factory.repo_libgit2(wire) + + 
with repo_init as repo: + commit = repo[commit_id] + try: + tree = commit.tree[path] + except KeyError: + return None, None, None + + return tree.id.hex, tree.type, tree.filemode + return _tree_and_type_for_path(context_uid, repo_id, commit_id, path) + + @reraise_safe_exceptions def tree_items(self, wire, tree_id): - repo = self._factory.repo(wire) - tree = repo[tree_id] + cache_on, context_uid, repo_id = self._cache_on(wire) + @self.region.conditional_cache_on_arguments(condition=cache_on) + def _tree_items(_repo_id, _tree_id): + + repo_init = self._factory.repo_libgit2(wire) + with repo_init as repo: + try: + tree = repo[tree_id] + except KeyError: + raise ObjectMissing('No tree with id: {}'.format(tree_id)) + + result = [] + for item in tree: + item_sha = item.hex + item_mode = item.filemode + item_type = item.type + + if item_type == 'commit': + # NOTE(marcink): we translate submodules to 'link' for backward compat + item_type = 'link' + + result.append((item.name, item_mode, item_sha, item_type)) + return result + return _tree_items(repo_id, tree_id) + + @reraise_safe_exceptions + def diff_2(self, wire, commit_id_1, commit_id_2, file_filter, opt_ignorews, context): + """ + Old version that uses subprocess to call diff + """ + + flags = [ + '-U%s' % context, '--patch', + '--binary', + '--find-renames', + '--no-indent-heuristic', + # '--indent-heuristic', + #'--full-index', + #'--abbrev=40' + ] + + if opt_ignorews: + flags.append('--ignore-all-space') + + if commit_id_1 == self.EMPTY_COMMIT: + cmd = ['show'] + flags + [commit_id_2] + else: + cmd = ['diff'] + flags + [commit_id_1, commit_id_2] + + if file_filter: + cmd.extend(['--', file_filter]) + + diff, __ = self.run_git_command(wire, cmd) + # If we used 'show' command, strip first few lines (until actual diff + # starts) + if commit_id_1 == self.EMPTY_COMMIT: + lines = diff.splitlines() + x = 0 + for line in lines: + if line.startswith('diff'): + break + x += 1 + # Append a new line just like the 'diff' command does + diff = '\n'.join(lines[x:]) + '\n' + return diff + + @reraise_safe_exceptions + def diff(self, wire, commit_id_1, commit_id_2, file_filter, opt_ignorews, context): + repo_init = self._factory.repo_libgit2(wire) + with repo_init as repo: + swap = True + flags = 0 + flags |= pygit2.GIT_DIFF_SHOW_BINARY + + if opt_ignorews: + flags |= pygit2.GIT_DIFF_IGNORE_WHITESPACE + + if commit_id_1 == self.EMPTY_COMMIT: + comm1 = repo[commit_id_2] + diff_obj = comm1.tree.diff_to_tree( + flags=flags, context_lines=context, swap=swap) + + else: + comm1 = repo[commit_id_2] + comm2 = repo[commit_id_1] + diff_obj = comm1.tree.diff_to_tree( + comm2.tree, flags=flags, context_lines=context, swap=swap) + similar_flags = 0 + similar_flags |= pygit2.GIT_DIFF_FIND_RENAMES + diff_obj.find_similar(flags=similar_flags) + + if file_filter: + for p in diff_obj: + if p.delta.old_file.path == file_filter: + return p.patch or '' + # no matching path == no diff + return '' + return diff_obj.patch or '' + + @reraise_safe_exceptions + def node_history(self, wire, commit_id, path, limit): + cache_on, context_uid, repo_id = self._cache_on(wire) + @self.region.conditional_cache_on_arguments(condition=cache_on) + def _node_history(_context_uid, _repo_id, _commit_id, _path, _limit): + # optimize for n==1, rev-list is much faster for that use-case + if limit == 1: + cmd = ['rev-list', '-1', commit_id, '--', path] + else: + cmd = ['log'] + if limit: + cmd.extend(['-n', str(safe_int(limit, 0))]) + cmd.extend(['--pretty=format: %H', '-s', commit_id, '--', path]) + + 
output, __ = self.run_git_command(wire, cmd) + commit_ids = re.findall(r'[0-9a-fA-F]{40}', output) + + return [x for x in commit_ids] + return _node_history(context_uid, repo_id, commit_id, path, limit) + + @reraise_safe_exceptions + def node_annotate(self, wire, commit_id, path): + + cmd = ['blame', '-l', '--root', '-r', commit_id, '--', path] + # -l ==> outputs long shas (and we need all 40 characters) + # --root ==> doesn't put '^' character for boundaries + # -r commit_id ==> blames for the given commit + output, __ = self.run_git_command(wire, cmd) result = [] - for item in tree.iteritems(): - item_sha = item.sha - item_mode = item.mode - - if FILE_MODE(item_mode) == GIT_LINK: - item_type = "link" - else: - item_type = repo[item_sha].type_name - - result.append((item.path, item_mode, item_sha, item_type)) + for i, blame_line in enumerate(output.split('\n')[:-1]): + line_no = i + 1 + commit_id, line = re.split(r' ', blame_line, 1) + result.append((line_no, commit_id, line)) return result @reraise_safe_exceptions @@ -670,13 +1089,20 @@ class GitRemote(object): update_server_info(repo) @reraise_safe_exceptions - def discover_git_version(self): - stdout, _ = self.run_git_command( - {}, ['--version'], _bare=True, _safe=True) - prefix = 'git version' - if stdout.startswith(prefix): - stdout = stdout[len(prefix):] - return stdout.strip() + def get_all_commit_ids(self, wire): + + cache_on, context_uid, repo_id = self._cache_on(wire) + @self.region.conditional_cache_on_arguments(condition=cache_on) + def _get_all_commit_ids(_context_uid, _repo_id): + + cmd = ['rev-list', '--reverse', '--date-order', '--branches', '--tags'] + try: + output, __ = self.run_git_command(wire, cmd) + return output.splitlines() + except Exception: + # Can be raised for empty repositories + return [] + return _get_all_commit_ids(context_uid, repo_id) @reraise_safe_exceptions def run_git_command(self, wire, cmd, **opts): @@ -711,11 +1137,12 @@ class GitRemote(object): cmd = [settings.GIT_EXECUTABLE] + _copts + cmd _opts = {'env': gitenv, 'shell': False} + proc = None try: _opts.update(opts) - p = subprocessio.SubprocessIOChunker(cmd, **_opts) + proc = subprocessio.SubprocessIOChunker(cmd, **_opts) - return ''.join(p), ''.join(p.error) + return ''.join(proc), ''.join(proc.error) except (EnvironmentError, OSError) as err: cmd = ' '.join(cmd) # human friendly CMD tb_err = ("Couldn't run git command (%s).\n" @@ -727,26 +1154,24 @@ class GitRemote(object): return '', err else: raise exceptions.VcsException()(tb_err) + finally: + if proc: + proc.close() @reraise_safe_exceptions def install_hooks(self, wire, force=False): from vcsserver.hook_utils import install_git_hooks - repo = self._factory.repo(wire) - return install_git_hooks(repo.path, repo.bare, force_create=force) + bare = self.bare(wire) + path = wire['path'] + return install_git_hooks(path, bare, force_create=force) @reraise_safe_exceptions def get_hooks_info(self, wire): from vcsserver.hook_utils import ( get_git_pre_hook_version, get_git_post_hook_version) - repo = self._factory.repo(wire) + bare = self.bare(wire) + path = wire['path'] return { - 'pre_version': get_git_pre_hook_version(repo.path, repo.bare), - 'post_version': get_git_post_hook_version(repo.path, repo.bare), + 'pre_version': get_git_pre_hook_version(path, bare), + 'post_version': get_git_post_hook_version(path, bare), } - - -def str_to_dulwich(value): - """ - Dulwich 0.10.1a requires `unicode` objects to be passed in. 
- """ - return value.decode(settings.WIRE_ENCODING) diff --git a/vcsserver/hg.py b/vcsserver/hg.py --- a/vcsserver/hg.py +++ b/vcsserver/hg.py @@ -22,7 +22,7 @@ import urllib import urllib2 import traceback -from hgext import largefiles, rebase +from hgext import largefiles, rebase, purge from hgext.strip import strip as hgext_strip from mercurial import commands from mercurial import unionrepo @@ -37,6 +37,7 @@ from vcsserver.hgcompat import ( makepeer, instance, match, memctx, exchange, memfilectx, nullrev, hg_merge, patch, peer, revrange, ui, hg_tag, Abort, LookupError, RepoError, RepoLookupError, InterventionRequired, RequirementError) +from vcsserver.vcs_base import RemoteBase log = logging.getLogger(__name__) @@ -98,6 +99,7 @@ def make_ui_from_config(repo_config): def reraise_safe_exceptions(func): """Decorator for converting mercurial exceptions to something neutral.""" + def wrapper(*args, **kwargs): try: return func(*args, **kwargs) @@ -142,12 +144,17 @@ class MercurialFactory(RepoFactory): baseui = self._create_config(wire["config"]) return instance(baseui, wire["path"], create) + def repo(self, wire, create=False): + """ + Get a repository instance for the given path. + """ + return self._create_repo(wire, create) -class HgRemote(object): + +class HgRemote(RemoteBase): def __init__(self, factory): self._factory = factory - self._bulk_methods = { "affected_files": self.ctx_files, "author": self.ctx_user, @@ -199,113 +206,68 @@ class HgRemote(object): @reraise_safe_exceptions def bookmarks(self, wire): - repo = self._factory.repo(wire) - return dict(repo._bookmarks) + cache_on, context_uid, repo_id = self._cache_on(wire) + @self.region.conditional_cache_on_arguments(condition=cache_on) + def _bookmarks(_context_uid, _repo_id): + repo = self._factory.repo(wire) + return dict(repo._bookmarks) + + return _bookmarks(context_uid, repo_id) @reraise_safe_exceptions def branches(self, wire, normal, closed): - repo = self._factory.repo(wire) - iter_branches = repo.branchmap().iterbranches() - bt = {} - for branch_name, _heads, tip, is_closed in iter_branches: - if normal and not is_closed: - bt[branch_name] = tip - if closed and is_closed: - bt[branch_name] = tip - - return bt + cache_on, context_uid, repo_id = self._cache_on(wire) + @self.region.conditional_cache_on_arguments(condition=cache_on) + def _branches(_context_uid, _repo_id, _normal, _closed): + repo = self._factory.repo(wire) + iter_branches = repo.branchmap().iterbranches() + bt = {} + for branch_name, _heads, tip, is_closed in iter_branches: + if normal and not is_closed: + bt[branch_name] = tip + if closed and is_closed: + bt[branch_name] = tip - @reraise_safe_exceptions - def bulk_request(self, wire, rev, pre_load): - result = {} - for attr in pre_load: - try: - method = self._bulk_methods[attr] - result[attr] = method(wire, rev) - except KeyError as e: - raise exceptions.VcsException(e)( - 'Unknown bulk attribute: "%s"' % attr) - return result + return bt - @reraise_safe_exceptions - def clone(self, wire, source, dest, update_after_clone=False, hooks=True): - baseui = self._factory._create_config(wire["config"], hooks=hooks) - clone(baseui, source, dest, noupdate=not update_after_clone) + return _branches(context_uid, repo_id, normal, closed) @reraise_safe_exceptions - def commitctx( - self, wire, message, parents, commit_time, commit_timezone, - user, files, extra, removed, updated): - - repo = self._factory.repo(wire) - baseui = self._factory._create_config(wire['config']) - publishing = baseui.configbool('phases', 
'publish') - if publishing: - new_commit = 'public' - else: - new_commit = 'draft' - - def _filectxfn(_repo, ctx, path): - """ - Marks given path as added/changed/removed in a given _repo. This is - for internal mercurial commit function. - """ - - # check if this path is removed - if path in removed: - # returning None is a way to mark node for removal - return None + def bulk_request(self, wire, commit_id, pre_load): + cache_on, context_uid, repo_id = self._cache_on(wire) + @self.region.conditional_cache_on_arguments(condition=cache_on) + def _bulk_request(_repo_id, _commit_id, _pre_load): + result = {} + for attr in pre_load: + try: + method = self._bulk_methods[attr] + result[attr] = method(wire, commit_id) + except KeyError as e: + raise exceptions.VcsException(e)( + 'Unknown bulk attribute: "%s"' % attr) + return result - # check if this path is added - for node in updated: - if node['path'] == path: - return memfilectx( - _repo, - changectx=ctx, - path=node['path'], - data=node['content'], - islink=False, - isexec=bool(node['mode'] & stat.S_IXUSR), - copied=False) - - raise exceptions.AbortException()( - "Given path haven't been marked as added, " - "changed or removed (%s)" % path) - - with repo.ui.configoverride({('phases', 'new-commit'): new_commit}): - - commit_ctx = memctx( - repo=repo, - parents=parents, - text=message, - files=files, - filectxfn=_filectxfn, - user=user, - date=(commit_time, commit_timezone), - extra=extra) - - n = repo.commitctx(commit_ctx) - new_id = hex(n) - - return new_id + return _bulk_request(repo_id, commit_id, sorted(pre_load)) @reraise_safe_exceptions - def ctx_branch(self, wire, revision): - repo = self._factory.repo(wire) - ctx = self._get_ctx(repo, revision) - return ctx.branch() + def ctx_branch(self, wire, commit_id): + cache_on, context_uid, repo_id = self._cache_on(wire) + @self.region.conditional_cache_on_arguments(condition=cache_on) + def _ctx_branch(_repo_id, _commit_id): + repo = self._factory.repo(wire) + ctx = self._get_ctx(repo, commit_id) + return ctx.branch() + return _ctx_branch(repo_id, commit_id) @reraise_safe_exceptions - def ctx_children(self, wire, revision): - repo = self._factory.repo(wire) - ctx = self._get_ctx(repo, revision) - return [child.rev() for child in ctx.children()] - - @reraise_safe_exceptions - def ctx_date(self, wire, revision): - repo = self._factory.repo(wire) - ctx = self._get_ctx(repo, revision) - return ctx.date() + def ctx_date(self, wire, commit_id): + cache_on, context_uid, repo_id = self._cache_on(wire) + @self.region.conditional_cache_on_arguments(condition=cache_on) + def _ctx_date(_repo_id, _commit_id): + repo = self._factory.repo(wire) + ctx = self._get_ctx(repo, commit_id) + return ctx.date() + return _ctx_date(repo_id, commit_id) @reraise_safe_exceptions def ctx_description(self, wire, revision): @@ -314,10 +276,15 @@ class HgRemote(object): return ctx.description() @reraise_safe_exceptions - def ctx_files(self, wire, revision): - repo = self._factory.repo(wire) - ctx = self._get_ctx(repo, revision) - return ctx.files() + def ctx_files(self, wire, commit_id): + cache_on, context_uid, repo_id = self._cache_on(wire) + @self.region.conditional_cache_on_arguments(condition=cache_on) + def _ctx_files(_repo_id, _commit_id): + repo = self._factory.repo(wire) + ctx = self._get_ctx(repo, commit_id) + return ctx.files() + + return _ctx_files(repo_id, commit_id) @reraise_safe_exceptions def ctx_list(self, path, revision): @@ -326,29 +293,59 @@ class HgRemote(object): return list(ctx) 
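The conversion pattern repeated across these methods is always the same: pull (cache_on, context_uid, repo_id) out of the wire, define an inner function whose arguments exist only to form the cache key, decorate it with the region's conditional_cache_on_arguments, and call it. A minimal, self-contained sketch of how such a conditional decorator can behave, assuming it simply bypasses dogpile's stock cache_on_arguments when the condition is falsy (the real decorator ships with vcsserver's rc_cache region class and is not part of this hunk; expensive_repo_lookup is a hypothetical stand-in):

from dogpile.cache import make_region

region = make_region().configure('dogpile.cache.memory')


def conditional_cache_on_arguments(cache_region, condition):
    # Cache through the region only when `condition` is truthy;
    # otherwise call the wrapped function directly on every invocation.
    def decorator(func):
        if condition:
            return cache_region.cache_on_arguments()(func)
        return func
    return decorator


def expensive_repo_lookup(wire):
    # hypothetical stand-in for the real Mercurial repository access
    return {'bookmark-1': 'deadbeef' * 5}


def bookmarks(wire):
    # mirrors the converted methods above: cache only when the caller
    # supplied a context and did not explicitly disable caching
    cache_on = bool(wire.get('context')) and wire.get('cache', True)

    @conditional_cache_on_arguments(region, condition=cache_on)
    def _bookmarks(_context_uid, _repo_id):
        # the arguments exist purely to build a stable cache key
        return expensive_repo_lookup(wire)

    return _bookmarks('{}'.format(wire.get('context')), wire.get('repo_id'))

With this shape, repeated calls for the same (context_uid, repo_id) hit the cache backend instead of the repository, while passing cache=False in the wire forces a fresh read every time.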
@reraise_safe_exceptions - def ctx_parents(self, wire, revision): - repo = self._factory.repo(wire) - ctx = self._get_ctx(repo, revision) - return [parent.rev() for parent in ctx.parents()] + def ctx_parents(self, wire, commit_id): + cache_on, context_uid, repo_id = self._cache_on(wire) + @self.region.conditional_cache_on_arguments(condition=cache_on) + def _ctx_parents(_repo_id, _commit_id): + repo = self._factory.repo(wire) + ctx = self._get_ctx(repo, commit_id) + return [parent.hex() for parent in ctx.parents() + if not (parent.hidden() or parent.obsolete())] + + return _ctx_parents(repo_id, commit_id) + + @reraise_safe_exceptions + def ctx_children(self, wire, commit_id): + cache_on, context_uid, repo_id = self._cache_on(wire) + @self.region.conditional_cache_on_arguments(condition=cache_on) + def _ctx_children(_repo_id, _commit_id): + repo = self._factory.repo(wire) + ctx = self._get_ctx(repo, commit_id) + return [child.hex() for child in ctx.children() + if not (child.hidden() or child.obsolete())] + + return _ctx_children(repo_id, commit_id) @reraise_safe_exceptions - def ctx_phase(self, wire, revision): - repo = self._factory.repo(wire) - ctx = self._get_ctx(repo, revision) - # public=0, draft=1, secret=3 - return ctx.phase() + def ctx_phase(self, wire, commit_id): + cache_on, context_uid, repo_id = self._cache_on(wire) + @self.region.conditional_cache_on_arguments(condition=cache_on) + def _ctx_phase(_context_uid, _repo_id, _commit_id): + repo = self._factory.repo(wire) + ctx = self._get_ctx(repo, commit_id) + # public=0, draft=1, secret=3 + return ctx.phase() + return _ctx_phase(context_uid, repo_id, commit_id) @reraise_safe_exceptions - def ctx_obsolete(self, wire, revision): - repo = self._factory.repo(wire) - ctx = self._get_ctx(repo, revision) - return ctx.obsolete() + def ctx_obsolete(self, wire, commit_id): + cache_on, context_uid, repo_id = self._cache_on(wire) + @self.region.conditional_cache_on_arguments(condition=cache_on) + def _ctx_obsolete(_context_uid, _repo_id, _commit_id): + repo = self._factory.repo(wire) + ctx = self._get_ctx(repo, commit_id) + return ctx.obsolete() + return _ctx_obsolete(context_uid, repo_id, commit_id) @reraise_safe_exceptions - def ctx_hidden(self, wire, revision): - repo = self._factory.repo(wire) - ctx = self._get_ctx(repo, revision) - return ctx.hidden() + def ctx_hidden(self, wire, commit_id): + cache_on, context_uid, repo_id = self._cache_on(wire) + @self.region.conditional_cache_on_arguments(condition=cache_on) + def _ctx_hidden(_context_uid, _repo_id, _commit_id): + repo = self._factory.repo(wire) + ctx = self._get_ctx(repo, commit_id) + return ctx.hidden() + return _ctx_hidden(context_uid, repo_id, commit_id) @reraise_safe_exceptions def ctx_substate(self, wire, revision): @@ -438,9 +435,7 @@ class HgRemote(object): return True @reraise_safe_exceptions - def diff( - self, wire, rev1, rev2, file_filter, opt_git, opt_ignorews, - context): + def diff(self, wire, commit_id_1, commit_id_2, file_filter, opt_git, opt_ignorews, context): repo = self._factory.repo(wire) if file_filter: @@ -451,48 +446,56 @@ class HgRemote(object): try: return "".join(patch.diff( - repo, node1=rev1, node2=rev2, match=match_filter, opts=opts)) + repo, node1=commit_id_1, node2=commit_id_2, match=match_filter, opts=opts)) except RepoLookupError as e: raise exceptions.LookupException(e)() @reraise_safe_exceptions def node_history(self, wire, revision, path, limit): - repo = self._factory.repo(wire) + cache_on, context_uid, repo_id = self._cache_on(wire) + 
@self.region.conditional_cache_on_arguments(condition=cache_on) + def _node_history(_context_uid, _repo_id, _revision, _path, _limit): + repo = self._factory.repo(wire) - ctx = self._get_ctx(repo, revision) - fctx = ctx.filectx(path) + ctx = self._get_ctx(repo, revision) + fctx = ctx.filectx(path) - def history_iter(): - limit_rev = fctx.rev() - for obj in reversed(list(fctx.filelog())): - obj = fctx.filectx(obj) - ctx = obj.changectx() - if ctx.hidden() or ctx.obsolete(): - continue + def history_iter(): + limit_rev = fctx.rev() + for obj in reversed(list(fctx.filelog())): + obj = fctx.filectx(obj) + ctx = obj.changectx() + if ctx.hidden() or ctx.obsolete(): + continue - if limit_rev >= obj.rev(): - yield obj + if limit_rev >= obj.rev(): + yield obj - history = [] - for cnt, obj in enumerate(history_iter()): - if limit and cnt >= limit: - break - history.append(hex(obj.node())) + history = [] + for cnt, obj in enumerate(history_iter()): + if limit and cnt >= limit: + break + history.append(hex(obj.node())) - return [x for x in history] + return [x for x in history] + return _node_history(context_uid, repo_id, revision, path, limit) @reraise_safe_exceptions def node_history_untill(self, wire, revision, path, limit): - repo = self._factory.repo(wire) - ctx = self._get_ctx(repo, revision) - fctx = ctx.filectx(path) + cache_on, context_uid, repo_id = self._cache_on(wire) + @self.region.conditional_cache_on_arguments(condition=cache_on) + def _node_history_until(_context_uid, _repo_id, _revision, _path, _limit): + repo = self._factory.repo(wire) + ctx = self._get_ctx(repo, revision) + fctx = ctx.filectx(path) - file_log = list(fctx.filelog()) - if limit: - # Limit to the last n items - file_log = file_log[-limit:] + file_log = list(fctx.filelog()) + if limit: + # Limit to the last n items + file_log = file_log[-limit:] - return [hex(fctx.filectx(cs).node()) for cs in reversed(file_log)] + return [hex(fctx.filectx(cs).node()) for cs in reversed(file_log)] + return _node_history_until(context_uid, repo_id, revision, path, limit) @reraise_safe_exceptions def fctx_annotate(self, wire, revision, path): @@ -509,32 +512,45 @@ class HgRemote(object): return result @reraise_safe_exceptions - def fctx_data(self, wire, revision, path): + def fctx_node_data(self, wire, revision, path): repo = self._factory.repo(wire) ctx = self._get_ctx(repo, revision) fctx = ctx.filectx(path) return fctx.data() @reraise_safe_exceptions - def fctx_flags(self, wire, revision, path): - repo = self._factory.repo(wire) - ctx = self._get_ctx(repo, revision) - fctx = ctx.filectx(path) - return fctx.flags() + def fctx_flags(self, wire, commit_id, path): + cache_on, context_uid, repo_id = self._cache_on(wire) + @self.region.conditional_cache_on_arguments(condition=cache_on) + def _fctx_flags(_repo_id, _commit_id, _path): + repo = self._factory.repo(wire) + ctx = self._get_ctx(repo, commit_id) + fctx = ctx.filectx(path) + return fctx.flags() + + return _fctx_flags(repo_id, commit_id, path) @reraise_safe_exceptions - def fctx_size(self, wire, revision, path): - repo = self._factory.repo(wire) - ctx = self._get_ctx(repo, revision) - fctx = ctx.filectx(path) - return fctx.size() + def fctx_size(self, wire, commit_id, path): + cache_on, context_uid, repo_id = self._cache_on(wire) + @self.region.conditional_cache_on_arguments(condition=cache_on) + def _fctx_size(_repo_id, _revision, _path): + repo = self._factory.repo(wire) + ctx = self._get_ctx(repo, commit_id) + fctx = ctx.filectx(path) + return fctx.size() + return _fctx_size(repo_id, commit_id, path) 
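All of these wrappers rely on self._cache_on(wire) and self.region, provided by the new RemoteBase class that this patch imports from vcsserver.vcs_base in both git.py and hg.py; the module itself is not shown in this excerpt. Judging from how the returned triple is consumed above, a plausible sketch looks roughly like the following — the EMPTY_COMMIT constant (referenced by diff_2/diff in git.py) and the factory attribute name are assumptions, not confirmed by this patch:

class RemoteBase(object):
    # git's null sha; compared against commit ids in diff_2/diff above
    EMPTY_COMMIT = '0' * 40

    @property
    def region(self):
        # assumed: the factory carries the dogpile region configured at startup
        return self._factory._cache_region

    def _cache_on(self, wire):
        # cache only when the caller established a repo context and did not
        # explicitly opt out; context_uid/repo_id become the cache key parts
        context = wire.get('context', '')
        context_uid = '{}'.format(context)
        repo_id = wire.get('repo_id', '')
        cache = wire.get('cache', True)
        cache_on = context and cache
        return cache_on, context_uid, repo_id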
@reraise_safe_exceptions def get_all_commit_ids(self, wire, name): - repo = self._factory.repo(wire) - repo = repo.filtered(name) - revs = map(lambda x: hex(x[7]), repo.changelog.index) - return revs + cache_on, context_uid, repo_id = self._cache_on(wire) + @self.region.conditional_cache_on_arguments(condition=cache_on) + def _get_all_commit_ids(_context_uid, _repo_id, _name): + repo = self._factory.repo(wire) + repo = repo.filtered(name) + revs = map(lambda x: hex(x[7]), repo.changelog.index) + return revs + return _get_all_commit_ids(context_uid, repo_id, name) @reraise_safe_exceptions def get_config_value(self, wire, section, name, untrusted=False): @@ -542,18 +558,26 @@ class HgRemote(object): return repo.ui.config(section, name, untrusted=untrusted) @reraise_safe_exceptions - def get_config_bool(self, wire, section, name, untrusted=False): - repo = self._factory.repo(wire) - return repo.ui.configbool(section, name, untrusted=untrusted) + def is_large_file(self, wire, commit_id, path): + cache_on, context_uid, repo_id = self._cache_on(wire) + @self.region.conditional_cache_on_arguments(condition=cache_on) + def _is_large_file(_context_uid, _repo_id, _commit_id, _path): + return largefiles.lfutil.isstandin(path) + + return _is_large_file(context_uid, repo_id, commit_id, path) @reraise_safe_exceptions - def get_config_list(self, wire, section, name, untrusted=False): - repo = self._factory.repo(wire) - return repo.ui.configlist(section, name, untrusted=untrusted) + def is_binary(self, wire, revision, path): + cache_on, context_uid, repo_id = self._cache_on(wire) - @reraise_safe_exceptions - def is_large_file(self, wire, path): - return largefiles.lfutil.isstandin(path) + @self.region.conditional_cache_on_arguments(condition=cache_on) + def _is_binary(_repo_id, _sha, _path): + repo = self._factory.repo(wire) + ctx = self._get_ctx(repo, revision) + fctx = ctx.filectx(path) + return fctx.isbinary() + + return _is_binary(repo_id, revision, path) @reraise_safe_exceptions def in_largefiles_store(self, wire, sha): @@ -582,47 +606,36 @@ class HgRemote(object): @reraise_safe_exceptions def lookup(self, wire, revision, both): - - repo = self._factory.repo(wire) - - if isinstance(revision, int): - # NOTE(marcink): - # since Mercurial doesn't support negative indexes properly - # we need to shift accordingly by one to get proper index, e.g - # repo[-1] => repo[-2] - # repo[0] => repo[-1] - if revision <= 0: - revision = revision + -1 - try: - ctx = self._get_ctx(repo, revision) - except (TypeError, RepoLookupError) as e: - e._org_exc_tb = traceback.format_exc() - raise exceptions.LookupException(e)(revision) - except LookupError as e: - e._org_exc_tb = traceback.format_exc() - raise exceptions.LookupException(e)(e.name) + cache_on, context_uid, repo_id = self._cache_on(wire) + @self.region.conditional_cache_on_arguments(condition=cache_on) + def _lookup(_context_uid, _repo_id, _revision, _both): - if not both: - return ctx.hex() - - ctx = repo[ctx.hex()] - return ctx.hex(), ctx.rev() + repo = self._factory.repo(wire) + rev = _revision + if isinstance(rev, int): + # NOTE(marcink): + # since Mercurial doesn't support negative indexes properly + # we need to shift accordingly by one to get proper index, e.g + # repo[-1] => repo[-2] + # repo[0] => repo[-1] + if rev <= 0: + rev = rev + -1 + try: + ctx = self._get_ctx(repo, rev) + except (TypeError, RepoLookupError) as e: + e._org_exc_tb = traceback.format_exc() + raise exceptions.LookupException(e)(rev) + except LookupError as e: + e._org_exc_tb = 
traceback.format_exc() + raise exceptions.LookupException(e)(e.name) - @reraise_safe_exceptions - def pull(self, wire, url, commit_ids=None): - repo = self._factory.repo(wire) - # Disable any prompts for this repo - repo.ui.setconfig('ui', 'interactive', 'off', '-y') + if not both: + return ctx.hex() - remote = peer(repo, {}, url) - # Disable any prompts for this remote - remote.ui.setconfig('ui', 'interactive', 'off', '-y') + ctx = repo[ctx.hex()] + return ctx.hex(), ctx.rev() - if commit_ids: - commit_ids = [bin(commit_id) for commit_id in commit_ids] - - return exchange.pull( - repo, remote, heads=commit_ids, force=None).cgresult + return _lookup(context_uid, repo_id, revision, both) @reraise_safe_exceptions def sync_push(self, wire, url): @@ -649,10 +662,16 @@ class HgRemote(object): return ctx.rev() @reraise_safe_exceptions - def rev_range(self, wire, filter): - repo = self._factory.repo(wire) - revisions = [rev for rev in revrange(repo, filter)] - return revisions + def rev_range(self, wire, commit_filter): + cache_on, context_uid, repo_id = self._cache_on(wire) + + @self.region.conditional_cache_on_arguments(condition=cache_on) + def _rev_range(_context_uid, _repo_id, _filter): + repo = self._factory.repo(wire) + revisions = [rev for rev in revrange(repo, commit_filter)] + return revisions + + return _rev_range(context_uid, repo_id, sorted(commit_filter)) @reraise_safe_exceptions def rev_range_hash(self, wire, node): @@ -684,13 +703,6 @@ class HgRemote(object): return list(repo.revs(rev_spec, *args)) @reraise_safe_exceptions - def strip(self, wire, revision, update, backup): - repo = self._factory.repo(wire) - ctx = self._get_ctx(repo, revision) - hgext_strip( - repo.baseui, repo, ctx.node(), update=update, backup=backup) - - @reraise_safe_exceptions def verify(self, wire,): repo = self._factory.repo(wire) baseui = self._factory._create_config(wire['config']) @@ -706,24 +718,31 @@ class HgRemote(object): return output.getvalue() @reraise_safe_exceptions - def tag(self, wire, name, revision, message, local, user, - tag_time, tag_timezone): + def hg_update_cache(self, wire,): repo = self._factory.repo(wire) - ctx = self._get_ctx(repo, revision) - node = ctx.node() + baseui = self._factory._create_config(wire['config']) + baseui.setconfig('ui', 'quiet', 'false') + output = io.BytesIO() - date = (tag_time, tag_timezone) - try: - hg_tag.tag(repo, name, node, message, local, user, date) - except Abort as e: - log.exception("Tag operation aborted") - # Exception can contain unicode which we convert - raise exceptions.AbortException(e)(repr(e)) + def write(data, **unused_kwargs): + output.write(data) + baseui.write = write + + repo.ui = baseui + with repo.wlock(), repo.lock(): + repo.updatecaches(full=True) + + return output.getvalue() @reraise_safe_exceptions def tags(self, wire): - repo = self._factory.repo(wire) - return repo.tags() + cache_on, context_uid, repo_id = self._cache_on(wire) + @self.region.conditional_cache_on_arguments(condition=cache_on) + def _tags(_context_uid, _repo_id): + repo = self._factory.repo(wire) + return repo.tags() + + return _tags(context_uid, repo_id) @reraise_safe_exceptions def update(self, wire, node=None, clean=False): @@ -744,24 +763,6 @@ class HgRemote(object): return output.getvalue() @reraise_safe_exceptions - def pull_cmd(self, wire, source, bookmark=None, branch=None, revision=None, - hooks=True): - repo = self._factory.repo(wire) - baseui = self._factory._create_config(wire['config'], hooks=hooks) - - # Mercurial internally has a lot of logic that 
checks ONLY if - # option is defined, we just pass those if they are defined then - opts = {} - if bookmark: - opts['bookmark'] = bookmark - if branch: - opts['branch'] = branch - if revision: - opts['rev'] = revision - - commands.pull(baseui, repo, source, **opts) - - @reraise_safe_exceptions def heads(self, wire, branch=None): repo = self._factory.repo(wire) baseui = self._factory._create_config(wire['config']) @@ -788,14 +789,130 @@ class HgRemote(object): return hex(a) @reraise_safe_exceptions - def push(self, wire, revisions, dest_path, hooks=True, - push_branches=False): + def clone(self, wire, source, dest, update_after_clone=False, hooks=True): + baseui = self._factory._create_config(wire["config"], hooks=hooks) + clone(baseui, source, dest, noupdate=not update_after_clone) + + @reraise_safe_exceptions + def commitctx(self, wire, message, parents, commit_time, commit_timezone, user, files, extra, removed, updated): + + repo = self._factory.repo(wire) + baseui = self._factory._create_config(wire['config']) + publishing = baseui.configbool('phases', 'publish') + if publishing: + new_commit = 'public' + else: + new_commit = 'draft' + + def _filectxfn(_repo, ctx, path): + """ + Marks given path as added/changed/removed in a given _repo. This is + for the internal mercurial commit function. + """ + + # check if this path is removed + if path in removed: + # returning None is a way to mark node for removal + return None + + # check if this path is added + for node in updated: + if node['path'] == path: + return memfilectx( + _repo, + changectx=ctx, + path=node['path'], + data=node['content'], + islink=False, + isexec=bool(node['mode'] & stat.S_IXUSR), + copysource=False) + + raise exceptions.AbortException()( + "Given path hasn't been marked as added, " + "changed or removed (%s)" % path) + + with repo.ui.configoverride({('phases', 'new-commit'): new_commit}): + + commit_ctx = memctx( + repo=repo, + parents=parents, + text=message, + files=files, + filectxfn=_filectxfn, + user=user, + date=(commit_time, commit_timezone), + extra=extra) + + n = repo.commitctx(commit_ctx) + new_id = hex(n) + + return new_id + + @reraise_safe_exceptions + def pull(self, wire, url, commit_ids=None): + repo = self._factory.repo(wire) + # Disable any prompts for this repo + repo.ui.setconfig('ui', 'interactive', 'off', '-y') + + remote = peer(repo, {}, url) + # Disable any prompts for this remote + remote.ui.setconfig('ui', 'interactive', 'off', '-y') + + if commit_ids: + commit_ids = [bin(commit_id) for commit_id in commit_ids] + + return exchange.pull( + repo, remote, heads=commit_ids, force=None).cgresult + + @reraise_safe_exceptions + def pull_cmd(self, wire, source, bookmark=None, branch=None, revision=None, hooks=True): + repo = self._factory.repo(wire) + baseui = self._factory._create_config(wire['config'], hooks=hooks) + + # Mercurial internally has a lot of logic that checks ONLY if + # an option is defined, so we only pass the options that are defined + opts = {} + if bookmark: + opts['bookmark'] = bookmark + if branch: + opts['branch'] = branch + if revision: + opts['rev'] = revision + + commands.pull(baseui, repo, source, **opts) + + @reraise_safe_exceptions + def push(self, wire, revisions, dest_path, hooks=True, push_branches=False): repo = self._factory.repo(wire) baseui = self._factory._create_config(wire['config'], hooks=hooks) commands.push(baseui, repo, dest=dest_path, rev=revisions, new_branch=push_branches) @reraise_safe_exceptions + def strip(self, wire, revision, update, backup): + repo = 
     @reraise_safe_exceptions
+    def strip(self, wire, revision, update, backup):
+        repo = self._factory.repo(wire)
+        ctx = self._get_ctx(repo, revision)
+        hgext_strip(
+            repo.baseui, repo, ctx.node(), update=update, backup=backup)
+
+    @reraise_safe_exceptions
+    def get_unresolved_files(self, wire):
+        repo = self._factory.repo(wire)
+
+        log.debug('Calculating unresolved files for repo: %s', repo)
+        output = io.BytesIO()
+
+        def write(data, **unused_kwargs):
+            output.write(data)
+
+        baseui = self._factory._create_config(wire['config'])
+        baseui.write = write
+
+        commands.resolve(baseui, repo, list=True)
+        unresolved = output.getvalue().splitlines(0)
+        return unresolved
+
+    @reraise_safe_exceptions
     def merge(self, wire, revision):
         repo = self._factory.repo(wire)
         baseui = self._factory._create_config(wire['config'])
@@ -828,14 +945,31 @@ class HgRemote(object):
         repo.ui.setconfig('ui', 'username', username)
         commands.commit(baseui, repo, message=message, close_branch=close_branch)

-    @reraise_safe_exceptions
     def rebase(self, wire, source=None, dest=None, abort=False):
         repo = self._factory.repo(wire)
         baseui = self._factory._create_config(wire['config'])
         repo.ui.setconfig('ui', 'merge', 'internal:dump')
-        rebase.rebase(
-            baseui, repo, base=source, dest=dest, abort=abort, keep=not abort)
+        # When sub repositories are used, Mercurial prompts the user in
+        # case of merge conflicts or different sub repository sources. By
+        # setting the interactive flag to `False` Mercurial doesn't prompt the
+        # user but instead uses a default value.
+        repo.ui.setconfig('ui', 'interactive', False)
+        rebase.rebase(baseui, repo, base=source, dest=dest, abort=abort, keep=not abort)
+
+    @reraise_safe_exceptions
+    def tag(self, wire, name, revision, message, local, user, tag_time, tag_timezone):
+        repo = self._factory.repo(wire)
+        ctx = self._get_ctx(repo, revision)
+        node = ctx.node()
+
+        date = (tag_time, tag_timezone)
+        try:
+            hg_tag.tag(repo, name, node, message, local, user, date)
+        except Abort as e:
+            log.exception("Tag operation aborted")
+            # Exception can contain unicode which we convert
+            raise exceptions.AbortException(e)(repr(e))

     @reraise_safe_exceptions
     def bookmark(self, wire, bookmark, revision=None):
diff --git a/vcsserver/hooks.py b/vcsserver/hooks.py
--- a/vcsserver/hooks.py
+++ b/vcsserver/hooks.py
@@ -33,7 +33,6 @@ import mercurial.node
 import simplejson as json

 from vcsserver import exceptions, subprocessio, settings
-from vcsserver.hgcompat import get_ctx

 log = logging.getLogger(__name__)

@@ -81,6 +80,12 @@ class HooksDummyClient(object):
         return getattr(hooks, hook_name)(extras)


+class HooksShadowRepoClient(object):
+
+    def __call__(self, hook_name, extras):
+        return {'output': '', 'status': 0}
+
+
 class RemoteMessageWriter(object):
     """Writer base class."""

     def write(self, message):
@@ -141,9 +146,12 @@ def _handle_exception(result):


 def _get_hooks_client(extras):
-    if 'hooks_uri' in extras:
-        protocol = extras.get('hooks_protocol')
+    hooks_uri = extras.get('hooks_uri')
+    is_shadow_repo = extras.get('is_shadow_repo')
+    if hooks_uri:
         return HooksHttpClient(extras['hooks_uri'])
+    elif is_shadow_repo:
+        return HooksShadowRepoClient()
     else:
         return HooksDummyClient(extras['hooks_module'])

@@ -175,6 +183,7 @@ def _extras_from_ui(ui):


 def _rev_range_hash(repo, node, check_heads=False):
+    from vcsserver.hgcompat import get_ctx

     commits = []
     revs = []
@@ -194,6 +203,7 @@ def _rev_range_hash(repo, node, check_he


 def _check_heads(repo, start, end, commits):
+    from vcsserver.hgcompat import get_ctx

     changelog = repo.changelog
     parents = set()
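`_get_hooks_client` now dispatches three ways instead of two: an HTTP callback into RhodeCode, a no-op client for shadow repositories (used during pull-request merges), and the in-process module client. A sketch of the `extras` shapes that select each one (the values are made up for illustration):

    # HTTP callback into the RhodeCode instance
    _get_hooks_client({'hooks_uri': '127.0.0.1:10020'})     # -> HooksHttpClient

    # shadow repository: every hook reports success without doing work
    _get_hooks_client({'is_shadow_repo': True})             # -> HooksShadowRepoClient

    # direct, in-process hooks module
    _get_hooks_client({'hooks_module': 'my_hooks_module'})  # -> HooksDummyClient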
@@ -384,6 +394,7 @@ def post_push_ssh(ui, repo, node, **kwar

 def key_push(ui, repo, **kwargs):
+    from vcsserver.hgcompat import get_ctx
     if kwargs['new'] != '0' and kwargs['namespace'] == 'bookmarks':
         # store new bookmarks in our UI object propagated later to post_push
         ui._rc_pushkey_branches = get_ctx(repo, kwargs['key']).bookmarks()
diff --git a/vcsserver/http_main.py b/vcsserver/http_main.py
--- a/vcsserver/http_main.py
+++ b/vcsserver/http_main.py
@@ -25,6 +25,7 @@ import wsgiref.util
 import traceback
 import tempfile
 from itertools import chain
+from cStringIO import StringIO

 import simplejson as json
 import msgpack
@@ -32,7 +33,9 @@ from pyramid.config import Configurator
 from pyramid.settings import asbool, aslist
 from pyramid.wsgi import wsgiapp
 from pyramid.compat import configparser
+from pyramid.response import Response
+
+from vcsserver.utils import safe_int

 log = logging.getLogger(__name__)

@@ -114,8 +117,8 @@ def _string_setting(settings, name, defa

 class VCS(object):
-    def __init__(self, locale=None, cache_config=None):
-        self.locale = locale
+    def __init__(self, locale_conf=None, cache_config=None):
+        self.locale = locale_conf
         self.cache_config = cache_config
         self._configure_locale()

@@ -232,8 +235,8 @@ class HTTPApplication(object):
         self.global_config = global_config
         self.config.include('vcsserver.lib.rc_cache')

-        locale = settings.get('locale', '') or 'en_US.UTF-8'
-        vcs = VCS(locale=locale, cache_config=settings)
+        settings_locale = settings.get('locale', '') or 'en_US.UTF-8'
+        vcs = VCS(locale_conf=settings_locale, cache_config=settings)
         self._remotes = {
             'hg': vcs._hg_remote,
             'git': vcs._git_remote,
@@ -290,15 +293,15 @@ class HTTPApplication(object):
         _string_setting(
             settings,
             'rc_cache.repo_object.backend',
-            'dogpile.cache.rc.memory_lru')
+            'dogpile.cache.rc.file_namespace', lower=False)
         _int_setting(
             settings,
             'rc_cache.repo_object.expiration_time',
-            300)
-        _int_setting(
+            30 * 24 * 60 * 60)
+        _string_setting(
             settings,
-            'rc_cache.repo_object.max_size',
-            1024)
+            'rc_cache.repo_object.arguments.filename',
+            os.path.join(default_cache_dir, 'vcsserver_cache_1'), lower=False)

     def _configure(self):
         self.config.add_renderer(name='msgpack', factory=self._msgpack_renderer_factory)
@@ -307,7 +310,14 @@
         self.config.add_route('status', '/status')
         self.config.add_route('hg_proxy', '/proxy/hg')
         self.config.add_route('git_proxy', '/proxy/git')
+
+        # rpc methods
         self.config.add_route('vcs', '/{backend}')
+
+        # streaming rpc remote methods
+        self.config.add_route('vcs_stream', '/{backend}/stream')
+
+        # vcs operations clone/push as streaming
         self.config.add_route('stream_git', '/stream/git/*repo_name')
         self.config.add_route('stream_hg', '/stream/hg/*repo_name')

@@ -318,6 +328,8 @@
         self.config.add_view(self.git_proxy(), route_name='git_proxy')
         self.config.add_view(self.vcs_view, route_name='vcs', renderer='msgpack',
                              vcs_view=self._remotes)
+        self.config.add_view(self.vcs_stream_view, route_name='vcs_stream',
+                             vcs_view=self._remotes)

         self.config.add_view(self.hg_stream(), route_name='stream_hg')
         self.config.add_view(self.git_stream(), route_name='stream_git')
@@ -329,17 +341,20 @@
         self.config.add_view(self.handle_vcs_exception, context=Exception)

         self.config.add_tween(
-            'vcsserver.tweens.RequestWrapperTween',
+            'vcsserver.tweens.request_wrapper.RequestWrapperTween',
         )
+        self.config.add_request_method(
+            'vcsserver.lib.request_counter.get_request_counter',
+            'request_count')

     def wsgi_app(self):
         return self.config.make_wsgi_app()

-    def vcs_view(self, request):
+    def _vcs_view_params(self, request):
         remote = self._remotes[request.matchdict['backend']]
         payload = msgpack.unpackb(request.body, use_list=True)
         method = payload.get('method')
-        params = payload.get('params')
+        params = payload['params']
         wire = params.get('wire')
         args = params.get('args')
         kwargs = params.get('kwargs')
@@ -351,9 +366,28 @@
             except KeyError:
                 pass
         args.insert(0, wire)
+        repo_state_uid = wire.get('repo_state_uid') if wire else None

-        log.debug('method called:%s with kwargs:%s context_uid: %s',
-                  method, kwargs, context_uid)
+        # NOTE(marcink): trading complexity for slight performance
+        if log.isEnabledFor(logging.DEBUG):
+            no_args_methods = [
+                'archive_repo'
+            ]
+            if method in no_args_methods:
+                call_args = ''
+            else:
+                call_args = args[1:]
+
+            log.debug('method requested:%s with args:%s kwargs:%s context_uid: %s, repo_state_uid:%s',
+                      method, call_args, kwargs, context_uid, repo_state_uid)
+
+        return payload, remote, method, args, kwargs
+
+    def vcs_view(self, request):
+
+        payload, remote, method, args, kwargs = self._vcs_view_params(request)
+        payload_id = payload.get('id')
+
         try:
             resp = getattr(remote, method)(*args, **kwargs)
         except Exception as e:
@@ -380,7 +414,7 @@
                 type_ = None

             resp = {
-                'id': payload.get('id'),
+                'id': payload_id,
                 'error': {
                     'message': e.message,
                     'traceback': tb_info,
@@ -395,12 +429,36 @@
                 pass
         else:
             resp = {
-                'id': payload.get('id'),
+                'id': payload_id,
                 'result': resp
             }
         return resp

+    def vcs_stream_view(self, request):
+        payload, remote, method, args, kwargs = self._vcs_view_params(request)
+        # this method carries a `stream:` marker; strip it off here
+        method = method.split('stream:')[-1]
+        chunk_size = safe_int(payload.get('chunk_size')) or 4096
+
+        try:
+            resp = getattr(remote, method)(*args, **kwargs)
+        except Exception:
+            raise
+
+        def get_chunked_data(method_resp):
+            stream = StringIO(method_resp)
+            while 1:
+                chunk = stream.read(chunk_size)
+                if not chunk:
+                    break
+                yield chunk
+
+        response = Response(app_iter=get_chunked_data(resp))
+        response.content_type = 'application/octet-stream'
+
+        return response
+
     def status_view(self, request):
         import vcsserver
         return {'status': 'OK', 'vcsserver_version': vcsserver.__version__,
@@ -410,23 +468,31 @@
         import vcsserver
         payload = msgpack.unpackb(request.body, use_list=True)

+        server_config, app_config = {}, {}
         try:
             path = self.global_config['__file__']
-            config = configparser.ConfigParser()
+            config = configparser.RawConfigParser()
+
             config.read(path)
-            parsed_ini = config
-            if parsed_ini.has_section('server:main'):
-                parsed_ini = dict(parsed_ini.items('server:main'))
+
+            if config.has_section('server:main'):
+                server_config = dict(config.items('server:main'))
+            if config.has_section('app:main'):
+                app_config = dict(config.items('app:main'))
+
         except Exception:
             log.exception('Failed to read .ini file for display')
-            parsed_ini = {}
+
+        environ = os.environ.items()

         resp = {
             'id': payload.get('id'),
             'result': dict(
                 version=vcsserver.__version__,
-                config=parsed_ini,
+                config=server_config,
+                app_config=app_config,
+                environ=environ,
                 payload=payload,
             )
         }
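`vcs_stream_view` turns a fully materialized method result into a chunked `application/octet-stream` response. The pattern, reduced to its core (a sketch; `method_resp` stands for the bytes the remote method returned):

    from cStringIO import StringIO

    def get_chunked_data(method_resp, chunk_size=4096):
        # Hand the payload out in fixed-size chunks so the WSGI
        # server can stream it instead of buffering it whole.
        stream = StringIO(method_resp)
        while True:
            chunk = stream.read(chunk_size)
            if not chunk:
                break
            yield chunk

    # every iteration yields at most 4096 bytes
    chunks = list(get_chunked_data('x' * 10000))
    assert len(chunks) == 3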
@@ -434,14 +500,13 @@
     def _msgpack_renderer_factory(self, info):
         def _render(value, system):
-            value = msgpack.packb(value)
             request = system.get('request')
             if request is not None:
                 response = request.response
                 ct = response.content_type
                 if ct == response.default_content_type:
                     response.content_type = 'application/x-msgpack'
-            return value
+            return msgpack.packb(value)
         return _render

     def set_env_from_config(self, environ, config):
diff --git a/vcsserver/lib/rc_cache/__init__.py b/vcsserver/lib/rc_cache/__init__.py
--- a/vcsserver/lib/rc_cache/__init__.py
+++ b/vcsserver/lib/rc_cache/__init__.py
@@ -22,10 +22,23 @@ register_backend(
     "dogpile.cache.rc.memory_lru", "vcsserver.lib.rc_cache.backends",
     "LRUMemoryBackend")

+register_backend(
+    "dogpile.cache.rc.file_namespace", "vcsserver.lib.rc_cache.backends",
+    "FileNamespaceBackend")
+
+register_backend(
+    "dogpile.cache.rc.redis", "vcsserver.lib.rc_cache.backends",
+    "RedisPickleBackend")
+
+register_backend(
+    "dogpile.cache.rc.redis_msgpack", "vcsserver.lib.rc_cache.backends",
+    "RedisMsgPackBackend")
+
+
 log = logging.getLogger(__name__)

 from . import region_meta
-from .util import key_generator, get_default_cache_settings, make_region
+from .utils import (get_default_cache_settings, backend_key_generator, make_region)


 def configure_dogpile_cache(settings):
@@ -46,13 +59,12 @@ def configure_dogpile_cache(settings):
     for region_name in avail_regions:
         new_region = make_region(
             name=region_name,
-            function_key_generator=key_generator
+            function_key_generator=None
         )

         new_region.configure_from_config(settings, 'rc_cache.{}.'.format(region_name))
-
-        log.debug('dogpile: registering a new region %s[%s]',
-                  region_name, new_region.__dict__)
+        new_region.function_key_generator = backend_key_generator(new_region.actual_backend)
+        log.debug('dogpile: registering a new region %s[%s]', region_name, new_region.__dict__)
         region_meta.dogpile_cache_regions[region_name] = new_region
diff --git a/vcsserver/lib/rc_cache/backends.py b/vcsserver/lib/rc_cache/backends.py
--- a/vcsserver/lib/rc_cache/backends.py
+++ b/vcsserver/lib/rc_cache/backends.py
@@ -15,9 +15,20 @@
 # along with this program; if not, write to the Free Software Foundation,
 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

+import time
+import errno
 import logging

+import msgpack
+import redis
+
+from dogpile.cache.api import CachedValue
 from dogpile.cache.backends import memory as memory_backend
+from dogpile.cache.backends import file as file_backend
+from dogpile.cache.backends import redis as redis_backend
+from dogpile.cache.backends.file import NO_VALUE, compat, FileLock
+from dogpile.cache.util import memoized_property
+
 from vcsserver.lib.memory_lru_dict import LRUDict, LRUDictDebug

@@ -27,6 +38,7 @@ log = logging.getLogger(__name__)

 class LRUMemoryBackend(memory_backend.MemoryBackend):
+    key_prefix = 'lru_mem_backend'
     pickle_values = False

     def __init__(self, arguments):
@@ -49,3 +61,193 @@ class LRUMemoryBackend(memory_backend.Me
     def delete_multi(self, keys):
         for key in keys:
             self.delete(key)
+
+
+class PickleSerializer(object):
+
+    def _dumps(self, value, safe=False):
+        try:
+            return compat.pickle.dumps(value)
+        except Exception:
+            if safe:
+                return NO_VALUE
+            else:
+                raise
+
+    def _loads(self, value, safe=True):
+        try:
+            return compat.pickle.loads(value)
+        except Exception:
+            if safe:
+                return NO_VALUE
+            else:
+                raise
+
+
+class MsgPackSerializer(object):
+
+    def _dumps(self, value, safe=False):
+        try:
+            return msgpack.packb(value)
+        except Exception:
+            if safe:
+                return NO_VALUE
+            else:
+                raise
+
+    def _loads(self, value, safe=True):
+        """
+        pickle maintained the `CachedValue` wrapper of the tuple;
+        msgpack does not, so it must be added back in.
+        """
+        try:
+            value = msgpack.unpackb(value, use_list=False)
+            return CachedValue(*value)
+        except Exception:
+            if safe:
+                return NO_VALUE
+            else:
+                raise
+
+
+import fcntl
+flock_org = fcntl.flock
+
+
+class CustomLockFactory(FileLock):
+
+    pass
+
+
+class FileNamespaceBackend(PickleSerializer, file_backend.DBMBackend):
+    key_prefix = 'file_backend'
+
+    def __init__(self, arguments):
+        arguments['lock_factory'] = CustomLockFactory
+        super(FileNamespaceBackend, self).__init__(arguments)
+
+    def __repr__(self):
+        return '{} `{}`'.format(self.__class__, self.filename)
+
+    def list_keys(self, prefix=''):
+        prefix = '{}:{}'.format(self.key_prefix, prefix)
+
+        def cond(v):
+            if not prefix:
+                return True
+
+            if v.startswith(prefix):
+                return True
+            return False
+
+        with self._dbm_file(True) as dbm:
+
+            return filter(cond, dbm.keys())
+
+    def get_store(self):
+        return self.filename
+
+    def get(self, key):
+        with self._dbm_file(False) as dbm:
+            if hasattr(dbm, 'get'):
+                value = dbm.get(key, NO_VALUE)
+            else:
+                # gdbm objects lack a .get method
+                try:
+                    value = dbm[key]
+                except KeyError:
+                    value = NO_VALUE
+            if value is not NO_VALUE:
+                value = self._loads(value)
+            return value
+
+    def set(self, key, value):
+        with self._dbm_file(True) as dbm:
+            dbm[key] = self._dumps(value)
+
+    def set_multi(self, mapping):
+        with self._dbm_file(True) as dbm:
+            for key, value in mapping.items():
+                dbm[key] = self._dumps(value)
+
+
+class BaseRedisBackend(redis_backend.RedisBackend):
+
+    def _create_client(self):
+        args = {}
+
+        if self.url is not None:
+            args.update(url=self.url)
+
+        else:
+            args.update(
+                host=self.host, password=self.password,
+                port=self.port, db=self.db
+            )
+
+        connection_pool = redis.ConnectionPool(**args)
+
+        return redis.StrictRedis(connection_pool=connection_pool)
+
+    def list_keys(self, prefix=''):
+        prefix = '{}:{}*'.format(self.key_prefix, prefix)
+        return self.client.keys(prefix)
+
+    def get_store(self):
+        return self.client.connection_pool
+
+    def get(self, key):
+        value = self.client.get(key)
+        if value is None:
+            return NO_VALUE
+        return self._loads(value)
+
+    def get_multi(self, keys):
+        if not keys:
+            return []
+        values = self.client.mget(keys)
+        loads = self._loads
+        return [
+            loads(v) if v is not None else NO_VALUE
+            for v in values]
+
+    def set(self, key, value):
+        if self.redis_expiration_time:
+            self.client.setex(key, self.redis_expiration_time,
+                              self._dumps(value))
+        else:
+            self.client.set(key, self._dumps(value))
+
+    def set_multi(self, mapping):
+        dumps = self._dumps
+        mapping = dict(
+            (k, dumps(v))
+            for k, v in mapping.items()
+        )
+
+        if not self.redis_expiration_time:
+            self.client.mset(mapping)
+        else:
+            pipe = self.client.pipeline()
+            for key, value in mapping.items():
+                pipe.setex(key, self.redis_expiration_time, value)
+            pipe.execute()
+
+    def get_mutex(self, key):
+        u = redis_backend.u
+        if self.distributed_lock:
+            lock_key = u('_lock_{0}').format(key)
+            log.debug('Trying to acquire Redis lock for key %s', lock_key)
+            return self.client.lock(lock_key, self.lock_timeout, self.lock_sleep)
+        else:
+            return None
+
+
+class RedisPickleBackend(PickleSerializer, BaseRedisBackend):
+    key_prefix = 'redis_pickle_backend'
+    pass
+
+
+class RedisMsgPackBackend(MsgPackSerializer, BaseRedisBackend):
+    key_prefix = 'redis_msgpack_backend'
+    pass
diff --git a/vcsserver/lib/rc_cache/util.py b/vcsserver/lib/rc_cache/utils.py
rename from vcsserver/lib/rc_cache/util.py
rename to vcsserver/lib/rc_cache/utils.py
--- a/vcsserver/lib/rc_cache/util.py
+++ b/vcsserver/lib/rc_cache/utils.py
@@ -18,10 +18,13 @@ import os
 import logging
 import functools

+from decorator import decorate
+
+from dogpile.cache import CacheRegion
+from dogpile.cache.util import compat

 from vcsserver.utils import safe_str, sha1

-from dogpile.cache import CacheRegion
-from dogpile.cache.util import compat
+

 log = logging.getLogger(__name__)

@@ -45,28 +48,35 @@ class RhodeCodeCacheRegion(CacheRegion):

         if function_key_generator is None:
             function_key_generator = self.function_key_generator

-        def decorator(fn):
+        def get_or_create_for_user_func(key_generator, user_func, *arg, **kw):
+
+            if not condition:
+                log.debug('Calling un-cached func:%s', user_func.func_name)
+                return user_func(*arg, **kw)
+
+            key = key_generator(*arg, **kw)
+
+            timeout = expiration_time() if expiration_time_is_callable \
+                else expiration_time
+
+            log.debug('Calling cached fn:%s', user_func.func_name)
+            return self.get_or_create(key, user_func, timeout, should_cache_fn, (arg, kw))
+
+        def cache_decorator(user_func):
             if to_str is compat.string_type:
                 # backwards compatible
-                key_generator = function_key_generator(namespace, fn)
+                key_generator = function_key_generator(namespace, user_func)
             else:
-                key_generator = function_key_generator(namespace, fn, to_str=to_str)
-
-            @functools.wraps(fn)
-            def decorate(*arg, **kw):
-                key = key_generator(*arg, **kw)
+                key_generator = function_key_generator(namespace, user_func, to_str=to_str)

-                @functools.wraps(fn)
-                def creator():
-                    return fn(*arg, **kw)
-
-                if not condition:
-                    return creator()
-
-                timeout = expiration_time() if expiration_time_is_callable \
-                    else expiration_time
-
-                return self.get_or_create(key, creator, timeout, should_cache_fn)
+            def refresh(*arg, **kw):
+                """
+                Like invalidate, but regenerates the value instead
+                """
+                key = key_generator(*arg, **kw)
+                value = user_func(*arg, **kw)
+                self.set(key, value)
+                return value

             def invalidate(*arg, **kw):
                 key = key_generator(*arg, **kw)
@@ -80,22 +90,19 @@ class RhodeCodeCacheRegion(CacheRegion):
                 key = key_generator(*arg, **kw)
                 return self.get(key)

-            def refresh(*arg, **kw):
-                key = key_generator(*arg, **kw)
-                value = fn(*arg, **kw)
-                self.set(key, value)
-                return value
+            user_func.set = set_
+            user_func.invalidate = invalidate
+            user_func.get = get
+            user_func.refresh = refresh
+            user_func.key_generator = key_generator
+            user_func.original = user_func

-            decorate.set = set_
-            decorate.invalidate = invalidate
-            decorate.refresh = refresh
-            decorate.get = get
-            decorate.original = fn
-            decorate.key_generator = key_generator
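Each function decorated with the rewritten `conditional_cache_on_arguments` now carries these helpers as attributes, so callers can manage individual cache entries without touching the region directly. A sketch, assuming `region` is an already configured `RhodeCodeCacheRegion`:

    @region.conditional_cache_on_arguments(condition=True)
    def heavy_compute(x):
        return x * x

    heavy_compute(4)             # computed once, then served from the cache
    heavy_compute.get(4)         # peek at the cached value, no computation
    heavy_compute.invalidate(4)  # drop the cached entry for these arguments
    heavy_compute.refresh(4)     # recompute, store and return the new value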
+            # Use `decorate` to preserve the signature of :param:`user_func`.

-            return decorate
+            return decorate(user_func, functools.partial(
+                get_or_create_for_user_func, key_generator))

-        return decorator
+        return cache_decorator


 def make_region(*arg, **kw):
@@ -110,7 +117,7 @@ def get_default_cache_settings(settings,
         if key.startswith(prefix):
             name = key.split(prefix)[1].strip()
             val = settings[key]
-            if isinstance(val, basestring):
+            if isinstance(val, compat.string_types):
                 val = val.strip()
             cache_settings[name] = val
     return cache_settings

@@ -123,13 +130,23 @@ def compute_key_from_params(*args):
     return sha1("_".join(map(safe_str, args)))


-def key_generator(namespace, fn):
+def backend_key_generator(backend):
+    """
+    Special wrapper that also passes the backend on to the key generator
+    """
+    def wrapper(namespace, fn):
+        return key_generator(backend, namespace, fn)
+    return wrapper
+
+
+def key_generator(backend, namespace, fn):
     fname = fn.__name__

     def generate_key(*args):
-        namespace_pref = namespace or 'default'
+        backend_prefix = getattr(backend, 'key_prefix', None) or 'backend_prefix'
+        namespace_pref = namespace or 'default_namespace'
         arg_key = compute_key_from_params(*args)
-        final_key = "{}:{}_{}".format(namespace_pref, fname, arg_key)
+        final_key = "{}:{}:{}_{}".format(backend_prefix, namespace_pref, fname, arg_key)

         return final_key

diff --git a/vcsserver/lib/request_counter.py b/vcsserver/lib/request_counter.py
new file mode 100644
--- /dev/null
+++ b/vcsserver/lib/request_counter.py
@@ -0,0 +1,27 @@
+# -*- coding: utf-8 -*-
+
+# RhodeCode VCSServer provides access to different vcs backends via network.
+# Copyright (C) 2014-2019 RhodeCode GmbH
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA


+counter = 0
+
+
+def get_request_counter(request):
+    global counter
+    counter += 1
+    return counter
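With the backend threaded into key generation, every cache key now embeds the backend prefix in addition to the namespace, function name and argument hash. Illustratively (all values invented):

    backend_prefix = 'file_backend'   # FileNamespaceBackend.key_prefix
    namespace_pref = 'repo_object'    # the region namespace
    fname = '_tags'                   # name of the decorated function
    arg_key = compute_key_from_params('context-uid', 'repo-id')  # sha1 of the args

    final_key = "{}:{}:{}_{}".format(backend_prefix, namespace_pref, fname, arg_key)
    # e.g. 'file_backend:repo_object:_tags_<40-char sha1>'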
diff --git a/vcsserver/subprocessio.py b/vcsserver/subprocessio.py
--- a/vcsserver/subprocessio.py
+++ b/vcsserver/subprocessio.py
@@ -216,9 +216,6 @@ class BufferedGenerator(object):
         except (GeneratorExit, StopIteration):
             pass

-    def __del__(self):
-        self.close()
-
 ####################
 # Threaded reader's infrastructure.
 ####################
@@ -475,26 +472,23 @@ class SubprocessIOChunker(object):
             self._closed = True
             try:
                 self.process.terminate()
-            except:
+            except Exception:
                 pass
             if self._close_input_fd:
                 os.close(self._close_input_fd)
             try:
                 self.output.close()
-            except:
+            except Exception:
                 pass
             try:
                 self.error.close()
-            except:
+            except Exception:
                 pass
             try:
                 os.close(self.inputstream)
-            except:
+            except Exception:
                 pass

-    def __del__(self):
-        self.close()
-

 def run_command(arguments, env=None):
     """
@@ -506,18 +500,20 @@ def run_command(arguments, env=None):
     cmd = arguments
     log.debug('Running subprocessio command %s', cmd)
+    proc = None
     try:
         _opts = {'shell': False, 'fail_on_stderr': False}
         if env:
            _opts.update({'env': env})
-        p = SubprocessIOChunker(cmd, **_opts)
-        stdout = ''.join(p)
-        stderr = ''.join(''.join(p.error))
+        proc = SubprocessIOChunker(cmd, **_opts)
+        return ''.join(proc), ''.join(proc.error)
     except (EnvironmentError, OSError) as err:
         cmd = ' '.join(cmd)  # human friendly CMD
         tb_err = ("Couldn't run subprocessio command (%s).\n"
                   "Original error was:%s\n" % (cmd, err))
         log.exception(tb_err)
         raise Exception(tb_err)
+    finally:
+        if proc:
+            proc.close()

-    return stdout, stderr
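`run_command` now closes the chunker in a `finally` block, which is what allowed the `__del__` finalizers above to be dropped. From the caller's perspective nothing changes (the command below is just an example):

    # returns the process' full stdout and stderr; the underlying
    # SubprocessIOChunker is closed even when the command fails
    stdout, stderr = run_command(['hg', 'version'], env={'HGPLAIN': '1'})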
""" - region = self._cache_region - context = wire.get('context', None) - repo_path = wire.get('path', '') - context_uid = '{}'.format(context) - cache = wire.get('cache', True) - cache_on = context and cache - - @region.conditional_cache_on_arguments(condition=cache_on) - def create_new_repo(_repo_type, _repo_path, _context_uid, compatible_version_id): - return self._create_repo(wire, create, compatible_version) - - return create_new_repo(self.repo_type, repo_path, context_uid, - compatible_version) + return self._create_repo(wire, create, compatible_version) NODE_TYPE_MAPPING = { @@ -122,7 +108,7 @@ NODE_TYPE_MAPPING = { } -class SvnRemote(object): +class SvnRemote(RemoteBase): def __init__(self, factory, hg_factory=None): self._factory = factory @@ -141,7 +127,6 @@ class SvnRemote(object): @reraise_safe_exceptions def is_empty(self, wire): - repo = self._factory.repo(wire) try: return self.lookup(wire, -1) == 0 @@ -219,9 +204,14 @@ class SvnRemote(object): return start_rev, end_rev def revision_properties(self, wire, revision): - repo = self._factory.repo(wire) - fs_ptr = svn.repos.fs(repo) - return svn.fs.revision_proplist(fs_ptr, revision) + + cache_on, context_uid, repo_id = self._cache_on(wire) + @self.region.conditional_cache_on_arguments(condition=cache_on) + def _revision_properties(_repo_id, _revision): + repo = self._factory.repo(wire) + fs_ptr = svn.repos.fs(repo) + return svn.fs.revision_proplist(fs_ptr, revision) + return _revision_properties(repo_id, revision) def revision_changes(self, wire, revision): @@ -267,28 +257,37 @@ class SvnRemote(object): } return changes + @reraise_safe_exceptions def node_history(self, wire, path, revision, limit): - cross_copies = False - repo = self._factory.repo(wire) - fsobj = svn.repos.fs(repo) - rev_root = svn.fs.revision_root(fsobj, revision) + cache_on, context_uid, repo_id = self._cache_on(wire) + @self.region.conditional_cache_on_arguments(condition=cache_on) + def _assert_correct_path(_context_uid, _repo_id, _path, _revision, _limit): + cross_copies = False + repo = self._factory.repo(wire) + fsobj = svn.repos.fs(repo) + rev_root = svn.fs.revision_root(fsobj, revision) - history_revisions = [] - history = svn.fs.node_history(rev_root, path) - history = svn.fs.history_prev(history, cross_copies) - while history: - __, node_revision = svn.fs.history_location(history) - history_revisions.append(node_revision) - if limit and len(history_revisions) >= limit: - break + history_revisions = [] + history = svn.fs.node_history(rev_root, path) history = svn.fs.history_prev(history, cross_copies) - return history_revisions + while history: + __, node_revision = svn.fs.history_location(history) + history_revisions.append(node_revision) + if limit and len(history_revisions) >= limit: + break + history = svn.fs.history_prev(history, cross_copies) + return history_revisions + return _assert_correct_path(context_uid, repo_id, path, revision, limit) def node_properties(self, wire, path, revision): - repo = self._factory.repo(wire) - fsobj = svn.repos.fs(repo) - rev_root = svn.fs.revision_root(fsobj, revision) - return svn.fs.node_proplist(rev_root, path) + cache_on, context_uid, repo_id = self._cache_on(wire) + @self.region.conditional_cache_on_arguments(condition=cache_on) + def _node_properties(_repo_id, _path, _revision): + repo = self._factory.repo(wire) + fsobj = svn.repos.fs(repo) + rev_root = svn.fs.revision_root(fsobj, revision) + return svn.fs.node_proplist(rev_root, path) + return _node_properties(repo_id, path, revision) def 
file_annotate(self, wire, path, revision): abs_path = 'file://' + urllib.pathname2url( @@ -317,27 +316,37 @@ class SvnRemote(object): return annotations - def get_node_type(self, wire, path, rev=None): - repo = self._factory.repo(wire) - fs_ptr = svn.repos.fs(repo) - if rev is None: - rev = svn.fs.youngest_rev(fs_ptr) - root = svn.fs.revision_root(fs_ptr, rev) - node = svn.fs.check_path(root, path) - return NODE_TYPE_MAPPING.get(node, None) + def get_node_type(self, wire, path, revision=None): + + cache_on, context_uid, repo_id = self._cache_on(wire) + @self.region.conditional_cache_on_arguments(condition=cache_on) + def _get_node_type(_repo_id, _path, _revision): + repo = self._factory.repo(wire) + fs_ptr = svn.repos.fs(repo) + if _revision is None: + _revision = svn.fs.youngest_rev(fs_ptr) + root = svn.fs.revision_root(fs_ptr, _revision) + node = svn.fs.check_path(root, path) + return NODE_TYPE_MAPPING.get(node, None) + return _get_node_type(repo_id, path, revision) def get_nodes(self, wire, path, revision=None): - repo = self._factory.repo(wire) - fsobj = svn.repos.fs(repo) - if revision is None: - revision = svn.fs.youngest_rev(fsobj) - root = svn.fs.revision_root(fsobj, revision) - entries = svn.fs.dir_entries(root, path) - result = [] - for entry_path, entry_info in entries.iteritems(): - result.append( - (entry_path, NODE_TYPE_MAPPING.get(entry_info.kind, None))) - return result + + cache_on, context_uid, repo_id = self._cache_on(wire) + @self.region.conditional_cache_on_arguments(condition=cache_on) + def _get_nodes(_repo_id, _path, _revision): + repo = self._factory.repo(wire) + fsobj = svn.repos.fs(repo) + if _revision is None: + _revision = svn.fs.youngest_rev(fsobj) + root = svn.fs.revision_root(fsobj, _revision) + entries = svn.fs.dir_entries(root, path) + result = [] + for entry_path, entry_info in entries.iteritems(): + result.append( + (entry_path, NODE_TYPE_MAPPING.get(entry_info.kind, None))) + return result + return _get_nodes(repo_id, path, revision) def get_file_content(self, wire, path, rev=None): repo = self._factory.repo(wire) @@ -349,13 +358,18 @@ class SvnRemote(object): return content.read() def get_file_size(self, wire, path, revision=None): - repo = self._factory.repo(wire) - fsobj = svn.repos.fs(repo) - if revision is None: - revision = svn.fs.youngest_revision(fsobj) - root = svn.fs.revision_root(fsobj, revision) - size = svn.fs.file_length(root, path) - return size + + cache_on, context_uid, repo_id = self._cache_on(wire) + @self.region.conditional_cache_on_arguments(condition=cache_on) + def _get_file_size(_repo_id, _path, _revision): + repo = self._factory.repo(wire) + fsobj = svn.repos.fs(repo) + if _revision is None: + _revision = svn.fs.youngest_revision(fsobj) + root = svn.fs.revision_root(fsobj, _revision) + size = svn.fs.file_length(root, path) + return size + return _get_file_size(repo_id, path, revision) def create_repository(self, wire, compatible_version=None): log.info('Creating Subversion repository in path "%s"', wire['path']) @@ -458,6 +472,17 @@ class SvnRemote(object): return False @reraise_safe_exceptions + def is_binary(self, wire, rev, path): + cache_on, context_uid, repo_id = self._cache_on(wire) + + @self.region.conditional_cache_on_arguments(condition=cache_on) + def _is_binary(_repo_id, _rev, _path): + raw_bytes = self.get_file_content(wire, path, rev) + return raw_bytes and '\0' in raw_bytes + + return _is_binary(repo_id, rev, path) + + @reraise_safe_exceptions def run_svn_command(self, wire, cmd, **opts): path = wire.get('path', 
None) @@ -673,7 +698,6 @@ class SvnDiffer(object): return content.splitlines(True) - class DiffChangeEditor(svn.delta.Editor): """ Records changes between two given revisions diff --git a/vcsserver/tests/test_git.py b/vcsserver/tests/test_git.py --- a/vcsserver/tests/test_git.py +++ b/vcsserver/tests/test_git.py @@ -61,7 +61,7 @@ class TestGitFetch(object): with patch('dulwich.client.LocalGitClient.fetch') as mock_fetch: mock_fetch.side_effect = side_effect - self.remote_git.pull(wire=None, url='/tmp/', apply_refs=False) + self.remote_git.pull(wire={}, url='/tmp/', apply_refs=False) determine_wants = self.mock_repo.object_store.determine_wants_all determine_wants.assert_called_once_with(SAMPLE_REFS) @@ -79,7 +79,7 @@ class TestGitFetch(object): with patch('dulwich.client.LocalGitClient.fetch') as mock_fetch: mock_fetch.side_effect = side_effect self.remote_git.pull( - wire=None, url='/tmp/', apply_refs=False, + wire={}, url='/tmp/', apply_refs=False, refs=selected_refs.keys()) determine_wants = self.mock_repo.object_store.determine_wants_all assert determine_wants.call_count == 0 @@ -95,18 +95,13 @@ class TestGitFetch(object): with patch('vcsserver.git.Repo', create=False) as mock_repo: mock_repo().get_refs.return_value = sample_refs - remote_refs = remote_git.get_remote_refs(wire=None, url=url) + remote_refs = remote_git.get_remote_refs(wire={}, url=url) mock_repo().get_refs.assert_called_once_with() assert remote_refs == sample_refs - def test_remove_ref(self): - ref_to_remove = 'refs/tags/v0.1.9' - self.mock_repo.refs = SAMPLE_REFS.copy() - self.remote_git.remove_ref(None, ref_to_remove) - assert ref_to_remove not in self.mock_repo.refs - class TestReraiseSafeExceptions(object): + def test_method_decorated_with_reraise_safe_exceptions(self): factory = Mock() git_remote = git.GitRemote(factory) diff --git a/vcsserver/tests/test_hg.py b/vcsserver/tests/test_hg.py --- a/vcsserver/tests/test_hg.py +++ b/vcsserver/tests/test_hg.py @@ -26,36 +26,17 @@ from mock import Mock, MagicMock, patch from vcsserver import exceptions, hg, hgcompat -class TestHGLookup(object): - def setup(self): - self.mock_repo = MagicMock() - self.mock_repo.__getitem__.side_effect = LookupError( - 'revision_or_commit_id', 'index', 'message') - factory = Mock() - factory.repo = Mock(return_value=self.mock_repo) - self.remote_hg = hg.HgRemote(factory) - - def test_fail_lookup_hg(self): - with pytest.raises(Exception) as exc_info: - self.remote_hg.lookup( - wire=None, revision='revision_or_commit_id', both=True) - - assert exc_info.value._vcs_kind == 'lookup' - assert 'revision_or_commit_id' in exc_info.value.args - - class TestDiff(object): def test_raising_safe_exception_when_lookup_failed(self): - repo = Mock() + factory = Mock() - factory.repo = Mock(return_value=repo) hg_remote = hg.HgRemote(factory) with patch('mercurial.patch.diff') as diff_mock: diff_mock.side_effect = LookupError( 'deadbeef', 'index', 'message') with pytest.raises(Exception) as exc_info: hg_remote.diff( - wire=None, rev1='deadbeef', rev2='deadbee1', + wire={}, commit_id_1='deadbeef', commit_id_2='deadbee1', file_filter=None, opt_git=True, opt_ignorews=True, context=3) assert type(exc_info.value) == Exception diff --git a/vcsserver/tests/test_svn.py b/vcsserver/tests/test_svn.py --- a/vcsserver/tests/test_svn.py +++ b/vcsserver/tests/test_svn.py @@ -45,8 +45,10 @@ INVALID_CERTIFICATE_STDERR = '\n'.join([ reason="SVN not packaged for Cygwin") def test_import_remote_repository_certificate_error(stderr, expected_reason): from vcsserver import svn + 
+    factory = mock.Mock()
+    factory.repo = mock.Mock(return_value=mock.Mock())

-    remote = svn.SvnRemote(None)
+    remote = svn.SvnRemote(factory)
     remote.is_path_valid_repository = lambda wire, path: True

     with mock.patch('subprocess.Popen',
@@ -76,7 +78,10 @@ def test_svn_libraries_can_be_imported()
 def test_username_password_extraction_from_url(example_url, parts):
     from vcsserver import svn

-    remote = svn.SvnRemote(None)
+    factory = mock.Mock()
+    factory.repo = mock.Mock(return_value=mock.Mock())
+
+    remote = svn.SvnRemote(factory)
     remote.is_path_valid_repository = lambda wire, path: True

     assert remote.get_url_and_credentials(example_url) == parts
diff --git a/vcsserver/tweens/__init__.py b/vcsserver/tweens/__init__.py
new file mode 100644
--- /dev/null
+++ b/vcsserver/tweens/__init__.py
@@ -0,0 +1,19 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2016-2019 RhodeCode GmbH
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License, version 3
+# (only), as published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# This program is dual-licensed. If you wish to learn more about the
+# RhodeCode Enterprise Edition, including its added features, Support services,
+# and proprietary license terms, please see https://rhodecode.com/licenses/
diff --git a/vcsserver/tweens.py b/vcsserver/tweens/request_wrapper.py
rename from vcsserver/tweens.py
rename to vcsserver/tweens/request_wrapper.py
--- a/vcsserver/tweens.py
+++ b/vcsserver/tweens/request_wrapper.py
@@ -15,12 +15,10 @@
 # along with this program; if not, write to the Free Software Foundation,
 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

-
-
 import time
 import logging
-
+import vcsserver

 from vcsserver.utils import safe_str

@@ -32,6 +30,10 @@ def get_access_path(request):
     return environ.get('PATH_INFO')


+def get_user_agent(environ):
+    return environ.get('HTTP_USER_AGENT')
+
+
 class RequestWrapperTween(object):
     def __init__(self, handler, registry):
         self.handler = handler
@@ -45,14 +47,18 @@ class RequestWrapperTween(object):
             response = self.handler(request)
         finally:
             end = time.time()
-
-            log.info('IP: %s Request to path: `%s` time: %.3fs',
-                     '127.0.0.1', safe_str(get_access_path(request)), end - start)
+            total = end - start
+            count = request.request_count()
+            _ver_ = vcsserver.__version__
+            log.info(
+                'Req[%4s] IP: %s %s Request to %s time: %.4fs [%s], VCSServer %s',
+                count, '127.0.0.1', request.environ.get('REQUEST_METHOD'),
+                safe_str(get_access_path(request)), total, get_user_agent(request.environ), _ver_)

         return response


 def includeme(config):
     config.add_tween(
-        'vcsserver.tweens.RequestWrapperTween',
+        'vcsserver.tweens.request_wrapper.RequestWrapperTween',
     )
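The reworked tween emits one summary line per request, including the process-local request counter and the client's user agent. A quick way to preview the format (all values invented):

    log_format = 'Req[%4s] IP: %s %s Request to %s time: %.4fs [%s], VCSServer %s'
    print(log_format % (12, '127.0.0.1', 'POST', '/hg',
                        0.031, 'mercurial/proto-1.0', '4.18.0'))
    # Req[  12] IP: 127.0.0.1 POST Request to /hg time: 0.0310s [mercurial/proto-1.0], VCSServer 4.18.0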
diff --git a/vcsserver/vcs_base.py b/vcsserver/vcs_base.py
new file mode 100644
--- /dev/null
+++ b/vcsserver/vcs_base.py
@@ -0,0 +1,32 @@
+# RhodeCode VCSServer provides access to different vcs backends via network.
+# Copyright (C) 2014-2019 RhodeCode GmbH
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+class RemoteBase(object):
+    EMPTY_COMMIT = '0' * 40
+
+    @property
+    def region(self):
+        return self._factory._cache_region
+
+    def _cache_on(self, wire):
+        context = wire.get('context', '')
+        context_uid = '{}'.format(context)
+        repo_id = wire.get('repo_id', '')
+        cache = wire.get('cache', True)
+        cache_on = context and cache
+        return cache_on, context_uid, repo_id
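`RemoteBase._cache_on` is the single switch behind all of the per-method caches introduced in this changeset: caching happens only when the wire carries a `context` and has not opted out via `cache=False`. A condensed sketch of how the hg/svn remotes consume it (modeled on the real `tags` implementation above):

    class ExampleRemote(RemoteBase):

        def __init__(self, factory):
            self._factory = factory

        def tags(self, wire):
            cache_on, context_uid, repo_id = self._cache_on(wire)

            # The underscored parameters exist only to build the cache key;
            # the real inputs travel in via the closed-over `wire`.
            @self.region.conditional_cache_on_arguments(condition=cache_on)
            def _tags(_context_uid, _repo_id):
                repo = self._factory.repo(wire)
                return repo.tags()

            return _tags(context_uid, repo_id)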