merged python3 into default
super-admin
r1131:20e2294a merge default

The requested changes are too big and content was truncated.

@@ -0,0 +1,54 b''
1 [build-system]
2 requires = ["setuptools>=61.0.0", "wheel"]
3 build-backend = "setuptools.build_meta"
4
5 [project]
6 name = "rhodecode-vcsserver"
7 description = "Version Control System Server for RhodeCode"
8 authors = [
9 {name = "RhodeCode GmbH", email = "support@rhodecode.com"},
10 ]
11
12 license = {text = "GPL V3"}
13 requires-python = ">=3.10"
14 dynamic = ["version", "readme", "dependencies", "optional-dependencies"]
15 classifiers = [
16 'Development Status :: 6 - Mature',
17 'Intended Audience :: Developers',
18 'Operating System :: OS Independent',
19 'Topic :: Software Development :: Version Control',
20 'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
21 'Programming Language :: Python :: 3.10',
22 ]
23
24 [project.entry-points."paste.app_factory"]
25 main = "vcsserver.http_main:main"
26
27
28 [tool.setuptools]
29 packages = ["vcsserver"]
30
31 [tool.setuptools.dynamic]
32 readme = {file = ["README.rst"], content-type = "text/x-rst"}
33 version = {file = "vcsserver/VERSION"}
34 dependencies = {file = ["requirements.txt"]}
35 optional-dependencies.tests = {file = ["requirements_test.txt"]}
36
37 [tool.ruff]
38 select = [
39 # Pyflakes
40 "F",
41 # Pycodestyle
42 "E",
43 "W",
44 # isort
45 "I001"
46 ]
47 ignore = [
48 "E501", # line too long, handled by black
49 ]
50 # Same as Black.
51 line-length = 120
52
53 [tool.ruff.isort]
54 known-first-party = ["vcsserver"]
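
As a quick sanity check of the packaging metadata above, the "paste.app_factory" entry point can be resolved with importlib.metadata once the package is installed. This is a minimal sketch, not part of the commit, and assumes `pip install -e .` has been run:

    # sketch: verify the paste.app_factory entry point resolves
    from importlib.metadata import entry_points

    eps = entry_points(group="paste.app_factory")  # Python 3.10+ selection API
    main_ep = [ep for ep in eps if ep.name == "main"][0]
    factory = main_ep.load()  # resolves vcsserver.http_main:main
    print(factory)
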
@@ -0,0 +1,27 b''
1 # Copyright (C) 2010-2023 RhodeCode GmbH
2 #
3 # This program is free software: you can redistribute it and/or modify
4 # it under the terms of the GNU Affero General Public License, version 3
5 # (only), as published by the Free Software Foundation.
6 #
7 # This program is distributed in the hope that it will be useful,
8 # but WITHOUT ANY WARRANTY; without even the implied warranty of
9 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 # GNU General Public License for more details.
11 #
12 # You should have received a copy of the GNU Affero General Public License
13 # along with this program. If not, see <http://www.gnu.org/licenses/>.
14 #
15 # This program is dual-licensed. If you wish to learn more about the
16 # RhodeCode Enterprise Edition, including its added features, Support services,
17 # and proprietary license terms, please see https://rhodecode.com/licenses/
18
19 HOOK_REPO_SIZE = 'changegroup.repo_size'
20
21 # HG
22 HOOK_PRE_PULL = 'preoutgoing.pre_pull'
23 HOOK_PULL = 'outgoing.pull_logger'
24 HOOK_PRE_PUSH = 'prechangegroup.pre_push'
25 HOOK_PRETX_PUSH = 'pretxnchangegroup.pre_push'
26 HOOK_PUSH = 'changegroup.push_logger'
27 HOOK_PUSH_KEY = 'pushkey.key_push'
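
These constants name the Mercurial hook entries under which RhodeCode registers its handlers. A hypothetical sketch of how the names could be rendered into an hgrc-style [hooks] section follows; the handler paths are placeholders, not taken from this commit:

    # sketch (assumed usage): render an hgrc [hooks] section from the constants above
    hgrc_hooks = {
        HOOK_PRE_PUSH: 'python:vcsserver.hooks.pre_push',   # placeholder handler path
        HOOK_PUSH: 'python:vcsserver.hooks.post_push',      # placeholder handler path
    }
    print('[hooks]')
    for hook_name, handler in hgrc_hooks.items():
        print(f'{hook_name} = {handler}')
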
@@ -0,0 +1,53 b''
1 # Copyright (C) 2010-2023 RhodeCode GmbH
2 #
3 # This program is free software: you can redistribute it and/or modify
4 # it under the terms of the GNU Affero General Public License, version 3
5 # (only), as published by the Free Software Foundation.
6 #
7 # This program is distributed in the hope that it will be useful,
8 # but WITHOUT ANY WARRANTY; without even the implied warranty of
9 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 # GNU General Public License for more details.
11 #
12 # You should have received a copy of the GNU Affero General Public License
13 # along with this program. If not, see <http://www.gnu.org/licenses/>.
14 #
15 # This program is dual-licensed. If you wish to learn more about the
16 # RhodeCode Enterprise Edition, including its added features, Support services,
17 # and proprietary license terms, please see https://rhodecode.com/licenses/
18
19 import sys
20 import logging
21
22
23 BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = list(range(30, 38))
24
25 # Sequences
26 RESET_SEQ = "\033[0m"
27 COLOR_SEQ = "\033[0;%dm"
28 BOLD_SEQ = "\033[1m"
29
30 COLORS = {
31 'CRITICAL': MAGENTA,
32 'ERROR': RED,
33 'WARNING': CYAN,
34 'INFO': GREEN,
35 'DEBUG': BLUE,
36 'SQL': YELLOW
37 }
38
39
40 class ColorFormatter(logging.Formatter):
41
42 def format(self, record):
43 """
44 Colorize the formatted record according to its levelname, using the COLORS map
45 """
46 def_record = super().format(record)
47
48 levelname = record.levelname
49 start = COLOR_SEQ % (COLORS[levelname])
50 end = RESET_SEQ
51
52 colored_record = ''.join([start, def_record, end])
53 return colored_record
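
A minimal usage sketch for the formatter above, attaching it to a console handler; everything besides ColorFormatter is stdlib logging:

    # sketch: wire ColorFormatter into a stream handler
    import logging

    handler = logging.StreamHandler()
    handler.setFormatter(
        ColorFormatter('%(asctime)s %(levelname)-5.5s [%(name)s] %(message)s'))
    log = logging.getLogger('vcsserver')
    log.addHandler(handler)
    log.setLevel(logging.DEBUG)
    log.warning('rendered wrapped in the CYAN color sequence')  # COLORS['WARNING']
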
@@ -0,0 +1,87 b''
1 # RhodeCode VCSServer provides access to different vcs backends via network.
2 # Copyright (C) 2014-2023 RhodeCode GmbH
3 #
4 # This program is free software; you can redistribute it and/or modify
5 # it under the terms of the GNU General Public License as published by
6 # the Free Software Foundation; either version 3 of the License, or
7 # (at your option) any later version.
8 #
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU General Public License for more details.
13 #
14 # You should have received a copy of the GNU General Public License
15 # along with this program; if not, write to the Free Software Foundation,
16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17
18 import logging
19 import os
20 import diskcache
21 from diskcache import RLock
22
23 log = logging.getLogger(__name__)
24
25 cache_meta = None
26
27
28 class ReentrantLock(RLock):
29 def __enter__(self):
30 reentrant_lock_key = self._key
31
32 log.debug('Acquire ReentrantLock(key=%s) for archive cache generation...', reentrant_lock_key)
33 #self.acquire()
34 log.debug('Lock for key=%s acquired', reentrant_lock_key)
35
36 def __exit__(self, *exc_info):
37 #self.release()
38 pass
39
40
41 def get_archival_config(config):
42
43 final_config = {
44 'archive_cache.eviction_policy': 'least-frequently-used'
45 }
46
47 for k, v in config.items():
48 if k.startswith('archive_cache'):
49 final_config[k] = v
50
51 return final_config
52
53
54 def get_archival_cache_store(config):
55
56 global cache_meta
57 if cache_meta is not None:
58 return cache_meta
59
60 config = get_archival_config(config)
61
62 archive_cache_dir = config['archive_cache.store_dir']
63 archive_cache_size_gb = config['archive_cache.cache_size_gb']
64 archive_cache_shards = config['archive_cache.cache_shards']
65 archive_cache_eviction_policy = config['archive_cache.eviction_policy']
66
67 log.debug('Initializing archival cache instance under %s', archive_cache_dir)
68
69 # check if it's ok to write, and re-create the archive cache
70 if not os.path.isdir(archive_cache_dir):
71 os.makedirs(archive_cache_dir, exist_ok=True)
72
73 d_cache = diskcache.FanoutCache(
74 archive_cache_dir, shards=archive_cache_shards,
75 cull_limit=0, # manual eviction required
76 size_limit=archive_cache_size_gb * 1024 * 1024 * 1024,
77 eviction_policy=archive_cache_eviction_policy,
78 timeout=30
79 )
80 cache_meta = d_cache
81 return cache_meta
82
83
84 def includeme(config):
85 # init our cache at start; for vcsserver we don't init at runtime,
86 # because our cache config is sent over the wire on the make-archive call; this call just lazy-enables the client
87 return
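
A sketch of how the returned FanoutCache might be used by a caller; the config values and key naming here are illustrative, not from this commit:

    # sketch: store and read an archive blob through the diskcache FanoutCache
    config = {
        'archive_cache.store_dir': '/tmp/archive_cache',  # illustrative values
        'archive_cache.cache_size_gb': 1,
        'archive_cache.cache_shards': 8,
    }
    d_cache = get_archival_cache_store(config)

    key = 'archive-sha:abcdef'  # illustrative key
    if key not in d_cache:
        d_cache.set(key, b'...zip bytes...', read=False)
    payload = d_cache.get(key)
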
@@ -0,0 +1,2 b''
1 # use orjson by default
2 import orjson as json
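
One behavioural difference worth noting with this shim: unlike the stdlib module, orjson.dumps returns bytes, so callers that need text must decode. A small sketch (the import mirrors the shim above):

    import orjson as json

    payload = json.dumps({'status': 'ok'})  # bytes, not str
    text = payload.decode('utf8')
    data = json.loads(payload)              # accepts bytes or str
    assert data == {'status': 'ok'}
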
@@ -0,0 +1,160 b''
1 # RhodeCode VCSServer provides access to different vcs backends via network.
2 # Copyright (C) 2014-2023 RhodeCode GmbH
3 #
4 # This program is free software; you can redistribute it and/or modify
5 # it under the terms of the GNU General Public License as published by
6 # the Free Software Foundation; either version 3 of the License, or
7 # (at your option) any later version.
8 #
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU General Public License for more details.
13 #
14 # You should have received a copy of the GNU General Public License
15 # along with this program; if not, write to the Free Software Foundation,
16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17
18 import os
19 import tempfile
20
21 from svn import client
22 from svn import core
23 from svn import ra
24
25 from mercurial import error
26
27 from vcsserver.str_utils import safe_bytes
28
29 core.svn_config_ensure(None)
30 svn_config = core.svn_config_get_config(None)
31
32
33 class RaCallbacks(ra.Callbacks):
34 @staticmethod
35 def open_tmp_file(pool): # pragma: no cover
36 (fd, fn) = tempfile.mkstemp()
37 os.close(fd)
38 return fn
39
40 @staticmethod
41 def get_client_string(pool):
42 return b'RhodeCode-subversion-url-checker'
43
44
45 class SubversionException(Exception):
46 pass
47
48
49 class SubversionConnectionException(SubversionException):
50 """Exception raised when a generic error occurs when connecting to a repository."""
51
52
53 def normalize_url(url):
54 if not url:
55 return url
56 if url.startswith(b'svn+http://') or url.startswith(b'svn+https://'):
57 url = url[4:]
58 url = url.rstrip(b'/')
59 return url
60
61
62 def _create_auth_baton(pool):
63 """Create a Subversion authentication baton. """
64 # Give the client context baton a suite of authentication
65 # providers.
66 platform_specific = [
67 'svn_auth_get_gnome_keyring_simple_provider',
68 'svn_auth_get_gnome_keyring_ssl_client_cert_pw_provider',
69 'svn_auth_get_keychain_simple_provider',
70 'svn_auth_get_keychain_ssl_client_cert_pw_provider',
71 'svn_auth_get_kwallet_simple_provider',
72 'svn_auth_get_kwallet_ssl_client_cert_pw_provider',
73 'svn_auth_get_ssl_client_cert_file_provider',
74 'svn_auth_get_windows_simple_provider',
75 'svn_auth_get_windows_ssl_server_trust_provider',
76 ]
77
78 providers = []
79
80 for p in platform_specific:
81 if getattr(core, p, None) is not None:
82 try:
83 providers.append(getattr(core, p)())
84 except RuntimeError:
85 pass
86
87 providers += [
88 client.get_simple_provider(),
89 client.get_username_provider(),
90 client.get_ssl_client_cert_file_provider(),
91 client.get_ssl_client_cert_pw_file_provider(),
92 client.get_ssl_server_trust_file_provider(),
93 ]
94
95 return core.svn_auth_open(providers, pool)
96
97
98 class SubversionRepo(object):
99 """Wrapper for a Subversion repository.
100
101 It uses the SWIG Python bindings, see above for requirements.
102 """
103 def __init__(self, svn_url: bytes = b'', username: bytes = b'', password: bytes = b''):
104
105 self.username = username
106 self.password = password
107 self.svn_url = core.svn_path_canonicalize(svn_url)
108
109 self.auth_baton_pool = core.Pool()
110 self.auth_baton = _create_auth_baton(self.auth_baton_pool)
111 # self.init_ra_and_client() assumes that a pool already exists
112 self.pool = core.Pool()
113
114 self.ra = self.init_ra_and_client()
115 self.uuid = ra.get_uuid(self.ra, self.pool)
116
117 def init_ra_and_client(self):
118 """Initializes the RA and client layers, because sometimes getting
119 unified diffs runs the remote server out of open files.
120 """
121
122 if self.username:
123 core.svn_auth_set_parameter(self.auth_baton,
124 core.SVN_AUTH_PARAM_DEFAULT_USERNAME,
125 self.username)
126 if self.password:
127 core.svn_auth_set_parameter(self.auth_baton,
128 core.SVN_AUTH_PARAM_DEFAULT_PASSWORD,
129 self.password)
130
131 callbacks = RaCallbacks()
132 callbacks.auth_baton = self.auth_baton
133
134 try:
135 return ra.open2(self.svn_url, callbacks, svn_config, self.pool)
136 except SubversionException as e:
137 # e.child contains detailed error messages
138 msglist = []
139 svn_exc = e
140 while svn_exc:
141 if svn_exc.args[0]:
142 msglist.append(svn_exc.args[0])
143 svn_exc = svn_exc.child
144 msg = '\n'.join(msglist)
145 raise SubversionConnectionException(msg)
146
147
148 class svnremoterepo(object):
149 """ the dumb wrapper for actual Subversion repositories """
150
151 def __init__(self, username: bytes = b'', password: bytes = b'', svn_url: bytes = b''):
152 self.username = username or b''
153 self.password = password or b''
154 self.path = normalize_url(svn_url)
155
156 def svn(self):
157 try:
158 return SubversionRepo(self.path, self.username, self.password)
159 except SubversionConnectionException as e:
160 raise error.Abort(safe_bytes(e))
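
A hypothetical usage sketch of the wrapper above; the URL and credentials are placeholders:

    # sketch: check connectivity to a Subversion repository via the SWIG bindings
    remote = svnremoterepo(username=b'user', password=b'secret',
                           svn_url=b'svn+https://example.com/repo')  # placeholder URL
    repo = remote.svn()  # raises mercurial error.Abort on connection failure
    print(repo.uuid)     # repository UUID fetched through the RA layer
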
@@ -0,0 +1,17 b''
1 # RhodeCode VCSServer provides access to different vcs backends via network.
2 # Copyright (C) 2014-2023 RhodeCode GmbH
3 #
4 # This program is free software; you can redistribute it and/or modify
5 # it under the terms of the GNU General Public License as published by
6 # the Free Software Foundation; either version 3 of the License, or
7 # (at your option) any later version.
8 #
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU General Public License for more details.
13 #
14 # You should have received a copy of the GNU General Public License
15 # along with this program; if not, write to the Free Software Foundation,
16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17
1 NO CONTENT: new file 100644
1 NO CONTENT: new file 100644
1 NO CONTENT: new file 100644
@@ -1,5 +1,5 b''
1 1 [bumpversion]
2 current_version = 4.28.0
2 current_version = 5.0.0
3 3 message = release: Bump version {current_version} to {new_version}
4 4
5 5 [bumpversion:file:vcsserver/VERSION]
@@ -1,37 +1,41 b''
1 1 syntax: glob
2
2 3 *.orig
3 4 *.pyc
4 5 *.swp
5 6 *.sqlite
6 7 *.tox
7 8 *.egg-info
8 9 *.egg
9 10 *.eggs
10 11 *.idea
11 12 .DS_Store*
12 13
13 14
14 15 syntax: regexp
15 16
16 17 #.filename
17 18 ^\.settings$
18 19 ^\.project$
19 20 ^\.pydevproject$
20 21 ^\.coverage$
21 22 ^\.cache.*$
23 ^\.venv.*$
24 ^\.ruff_cache.*$
22 25 ^\.rhodecode$
23 26
27
24 28 ^.dev
25 29 ^build/
26 30 ^coverage\.xml$
27 31 ^data$
28 32 ^dev.ini$
29 33 ^acceptance_tests/dev.*\.ini$
30 34 ^dist/
31 35 ^fabfile.py
32 36 ^htmlcov
33 37 ^junit\.xml$
34 38 ^node_modules/
35 39 ^pylint.log$
36 40 ^build$
37 41 ^result$
@@ -1,45 +1,111 b''
1 .DEFAULT_GOAL := help
1 # required for pushd to work..
2 #SHELL = /bin/bash
3
2 4
3 5 # set by: PATH_TO_OUTDATED_PACKAGES=/some/path/outdated_packages.py
4 6 OUTDATED_PACKAGES = ${PATH_TO_OUTDATED_PACKAGES}
5 7
6 8 .PHONY: clean
7 clean: ## full clean
9 ## Cleanup compiled and cache py files
10 clean:
8 11 make test-clean
9 12 find . -type f \( -iname '*.c' -o -iname '*.pyc' -o -iname '*.so' -o -iname '*.orig' \) -exec rm '{}' ';'
10 13
11 14
12 15 .PHONY: test
13 test: ## run test-clean and tests
16 ## run test-clean and tests
17 test:
14 18 make test-clean
15 19 make test-only
16 20
17 21
18 .PHONY:test-clean
19 test-clean: ## run test-clean and tests
22 .PHONY: test-clean
23 ## Cleanup test reports, coverage files and pycache dirs
24 test-clean:
20 25 rm -rf coverage.xml htmlcov junit.xml pylint.log result
21 26 find . -type d -name "__pycache__" -prune -exec rm -rf '{}' ';'
22 27 find . -type f \( -iname '.coverage.*' \) -exec rm '{}' ';'
23 28
24 29
25 30 .PHONY: test-only
26 test-only: ## run tests
31 ## Run tests only without cleanup
32 test-only:
27 33 PYTHONHASHSEED=random \
28 34 py.test -x -vv -r xw -p no:sugar \
29 --cov=vcsserver --cov-report=term-missing --cov-report=html \
30 vcsserver
31
32
33 .PHONY: generate-pkgs
34 generate-pkgs: ## generate new python packages
35 nix-shell pkgs/shell-generate.nix --command "pip2nix generate --licenses"
35 --cov-report=term-missing --cov-report=html \
36 --cov=vcsserver vcsserver
36 37
37 38
38 39 .PHONY: pip-packages
39 pip-packages: ## show outdated packages
40 ## Show outdated packages
41 pip-packages:
40 42 python ${OUTDATED_PACKAGES}
41 43
42 44
43 .PHONY: help
44 help:
45 @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-24s\033[0m %s\n", $$1, $$2}'
45 .PHONY: build
46 ## Build sdist/egg
47 build:
48 python -m build
49
50
51 .PHONY: dev-env
52 ## make dev-env based on the requirements files and install develop of packages
53 dev-env:
54 pip install build virtualenv
55 pip wheel --wheel-dir=/home/rhodecode/.cache/pip/wheels -r requirements.txt -r requirements_test.txt -r requirements_debug.txt
56 pip install --no-index --find-links=/home/rhodecode/.cache/pip/wheels -r requirements.txt -r requirements_test.txt -r requirements_debug.txt
57 pip install -e .
58
59
60 .PHONY: dev-srv
61 ## run develop server instance
62 dev-srv:
63 pserve --reload .dev/dev.ini
64
65
66 .PHONY: dev-srv-g
67 ## run gunicorn multi process workers
68 dev-srv-g:
69 gunicorn --workers=4 --paste .dev/dev.ini --bind=0.0.0.0:10010 --worker-class=sync --threads=1 --config=configs/gunicorn_config.py --timeout=120
70
71 # Default command on calling make
72 .DEFAULT_GOAL := show-help
73
74 .PHONY: show-help
75 show-help:
76 @echo "$$(tput bold)Available rules:$$(tput sgr0)"
77 @echo
78 @sed -n -e "/^## / { \
79 h; \
80 s/.*//; \
81 :doc" \
82 -e "H; \
83 n; \
84 s/^## //; \
85 t doc" \
86 -e "s/:.*//; \
87 G; \
88 s/\\n## /---/; \
89 s/\\n/ /g; \
90 p; \
91 }" ${MAKEFILE_LIST} \
92 | LC_ALL='C' sort --ignore-case \
93 | awk -F '---' \
94 -v ncol=$$(tput cols) \
95 -v indent=19 \
96 -v col_on="$$(tput setaf 6)" \
97 -v col_off="$$(tput sgr0)" \
98 '{ \
99 printf "%s%*s%s ", col_on, -indent, $$1, col_off; \
100 n = split($$2, words, " "); \
101 line_length = ncol - indent; \
102 for (i = 1; i <= n; i++) { \
103 line_length -= length(words[i]) + 1; \
104 if (line_length <= 0) { \
105 line_length = ncol - indent - length(words[i]) - 1; \
106 printf "\n%*s ", -indent, " "; \
107 } \
108 printf "%s ", words[i]; \
109 } \
110 printf "\n"; \
111 }'
@@ -1,275 +1,275 b''
1 ## -*- coding: utf-8 -*-
1 #
2 2
3 3 ; #################################
4 4 ; RHODECODE VCSSERVER CONFIGURATION
5 5 ; #################################
6 6
7 7 [server:main]
8 8 ; COMMON HOST/IP CONFIG
9 9 host = 0.0.0.0
10 10 port = 9900
11 11
12 12 ; ##################################################
13 13 ; WAITRESS WSGI SERVER - Recommended for Development
14 14 ; ##################################################
15 15
16 16 ; use server type
17 17 use = egg:waitress#main
18 18
19 19 ; number of worker threads
20 20 threads = 5
21 21
22 22 ; MAX BODY SIZE 100GB
23 23 max_request_body_size = 107374182400
24 24
25 25 ; Use poll instead of select, fixes file descriptors limits problems.
26 26 ; May not work on old windows systems.
27 27 asyncore_use_poll = true
28 28
29 29
30 30 ; ###########################
31 31 ; GUNICORN APPLICATION SERVER
32 32 ; ###########################
33 33
34 34 ; run with gunicorn --paste rhodecode.ini
35 35
36 36 ; Module to use, this setting shouldn't be changed
37 37 #use = egg:gunicorn#main
38 38
39 39 ; Sets the number of process workers. More workers means more concurrent connections
40 40 ; RhodeCode can handle at the same time. Each additional worker also increases
41 41 ; memory usage, as each has its own set of caches.
42 42 ; Recommended value is (2 * NUMBER_OF_CPUS + 1), e.g. 2 CPUs = 5 workers, but no more
43 43 ; than 8-10 unless for really big deployments, e.g. 700-1000 users.
44 44 ; `instance_id = *` must be set in the [app:main] section below (which is the default)
45 45 ; when using more than 1 worker.
46 46 #workers = 2
47 47
48 48 ; Gunicorn access log level
49 49 #loglevel = info
50 50
51 51 ; Process name visible in process list
52 52 #proc_name = rhodecode_vcsserver
53 53
54 54 ; Type of worker class, one of `sync`, `gevent`
55 55 ; currently `sync` is the only option allowed.
56 56 #worker_class = sync
57 57
58 58 ; The maximum number of simultaneous clients. Valid only for gevent
59 59 #worker_connections = 10
60 60
61 61 ; Max number of requests that worker will handle before being gracefully restarted.
62 62 ; Prevents memory leaks, jitter adds variability so not all workers are restarted at once.
63 63 #max_requests = 1000
64 64 #max_requests_jitter = 30
65 65
66 66 ; Amount of time a worker can spend with handling a request before it
67 67 ; gets killed and restarted. By default set to 21600 (6hrs)
68 68 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
69 69 #timeout = 21600
70 70
71 71 ; The maximum size of HTTP request line in bytes.
72 72 ; 0 for unlimited
73 73 #limit_request_line = 0
74 74
75 75 ; Limit the number of HTTP headers fields in a request.
76 76 ; By default this value is 100 and can't be larger than 32768.
77 77 #limit_request_fields = 32768
78 78
79 79 ; Limit the allowed size of an HTTP request header field.
80 80 ; Value is a positive number or 0.
81 81 ; Setting it to 0 will allow unlimited header field sizes.
82 82 #limit_request_field_size = 0
83 83
84 84 ; Timeout for graceful workers restart.
85 85 ; After receiving a restart signal, workers have this much time to finish
86 86 ; serving requests. Workers still alive after the timeout (starting from the
87 87 ; receipt of the restart signal) are force killed.
88 88 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
89 89 #graceful_timeout = 21600
90 90
91 91 # The number of seconds to wait for requests on a Keep-Alive connection.
92 92 # Generally set in the 1-5 seconds range.
93 93 #keepalive = 2
94 94
95 95 ; Maximum memory usage that each worker can use before it will receive a
96 96 ; graceful restart signal. 0 = memory monitoring is disabled
97 97 ; Examples: 268435456 (256MB), 536870912 (512MB)
98 98 ; 1073741824 (1GB), 2147483648 (2GB), 4294967296 (4GB)
99 99 #memory_max_usage = 0
100 100
101 101 ; How often in seconds to check for memory usage for each gunicorn worker
102 102 #memory_usage_check_interval = 60
103 103
104 104 ; Threshold value below which we don't recycle a worker if GarbageCollection
105 105 ; frees up enough resources. Before each restart we try to run GC on the worker;
106 106 ; if we get enough free memory after that, the restart will not happen.
107 107 #memory_usage_recovery_threshold = 0.8
108 108
109 109
110 110 [app:main]
111 111 ; The %(here)s variable will be replaced with the absolute path of parent directory
112 112 ; of this file
113 113 ; Each option in the app:main section can be overridden by an environment variable
114 114 ;
115 115 ;To override an option:
116 116 ;
117 117 ;RC_<KeyName>
118 118 ;Everything should be uppercase, . and - should be replaced by _.
119 119 ;For example, if you have these configuration settings:
120 120 ;rc_cache.repo_object.backend = foo
121 121 ;can be overridden by
122 122 ;export RC_CACHE_REPO_OBJECT_BACKEND=foo
123 123
124 124 use = egg:rhodecode-vcsserver
125 125
126 126
127 127 ; #############
128 128 ; DEBUG OPTIONS
129 129 ; #############
130 130
131 131 # During development we want to have the debug toolbar enabled
132 132 pyramid.includes =
133 133 pyramid_debugtoolbar
134 134
135 135 debugtoolbar.hosts = 0.0.0.0/0
136 136 debugtoolbar.exclude_prefixes =
137 137 /css
138 138 /fonts
139 139 /images
140 140 /js
141 141
142 142 ; #################
143 143 ; END DEBUG OPTIONS
144 144 ; #################
145 145
146 146 ; Pyramid default locales, we need this to be set
147 147 #pyramid.default_locale_name = en
148 148
149 149 ; default locale used by VCS systems
150 150 #locale = en_US.UTF-8
151 151
152 152 ; path to binaries for vcsserver, it should be set by the installer
153 153 ; at installation time, e.g /home/user/.rccontrol/vcsserver-1/profile/bin
154 154 ; it can also be a path to nix-build output in case of development
155 155 core.binary_dir = ""
156 156
157 157 ; Custom exception store path, defaults to TMPDIR
158 158 ; This is used to store exception from RhodeCode in shared directory
159 159 #exception_tracker.store_path =
160 160
161 161 ; #############
162 162 ; DOGPILE CACHE
163 163 ; #############
164 164
165 165 ; Default cache dir for caches. Putting this into a ramdisk can boost performance.
166 166 ; eg. /tmpfs/data_ramdisk, however this directory might require large amount of space
167 167 #cache_dir = %(here)s/data
168 168
169 169 ; ***************************************
170 170 ; `repo_object` cache, default file based
171 171 ; ***************************************
172 172
173 173 ; `repo_object` cache settings for vcs methods for repositories
174 174 #rc_cache.repo_object.backend = dogpile.cache.rc.file_namespace
175 175
176 176 ; cache auto-expires after N seconds
177 177 ; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
178 178 #rc_cache.repo_object.expiration_time = 2592000
179 179
180 180 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
181 181 #rc_cache.repo_object.arguments.filename = /tmp/vcsserver_cache_repo_object.db
182 182
183 183 ; ***********************************************************
184 184 ; `repo_object` cache with redis backend
185 185 ; recommended for larger instances and for better performance
186 186 ; ***********************************************************
187 187
188 188 ; `repo_object` cache settings for vcs methods for repositories
189 189 #rc_cache.repo_object.backend = dogpile.cache.rc.redis_msgpack
190 190
191 191 ; cache auto-expires after N seconds
192 192 ; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
193 193 #rc_cache.repo_object.expiration_time = 2592000
194 194
195 195 ; redis_expiration_time needs to be greater than expiration_time
196 196 #rc_cache.repo_object.arguments.redis_expiration_time = 3592000
197 197
198 198 #rc_cache.repo_object.arguments.host = localhost
199 199 #rc_cache.repo_object.arguments.port = 6379
200 200 #rc_cache.repo_object.arguments.db = 5
201 201 #rc_cache.repo_object.arguments.socket_timeout = 30
202 202 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
203 203 #rc_cache.repo_object.arguments.distributed_lock = true
204 204
205 205 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
206 206 #rc_cache.repo_object.arguments.lock_auto_renewal = true
207 207
208 208 ; Statsd client config, this is used to send metrics to statsd
209 209 ; We recommend setting up statsd_exporter and scraping the metrics using Prometheus
210 210 #statsd.enabled = false
211 211 #statsd.statsd_host = 0.0.0.0
212 212 #statsd.statsd_port = 8125
213 213 #statsd.statsd_prefix =
214 214 #statsd.statsd_ipv6 = false
215 215
216 216 ; configure logging automatically at server startup; set to false
217 217 ; to use the custom logging config below.
218 218 ; RC_LOGGING_FORMATTER
219 219 ; RC_LOGGING_LEVEL
220 220 ; env variables can control the settings for logging in case of autoconfigure
221 221
222 222 #logging.autoconfigure = true
223 223
224 224 ; specify your own custom logging config file to configure logging
225 225 #logging.logging_conf_file = /path/to/custom_logging.ini
226 226
227 227 ; #####################
228 228 ; LOGGING CONFIGURATION
229 229 ; #####################
230 230
231 231 [loggers]
232 232 keys = root, vcsserver
233 233
234 234 [handlers]
235 235 keys = console
236 236
237 237 [formatters]
238 238 keys = generic, json
239 239
240 240 ; #######
241 241 ; LOGGERS
242 242 ; #######
243 243 [logger_root]
244 244 level = NOTSET
245 245 handlers = console
246 246
247 247 [logger_vcsserver]
248 248 level = DEBUG
249 249 handlers =
250 250 qualname = vcsserver
251 251 propagate = 1
252 252
253 253 ; ########
254 254 ; HANDLERS
255 255 ; ########
256 256
257 257 [handler_console]
258 258 class = StreamHandler
259 259 args = (sys.stderr, )
260 260 level = DEBUG
261 261 ; To enable JSON formatted logs replace 'generic' with 'json'
262 262 ; This allows sending properly formatted logs to grafana loki or elasticsearch
263 263 formatter = generic
264 264
265 265 ; ##########
266 266 ; FORMATTERS
267 267 ; ##########
268 268
269 269 [formatter_generic]
270 270 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
271 271 datefmt = %Y-%m-%d %H:%M:%S
272 272
273 273 [formatter_json]
274 274 format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
275 275 class = vcsserver.lib._vendor.jsonlogger.JsonFormatter
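
The [app:main] override convention described above (uppercase, '.' and '-' replaced by '_', prefixed with RC_) can be expressed as a small helper. This is a sketch of one plausible reading of that convention, not code from the commit:

    import os

    def rc_env_key(option: str) -> str:
        # e.g. rc_cache.repo_object.backend -> RC_CACHE_REPO_OBJECT_BACKEND
        key = option.upper().replace('.', '_').replace('-', '_')
        return key if key.startswith('RC_') else 'RC_' + key

    os.environ[rc_env_key('rc_cache.repo_object.backend')] = 'foo'
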
@@ -1,396 +1,393 b''
1 1 """
2 2 Gunicorn config extension and hooks. This config file adds some extra settings and memory management.
3 3 Gunicorn configuration should be managed by the .ini file entries of RhodeCode or VCSServer
4 4 """
5 5
6 6 import gc
7 7 import os
8 8 import sys
9 9 import math
10 10 import time
11 11 import threading
12 12 import traceback
13 13 import random
14 14 import socket
15 15 from gunicorn.glogging import Logger
16 16
17 17
18 18 def get_workers():
19 19 import multiprocessing
20 20 return multiprocessing.cpu_count() * 2 + 1
21 21
22 22 # GLOBAL
23 23 errorlog = '-'
24 24 accesslog = '-'
25 25
26 26
27 27 # SERVER MECHANICS
28 28 # None == system temp dir
29 29 # worker_tmp_dir is recommended to be set to some tmpfs
30 30 worker_tmp_dir = None
31 31 tmp_upload_dir = None
32 32
33 33 #reuse_port = True
34 34
35 35 # Custom log format
36 36 #access_log_format = (
37 37 # '%(t)s %(p)s INFO [GNCRN] %(h)-15s rqt:%(L)s %(s)s %(b)-6s "%(m)s:%(U)s %(q)s" usr:%(u)s "%(f)s" "%(a)s"')
38 38
39 39 # loki format for easier parsing in grafana
40 40 access_log_format = (
41 41 'time="%(t)s" pid=%(p)s level="INFO" type="[GNCRN]" ip="%(h)-15s" rqt="%(L)s" response_code="%(s)s" response_bytes="%(b)-6s" uri="%(m)s:%(U)s %(q)s" user=":%(u)s" user_agent="%(a)s"')
42 42
43 43 # self adjust workers based on CPU count
44 44 # workers = get_workers()
45 45
46 46
47 47 def _get_process_rss(pid=None):
48 48 try:
49 49 import psutil
50 50 if pid:
51 51 proc = psutil.Process(pid)
52 52 else:
53 53 proc = psutil.Process()
54 54 return proc.memory_info().rss
55 55 except Exception:
56 56 return None
57 57
58 58
59 59 def _get_config(ini_path):
60 import configparser
60 61
61 62 try:
62 import configparser
63 except ImportError:
64 import ConfigParser as configparser
65 try:
66 63 config = configparser.RawConfigParser()
67 64 config.read(ini_path)
68 65 return config
69 66 except Exception:
70 67 return None
71 68
72 69
73 70 def _time_with_offset(memory_usage_check_interval):
74 71 return time.time() - random.randint(0, memory_usage_check_interval // 2)
75 72
76 73
77 74 def pre_fork(server, worker):
78 75 pass
79 76
80 77
81 78 def post_fork(server, worker):
82 79
83 80 # memory spec defaults
84 81 _memory_max_usage = 0
85 82 _memory_usage_check_interval = 60
86 83 _memory_usage_recovery_threshold = 0.8
87 84
88 85 ini_path = os.path.abspath(server.cfg.paste)
89 86 conf = _get_config(ini_path)
90 87
91 88 section = 'server:main'
92 89 if conf and conf.has_section(section):
93 90
94 91 if conf.has_option(section, 'memory_max_usage'):
95 92 _memory_max_usage = conf.getint(section, 'memory_max_usage')
96 93
97 94 if conf.has_option(section, 'memory_usage_check_interval'):
98 95 _memory_usage_check_interval = conf.getint(section, 'memory_usage_check_interval')
99 96
100 97 if conf.has_option(section, 'memory_usage_recovery_threshold'):
101 98 _memory_usage_recovery_threshold = conf.getfloat(section, 'memory_usage_recovery_threshold')
102 99
103 100 worker._memory_max_usage = int(os.environ.get('RC_GUNICORN_MEMORY_MAX_USAGE', '')
104 101 or _memory_max_usage)
105 102 worker._memory_usage_check_interval = int(os.environ.get('RC_GUNICORN_MEMORY_USAGE_CHECK_INTERVAL', '')
106 103 or _memory_usage_check_interval)
107 104 worker._memory_usage_recovery_threshold = float(os.environ.get('RC_GUNICORN_MEMORY_USAGE_RECOVERY_THRESHOLD', '')
108 105 or _memory_usage_recovery_threshold)
109 106
110 107 # register memory last check time, with some random offset so we don't recycle all
111 108 # at once
112 109 worker._last_memory_check_time = _time_with_offset(_memory_usage_check_interval)
113 110
114 111 if _memory_max_usage:
115 112 server.log.info("[%-10s] WORKER spawned with max memory set at %s", worker.pid,
116 113 _format_data_size(_memory_max_usage))
117 114 else:
118 115 server.log.info("[%-10s] WORKER spawned", worker.pid)
119 116
120 117
121 118 def pre_exec(server):
122 119 server.log.info("Forked child, re-executing.")
123 120
124 121
125 122 def on_starting(server):
126 123 server_lbl = '{} {}'.format(server.proc_name, server.address)
127 124 server.log.info("Server %s is starting.", server_lbl)
128 125
129 126
130 127 def when_ready(server):
131 128 server.log.info("Server %s is ready. Spawning workers", server)
132 129
133 130
134 131 def on_reload(server):
135 132 pass
136 133
137 134
138 135 def _format_data_size(size, unit="B", precision=1, binary=True):
139 136 """Format a number using SI units (kilo, mega, etc.).
140 137
141 138 ``size``: The number as a float or int.
142 139
143 140 ``unit``: The unit name in plural form. Examples: "bytes", "B".
144 141
145 142 ``precision``: How many digits to the right of the decimal point. Default
146 143 is 1. 0 suppresses the decimal point.
147 144
148 145 ``binary``: If false, use base-10 decimal prefixes (kilo = K = 1000).
149 146 If true, use base-2 binary prefixes (kibi = Ki = 1024).
150 147
151 148 ``full_name``: If false (default), use the prefix abbreviation
152 149 ("k" or "Ki"). If true, use the full prefix ("kilo" or "kibi").
154 151
155 152 """
156 153
157 154 if not binary:
158 155 base = 1000
159 156 multiples = ('', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
160 157 else:
161 158 base = 1024
162 159 multiples = ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi')
163 160
164 161 sign = ""
165 162 if size > 0:
166 163 m = int(math.log(size, base))
167 164 elif size < 0:
168 165 sign = "-"
169 166 size = -size
170 167 m = int(math.log(size, base))
171 168 else:
172 169 m = 0
173 170 if m > 8:
174 171 m = 8
175 172
176 173 if m == 0:
177 174 precision = '%.0f'
178 175 else:
179 176 precision = '%%.%df' % precision
180 177
181 178 size = precision % (size / math.pow(base, m))
182 179
183 180 return '%s%s %s%s' % (sign, size.strip(), multiples[m], unit)
184 181
185 182
186 183 def _check_memory_usage(worker):
187 184 memory_max_usage = worker._memory_max_usage
188 185 if not memory_max_usage:
189 186 return
190 187
191 188 memory_usage_check_interval = worker._memory_usage_check_interval
192 189 memory_usage_recovery_threshold = memory_max_usage * worker._memory_usage_recovery_threshold
193 190
194 191 elapsed = time.time() - worker._last_memory_check_time
195 192 if elapsed > memory_usage_check_interval:
196 193 mem_usage = _get_process_rss()
197 194 if mem_usage and mem_usage > memory_max_usage:
198 195 worker.log.info(
199 196 "memory usage %s > %s, forcing gc",
200 197 _format_data_size(mem_usage), _format_data_size(memory_max_usage))
201 198 # Try to clean it up by forcing a full collection.
202 199 gc.collect()
203 200 mem_usage = _get_process_rss()
204 201 if mem_usage and mem_usage > memory_usage_recovery_threshold:
205 202 # Didn't clean up enough, we'll have to terminate.
206 203 worker.log.warning(
207 204 "memory usage %s > %s after gc, quitting",
208 205 _format_data_size(mem_usage), _format_data_size(memory_max_usage))
209 206 # This will cause worker to auto-restart itself
210 207 worker.alive = False
211 208 worker._last_memory_check_time = time.time()
212 209
213 210
214 211 def worker_int(worker):
215 212 worker.log.info("[%-10s] worker received INT or QUIT signal", worker.pid)
216 213
217 214 # get traceback info, on worker crash
218 215 id2name = dict([(th.ident, th.name) for th in threading.enumerate()])
219 216 code = []
220 217 for thread_id, stack in sys._current_frames().items():
221 218 code.append(
222 219 "\n# Thread: %s(%d)" % (id2name.get(thread_id, ""), thread_id))
223 220 for fname, lineno, name, line in traceback.extract_stack(stack):
224 221 code.append('File: "%s", line %d, in %s' % (fname, lineno, name))
225 222 if line:
226 223 code.append(" %s" % (line.strip()))
227 224 worker.log.debug("\n".join(code))
228 225
229 226
230 227 def worker_abort(worker):
231 228 worker.log.info("[%-10s] worker received SIGABRT signal", worker.pid)
232 229
233 230
234 231 def worker_exit(server, worker):
235 232 worker.log.info("[%-10s] worker exit", worker.pid)
236 233
237 234
238 235 def child_exit(server, worker):
239 236 worker.log.info("[%-10s] worker child exit", worker.pid)
240 237
241 238
242 239 def pre_request(worker, req):
243 240 worker.start_time = time.time()
244 241 worker.log.debug(
245 242 "GNCRN PRE WORKER [cnt:%s]: %s %s", worker.nr, req.method, req.path)
246 243
247 244
248 245 def post_request(worker, req, environ, resp):
249 246 total_time = time.time() - worker.start_time
250 247 # Gunicorn sometimes has problems with reading the status_code
251 248 status_code = getattr(resp, 'status_code', '')
252 249 worker.log.debug(
253 250 "GNCRN POST WORKER [cnt:%s]: %s %s resp: %s, Load Time: %.4fs",
254 251 worker.nr, req.method, req.path, status_code, total_time)
255 252 _check_memory_usage(worker)
256 253
257 254
258 255 def _filter_proxy(ip):
259 256 """
260 257 IP addresses passed in via HEADERS can be in a special format of multiple
261 258 IPs. Those comma separated IPs are passed from various proxies in the
262 259 chain of request processing, the left-most being the original client.
263 260 We only care about the first IP, which came from the original client.
264 261
265 262 :param ip: ip string from headers
266 263 """
267 264 if ',' in ip:
268 265 _ips = ip.split(',')
269 266 _first_ip = _ips[0].strip()
270 267 return _first_ip
271 268 return ip
272 269
273 270
274 271 def _filter_port(ip):
275 272 """
276 273 Removes a port from ip, there are 4 main cases to handle here.
277 274 - ipv4 eg. 127.0.0.1
278 275 - ipv6 eg. ::1
279 276 - ipv4+port eg. 127.0.0.1:8080
280 277 - ipv6+port eg. [::1]:8080
281 278
282 279 :param ip:
283 280 """
284 281 def is_ipv6(ip_addr):
285 282 if hasattr(socket, 'inet_pton'):
286 283 try:
287 284 socket.inet_pton(socket.AF_INET6, ip_addr)
288 285 except socket.error:
289 286 return False
290 287 else:
291 288 return False
292 289 return True
293 290
294 291 if ':' not in ip: # must be ipv4 pure ip
295 292 return ip
296 293
297 294 if '[' in ip and ']' in ip: # ipv6 with port
298 295 return ip.split(']')[0][1:].lower()
299 296
300 297 # must be ipv6 or ipv4 with port
301 298 if is_ipv6(ip):
302 299 return ip
303 300 else:
304 301 ip, _port = ip.split(':')[:2] # means ipv4+port
305 302 return ip
306 303
307 304
308 305 def get_ip_addr(environ):
309 306 proxy_key = 'HTTP_X_REAL_IP'
310 307 proxy_key2 = 'HTTP_X_FORWARDED_FOR'
311 308 def_key = 'REMOTE_ADDR'
312 309 _filters = lambda x: _filter_port(_filter_proxy(x))
313 310
314 311 ip = environ.get(proxy_key)
315 312 if ip:
316 313 return _filters(ip)
317 314
318 315 ip = environ.get(proxy_key2)
319 316 if ip:
320 317 return _filters(ip)
321 318
322 319 ip = environ.get(def_key, '0.0.0.0')
323 320 return _filters(ip)
324 321
325 322
326 323 class RhodeCodeLogger(Logger):
327 324 """
328 325 Custom Logger that allows some customization that gunicorn doesn't support
329 326 """
330 327
331 328 datefmt = r"%Y-%m-%d %H:%M:%S"
332 329
333 330 def __init__(self, cfg):
334 331 Logger.__init__(self, cfg)
335 332
336 333 def now(self):
337 334 """ return date in RhodeCode Log format """
338 335 now = time.time()
339 msecs = int((now - long(now)) * 1000)
336 msecs = int((now - int(now)) * 1000)
340 337 return time.strftime(self.datefmt, time.localtime(now)) + '.{0:03d}'.format(msecs)
341 338
342 339 def atoms(self, resp, req, environ, request_time):
343 340 """ Gets atoms for log formatting.
344 341 """
345 342 status = resp.status
346 343 if isinstance(status, str):
347 344 status = status.split(None, 1)[0]
348 345 atoms = {
349 346 'h': get_ip_addr(environ),
350 347 'l': '-',
351 348 'u': self._get_user(environ) or '-',
352 349 't': self.now(),
353 350 'r': "%s %s %s" % (environ['REQUEST_METHOD'],
354 351 environ['RAW_URI'],
355 352 environ["SERVER_PROTOCOL"]),
356 353 's': status,
357 354 'm': environ.get('REQUEST_METHOD'),
358 355 'U': environ.get('PATH_INFO'),
359 356 'q': environ.get('QUERY_STRING'),
360 357 'H': environ.get('SERVER_PROTOCOL'),
361 358 'b': getattr(resp, 'sent', None) is not None and str(resp.sent) or '-',
362 359 'B': getattr(resp, 'sent', None),
363 360 'f': environ.get('HTTP_REFERER', '-'),
364 361 'a': environ.get('HTTP_USER_AGENT', '-'),
365 362 'T': request_time.seconds,
366 363 'D': (request_time.seconds * 1000000) + request_time.microseconds,
367 364 'M': (request_time.seconds * 1000) + int(request_time.microseconds/1000),
368 365 'L': "%d.%06d" % (request_time.seconds, request_time.microseconds),
369 366 'p': "<%s>" % os.getpid()
370 367 }
371 368
372 369 # add request headers
373 370 if hasattr(req, 'headers'):
374 371 req_headers = req.headers
375 372 else:
376 373 req_headers = req
377 374
378 375 if hasattr(req_headers, "items"):
379 376 req_headers = req_headers.items()
380 377
381 378 atoms.update({"{%s}i" % k.lower(): v for k, v in req_headers})
382 379
383 380 resp_headers = resp.headers
384 381 if hasattr(resp_headers, "items"):
385 382 resp_headers = resp_headers.items()
386 383
387 384 # add response headers
388 385 atoms.update({"{%s}o" % k.lower(): v for k, v in resp_headers})
389 386
390 387 # add environ variables
391 388 environ_variables = environ.items()
392 389 atoms.update({"{%s}e" % k.lower(): v for k, v in environ_variables})
393 390
394 391 return atoms
395 392
396 393 logger_class = RhodeCodeLogger
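
A worked example of the memory-recycling thresholds used by _check_memory_usage above, with illustrative numbers taken from the ini comments: a gc is forced when RSS exceeds the maximum, and the worker is recycled only if RSS stays above max * threshold after the collection.

    # sketch: memory_max_usage = 512MB, recovery threshold factor 0.8
    memory_max_usage = 536870912                       # 512MB, as in the ini examples
    recovery_factor = 0.8
    recovery_threshold = memory_max_usage * recovery_factor
    print(_format_data_size(memory_max_usage))         # '512.0 MiB'
    print(_format_data_size(int(recovery_threshold)))  # '409.6 MiB'
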
@@ -1,238 +1,238 b''
1 ## -*- coding: utf-8 -*-
1 #
2 2
3 3 ; #################################
4 4 ; RHODECODE VCSSERVER CONFIGURATION
5 5 ; #################################
6 6
7 7 [server:main]
8 8 ; COMMON HOST/IP CONFIG
9 9 host = 127.0.0.1
10 10 port = 9900
11 11
12 12
13 13 ; ###########################
14 14 ; GUNICORN APPLICATION SERVER
15 15 ; ###########################
16 16
17 17 ; run with gunicorn --paste rhodecode.ini
18 18
19 19 ; Module to use, this setting shouldn't be changed
20 20 use = egg:gunicorn#main
21 21
22 22 ; Sets the number of process workers. More workers means more concurrent connections
23 23 ; RhodeCode can handle at the same time. Each additional worker also increases
24 24 ; memory usage, as each has its own set of caches.
25 25 ; Recommended value is (2 * NUMBER_OF_CPUS + 1), e.g. 2 CPUs = 5 workers, but no more
26 26 ; than 8-10 unless for really big deployments, e.g. 700-1000 users.
27 27 ; `instance_id = *` must be set in the [app:main] section below (which is the default)
28 28 ; when using more than 1 worker.
29 29 workers = 2
30 30
31 31 ; Gunicorn access log level
32 32 loglevel = info
33 33
34 34 ; Process name visible in process list
35 35 proc_name = rhodecode_vcsserver
36 36
37 37 ; Type of worker class, one of `sync`, `gevent`
38 38 ; currently `sync` is the only option allowed.
39 39 worker_class = sync
40 40
41 41 ; The maximum number of simultaneous clients. Valid only for gevent
42 42 worker_connections = 10
43 43
44 44 ; Max number of requests that worker will handle before being gracefully restarted.
45 45 ; Prevents memory leaks, jitter adds variability so not all workers are restarted at once.
46 46 max_requests = 1000
47 47 max_requests_jitter = 30
48 48
49 49 ; Amount of time a worker can spend with handling a request before it
50 50 ; gets killed and restarted. By default set to 21600 (6hrs)
51 51 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
52 52 timeout = 21600
53 53
54 54 ; The maximum size of HTTP request line in bytes.
55 55 ; 0 for unlimited
56 56 limit_request_line = 0
57 57
58 58 ; Limit the number of HTTP headers fields in a request.
59 59 ; By default this value is 100 and can't be larger than 32768.
60 60 limit_request_fields = 32768
61 61
62 62 ; Limit the allowed size of an HTTP request header field.
63 63 ; Value is a positive number or 0.
64 64 ; Setting it to 0 will allow unlimited header field sizes.
65 65 limit_request_field_size = 0
66 66
67 67 ; Timeout for graceful workers restart.
68 68 ; After receiving a restart signal, workers have this much time to finish
69 69 ; serving requests. Workers still alive after the timeout (starting from the
70 70 ; receipt of the restart signal) are force killed.
71 71 ; Examples: 1800 (30min), 3600 (1hr), 7200 (2hr), 43200 (12h)
72 72 graceful_timeout = 21600
73 73
74 74 # The number of seconds to wait for requests on a Keep-Alive connection.
75 75 # Generally set in the 1-5 seconds range.
76 76 keepalive = 2
77 77
78 78 ; Maximum memory usage that each worker can use before it will receive a
79 79 ; graceful restart signal. 0 = memory monitoring is disabled
80 80 ; Examples: 268435456 (256MB), 536870912 (512MB)
81 81 ; 1073741824 (1GB), 2147483648 (2GB), 4294967296 (4GB)
82 82 memory_max_usage = 0
83 83
84 84 ; How often in seconds to check for memory usage for each gunicorn worker
85 85 memory_usage_check_interval = 60
86 86
87 87 ; Threshold value below which we don't recycle a worker if GarbageCollection
88 88 ; frees up enough resources. Before each restart we try to run GC on the worker;
89 89 ; if we get enough free memory after that, the restart will not happen.
90 90 memory_usage_recovery_threshold = 0.8
91 91
92 92
93 93 [app:main]
94 94 ; The %(here)s variable will be replaced with the absolute path of parent directory
95 95 ; of this file
96 96 ; Each option in the app:main section can be overridden by an environment variable
97 97 ;
98 98 ;To override an option:
99 99 ;
100 100 ;RC_<KeyName>
101 101 ;Everything should be uppercase, . and - should be replaced by _.
102 102 ;For example, if you have these configuration settings:
103 103 ;rc_cache.repo_object.backend = foo
104 104 ;can be overridden by
105 105 ;export RC_CACHE_REPO_OBJECT_BACKEND=foo
106 106
107 107 use = egg:rhodecode-vcsserver
108 108
109 109 ; Pyramid default locales, we need this to be set
110 110 #pyramid.default_locale_name = en
111 111
112 112 ; default locale used by VCS systems
113 113 #locale = en_US.UTF-8
114 114
115 115 ; path to binaries for vcsserver, it should be set by the installer
116 116 ; at installation time, e.g /home/user/.rccontrol/vcsserver-1/profile/bin
117 117 ; it can also be a path to nix-build output in case of development
118 118 core.binary_dir = ""
119 119
120 120 ; Custom exception store path, defaults to TMPDIR
121 121 ; This is used to store exception from RhodeCode in shared directory
122 122 #exception_tracker.store_path =
123 123
124 124 ; #############
125 125 ; DOGPILE CACHE
126 126 ; #############
127 127
128 128 ; Default cache dir for caches. Putting this into a ramdisk can boost performance.
129 129 ; eg. /tmpfs/data_ramdisk, however this directory might require large amount of space
130 130 #cache_dir = %(here)s/data
131 131
132 132 ; ***************************************
133 133 ; `repo_object` cache, default file based
134 134 ; ***************************************
135 135
136 136 ; `repo_object` cache settings for vcs methods for repositories
137 137 #rc_cache.repo_object.backend = dogpile.cache.rc.file_namespace
138 138
139 139 ; cache auto-expires after N seconds
140 140 ; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
141 141 #rc_cache.repo_object.expiration_time = 2592000
142 142
143 143 ; file cache store path. Defaults to `cache_dir =` value or tempdir if both values are not set
144 144 #rc_cache.repo_object.arguments.filename = /tmp/vcsserver_cache_repo_object.db
145 145
146 146 ; ***********************************************************
147 147 ; `repo_object` cache with redis backend
148 148 ; recommended for larger instances and for better performance
149 149 ; ***********************************************************
150 150
151 151 ; `repo_object` cache settings for vcs methods for repositories
152 152 #rc_cache.repo_object.backend = dogpile.cache.rc.redis_msgpack
153 153
154 154 ; cache auto-expires after N seconds
155 155 ; Examples: 86400 (1Day), 604800 (7Days), 1209600 (14Days), 2592000 (30days), 7776000 (90Days)
156 156 #rc_cache.repo_object.expiration_time = 2592000
157 157
158 158 ; redis_expiration_time needs to be greater than expiration_time
159 159 #rc_cache.repo_object.arguments.redis_expiration_time = 3592000
160 160
161 161 #rc_cache.repo_object.arguments.host = localhost
162 162 #rc_cache.repo_object.arguments.port = 6379
163 163 #rc_cache.repo_object.arguments.db = 5
164 164 #rc_cache.repo_object.arguments.socket_timeout = 30
165 165 ; more Redis options: https://dogpilecache.sqlalchemy.org/en/latest/api.html#redis-backends
166 166 #rc_cache.repo_object.arguments.distributed_lock = true
167 167
168 168 ; auto-renew lock to prevent stale locks, slower but safer. Use only if problems happen
169 169 #rc_cache.repo_object.arguments.lock_auto_renewal = true
170 170
171 171 ; Statsd client config, this is used to send metrics to statsd
172 172 ; We recommend setting up statsd_exporter and scraping the metrics using Prometheus
173 173 #statsd.enabled = false
174 174 #statsd.statsd_host = 0.0.0.0
175 175 #statsd.statsd_port = 8125
176 176 #statsd.statsd_prefix =
177 177 #statsd.statsd_ipv6 = false
178 178
179 179 ; configure logging automatically at server startup; set to false
180 180 ; to use the custom logging config below.
181 181 ; RC_LOGGING_FORMATTER
182 182 ; RC_LOGGING_LEVEL
183 183 ; env variables can control the settings for logging in case of autoconfigure
184 184
185 185 #logging.autoconfigure = true
186 186
187 187 ; specify your own custom logging config file to configure logging
188 188 #logging.logging_conf_file = /path/to/custom_logging.ini
189 189
190 190 ; #####################
191 191 ; LOGGING CONFIGURATION
192 192 ; #####################
193 193
194 194 [loggers]
195 195 keys = root, vcsserver
196 196
197 197 [handlers]
198 198 keys = console
199 199
200 200 [formatters]
201 201 keys = generic, json
202 202
203 203 ; #######
204 204 ; LOGGERS
205 205 ; #######
206 206 [logger_root]
207 207 level = NOTSET
208 208 handlers = console
209 209
210 210 [logger_vcsserver]
211 211 level = INFO
212 212 handlers =
213 213 qualname = vcsserver
214 214 propagate = 1
215 215
216 216 ; ########
217 217 ; HANDLERS
218 218 ; ########
219 219
220 220 [handler_console]
221 221 class = StreamHandler
222 222 args = (sys.stderr, )
223 223 level = INFO
224 224 ; To enable JSON formatted logs replace 'generic' with 'json'
225 225 ; This allows sending properly formatted logs to grafana loki or elasticsearch
226 226 formatter = generic
227 227
228 228 ; ##########
229 229 ; FORMATTERS
230 230 ; ##########
231 231
232 232 [formatter_generic]
233 233 format = %(asctime)s.%(msecs)03d [%(process)d] %(levelname)-5.5s [%(name)s] %(message)s
234 234 datefmt = %Y-%m-%d %H:%M:%S
235 235
236 236 [formatter_json]
237 237 format = %(timestamp)s %(levelname)s %(name)s %(message)s %(req_id)s
238 238 class = vcsserver.lib._vendor.jsonlogger.JsonFormatter
@@ -1,56 +1,56 b''
1 1 # RhodeCode VCSServer provides access to different vcs backends via network.
2 # Copyright (C) 2014-2020 RhodeCode GmbH
2 # Copyright (C) 2014-2023 RhodeCode GmbH
3 3 #
4 4 # This program is free software; you can redistribute it and/or modify
5 5 # it under the terms of the GNU General Public License as published by
6 6 # the Free Software Foundation; either version 3 of the License, or
7 7 # (at your option) any later version.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU General Public License
15 15 # along with this program; if not, write to the Free Software Foundation,
16 16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 17
18 18 import socket
19 19 import pytest
20 20
21 21
22 22 def pytest_addoption(parser):
23 23 parser.addoption(
24 24 '--perf-repeat-vcs', type=int, default=100,
25 25 help="Number of repetitions in performance tests.")
26 26
27 27
28 28 @pytest.fixture(scope='session')
29 29 def repeat(request):
30 30 """
31 31 The number of repetitions is based on this fixture.
32 32
33 33 Slower calls may divide it by 10 or 100. It is chosen in a way so that the
34 34 tests are not too slow in our default test suite.
35 35 """
36 36 return request.config.getoption('--perf-repeat-vcs')
37 37
38 38
39 39 @pytest.fixture(scope='session')
40 40 def vcsserver_port(request):
41 41 port = get_available_port()
42 print('Using vcsserver port %s' % (port, ))
42 print(f'Using vcsserver port {port}')
43 43 return port
44 44
45 45
46 46 def get_available_port():
47 47 family = socket.AF_INET
48 48 socktype = socket.SOCK_STREAM
49 49 host = '127.0.0.1'
50 50
51 51 mysocket = socket.socket(family, socktype)
52 52 mysocket.bind((host, 0))
53 53 port = mysocket.getsockname()[1]
54 54 mysocket.close()
55 55 del mysocket
56 56 return port
@@ -1,47 +1,56 b''
1 ## dependencies
2
3 # our custom configobj
4 https://code.rhodecode.com/upstream/configobj/artifacts/download/0-012de99a-b1e1-4f64-a5c0-07a98a41b324.tar.gz?md5=6a513f51fe04b2c18cf84c1395a7c626#egg=configobj==5.0.6
1 # deps, generated via pipdeptree --exclude setuptools,wheel,pipdeptree,pip -f | tr '[:upper:]' '[:lower:]'
5 2
6 dogpile.cache==0.9.0
7 decorator==4.1.2
8 dulwich==0.13.0
9 hgsubversion==1.9.3
10 hg-evolve==9.1.0
11 mako==1.1.0
12 markupsafe==1.1.1
13 mercurial==5.1.1
14 msgpack-python==0.5.6
15
16 pastedeploy==2.1.0
17 pyramid==1.10.4
18 pyramid-mako==1.1.0
19 pygit2==0.28.2
20
3 async-timeout==4.0.2
4 atomicwrites==1.4.1
5 contextlib2==21.6.0
6 cov-core==1.15.0
7 coverage==7.2.3
8 diskcache==5.6.1
9 dogpile.cache==1.2.2
10 decorator==5.1.1
11 stevedore==5.0.0
12 pbr==5.11.1
13 dulwich==0.21.5
14 urllib3==1.26.14
15 gunicorn==21.0.1
16 packaging==23.1
17 hg-evolve==11.0.2
18 importlib-metadata==6.0.0
19 zipp==3.15.0
20 mercurial==6.3.3
21 mock==5.0.2
22 more-itertools==9.1.0
23 msgpack==1.0.5
24 orjson==3.9.2
25 psutil==5.9.5
26 py==1.11.0
27 pygit2==1.12.2
28 cffi==1.15.1
29 pycparser==2.21
30 pygments==2.15.1
31 pyparsing==3.0.9
32 pyramid==2.0.1
33 hupper==1.12
34 plaster==1.1.2
35 plaster-pastedeploy==1.0.1
36 pastedeploy==3.0.1
37 plaster==1.1.2
38 translationstring==1.4
39 venusian==3.0.0
40 webob==1.8.7
41 zope.deprecation==5.0.0
42 zope.interface==6.0.0
43 redis==4.6.0
44 async-timeout==4.0.2
21 45 repoze.lru==0.7
22 redis==3.5.3
23 simplejson==3.16.0
24 subprocess32==3.5.4
25 subvertpy==0.10.1
46 scandir==1.10.0
47 setproctitle==1.3.2
48 subvertpy==0.11.0
49 wcwidth==0.2.6
26 50
27 six==1.11.0
28 translationstring==1.3
29 webob==1.8.5
30 zope.deprecation==4.4.0
31 zope.interface==4.6.0
32
33 ## http servers
34 gevent==1.5.0
35 greenlet==0.4.15
36 gunicorn==19.9.0
37 waitress==1.3.1
38
39 ## debug
40 ipdb==0.13.2
41 ipython==5.1.0
42 51
43 52 ## test related requirements
44 -r requirements_test.txt
53 #-r requirements_test.txt
45 54
46 55 ## uncomment to add the debug libraries
47 56 #-r requirements_debug.txt
@@ -1,8 +1,22 b''
1 1 ## special libraries we could extend the requirements.txt file with to add some
2 ## custom libraries useful for debug and memory tracing
3
4 ## uncomment inclusion of this file in requirements.txt run make generate-pkgs and nix-shell
2 ## custom libraries useful for debug and memory tracing
5 3
6 4 objgraph
7 5 memory-profiler
8 6 pympler
7
8 ## debug
9 ipdb
10 ipython
11 rich
12
13 # format
14 flake8
15 ruff
16
17 pipdeptree==2.7.1
18 invoke==2.0.0
19 bumpversion==0.6.0
20 bump2version==1.0.1
21
22 docutils-stubs
@@ -1,16 +1,45 b''
1 1 # test related requirements
2 pytest==4.6.9
3 py==1.8.1
4 pytest-cov==2.8.1
5 pytest-sugar==0.9.3
6 pytest-runner==5.2.0
2
3 cov-core==1.15.0
4 coverage==7.2.3
5 mock==5.0.2
6 py==1.11.0
7 pytest-cov==4.0.0
8 coverage==7.2.3
9 pytest==7.3.1
10 attrs==22.2.0
11 iniconfig==2.0.0
12 packaging==23.1
13 pluggy==1.0.0
7 14 pytest-profiling==1.7.0
8 pytest-timeout==1.3.3
9 gprof2dot==2017.9.19
15 gprof2dot==2022.7.29
16 pytest==7.3.1
17 attrs==22.2.0
18 iniconfig==2.0.0
19 packaging==23.1
20 pluggy==1.0.0
21 six==1.16.0
22 pytest-runner==6.0.0
23 pytest-sugar==0.9.7
24 packaging==23.1
25 pytest==7.3.1
26 attrs==22.2.0
27 iniconfig==2.0.0
28 packaging==23.1
29 pluggy==1.0.0
30 termcolor==2.3.0
31 pytest-timeout==2.1.0
32 pytest==7.3.1
33 attrs==22.2.0
34 iniconfig==2.0.0
35 packaging==23.1
36 pluggy==1.0.0
37 webtest==3.0.0
38 beautifulsoup4==4.11.2
39 soupsieve==2.4
40 waitress==2.1.2
41 webob==1.8.7
10 42
11 mock==3.0.5
12 cov-core==1.15.0
13 coverage==4.5.4
14
15 webtest==2.0.34
16 beautifulsoup4==4.6.3
43 # RhodeCode test-data
44 rc_testdata @ https://code.rhodecode.com/upstream/rc-testdata-dist/raw/77378e9097f700b4c1b9391b56199fe63566b5c9/rc_testdata-0.11.0.tar.gz#egg=rc_testdata
45 rc_testdata==0.11.0
@@ -1,1 +1,1 b''
1 4.28.0 No newline at end of file
1 5.0.0 No newline at end of file
@@ -1,28 +1,28 b''
1 1 # RhodeCode VCSServer provides access to different vcs backends via network.
2 # Copyright (C) 2014-2020 RhodeCode GmbH
2 # Copyright (C) 2014-2023 RhodeCode GmbH
3 3 #
4 4 # This program is free software; you can redistribute it and/or modify
5 5 # it under the terms of the GNU General Public License as published by
6 6 # the Free Software Foundation; either version 3 of the License, or
7 7 # (at your option) any later version.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU General Public License
15 15 # along with this program; if not, write to the Free Software Foundation,
16 16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 17
18 18 import pkgutil
19 19
20 20
21 __version__ = pkgutil.get_data('vcsserver', 'VERSION').strip()
21 __version__ = pkgutil.get_data('vcsserver', 'VERSION').strip().decode()
22 22
23 23 # link to config for pyramid
24 24 CONFIG = {}
25 25
26 26 # Populated with the settings dictionary from application init in
27 27 #
28 28 PYRAMID_SETTINGS = {}
@@ -1,130 +1,195 b''
1 1 # RhodeCode VCSServer provides access to different vcs backends via network.
2 # Copyright (C) 2014-2020 RhodeCode GmbH
2 # Copyright (C) 2014-2023 RhodeCode GmbH
3 3 #
4 4 # This program is free software; you can redistribute it and/or modify
5 5 # it under the terms of the GNU General Public License as published by
6 6 # the Free Software Foundation; either version 3 of the License, or
7 7 # (at your option) any later version.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU General Public License
15 15 # along with this program; if not, write to the Free Software Foundation,
16 16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 17 import os
18 18 import sys
19 import tempfile
19 20 import traceback
20 21 import logging
21 import urlparse
22 import urllib.parse
23
24 from vcsserver.lib.rc_cache.archive_cache import get_archival_cache_store
25 from vcsserver.lib.rc_cache import region_meta
22 26
23 27 from vcsserver import exceptions
24 28 from vcsserver.exceptions import NoContentException
25 from vcsserver.hgcompat import (archival)
29 from vcsserver.hgcompat import archival
30 from vcsserver.str_utils import safe_bytes
26 31
27 32 log = logging.getLogger(__name__)
28 33
29 34
30 35 class RepoFactory(object):
31 36 """
32 37 Utility to create instances of repository
33 38
34 39 It provides internal caching of the `repo` object based on
35 40 the :term:`call context`.
36 41 """
37 42 repo_type = None
38 43
39 44 def __init__(self):
40 pass
45 self._cache_region = region_meta.dogpile_cache_regions['repo_object']
41 46
42 47 def _create_config(self, path, config):
43 48 config = {}
44 49 return config
45 50
46 51 def _create_repo(self, wire, create):
47 52 raise NotImplementedError()
48 53
49 54 def repo(self, wire, create=False):
50 55 raise NotImplementedError()
51 56
52 57
53 58 def obfuscate_qs(query_string):
54 59 if query_string is None:
55 60 return None
56 61
57 62 parsed = []
58 for k, v in urlparse.parse_qsl(query_string, keep_blank_values=True):
63 for k, v in urllib.parse.parse_qsl(query_string, keep_blank_values=True):
59 64 if k in ['auth_token', 'api_key']:
60 65 v = "*****"
61 66 parsed.append((k, v))
62 67
63 68 return '&'.join('{}{}'.format(
64 k, '={}'.format(v) if v else '') for k, v in parsed)
69 k, f'={v}' if v else '') for k, v in parsed)
65 70
66 71
67 def raise_from_original(new_type):
72 def raise_from_original(new_type, org_exc: Exception):
68 73 """
69 74 Raise a new exception type with original args and traceback.
70 75 """
76
71 77 exc_type, exc_value, exc_traceback = sys.exc_info()
72 78 new_exc = new_type(*exc_value.args)
79
73 80 # store the original traceback into the new exc
74 new_exc._org_exc_tb = traceback.format_exc(exc_traceback)
81 new_exc._org_exc_tb = traceback.format_tb(exc_traceback)
75 82
76 83 try:
77 raise new_exc, None, exc_traceback
84 raise new_exc.with_traceback(exc_traceback)
78 85 finally:
79 86 del exc_traceback
80 87
81 88
89
82 90 class ArchiveNode(object):
83 91 def __init__(self, path, mode, is_link, raw_bytes):
84 92 self.path = path
85 93 self.mode = mode
86 94 self.is_link = is_link
87 95 self.raw_bytes = raw_bytes
88 96
89 97
90 def archive_repo(walker, archive_dest_path, kind, mtime, archive_at_path,
91 archive_dir_name, commit_id, write_metadata=True, extra_metadata=None):
98 def store_archive_in_cache(node_walker, archive_key, kind, mtime, archive_at_path, archive_dir_name,
99 commit_id, write_metadata=True, extra_metadata=None, cache_config=None):
92 100 """
101 Function that stores a generated archive and sends it to a dedicated backend store.
102 Here we use diskcache.
103
104 :param node_walker: a generator returning nodes to add to archive
105 :param archive_key: key under which the generated archive is stored
106 :param kind: archive kind
107 :param mtime: time of creation
108 :param archive_at_path: default '/'; the path at which the archive was started. If this is not '/', it means it's a partial archive
109 :param archive_dir_name: name of the top-level directory placed inside the archive
110 :param commit_id: commit sha of the revision the archive was created at
111 :param write_metadata: whether to add a '.archival.txt' metadata file to the archive
112 :param extra_metadata: extra key/value pairs to include in the metadata file
113 :param cache_config: configuration dict for the diskcache archive store
114
93 115 walker should be a file walker, for example:
94 def walker():
116 def node_walker():
95 117 for file_info in files:
96 118 yield ArchiveNode(fn, mode, is_link, ctx[fn].data)
97 119 """
98 120 extra_metadata = extra_metadata or {}
99 121
122 d_cache = get_archival_cache_store(config=cache_config)
123
124 if archive_key in d_cache:
125 with d_cache as d_cache_reader:
126 reader, tag = d_cache_reader.get(archive_key, read=True, tag=True, retry=True)
127 return reader.name
128
129 archive_tmp_path = safe_bytes(tempfile.mkstemp()[1])
130 log.debug('Creating new temp archive in %s', archive_tmp_path)
131
100 132 if kind == "tgz":
101 archiver = archival.tarit(archive_dest_path, mtime, "gz")
133 archiver = archival.tarit(archive_tmp_path, mtime, b"gz")
102 134 elif kind == "tbz2":
103 archiver = archival.tarit(archive_dest_path, mtime, "bz2")
135 archiver = archival.tarit(archive_tmp_path, mtime, b"bz2")
104 136 elif kind == 'zip':
105 archiver = archival.zipit(archive_dest_path, mtime)
137 archiver = archival.zipit(archive_tmp_path, mtime)
106 138 else:
107 139 raise exceptions.ArchiveException()(
108 'Remote does not support: "%s" archive type.' % kind)
140 f'Remote does not support: "{kind}" archive type.')
109 141
110 for f in walker(commit_id, archive_at_path):
111 f_path = os.path.join(archive_dir_name, f.path.lstrip('/'))
142 for f in node_walker(commit_id, archive_at_path):
143 f_path = os.path.join(safe_bytes(archive_dir_name), safe_bytes(f.path).lstrip(b'/'))
112 144 try:
113 145 archiver.addfile(f_path, f.mode, f.is_link, f.raw_bytes())
114 146 except NoContentException:
115 147 # NOTE(marcink): this is a special case for SVN so we can create "empty"
116 148 # directories which aren't supported by the archiver
117 archiver.addfile(os.path.join(f_path, '.dir'), f.mode, f.is_link, '')
149 archiver.addfile(os.path.join(f_path, b'.dir'), f.mode, f.is_link, b'')
118 150
119 151 if write_metadata:
120 152 metadata = dict([
121 153 ('commit_id', commit_id),
122 154 ('mtime', mtime),
123 155 ])
124 156 metadata.update(extra_metadata)
125 157
126 meta = ["%s:%s" % (f_name, value) for f_name, value in metadata.items()]
127 f_path = os.path.join(archive_dir_name, '.archival.txt')
128 archiver.addfile(f_path, 0o644, False, '\n'.join(meta))
158 meta = [safe_bytes(f"{f_name}:{value}") for f_name, value in metadata.items()]
159 f_path = os.path.join(safe_bytes(archive_dir_name), b'.archival.txt')
160 archiver.addfile(f_path, 0o644, False, b'\n'.join(meta))
161
162 archiver.done()
163
164 # ensure set & get are atomic
165 with d_cache.transact():
166
167 with open(archive_tmp_path, 'rb') as archive_file:
168 add_result = d_cache.set(archive_key, archive_file, read=True, tag='db-name', retry=True)
169 if not add_result:
170 log.error('Failed to store cache for key=%s', archive_key)
171
172 os.remove(archive_tmp_path)
129 173
130 return archiver.done()
174 reader, tag = d_cache.get(archive_key, read=True, tag=True, retry=True)
175 if not reader:
176 raise AssertionError(f'empty reader on key={archive_key} added={add_result}')
177
178 return reader.name
179
180
181 class BinaryEnvelope(object):
182 def __init__(self, val):
183 self.val = val
184
185
186 class BytesEnvelope(bytes):
187 def __new__(cls, content):
188 if isinstance(content, bytes):
189 return super().__new__(cls, content)
190 else:
191 raise TypeError('Content must be bytes.')
192
193
194 class BinaryBytesEnvelope(BytesEnvelope):
195 pass
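The flow above generates the archive into a temporary file, promotes it into the diskcache-backed store under `archive_key` inside a transaction, and returns the cached file path. A minimal sketch of a caller follows; `files` is illustrative stand-in data, not part of the vcsserver API:

from vcsserver.base import ArchiveNode, store_archive_in_cache

files = {'README.rst': b'hello'}  # stand-in for real repository content

def node_walker(commit_id, archive_at_path):
    # raw_bytes must be a callable: addfile() above invokes f.raw_bytes()
    for path, content in files.items():
        yield ArchiveNode(path, 0o644, False, lambda c=content: c)

cached_path = store_archive_in_cache(
    node_walker, archive_key='myrepo/deadbeef.tgz', kind='tgz', mtime=0,
    archive_at_path='/', archive_dir_name='myrepo', commit_id='deadbeef',
    cache_config={})  # the expected cache_config shape comes from get_archival_cache_store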
@@ -0,0 +1,1 b''
1 # Copyright (C) 2014-2023 RhodeCode GmbH
@@ -1,207 +1,168 b''
1 # -*- coding: utf-8 -*-
2
3 # Copyright (C) 2010-2020 RhodeCode GmbH
1 # Copyright (C) 2010-2023 RhodeCode GmbH
4 2 #
5 3 # This program is free software: you can redistribute it and/or modify
6 4 # it under the terms of the GNU Affero General Public License, version 3
7 5 # (only), as published by the Free Software Foundation.
8 6 #
9 7 # This program is distributed in the hope that it will be useful,
10 8 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 9 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 10 # GNU General Public License for more details.
13 11 #
14 12 # You should have received a copy of the GNU Affero General Public License
15 13 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 14 #
17 15 # This program is dual-licensed. If you wish to learn more about the
18 16 # RhodeCode Enterprise Edition, including its added features, Support services,
19 17 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 18
21 19 import os
22 20 import textwrap
23 21 import string
24 22 import functools
25 23 import logging
26 24 import tempfile
27 25 import logging.config
26
27 from vcsserver.type_utils import str2bool, aslist
28
28 29 log = logging.getLogger(__name__)
29 30
30 31 # skip keys that are set here, so we don't double-process those
31 32 set_keys = {
32 33 '__file__': ''
33 34 }
34 35
35 36
36 def str2bool(_str):
37 """
38 returns True/False value from given string, it tries to translate the
39 string into boolean
40
41 :param _str: string value to translate into boolean
42 :rtype: boolean
43 :returns: boolean from given string
44 """
45 if _str is None:
46 return False
47 if _str in (True, False):
48 return _str
49 _str = str(_str).strip().lower()
50 return _str in ('t', 'true', 'y', 'yes', 'on', '1')
51
52
53 def aslist(obj, sep=None, strip=True):
54 """
55 Returns given string separated by sep as list
56
57 :param obj:
58 :param sep:
59 :param strip:
60 """
61 if isinstance(obj, (basestring,)):
62 if obj in ['', ""]:
63 return []
64
65 lst = obj.split(sep)
66 if strip:
67 lst = [v.strip() for v in lst]
68 return lst
69 elif isinstance(obj, (list, tuple)):
70 return obj
71 elif obj is None:
72 return []
73 else:
74 return [obj]
75
76
77 37 class SettingsMaker(object):
78 38
79 39 def __init__(self, app_settings):
80 40 self.settings = app_settings
81 41
82 42 @classmethod
83 43 def _bool_func(cls, input_val):
84 if isinstance(input_val, unicode):
85 input_val = input_val.encode('utf8')
44 if isinstance(input_val, bytes):
45 # decode to str
46 input_val = input_val.decode('utf8')
86 47 return str2bool(input_val)
87 48
88 49 @classmethod
89 50 def _int_func(cls, input_val):
90 51 return int(input_val)
91 52
92 53 @classmethod
93 54 def _list_func(cls, input_val, sep=','):
94 55 return aslist(input_val, sep=sep)
95 56
96 57 @classmethod
97 58 def _string_func(cls, input_val, lower=True):
98 59 if lower:
99 60 input_val = input_val.lower()
100 61 return input_val
101 62
102 63 @classmethod
103 64 def _float_func(cls, input_val):
104 65 return float(input_val)
105 66
106 67 @classmethod
107 68 def _dir_func(cls, input_val, ensure_dir=False, mode=0o755):
108 69
109 70 # ensure we have our dir created
110 71 if not os.path.isdir(input_val) and ensure_dir:
111 os.makedirs(input_val, mode=mode)
72 os.makedirs(input_val, mode=mode, exist_ok=True)
112 73
113 74 if not os.path.isdir(input_val):
114 raise Exception('Dir at {} does not exist'.format(input_val))
75 raise Exception(f'Dir at {input_val} does not exist')
115 76 return input_val
116 77
117 78 @classmethod
118 79 def _file_path_func(cls, input_val, ensure_dir=False, mode=0o755):
119 80 dirname = os.path.dirname(input_val)
120 81 cls._dir_func(dirname, ensure_dir=ensure_dir)
121 82 return input_val
122 83
123 84 @classmethod
124 85 def _key_transformator(cls, key):
125 86 return "{}_{}".format('RC'.upper(), key.upper().replace('.', '_').replace('-', '_'))
126 87
127 88 def maybe_env_key(self, key):
128 89 # now maybe we have this KEY in env, search and use the value with higher priority.
129 90 transformed_key = self._key_transformator(key)
130 91 envvar_value = os.environ.get(transformed_key)
131 92 if envvar_value:
132 93 log.debug('using `%s` key instead of `%s` key for config', transformed_key, key)
133 94
134 95 return envvar_value
135 96
136 97 def env_expand(self):
137 98 replaced = {}
138 99 for k, v in self.settings.items():
139 100 if k not in set_keys:
140 101 envvar_value = self.maybe_env_key(k)
141 102 if envvar_value:
142 103 replaced[k] = envvar_value
143 104 set_keys[k] = envvar_value
144 105
145 106 # replace ALL keys updated
146 107 self.settings.update(replaced)
147 108
148 109 def enable_logging(self, logging_conf=None, level='INFO', formatter='generic'):
149 110 """
150 111 Helper to enable debug on running instance
151 112 :return:
152 113 """
153 114
154 115 if not str2bool(self.settings.get('logging.autoconfigure')):
155 116 log.info('logging configuration based on main .ini file')
156 117 return
157 118
158 119 if logging_conf is None:
159 120 logging_conf = self.settings.get('logging.logging_conf_file') or ''
160 121
161 122 if not os.path.isfile(logging_conf):
162 123 log.error('Unable to setup logging based on %s, '
163 124 'file does not exist.... specify path using logging.logging_conf_file= config setting. ', logging_conf)
164 125 return
165 126
166 with open(logging_conf, 'rb') as f:
127 with open(logging_conf, 'rt') as f:
167 128 ini_template = textwrap.dedent(f.read())
168 129 ini_template = string.Template(ini_template).safe_substitute(
169 130 RC_LOGGING_LEVEL=os.environ.get('RC_LOGGING_LEVEL', '') or level,
170 131 RC_LOGGING_FORMATTER=os.environ.get('RC_LOGGING_FORMATTER', '') or formatter
171 132 )
172 133
173 134 with tempfile.NamedTemporaryFile(mode='w', prefix='rc_logging_', suffix='.ini', delete=False) as f:
174 135 log.info('Saved Temporary LOGGING config at %s', f.name)
175 136 f.write(ini_template)
176 137
177 138 logging.config.fileConfig(f.name)
178 139 os.remove(f.name)
179 140
180 141 def make_setting(self, key, default, lower=False, default_when_empty=False, parser=None):
181 142 input_val = self.settings.get(key, default)
182 143
183 144 if default_when_empty and not input_val:
184 145 # use default value when value is set in the config but it is empty
185 146 input_val = default
186 147
187 148 parser_func = {
188 149 'bool': self._bool_func,
189 150 'int': self._int_func,
190 151 'list': self._list_func,
191 152 'list:newline': functools.partial(self._list_func, sep='\n'),
192 153 'list:spacesep': functools.partial(self._list_func, sep=' '),
193 154 'string': functools.partial(self._string_func, lower=lower),
194 155 'dir': self._dir_func,
195 156 'dir:ensured': functools.partial(self._dir_func, ensure_dir=True),
196 157 'file': self._file_path_func,
197 158 'file:ensured': functools.partial(self._file_path_func, ensure_dir=True),
198 159 None: lambda i: i
199 160 }[parser]
200 161
201 162 envvar_value = self.maybe_env_key(key)
202 163 if envvar_value:
203 164 input_val = envvar_value
204 165 set_keys[key] = input_val
205 166
206 167 self.settings[key] = parser_func(input_val)
207 168 return self.settings[key]
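make_setting() resolves each key in priority order: an RC_-prefixed environment variable (built by _key_transformator) beats the .ini value, which beats the default, and the winner is run through the selected parser. A short sketch, assuming SettingsMaker is imported from this module and using illustrative keys:

settings = {'vcs.server.enable': 'true', 'cache_dir': '/tmp/rc_cache'}
maker = SettingsMaker(settings)

# parsed with _bool_func; RC_VCS_SERVER_ENABLE in the environment would win
enabled = maker.make_setting('vcs.server.enable', default='true', parser='bool')

# 'dir:ensured' creates the directory (mode 0o755) when it is missing
cache_dir = maker.make_setting('cache_dir', default='/tmp/rc_cache', parser='dir:ensured')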
@@ -1,8 +1,10 b''
1 # Copyright (C) 2014-2023 RhodeCode GmbH
2
1 3 """
2 4 Provides a stub implementation for VCS operations.
3 5
4 6 Intended usage is to help in performance measurements. The basic idea is to
5 7 implement an `EchoApp` which sends back what it gets. Based on a configuration
6 8 parameter this app can be activated, so that it replaces the endpoints for Git
7 9 and Mercurial.
8 10 """
@@ -1,54 +1,56 b''
1 # Copyright (C) 2014-2023 RhodeCode GmbH
2
1 3 """
2 4 Implementation of :class:`EchoApp`.
3 5
2 4 This WSGI application will just echo back the data which it receives.
5 7 """
6 8
7 9 import logging
8 10
9 11
10 12 log = logging.getLogger(__name__)
11 13
12 14
13 15 class EchoApp(object):
14 16
15 17 def __init__(self, repo_path, repo_name, config):
16 18 self._repo_path = repo_path
17 19 log.info("EchoApp initialized for %s", repo_path)
18 20
19 21 def __call__(self, environ, start_response):
20 22 log.debug("EchoApp called for %s", self._repo_path)
21 23 log.debug("Content-Length: %s", environ.get('CONTENT_LENGTH'))
22 24 environ['wsgi.input'].read()
23 25 status = '200 OK'
24 26 headers = [('Content-Type', 'text/plain')]
25 27 start_response(status, headers)
26 return ["ECHO"]
28 return [b"ECHO"]
27 29
28 30
29 31 class EchoAppStream(object):
30 32
31 33 def __init__(self, repo_path, repo_name, config):
32 34 self._repo_path = repo_path
33 35 log.info("EchoApp initialized for %s", repo_path)
34 36
35 37 def __call__(self, environ, start_response):
36 38 log.debug("EchoApp called for %s", self._repo_path)
37 39 log.debug("Content-Length: %s", environ.get('CONTENT_LENGTH'))
38 40 environ['wsgi.input'].read()
39 41 status = '200 OK'
40 42 headers = [('Content-Type', 'text/plain')]
41 43 start_response(status, headers)
42 44
43 45 def generator():
44 for _ in xrange(1000000):
45 yield "ECHO"
46 for _ in range(1000000):
47 yield b"ECHO_STREAM"
46 48 return generator()
47 49
48 50
49 51 def create_app():
50 52 """
51 53 Allows running this app directly in a WSGI server.
52 54 """
53 55 stub_config = {}
54 56 return EchoApp('stub_path', 'stub_name', stub_config)
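Since create_app() returns a plain WSGI callable, the stub can be served for quick measurements with the stdlib reference server; a sketch, any WSGI server would do:

from wsgiref.simple_server import make_server

# serves the EchoApp stub defined above on localhost:8080
make_server('127.0.0.1', 8080, create_app()).serve_forever()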
@@ -1,45 +1,47 b''
1 # Copyright (C) 2014-2023 RhodeCode GmbH
2
1 3 """
2 4 Provides the same API as :mod:`remote_wsgi`.
3 5
4 6 Uses the `EchoApp` instead of real implementations.
5 7 """
6 8
7 9 import logging
8 10
9 11 from .echo_app import EchoApp
10 12 from vcsserver import wsgi_app_caller
11 13
12 14
13 15 log = logging.getLogger(__name__)
14 16
15 17
16 18 class GitRemoteWsgi(object):
17 19 def handle(self, environ, input_data, *args, **kwargs):
18 20 app = wsgi_app_caller.WSGIAppCaller(
19 21 create_echo_wsgi_app(*args, **kwargs))
20 22
21 23 return app.handle(environ, input_data)
22 24
23 25
24 26 class HgRemoteWsgi(object):
25 27 def handle(self, environ, input_data, *args, **kwargs):
26 28 app = wsgi_app_caller.WSGIAppCaller(
27 29 create_echo_wsgi_app(*args, **kwargs))
28 30
29 31 return app.handle(environ, input_data)
30 32
31 33
32 34 def create_echo_wsgi_app(repo_path, repo_name, config):
33 35 log.debug("Creating EchoApp WSGI application")
34 36
35 37 _assert_valid_config(config)
36 38
37 39 # Remaining items are forwarded to have the extras available
38 40 return EchoApp(repo_path, repo_name, config=config)
39 41
40 42
41 43 def _assert_valid_config(config):
42 44 config = config.copy()
43 45
44 46 # This is what git needs from config at this stage
45 config.pop('git_update_server_info')
47 config.pop(b'git_update_server_info')
@@ -1,125 +1,125 b''
1 1 # RhodeCode VCSServer provides access to different vcs backends via network.
2 # Copyright (C) 2014-2020 RhodeCode GmbH
2 # Copyright (C) 2014-2023 RhodeCode GmbH
3 3 #
4 4 # This program is free software; you can redistribute it and/or modify
5 5 # it under the terms of the GNU General Public License as published by
6 6 # the Free Software Foundation; either version 3 of the License, or
7 7 # (at your option) any later version.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU General Public License
15 15 # along with this program; if not, write to the Free Software Foundation,
16 16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 17
18 18 """
19 19 Special exception handling over the wire.
20 20
21 21 Since we cannot assume that our client is able to import our exception classes,
22 22 this module provides a "wrapping" mechanism to raise plain exceptions
23 23 which contain an extra attribute `_vcs_kind` to allow a client to distinguish
24 24 different error conditions.
25 25 """
26 26
27 27 from pyramid.httpexceptions import HTTPLocked, HTTPForbidden
28 28
29 29
30 30 def _make_exception(kind, org_exc, *args):
31 31 """
32 32 Prepares a base `Exception` instance to be sent over the wire.
33 33
34 34 To give our caller a hint what this is about, it will attach an attribute
35 35 `_vcs_kind` to the exception.
36 36 """
37 37 exc = Exception(*args)
38 38 exc._vcs_kind = kind
39 39 exc._org_exc = org_exc
40 40 exc._org_exc_tb = getattr(org_exc, '_org_exc_tb', '')
41 41 return exc
42 42
43 43
44 44 def AbortException(org_exc=None):
45 45 def _make_exception_wrapper(*args):
46 46 return _make_exception('abort', org_exc, *args)
47 47 return _make_exception_wrapper
48 48
49 49
50 50 def ArchiveException(org_exc=None):
51 51 def _make_exception_wrapper(*args):
52 52 return _make_exception('archive', org_exc, *args)
53 53 return _make_exception_wrapper
54 54
55 55
56 56 def LookupException(org_exc=None):
57 57 def _make_exception_wrapper(*args):
58 58 return _make_exception('lookup', org_exc, *args)
59 59 return _make_exception_wrapper
60 60
61 61
62 62 def VcsException(org_exc=None):
63 63 def _make_exception_wrapper(*args):
64 64 return _make_exception('error', org_exc, *args)
65 65 return _make_exception_wrapper
66 66
67 67
68 68 def RepositoryLockedException(org_exc=None):
69 69 def _make_exception_wrapper(*args):
70 70 return _make_exception('repo_locked', org_exc, *args)
71 71 return _make_exception_wrapper
72 72
73 73
74 74 def RepositoryBranchProtectedException(org_exc=None):
75 75 def _make_exception_wrapper(*args):
76 76 return _make_exception('repo_branch_protected', org_exc, *args)
77 77 return _make_exception_wrapper
78 78
79 79
80 80 def RequirementException(org_exc=None):
81 81 def _make_exception_wrapper(*args):
82 82 return _make_exception('requirement', org_exc, *args)
83 83 return _make_exception_wrapper
84 84
85 85
86 86 def UnhandledException(org_exc=None):
87 87 def _make_exception_wrapper(*args):
88 88 return _make_exception('unhandled', org_exc, *args)
89 89 return _make_exception_wrapper
90 90
91 91
92 92 def URLError(org_exc=None):
93 93 def _make_exception_wrapper(*args):
94 94 return _make_exception('url_error', org_exc, *args)
95 95 return _make_exception_wrapper
96 96
97 97
98 98 def SubrepoMergeException(org_exc=None):
99 99 def _make_exception_wrapper(*args):
100 100 return _make_exception('subrepo_merge_error', org_exc, *args)
101 101 return _make_exception_wrapper
102 102
103 103
104 104 class HTTPRepoLocked(HTTPLocked):
105 105 """
106 106 Subclass of HTTPLocked response that allows setting the title and status
107 107 code via constructor arguments.
108 108 """
109 109 def __init__(self, title, status_code=None, **kwargs):
110 110 self.code = status_code or HTTPLocked.code
111 111 self.title = title
112 super(HTTPRepoLocked, self).__init__(**kwargs)
112 super().__init__(**kwargs)
113 113
114 114
115 115 class HTTPRepoBranchProtected(HTTPForbidden):
116 116 def __init__(self, *args, **kwargs):
117 117 super(HTTPForbidden, self).__init__(*args, **kwargs)
118 118
119 119
120 120 class RefNotFoundException(KeyError):
121 121 pass
122 122
123 123
124 124 class NoContentException(ValueError):
125 125 pass
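Because each wrapper returns a plain Exception tagged with `_vcs_kind`, the calling side can dispatch on the tag without importing vcsserver's exception classes. Roughly:

from vcsserver import exceptions

try:
    # the factory returns a wrapper; calling the wrapper builds the tagged exception
    raise exceptions.LookupException()('unknown revision deadbeef')
except Exception as e:
    if getattr(e, '_vcs_kind', None) == 'lookup':
        print('lookup failed:', e.args[0])  # map to client-side handling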
@@ -1,19 +1,19 b''
1 1 # RhodeCode VCSServer provides access to different vcs backends via network.
2 # Copyright (C) 2014-2020 RhodeCode GmbH
2 # Copyright (C) 2014-2023 RhodeCode GmbH
3 3 #
4 4 # This program is free software; you can redistribute it and/or modify
5 5 # it under the terms of the GNU General Public License as published by
6 6 # the Free Software Foundation; either version 3 of the License, or
7 7 # (at your option) any later version.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU General Public License
15 15 # along with this program; if not, write to the Free Software Foundation,
16 16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 17
18 18
19 from app import create_app
19 from .app import create_app
@@ -1,292 +1,292 b''
1 1 # RhodeCode VCSServer provides access to different vcs backends via network.
2 # Copyright (C) 2014-2020 RhodeCode GmbH
2 # Copyright (C) 2014-2023 RhodeCode GmbH
3 3 #
4 4 # This program is free software; you can redistribute it and/or modify
5 5 # it under the terms of the GNU General Public License as published by
6 6 # the Free Software Foundation; either version 3 of the License, or
7 7 # (at your option) any later version.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU General Public License
15 15 # along with this program; if not, write to the Free Software Foundation,
16 16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 17
18 18 import re
19 19 import logging
20 20 from wsgiref.util import FileWrapper
21 21
22 import simplejson as json
23 22 from pyramid.config import Configurator
24 23 from pyramid.response import Response, FileIter
25 24 from pyramid.httpexceptions import (
26 25 HTTPBadRequest, HTTPNotImplemented, HTTPNotFound, HTTPForbidden,
27 26 HTTPUnprocessableEntity)
28 27
28 from vcsserver.lib.rc_json import json
29 29 from vcsserver.git_lfs.lib import OidHandler, LFSOidStore
30 30 from vcsserver.git_lfs.utils import safe_result, get_cython_compat_decorator
31 from vcsserver.utils import safe_int
31 from vcsserver.str_utils import safe_int
32 32
33 33 log = logging.getLogger(__name__)
34 34
35 35
36 36 GIT_LFS_CONTENT_TYPE = 'application/vnd.git-lfs' #+json ?
37 37 GIT_LFS_PROTO_PAT = re.compile(r'^/(.+)/(info/lfs/(.+))')
38 38
39 39
40 40 def write_response_error(http_exception, text=None):
41 41 content_type = GIT_LFS_CONTENT_TYPE + '+json'
42 42 _exception = http_exception(content_type=content_type)
43 43 _exception.content_type = content_type
44 44 if text:
45 45 _exception.body = json.dumps({'message': text})
46 46 log.debug('LFS: writing response of type %s to client with text:%s',
47 47 http_exception, text)
48 48 return _exception
49 49
50 50
51 51 class AuthHeaderRequired(object):
52 52 """
53 53 Decorator to check if request has proper auth-header
54 54 """
55 55
56 56 def __call__(self, func):
57 57 return get_cython_compat_decorator(self.__wrapper, func)
58 58
59 59 def __wrapper(self, func, *fargs, **fkwargs):
60 60 request = fargs[1]
61 61 auth = request.authorization
62 62 if not auth:
63 63 return write_response_error(HTTPForbidden)
64 64 return func(*fargs[1:], **fkwargs)
65 65
66 66
67 67 # views
68 68
69 69 def lfs_objects(request):
70 70 # indicate not supported, V1 API
71 71 log.warning('LFS: v1 api not supported, reporting it back to client')
72 72 return write_response_error(HTTPNotImplemented, 'LFS: v1 api not supported')
73 73
74 74
75 75 @AuthHeaderRequired()
76 76 def lfs_objects_batch(request):
77 77 """
78 78 The client sends the following information to the Batch endpoint to transfer some objects:
79 79
80 80 operation - Should be download or upload.
81 81 transfers - An optional Array of String identifiers for transfer
82 82 adapters that the client has configured. If omitted, the basic
83 83 transfer adapter MUST be assumed by the server.
84 84 objects - An Array of objects to download.
85 85 oid - String OID of the LFS object.
86 86 size - Integer byte size of the LFS object. Must be at least zero.
87 87 """
88 88 request.response.content_type = GIT_LFS_CONTENT_TYPE + '+json'
89 89 auth = request.authorization
90 90 repo = request.matchdict.get('repo')
91 91 data = request.json
92 92 operation = data.get('operation')
93 93 http_scheme = request.registry.git_lfs_http_scheme
94 94
95 95 if operation not in ('download', 'upload'):
96 96 log.debug('LFS: unsupported operation:%s', operation)
97 97 return write_response_error(
98 98 HTTPBadRequest, 'unsupported operation mode: `%s`' % operation)
99 99
100 100 if 'objects' not in data:
101 101 log.debug('LFS: missing objects data')
102 102 return write_response_error(
103 103 HTTPBadRequest, 'missing objects data')
104 104
105 105 log.debug('LFS: handling operation of type: %s', operation)
106 106
107 107 objects = []
108 108 for o in data['objects']:
109 109 try:
110 110 oid = o['oid']
111 111 obj_size = o['size']
112 112 except KeyError:
113 113 log.exception('LFS, failed to extract data')
114 114 return write_response_error(
115 115 HTTPBadRequest, 'unsupported data in objects')
116 116
117 117 obj_data = {'oid': oid}
118 118
119 obj_href = request.route_url('lfs_objects_oid', repo=repo, oid=oid,
119 obj_href = request.route_url('lfs_objects_oid', repo=repo, oid=oid,
120 120 _scheme=http_scheme)
121 121 obj_verify_href = request.route_url('lfs_objects_verify', repo=repo,
122 122 _scheme=http_scheme)
123 123 store = LFSOidStore(
124 124 oid, repo, store_location=request.registry.git_lfs_store_path)
125 125 handler = OidHandler(
126 126 store, repo, auth, oid, obj_size, obj_data,
127 127 obj_href, obj_verify_href)
128 128
129 129 # this verifies also OIDs
130 130 actions, errors = handler.exec_operation(operation)
131 131 if errors:
132 132 log.warning('LFS: got following errors: %s', errors)
133 133 obj_data['errors'] = errors
134 134
135 135 if actions:
136 136 obj_data['actions'] = actions
137 137
138 138 obj_data['size'] = obj_size
139 139 obj_data['authenticated'] = True
140 140 objects.append(obj_data)
141 141
142 142 result = {'objects': objects, 'transfer': 'basic'}
143 143 log.debug('LFS Response %s', safe_result(result))
144 144
145 145 return result
146 146
147 147
148 148 def lfs_objects_oid_upload(request):
149 149 request.response.content_type = GIT_LFS_CONTENT_TYPE + '+json'
150 150 repo = request.matchdict.get('repo')
151 151 oid = request.matchdict.get('oid')
152 152 store = LFSOidStore(
153 153 oid, repo, store_location=request.registry.git_lfs_store_path)
154 154 engine = store.get_engine(mode='wb')
155 155 log.debug('LFS: starting chunked write of LFS oid: %s to storage', oid)
156 156
157 157 body = request.environ['wsgi.input']
158 158
159 159 with engine as f:
160 160 blksize = 64 * 1024 # 64kb
161 161 while True:
162 162 # read in chunks as stream comes in from Gunicorn
163 163 # this is a specific Gunicorn support function.
164 164 # might work differently on waitress
165 165 chunk = body.read(blksize)
166 166 if not chunk:
167 167 break
168 168 f.write(chunk)
169 169
170 170 return {'upload': 'ok'}
171 171
172 172
173 173 def lfs_objects_oid_download(request):
174 174 repo = request.matchdict.get('repo')
175 175 oid = request.matchdict.get('oid')
176 176
177 177 store = LFSOidStore(
178 178 oid, repo, store_location=request.registry.git_lfs_store_path)
179 179 if not store.has_oid():
180 180 log.debug('LFS: oid %s does not exist in store', oid)
181 181 return write_response_error(
182 182 HTTPNotFound, 'requested file with oid `%s` not found in store' % oid)
183 183
184 184 # TODO(marcink): support range header ?
185 185 # Range: bytes=0-, `bytes=(\d+)\-.*`
186 186
187 187 f = open(store.oid_path, 'rb')
188 188 response = Response(
189 189 content_type='application/octet-stream', app_iter=FileIter(f))
190 190 response.headers.add('X-RC-LFS-Response-Oid', str(oid))
191 191 return response
192 192
193 193
194 194 def lfs_objects_verify(request):
195 195 request.response.content_type = GIT_LFS_CONTENT_TYPE + '+json'
196 196 repo = request.matchdict.get('repo')
197 197
198 198 data = request.json
199 199 oid = data.get('oid')
200 200 size = safe_int(data.get('size'))
201 201
202 202 if not (oid and size):
203 203 return write_response_error(
204 204 HTTPBadRequest, 'missing oid and size in request data')
205 205
206 206 store = LFSOidStore(
207 207 oid, repo, store_location=request.registry.git_lfs_store_path)
208 208 if not store.has_oid():
209 209 log.debug('LFS: oid %s does not exist in store', oid)
210 210 return write_response_error(
211 211 HTTPNotFound, 'oid `%s` does not exist in store' % oid)
212 212
213 213 store_size = store.size_oid()
214 214 if store_size != size:
215 msg = 'requested file size mismatch store size:%s requested:%s' % (
215 msg = 'requested file size mismatch store size:{} requested:{}'.format(
216 216 store_size, size)
217 217 return write_response_error(
218 218 HTTPUnprocessableEntity, msg)
219 219
220 220 return {'message': {'size': 'ok', 'in_store': 'ok'}}
221 221
222 222
223 223 def lfs_objects_lock(request):
224 224 return write_response_error(
225 225 HTTPNotImplemented, 'GIT LFS locking api not supported')
226 226
227 227
228 228 def not_found(request):
229 229 return write_response_error(
230 230 HTTPNotFound, 'request path not found')
231 231
232 232
233 233 def lfs_disabled(request):
234 234 return write_response_error(
235 235 HTTPNotImplemented, 'GIT LFS disabled for this repo')
236 236
237 237
238 238 def git_lfs_app(config):
239 239
240 240 # v1 API deprecation endpoint
241 241 config.add_route('lfs_objects',
242 242 '/{repo:.*?[^/]}/info/lfs/objects')
243 243 config.add_view(lfs_objects, route_name='lfs_objects',
244 244 request_method='POST', renderer='json')
245 245
246 246 # locking API
247 247 config.add_route('lfs_objects_lock',
248 248 '/{repo:.*?[^/]}/info/lfs/locks')
249 249 config.add_view(lfs_objects_lock, route_name='lfs_objects_lock',
250 250 request_method=('POST', 'GET'), renderer='json')
251 251
252 252 config.add_route('lfs_objects_lock_verify',
253 253 '/{repo:.*?[^/]}/info/lfs/locks/verify')
254 254 config.add_view(lfs_objects_lock, route_name='lfs_objects_lock_verify',
255 255 request_method=('POST', 'GET'), renderer='json')
256 256
257 257 # batch API
258 258 config.add_route('lfs_objects_batch',
259 259 '/{repo:.*?[^/]}/info/lfs/objects/batch')
260 260 config.add_view(lfs_objects_batch, route_name='lfs_objects_batch',
261 261 request_method='POST', renderer='json')
262 262
263 263 # oid upload/download API
264 264 config.add_route('lfs_objects_oid',
265 265 '/{repo:.*?[^/]}/info/lfs/objects/{oid}')
266 266 config.add_view(lfs_objects_oid_upload, route_name='lfs_objects_oid',
267 267 request_method='PUT', renderer='json')
268 268 config.add_view(lfs_objects_oid_download, route_name='lfs_objects_oid',
269 269 request_method='GET', renderer='json')
270 270
271 271 # verification API
272 272 config.add_route('lfs_objects_verify',
273 273 '/{repo:.*?[^/]}/info/lfs/verify')
274 274 config.add_view(lfs_objects_verify, route_name='lfs_objects_verify',
275 275 request_method='POST', renderer='json')
276 276
277 277 # not found handler for API
278 278 config.add_notfound_view(not_found, renderer='json')
279 279
280 280
281 281 def create_app(git_lfs_enabled, git_lfs_store_path, git_lfs_http_scheme):
282 282 config = Configurator()
283 283 if git_lfs_enabled:
284 284 config.include(git_lfs_app)
285 285 config.registry.git_lfs_store_path = git_lfs_store_path
286 286 config.registry.git_lfs_http_scheme = git_lfs_http_scheme
287 287 else:
288 288 # not found handler for API, reporting disabled LFS support
289 289 config.add_notfound_view(lfs_disabled, renderer='json')
290 290
291 291 app = config.make_wsgi_app()
292 292 return app
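For reference, the shape of a batch download exchange as exercised by the tests further below (values illustrative):

# client -> POST /myrepo/info/lfs/objects/batch  (application/vnd.git-lfs+json)
request_payload = {
    'operation': 'download',
    'objects': [{'oid': '456', 'size': '1024'}],
}

# per-object reply assembled by lfs_objects_batch():
expected_object = {
    'oid': '456', 'size': '1024', 'authenticated': True,
    'actions': {'download': {
        'href': 'http://localhost/myrepo/info/lfs/objects/456',
        'header': {'Authorization': 'Basic ...'}}},
}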
@@ -1,175 +1,175 b''
1 1 # RhodeCode VCSServer provides access to different vcs backends via network.
2 # Copyright (C) 2014-2020 RhodeCode GmbH
2 # Copyright (C) 2014-2023 RhodeCode GmbH
3 3 #
4 4 # This program is free software; you can redistribute it and/or modify
5 5 # it under the terms of the GNU General Public License as published by
6 6 # the Free Software Foundation; either version 3 of the License, or
7 7 # (at your option) any later version.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU General Public License
15 15 # along with this program; if not, write to the Free Software Foundation,
16 16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 17
18 18 import os
19 19 import shutil
20 20 import logging
21 21 from collections import OrderedDict
22 22
23 23 log = logging.getLogger(__name__)
24 24
25 25
26 26 class OidHandler(object):
27 27
28 28 def __init__(self, store, repo_name, auth, oid, obj_size, obj_data, obj_href,
29 29 obj_verify_href=None):
30 30 self.current_store = store
31 31 self.repo_name = repo_name
32 32 self.auth = auth
33 33 self.oid = oid
34 34 self.obj_size = obj_size
35 35 self.obj_data = obj_data
36 36 self.obj_href = obj_href
37 37 self.obj_verify_href = obj_verify_href
38 38
39 39 def get_store(self, mode=None):
40 40 return self.current_store
41 41
42 42 def get_auth(self):
43 43 """returns auth header for re-use in upload/download"""
44 44 return " ".join(self.auth)
45 45
46 46 def download(self):
47 47
48 48 store = self.get_store()
49 49 response = None
50 50 has_errors = None
51 51
52 52 if not store.has_oid():
53 53 # error reply back to client that something is wrong with dl
54 err_msg = 'object: {} does not exist in store'.format(store.oid)
54 err_msg = f'object: {store.oid} does not exist in store'
55 55 has_errors = OrderedDict(
56 56 error=OrderedDict(
57 57 code=404,
58 58 message=err_msg
59 59 )
60 60 )
61 61
62 62 download_action = OrderedDict(
63 63 href=self.obj_href,
64 64 header=OrderedDict([("Authorization", self.get_auth())])
65 65 )
66 66 if not has_errors:
67 67 response = OrderedDict(download=download_action)
68 68 return response, has_errors
69 69
70 70 def upload(self, skip_existing=True):
71 71 """
72 72 Write upload action for git-lfs server
73 73 """
74 74
75 75 store = self.get_store()
76 76 response = None
77 77 has_errors = None
78 78
79 79 # verify if we have the OID before, if we do, reply with empty
80 80 if store.has_oid():
81 81 log.debug('LFS: store already has oid %s', store.oid)
82 82
83 83 # validate size
84 84 store_size = store.size_oid()
85 85 size_match = store_size == self.obj_size
86 86 if not size_match:
87 87 log.warning(
88 88 'LFS: size mismatch for oid:%s, in store:%s expected: %s',
89 89 self.oid, store_size, self.obj_size)
90 90 elif skip_existing:
91 91 log.debug('LFS: skipping further action as oid is existing')
92 92 return response, has_errors
93 93
94 94 chunked = ("Transfer-Encoding", "chunked")
95 95 upload_action = OrderedDict(
96 96 href=self.obj_href,
97 97 header=OrderedDict([("Authorization", self.get_auth()), chunked])
98 98 )
99 99 if not has_errors:
100 100 response = OrderedDict(upload=upload_action)
101 101 # if specified in handler, return the verification endpoint
102 102 if self.obj_verify_href:
103 103 verify_action = OrderedDict(
104 104 href=self.obj_verify_href,
105 105 header=OrderedDict([("Authorization", self.get_auth())])
106 106 )
107 107 response['verify'] = verify_action
108 108 return response, has_errors
109 109
110 110 def exec_operation(self, operation, *args, **kwargs):
111 111 handler = getattr(self, operation)
112 112 log.debug('LFS: handling request using %s handler', handler)
113 113 return handler(*args, **kwargs)
114 114
115 115
116 116 class LFSOidStore(object):
117 117
118 118 def __init__(self, oid, repo, store_location=None):
119 119 self.oid = oid
120 120 self.repo = repo
121 121 self.store_path = store_location or self.get_default_store()
122 122 self.tmp_oid_path = os.path.join(self.store_path, oid + '.tmp')
123 123 self.oid_path = os.path.join(self.store_path, oid)
124 124 self.fd = None
125 125
126 126 def get_engine(self, mode):
127 127 """
128 128 engine = .get_engine(mode='wb')
129 129 with engine as f:
130 130 f.write('...')
131 131 """
132 132
133 133 class StoreEngine(object):
134 134 def __init__(self, mode, store_path, oid_path, tmp_oid_path):
135 135 self.mode = mode
136 136 self.store_path = store_path
137 137 self.oid_path = oid_path
138 138 self.tmp_oid_path = tmp_oid_path
139 139
140 140 def __enter__(self):
141 141 if not os.path.isdir(self.store_path):
142 142 os.makedirs(self.store_path)
143 143
144 144 # TODO(marcink): maybe write metadata here with size/oid ?
145 145 fd = open(self.tmp_oid_path, self.mode)
146 146 self.fd = fd
147 147 return fd
148 148
149 149 def __exit__(self, exc_type, exc_value, traceback):
150 150 # close tmp file, and rename to final destination
151 151 self.fd.close()
152 152 shutil.move(self.tmp_oid_path, self.oid_path)
153 153
154 154 return StoreEngine(
155 155 mode, self.store_path, self.oid_path, self.tmp_oid_path)
156 156
157 157 def get_default_store(self):
158 158 """
159 159 Default store, consistent with defaults of Mercurial large files store
160 160 which is /home/username/.cache/largefiles
161 161 """
162 162 user_home = os.path.expanduser("~")
163 163 return os.path.join(user_home, '.cache', 'lfs-store')
164 164
165 165 def has_oid(self):
166 166 return os.path.exists(os.path.join(self.store_path, self.oid))
167 167
168 168 def size_oid(self):
169 169 size = -1
170 170
171 171 if self.has_oid():
172 172 oid = os.path.join(self.store_path, self.oid)
173 173 size = os.stat(oid).st_size
174 174
175 175 return size
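StoreEngine writes into `<oid>.tmp` and renames to the final oid path only on a clean exit, so a partially written upload never becomes visible in the store. Usage mirrors the get_engine docstring:

store = LFSOidStore('deadbeef', 'myrepo', store_location='/tmp/lfs-store')
with store.get_engine(mode='wb') as f:
    f.write(b'CONTENT')  # the upload view streams chunks here

assert store.has_oid()
assert store.size_oid() == len(b'CONTENT')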
@@ -1,16 +1,16 b''
1 1 # RhodeCode VCSServer provides access to different vcs backends via network.
2 # Copyright (C) 2014-2020 RhodeCode GmbH
2 # Copyright (C) 2014-2023 RhodeCode GmbH
3 3 #
4 4 # This program is free software; you can redistribute it and/or modify
5 5 # it under the terms of the GNU General Public License as published by
6 6 # the Free Software Foundation; either version 3 of the License, or
7 7 # (at your option) any later version.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU General Public License
15 15 # along with this program; if not, write to the Free Software Foundation,
16 16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
@@ -1,272 +1,273 b''
1 1 # RhodeCode VCSServer provides access to different vcs backends via network.
2 # Copyright (C) 2014-2020 RhodeCode GmbH
2 # Copyright (C) 2014-2023 RhodeCode GmbH
3 3 #
4 4 # This program is free software; you can redistribute it and/or modify
5 5 # it under the terms of the GNU General Public License as published by
6 6 # the Free Software Foundation; either version 3 of the License, or
7 7 # (at your option) any later version.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU General Public License
15 15 # along with this program; if not, write to the Free Software Foundation,
16 16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 17
18 18 import os
19 19 import pytest
20 20 from webtest.app import TestApp as WebObTestApp
21 import simplejson as json
22 21
22 from vcsserver.lib.rc_json import json
23 from vcsserver.str_utils import safe_bytes
23 24 from vcsserver.git_lfs.app import create_app
24 25
25 26
26 27 @pytest.fixture(scope='function')
27 28 def git_lfs_app(tmpdir):
28 29 custom_app = WebObTestApp(create_app(
29 30 git_lfs_enabled=True, git_lfs_store_path=str(tmpdir),
30 31 git_lfs_http_scheme='http'))
31 32 custom_app._store = str(tmpdir)
32 33 return custom_app
33 34
34 35
35 36 @pytest.fixture(scope='function')
36 37 def git_lfs_https_app(tmpdir):
37 38 custom_app = WebObTestApp(create_app(
38 39 git_lfs_enabled=True, git_lfs_store_path=str(tmpdir),
39 40 git_lfs_http_scheme='https'))
40 41 custom_app._store = str(tmpdir)
41 42 return custom_app
42 43
43 44
44 45 @pytest.fixture()
45 46 def http_auth():
46 47 return {'HTTP_AUTHORIZATION': "Basic XXXXX"}
47 48
48 49
49 50 class TestLFSApplication(object):
50 51
51 52 def test_app_wrong_path(self, git_lfs_app):
52 53 git_lfs_app.get('/repo/info/lfs/xxx', status=404)
53 54
54 55 def test_app_deprecated_endpoint(self, git_lfs_app):
55 56 response = git_lfs_app.post('/repo/info/lfs/objects', status=501)
56 57 assert response.status_code == 501
57 assert json.loads(response.text) == {u'message': u'LFS: v1 api not supported'}
58 assert json.loads(response.text) == {'message': 'LFS: v1 api not supported'}
58 59
59 60 def test_app_lock_verify_api_not_available(self, git_lfs_app):
60 61 response = git_lfs_app.post('/repo/info/lfs/locks/verify', status=501)
61 62 assert response.status_code == 501
62 63 assert json.loads(response.text) == {
63 u'message': u'GIT LFS locking api not supported'}
64 'message': 'GIT LFS locking api not supported'}
64 65
65 66 def test_app_lock_api_not_available(self, git_lfs_app):
66 67 response = git_lfs_app.post('/repo/info/lfs/locks', status=501)
67 68 assert response.status_code == 501
68 69 assert json.loads(response.text) == {
69 u'message': u'GIT LFS locking api not supported'}
70 'message': 'GIT LFS locking api not supported'}
70 71
71 72 def test_app_batch_api_missing_auth(self, git_lfs_app):
72 73 git_lfs_app.post_json(
73 74 '/repo/info/lfs/objects/batch', params={}, status=403)
74 75
75 76 def test_app_batch_api_unsupported_operation(self, git_lfs_app, http_auth):
76 77 response = git_lfs_app.post_json(
77 78 '/repo/info/lfs/objects/batch', params={}, status=400,
78 79 extra_environ=http_auth)
79 80 assert json.loads(response.text) == {
80 u'message': u'unsupported operation mode: `None`'}
81 'message': 'unsupported operation mode: `None`'}
81 82
82 83 def test_app_batch_api_missing_objects(self, git_lfs_app, http_auth):
83 84 response = git_lfs_app.post_json(
84 85 '/repo/info/lfs/objects/batch', params={'operation': 'download'},
85 86 status=400, extra_environ=http_auth)
86 87 assert json.loads(response.text) == {
87 u'message': u'missing objects data'}
88 'message': 'missing objects data'}
88 89
89 90 def test_app_batch_api_unsupported_data_in_objects(
90 91 self, git_lfs_app, http_auth):
91 92 params = {'operation': 'download',
92 93 'objects': [{}]}
93 94 response = git_lfs_app.post_json(
94 95 '/repo/info/lfs/objects/batch', params=params, status=400,
95 96 extra_environ=http_auth)
96 97 assert json.loads(response.text) == {
97 u'message': u'unsupported data in objects'}
98 'message': 'unsupported data in objects'}
98 99
99 100 def test_app_batch_api_download_missing_object(
100 101 self, git_lfs_app, http_auth):
101 102 params = {'operation': 'download',
102 103 'objects': [{'oid': '123', 'size': '1024'}]}
103 104 response = git_lfs_app.post_json(
104 105 '/repo/info/lfs/objects/batch', params=params,
105 106 extra_environ=http_auth)
106 107
107 108 expected_objects = [
108 {u'authenticated': True,
109 u'errors': {u'error': {
110 u'code': 404,
111 u'message': u'object: 123 does not exist in store'}},
112 u'oid': u'123',
113 u'size': u'1024'}
109 {'authenticated': True,
110 'errors': {'error': {
111 'code': 404,
112 'message': 'object: 123 does not exist in store'}},
113 'oid': '123',
114 'size': '1024'}
114 115 ]
115 116 assert json.loads(response.text) == {
116 117 'objects': expected_objects, 'transfer': 'basic'}
117 118
118 119 def test_app_batch_api_download(self, git_lfs_app, http_auth):
119 120 oid = '456'
120 121 oid_path = os.path.join(git_lfs_app._store, oid)
121 122 if not os.path.isdir(os.path.dirname(oid_path)):
122 123 os.makedirs(os.path.dirname(oid_path))
123 124 with open(oid_path, 'wb') as f:
124 f.write('OID_CONTENT')
125 f.write(safe_bytes('OID_CONTENT'))
125 126
126 127 params = {'operation': 'download',
127 128 'objects': [{'oid': oid, 'size': '1024'}]}
128 129 response = git_lfs_app.post_json(
129 130 '/repo/info/lfs/objects/batch', params=params,
130 131 extra_environ=http_auth)
131 132
132 133 expected_objects = [
133 {u'authenticated': True,
134 u'actions': {
135 u'download': {
136 u'header': {u'Authorization': u'Basic XXXXX'},
137 u'href': u'http://localhost/repo/info/lfs/objects/456'},
134 {'authenticated': True,
135 'actions': {
136 'download': {
137 'header': {'Authorization': 'Basic XXXXX'},
138 'href': 'http://localhost/repo/info/lfs/objects/456'},
138 139 },
139 u'oid': u'456',
140 u'size': u'1024'}
140 'oid': '456',
141 'size': '1024'}
141 142 ]
142 143 assert json.loads(response.text) == {
143 144 'objects': expected_objects, 'transfer': 'basic'}
144 145
145 146 def test_app_batch_api_upload(self, git_lfs_app, http_auth):
146 147 params = {'operation': 'upload',
147 148 'objects': [{'oid': '123', 'size': '1024'}]}
148 149 response = git_lfs_app.post_json(
149 150 '/repo/info/lfs/objects/batch', params=params,
150 151 extra_environ=http_auth)
151 152 expected_objects = [
152 {u'authenticated': True,
153 u'actions': {
154 u'upload': {
155 u'header': {u'Authorization': u'Basic XXXXX',
156 u'Transfer-Encoding': u'chunked'},
157 u'href': u'http://localhost/repo/info/lfs/objects/123'},
158 u'verify': {
159 u'header': {u'Authorization': u'Basic XXXXX'},
160 u'href': u'http://localhost/repo/info/lfs/verify'}
153 {'authenticated': True,
154 'actions': {
155 'upload': {
156 'header': {'Authorization': 'Basic XXXXX',
157 'Transfer-Encoding': 'chunked'},
158 'href': 'http://localhost/repo/info/lfs/objects/123'},
159 'verify': {
160 'header': {'Authorization': 'Basic XXXXX'},
161 'href': 'http://localhost/repo/info/lfs/verify'}
161 162 },
162 u'oid': u'123',
163 u'size': u'1024'}
163 'oid': '123',
164 'size': '1024'}
164 165 ]
165 166 assert json.loads(response.text) == {
166 167 'objects': expected_objects, 'transfer': 'basic'}
167 168
168 169 def test_app_batch_api_upload_for_https(self, git_lfs_https_app, http_auth):
169 170 params = {'operation': 'upload',
170 171 'objects': [{'oid': '123', 'size': '1024'}]}
171 172 response = git_lfs_https_app.post_json(
172 173 '/repo/info/lfs/objects/batch', params=params,
173 174 extra_environ=http_auth)
174 175 expected_objects = [
175 {u'authenticated': True,
176 u'actions': {
177 u'upload': {
178 u'header': {u'Authorization': u'Basic XXXXX',
179 u'Transfer-Encoding': u'chunked'},
180 u'href': u'https://localhost/repo/info/lfs/objects/123'},
181 u'verify': {
182 u'header': {u'Authorization': u'Basic XXXXX'},
183 u'href': u'https://localhost/repo/info/lfs/verify'}
176 {'authenticated': True,
177 'actions': {
178 'upload': {
179 'header': {'Authorization': 'Basic XXXXX',
180 'Transfer-Encoding': 'chunked'},
181 'href': 'https://localhost/repo/info/lfs/objects/123'},
182 'verify': {
183 'header': {'Authorization': 'Basic XXXXX'},
184 'href': 'https://localhost/repo/info/lfs/verify'}
184 185 },
185 u'oid': u'123',
186 u'size': u'1024'}
186 'oid': '123',
187 'size': '1024'}
187 188 ]
188 189 assert json.loads(response.text) == {
189 190 'objects': expected_objects, 'transfer': 'basic'}
190 191
191 192 def test_app_verify_api_missing_data(self, git_lfs_app):
192 193 params = {'oid': 'missing'}
193 194 response = git_lfs_app.post_json(
194 195 '/repo/info/lfs/verify', params=params,
195 196 status=400)
196 197
197 198 assert json.loads(response.text) == {
198 u'message': u'missing oid and size in request data'}
199 'message': 'missing oid and size in request data'}
199 200
200 201 def test_app_verify_api_missing_obj(self, git_lfs_app):
201 202 params = {'oid': 'missing', 'size': '1024'}
202 203 response = git_lfs_app.post_json(
203 204 '/repo/info/lfs/verify', params=params,
204 205 status=404)
205 206
206 207 assert json.loads(response.text) == {
207 u'message': u'oid `missing` does not exist in store'}
208 'message': 'oid `missing` does not exist in store'}
208 209
209 210 def test_app_verify_api_size_mismatch(self, git_lfs_app):
210 211 oid = 'existing'
211 212 oid_path = os.path.join(git_lfs_app._store, oid)
212 213 if not os.path.isdir(os.path.dirname(oid_path)):
213 214 os.makedirs(os.path.dirname(oid_path))
214 215 with open(oid_path, 'wb') as f:
215 f.write('OID_CONTENT')
216 f.write(safe_bytes('OID_CONTENT'))
216 217
217 218 params = {'oid': oid, 'size': '1024'}
218 219 response = git_lfs_app.post_json(
219 220 '/repo/info/lfs/verify', params=params, status=422)
220 221
221 222 assert json.loads(response.text) == {
222 u'message': u'requested file size mismatch '
223 u'store size:11 requested:1024'}
223 'message': 'requested file size mismatch '
224 'store size:11 requested:1024'}
224 225
225 226 def test_app_verify_api(self, git_lfs_app):
226 227 oid = 'existing'
227 228 oid_path = os.path.join(git_lfs_app._store, oid)
228 229 if not os.path.isdir(os.path.dirname(oid_path)):
229 230 os.makedirs(os.path.dirname(oid_path))
230 231 with open(oid_path, 'wb') as f:
231 f.write('OID_CONTENT')
232 f.write(safe_bytes('OID_CONTENT'))
232 233
233 234 params = {'oid': oid, 'size': 11}
234 235 response = git_lfs_app.post_json(
235 236 '/repo/info/lfs/verify', params=params)
236 237
237 238 assert json.loads(response.text) == {
238 u'message': {u'size': u'ok', u'in_store': u'ok'}}
239 'message': {'size': 'ok', 'in_store': 'ok'}}
239 240
240 241 def test_app_download_api_oid_not_existing(self, git_lfs_app):
241 242 oid = 'missing'
242 243
243 244 response = git_lfs_app.get(
244 245 '/repo/info/lfs/objects/{oid}'.format(oid=oid), status=404)
245 246
246 247 assert json.loads(response.text) == {
247 u'message': u'requested file with oid `missing` not found in store'}
248 'message': 'requested file with oid `missing` not found in store'}
248 249
249 250 def test_app_download_api(self, git_lfs_app):
250 251 oid = 'existing'
251 252 oid_path = os.path.join(git_lfs_app._store, oid)
252 253 if not os.path.isdir(os.path.dirname(oid_path)):
253 254 os.makedirs(os.path.dirname(oid_path))
254 255 with open(oid_path, 'wb') as f:
255 f.write('OID_CONTENT')
256 f.write(safe_bytes('OID_CONTENT'))
256 257
257 258 response = git_lfs_app.get(
258 259 '/repo/info/lfs/objects/{oid}'.format(oid=oid))
259 260 assert response
260 261
261 262 def test_app_upload(self, git_lfs_app):
262 263 oid = 'uploaded'
263 264
264 265 response = git_lfs_app.put(
265 266 '/repo/info/lfs/objects/{oid}'.format(oid=oid), params='CONTENT')
266 267
267 assert json.loads(response.text) == {u'upload': u'ok'}
268 assert json.loads(response.text) == {'upload': 'ok'}
268 269
269 270 # verify that we actually wrote that OID
270 271 oid_path = os.path.join(git_lfs_app._store, oid)
271 272 assert os.path.isfile(oid_path)
272 273 assert 'CONTENT' == open(oid_path).read()
@@ -1,141 +1,142 b''
1 1 # RhodeCode VCSServer provides access to different vcs backends via network.
2 # Copyright (C) 2014-2020 RhodeCode GmbH
2 # Copyright (C) 2014-2023 RhodeCode GmbH
3 3 #
4 4 # This program is free software; you can redistribute it and/or modify
5 5 # it under the terms of the GNU General Public License as published by
6 6 # the Free Software Foundation; either version 3 of the License, or
7 7 # (at your option) any later version.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU General Public License
15 15 # along with this program; if not, write to the Free Software Foundation,
16 16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 17
18 18 import os
19 19 import pytest
20 from vcsserver.str_utils import safe_bytes
20 21 from vcsserver.git_lfs.lib import OidHandler, LFSOidStore
21 22
22 23
23 24 @pytest.fixture()
24 25 def lfs_store(tmpdir):
25 26 repo = 'test'
26 27 oid = '123456789'
27 28 store = LFSOidStore(oid=oid, repo=repo, store_location=str(tmpdir))
28 29 return store
29 30
30 31
31 32 @pytest.fixture()
32 33 def oid_handler(lfs_store):
33 34 store = lfs_store
34 35 repo = store.repo
35 36 oid = store.oid
36 37
37 38 oid_handler = OidHandler(
38 39 store=store, repo_name=repo, auth=('basic', 'xxxx'),
39 40 oid=oid,
40 41 obj_size='1024', obj_data={}, obj_href='http://localhost/handle_oid',
41 42 obj_verify_href='http://localhost/verify')
42 43 return oid_handler
43 44
44 45
45 46 class TestOidHandler(object):
46 47
47 48 @pytest.mark.parametrize('exec_action', [
48 49 'download',
49 50 'upload',
50 51 ])
51 52 def test_exec_action(self, exec_action, oid_handler):
52 53 handler = oid_handler.exec_operation(exec_action)
53 54 assert handler
54 55
55 56 def test_exec_action_undefined(self, oid_handler):
56 57 with pytest.raises(AttributeError):
57 58 oid_handler.exec_operation('wrong')
58 59
59 60 def test_download_oid_not_existing(self, oid_handler):
60 61 response, has_errors = oid_handler.exec_operation('download')
61 62
62 63 assert response is None
63 64 assert has_errors['error'] == {
64 65 'code': 404,
65 66 'message': 'object: 123456789 does not exist in store'}
66 67
67 68 def test_download_oid(self, oid_handler):
68 69 store = oid_handler.get_store()
69 70 if not os.path.isdir(os.path.dirname(store.oid_path)):
70 71 os.makedirs(os.path.dirname(store.oid_path))
71 72
72 73 with open(store.oid_path, 'wb') as f:
73 f.write('CONTENT')
74 f.write(safe_bytes('CONTENT'))
74 75
75 76 response, has_errors = oid_handler.exec_operation('download')
76 77
77 78 assert has_errors is None
78 79 assert response['download'] == {
79 80 'header': {'Authorization': 'basic xxxx'},
80 81 'href': 'http://localhost/handle_oid'
81 82 }
82 83
83 84 def test_upload_oid_that_exists(self, oid_handler):
84 85 store = oid_handler.get_store()
85 86 if not os.path.isdir(os.path.dirname(store.oid_path)):
86 87 os.makedirs(os.path.dirname(store.oid_path))
87 88
88 89 with open(store.oid_path, 'wb') as f:
89 f.write('CONTENT')
90 f.write(safe_bytes('CONTENT'))
90 91 oid_handler.obj_size = 7
91 92 response, has_errors = oid_handler.exec_operation('upload')
92 93 assert has_errors is None
93 94 assert response is None
94 95
95 96 def test_upload_oid_that_exists_but_has_wrong_size(self, oid_handler):
96 97 store = oid_handler.get_store()
97 98 if not os.path.isdir(os.path.dirname(store.oid_path)):
98 99 os.makedirs(os.path.dirname(store.oid_path))
99 100
100 101 with open(store.oid_path, 'wb') as f:
101 f.write('CONTENT')
102 f.write(safe_bytes('CONTENT'))
102 103
103 104 oid_handler.obj_size = 10240
104 105 response, has_errors = oid_handler.exec_operation('upload')
105 106 assert has_errors is None
106 107 assert response['upload'] == {
107 108 'header': {'Authorization': 'basic xxxx',
108 109 'Transfer-Encoding': 'chunked'},
109 110 'href': 'http://localhost/handle_oid',
110 111 }
111 112
112 113 def test_upload_oid(self, oid_handler):
113 114 response, has_errors = oid_handler.exec_operation('upload')
114 115 assert has_errors is None
115 116 assert response['upload'] == {
116 117 'header': {'Authorization': 'basic xxxx',
117 118 'Transfer-Encoding': 'chunked'},
118 119 'href': 'http://localhost/handle_oid'
119 120 }
120 121
121 122
122 123 class TestLFSStore(object):
123 124 def test_write_oid(self, lfs_store):
124 125 oid_location = lfs_store.oid_path
125 126
126 127 assert not os.path.isfile(oid_location)
127 128
128 129 engine = lfs_store.get_engine(mode='wb')
129 130 with engine as f:
130 f.write('CONTENT')
131 f.write(safe_bytes('CONTENT'))
131 132
132 133 assert os.path.isfile(oid_location)
133 134
134 135 def test_detect_has_oid(self, lfs_store):
135 136
136 137 assert lfs_store.has_oid() is False
137 138 engine = lfs_store.get_engine(mode='wb')
138 139 with engine as f:
139 f.write('CONTENT')
140 f.write(safe_bytes('CONTENT'))
140 141
141 assert lfs_store.has_oid() is True
\ No newline at end of file
142 assert lfs_store.has_oid() is True
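
The store tests above write through lfs_store.get_engine(mode='wb') used as a context manager. A hypothetical standalone sketch of such an engine, assuming it creates missing parent directories before opening the OID path (the real LFSOidStore.get_engine is a bound method and may differ):

    import contextlib
    import os

    @contextlib.contextmanager
    def get_engine(oid_path, mode='wb'):
        # ensure the store directory exists, then hand back the open file
        os.makedirs(os.path.dirname(oid_path), exist_ok=True)
        with open(oid_path, mode) as f:
            yield f
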
@@ -1,50 +1,50 b''
1 1 # RhodeCode VCSServer provides access to different vcs backends via network.
2 # Copyright (C) 2014-2020 RhodeCode GmbH
2 # Copyright (C) 2014-2023 RhodeCode GmbH
3 3 #
4 4 # This program is free software; you can redistribute it and/or modify
5 5 # it under the terms of the GNU General Public License as published by
6 6 # the Free Software Foundation; either version 3 of the License, or
7 7 # (at your option) any later version.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU General Public License
15 15 # along with this program; if not, write to the Free Software Foundation,
16 16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 17 import copy
18 18 from functools import wraps
19 19
20 20
21 21 def get_cython_compat_decorator(wrapper, func):
22 22 """
23 23 Creates a cython compatible decorator. The previously used
24 24 decorator.decorator() function seems to be incompatible with cython.
25 25
26 26 :param wrapper: __wrapper method of the decorator class
27 27 :param func: decorated function
28 28 """
29 29 @wraps(func)
30 30 def local_wrapper(*args, **kwds):
31 31 return wrapper(func, *args, **kwds)
32 32 local_wrapper.__wrapped__ = func
33 33 return local_wrapper
34 34
35 35
36 36 def safe_result(result):
37 37 """clean result for better representation in logs"""
38 38 clean_copy = copy.deepcopy(result)
39 39
40 40 try:
41 41 if 'objects' in clean_copy:
42 42 for oid_data in clean_copy['objects']:
43 43 if 'actions' in oid_data:
44 44 for action_name, data in oid_data['actions'].items():
45 45 if 'header' in data:
46 46 data['header'] = {'Authorization': '*****'}
47 47 except Exception:
48 48 return result
49 49
50 50 return clean_copy
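
For illustration, here is how safe_result masks credentials before a result is logged; the input shape mirrors the objects/actions/header structure the function walks above:

    result = {'objects': [
        {'oid': 'abc', 'actions': {
            'download': {'href': 'http://localhost/handle_oid',
                         'header': {'Authorization': 'basic xxxx'}}}}]}
    clean = safe_result(result)
    # credentials are replaced in the deep copy; the original dict is untouched
    assert clean['objects'][0]['actions']['download']['header'] == {'Authorization': '*****'}
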
@@ -1,79 +1,88 b''
1 1 # RhodeCode VCSServer provides access to different vcs backends via network.
2 # Copyright (C) 2014-2020 RhodeCode GmbH
2 # Copyright (C) 2014-2023 RhodeCode GmbH
3 3 #
4 4 # This program is free software; you can redistribute it and/or modify
5 5 # it under the terms of the GNU General Public License as published by
6 6 # the Free Software Foundation; either version 3 of the License, or
7 7 # (at your option) any later version.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU General Public License
15 15 # along with this program; if not, write to the Free Software Foundation,
16 16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 17
18 18 """
19 19 Mercurial libs compatibility
20 20 """
21 21
22 22 import mercurial
23 23 from mercurial import demandimport
24
24 25 # patch demandimport, due to a bug in mercurial that always triggers
25 26 # demandimport.enable()
27 from vcsserver.str_utils import safe_bytes
28
26 29 demandimport.enable = lambda *args, **kwargs: 1
27 30
28 31 from mercurial import ui
29 32 from mercurial import patch
30 33 from mercurial import config
31 34 from mercurial import extensions
32 35 from mercurial import scmutil
33 36 from mercurial import archival
34 37 from mercurial import discovery
35 38 from mercurial import unionrepo
36 39 from mercurial import localrepo
37 40 from mercurial import merge as hg_merge
38 41 from mercurial import subrepo
39 42 from mercurial import subrepoutil
40 43 from mercurial import tags as hg_tag
41 44 from mercurial import util as hgutil
42 from mercurial.commands import clone, nullid, pull
45 from mercurial.commands import clone, pull
46 from mercurial.node import nullid
43 47 from mercurial.context import memctx, memfilectx
44 48 from mercurial.error import (
45 49 LookupError, RepoError, RepoLookupError, Abort, InterventionRequired,
46 50 RequirementError, ProgrammingError)
47 51 from mercurial.hgweb import hgweb_mod
48 52 from mercurial.localrepo import instance
49 53 from mercurial.match import match, alwaysmatcher, patternmatcher
50 54 from mercurial.mdiff import diffopts
51 55 from mercurial.node import bin, hex
52 56 from mercurial.encoding import tolocal
53 57 from mercurial.discovery import findcommonoutgoing
54 58 from mercurial.hg import peer
55 59 from mercurial.httppeer import makepeer
56 from mercurial.util import url as hg_url
60 from mercurial.utils.urlutil import url as hg_url
57 61 from mercurial.scmutil import revrange, revsymbol
58 62 from mercurial.node import nullrev
59 63 from mercurial import exchange
60 64 from hgext import largefiles
61 65
62 66 # those authhandlers are patched for a python 2.6.5 bug causing
63 67 # infinite looping when given invalid resources
64 68 from mercurial.url import httpbasicauthhandler, httpdigestauthhandler
65 69
70 # hg strip is in core now
71 from mercurial import strip as hgext_strip
72
66 73
67 74 def get_ctx(repo, ref):
75 if not isinstance(ref, int):
76 ref = safe_bytes(ref)
68 77 try:
69 78 ctx = repo[ref]
70 79 except (ProgrammingError, TypeError):
71 80 # we're unable to find the rev using a regular lookup, we fallback
72 81 # to slower, but backward compat revsymbol usage
73 82 ctx = revsymbol(repo, ref)
74 83 except (LookupError, RepoLookupError):
75 84 # Similar case as above but only for refs that are not numeric
76 if isinstance(ref, (int, long)):
85 if isinstance(ref, int):
77 86 raise
78 87 ctx = revsymbol(repo, ref)
79 88 return ctx
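
Usage sketch for the updated get_ctx: non-integer refs are coerced to bytes before the Mercurial lookup, and lookup failures fall back to revsymbol (a repo object is assumed to be available):

    ctx = get_ctx(repo, 0)            # numeric revision, passed through as-is
    ctx = get_ctx(repo, 'tip')        # str ref becomes b'tip' via safe_bytes
    ctx = get_ctx(repo, 'some-tag')   # unresolvable refs retry via revsymbol()
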
@@ -1,134 +1,134 b''
1 1 # RhodeCode VCSServer provides access to different vcs backends via network.
2 # Copyright (C) 2014-2020 RhodeCode GmbH
2 # Copyright (C) 2014-2023 RhodeCode GmbH
3 3 #
4 4 # This program is free software; you can redistribute it and/or modify
5 5 # it under the terms of the GNU General Public License as published by
6 6 # the Free Software Foundation; either version 3 of the License, or
7 7 # (at your option) any later version.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU General Public License
15 15 # along with this program; if not, write to the Free Software Foundation,
16 16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 17
18 18 """
19 19 Adjustments to Mercurial
20 20
21 21 Intentionally kept separate from `hgcompat` and `hg`, so that these patches can
22 22 be applied without having to import the whole Mercurial machinery.
23 23
24 24 Imports are function local, so that just importing this module does not cause
25 25 side-effects other than these functions being defined.
26 26 """
27 27
28 28 import logging
29 29
30 30
31 31 def patch_largefiles_capabilities():
32 32 """
33 33 Patches the capabilities function in the largefiles extension.
34 34 """
35 35 from vcsserver import hgcompat
36 36 lfproto = hgcompat.largefiles.proto
37 37 wrapper = _dynamic_capabilities_wrapper(
38 38 lfproto, hgcompat.extensions.extensions)
39 39 lfproto._capabilities = wrapper
40 40
41 41
42 42 def _dynamic_capabilities_wrapper(lfproto, extensions):
43 43
44 44 wrapped_capabilities = lfproto._capabilities
45 45 logger = logging.getLogger('vcsserver.hg')
46 46
47 47 def _dynamic_capabilities(orig, repo, proto):
48 48 """
49 49 Adds dynamic behavior, so that the capability is only added if the
50 50 extension is enabled in the current ui object.
51 51 """
52 52 if 'largefiles' in dict(extensions(repo.ui)):
53 53 logger.debug('Extension largefiles enabled')
54 54 calc_capabilities = wrapped_capabilities
55 55 return calc_capabilities(orig, repo, proto)
56 56 else:
57 57 logger.debug('Extension largefiles disabled')
58 58 return orig(repo, proto)
59 59
60 60 return _dynamic_capabilities
61 61
62 62
63 63 def patch_subrepo_type_mapping():
64 64 from collections import defaultdict
65 from hgcompat import subrepo, subrepoutil
65 from .hgcompat import subrepo, subrepoutil
66 66 from vcsserver.exceptions import SubrepoMergeException
67 67
68 68 class NoOpSubrepo(subrepo.abstractsubrepo):
69 69
70 70 def __init__(self, ctx, path, *args, **kwargs):
71 71 """Initialize abstractsubrepo part
72 72
73 73 ``ctx`` is the context referring this subrepository in the
74 74 parent repository.
75 75
76 76 ``path`` is the path to this subrepository as seen from
77 77 innermost repository.
78 78 """
79 79 self.ui = ctx.repo().ui
80 80 self._ctx = ctx
81 81 self._path = path
82 82
83 83 def storeclean(self, path):
84 84 """
85 85 returns true if the repository has not changed since it was last
86 86 cloned from or pushed to a given repository.
87 87 """
88 88 return True
89 89
90 90 def dirty(self, ignoreupdate=False, missing=False):
91 91 """returns true if the dirstate of the subrepo is dirty or does not
92 92 match current stored state. If ignoreupdate is true, only check
93 93 whether the subrepo has uncommitted changes in its dirstate.
94 94 """
95 95 return False
96 96
97 97 def basestate(self):
98 98 """current working directory base state, disregarding .hgsubstate
99 99 state and working directory modifications"""
100 100 substate = subrepoutil.state(self._ctx, self.ui)
101 101 file_system_path, rev, repotype = substate.get(self._path)
102 102 return rev
103 103
104 104 def remove(self):
105 105 """remove the subrepo
106 106
107 107 (should verify the dirstate is not dirty first)
108 108 """
109 109 pass
110 110
111 111 def get(self, state, overwrite=False):
112 112 """run whatever commands are needed to put the subrepo into
113 113 this state
114 114 """
115 115 pass
116 116
117 117 def merge(self, state):
118 118 """merge currently-saved state with the new state."""
119 119 raise SubrepoMergeException()()
120 120
121 121 def push(self, opts):
122 122 """perform whatever action is analogous to 'hg push'
123 123
124 124 This may be a no-op on some systems.
125 125 """
126 126 pass
127 127
128 128 # Patch subrepo type mapping to always return our NoOpSubrepo class
129 129 # whenever a subrepo class is looked up.
130 130 subrepo.types = {
131 131 'hg': NoOpSubrepo,
132 132 'git': NoOpSubrepo,
133 133 'svn': NoOpSubrepo
134 134 }
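
As the module docstring notes, importing this module has no side effects; the patches only take effect when called explicitly, presumably during server startup, e.g.:

    from vcsserver import hgpatches

    # both calls are module-level monkey patches and can be applied once at boot
    hgpatches.patch_largefiles_capabilities()
    hgpatches.patch_subrepo_type_mapping()
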
@@ -1,205 +1,202 b''
1 # -*- coding: utf-8 -*-
2
3 1 # RhodeCode VCSServer provides access to different vcs backends via network.
4 # Copyright (C) 2014-2020 RhodeCode GmbH
2 # Copyright (C) 2014-2023 RhodeCode GmbH
5 3 #
6 4 # This program is free software; you can redistribute it and/or modify
7 5 # it under the terms of the GNU General Public License as published by
8 6 # the Free Software Foundation; either version 3 of the License, or
9 7 # (at your option) any later version.
10 8 #
11 9 # This program is distributed in the hope that it will be useful,
12 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 12 # GNU General Public License for more details.
15 13 #
16 14 # You should have received a copy of the GNU General Public License
17 15 # along with this program; if not, write to the Free Software Foundation,
18 16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 17
20 18 import re
21 19 import os
22 20 import sys
23 21 import datetime
24 22 import logging
25 23 import pkg_resources
26 24
27 25 import vcsserver
26 from vcsserver.str_utils import safe_bytes
28 27
29 28 log = logging.getLogger(__name__)
30 29
31 30
32 31 def get_git_hooks_path(repo_path, bare):
33 32 hooks_path = os.path.join(repo_path, 'hooks')
34 33 if not bare:
35 34 hooks_path = os.path.join(repo_path, '.git', 'hooks')
36 35
37 36 return hooks_path
38 37
39 38
40 39 def install_git_hooks(repo_path, bare, executable=None, force_create=False):
41 40 """
42 41 Creates a RhodeCode hook inside a git repository
43 42
44 43 :param repo_path: path to repository
45 44 :param executable: binary executable to put in the hooks
46 45 :param force_create: Create even if same name hook exists
47 46 """
48 47 executable = executable or sys.executable
49 48 hooks_path = get_git_hooks_path(repo_path, bare)
50 49
51 50 if not os.path.isdir(hooks_path):
52 os.makedirs(hooks_path, mode=0o777)
51 os.makedirs(hooks_path, mode=0o777, exist_ok=True)
53 52
54 53 tmpl_post = pkg_resources.resource_string(
55 54 'vcsserver', '/'.join(
56 55 ('hook_utils', 'hook_templates', 'git_post_receive.py.tmpl')))
57 56 tmpl_pre = pkg_resources.resource_string(
58 57 'vcsserver', '/'.join(
59 58 ('hook_utils', 'hook_templates', 'git_pre_receive.py.tmpl')))
60 59
61 60 path = '' # not used for now
62 61 timestamp = datetime.datetime.utcnow().isoformat()
63 62
64 63 for h_type, template in [('pre', tmpl_pre), ('post', tmpl_post)]:
65 64 log.debug('Installing git hook in repo %s', repo_path)
66 65 _hook_file = os.path.join(hooks_path, '%s-receive' % h_type)
67 66 _rhodecode_hook = check_rhodecode_hook(_hook_file)
68 67
69 68 if _rhodecode_hook or force_create:
70 69 log.debug('writing git %s hook file at %s !', h_type, _hook_file)
71 70 try:
72 71 with open(_hook_file, 'wb') as f:
73 template = template.replace(
74 '_TMPL_', vcsserver.__version__)
75 template = template.replace('_DATE_', timestamp)
76 template = template.replace('_ENV_', executable)
77 template = template.replace('_PATH_', path)
72 template = template.replace(b'_TMPL_', safe_bytes(vcsserver.__version__))
73 template = template.replace(b'_DATE_', safe_bytes(timestamp))
74 template = template.replace(b'_ENV_', safe_bytes(executable))
75 template = template.replace(b'_PATH_', safe_bytes(path))
78 76 f.write(template)
79 77 os.chmod(_hook_file, 0o755)
80 except IOError:
78 except OSError:
81 79 log.exception('error writing hook file %s', _hook_file)
82 80 else:
83 81 log.debug('skipping writing hook file')
84 82
85 83 return True
86 84
87 85
88 86 def get_svn_hooks_path(repo_path):
89 87 hooks_path = os.path.join(repo_path, 'hooks')
90 88
91 89 return hooks_path
92 90
93 91
94 92 def install_svn_hooks(repo_path, executable=None, force_create=False):
95 93 """
96 94 Creates RhodeCode hooks inside a svn repository
97 95
98 96 :param repo_path: path to repository
99 97 :param executable: binary executable to put in the hooks
100 98 :param force_create: Create even if same name hook exists
101 99 """
102 100 executable = executable or sys.executable
103 101 hooks_path = get_svn_hooks_path(repo_path)
104 102 if not os.path.isdir(hooks_path):
105 os.makedirs(hooks_path, mode=0o777)
103 os.makedirs(hooks_path, mode=0o777, exist_ok=True)
106 104
107 105 tmpl_post = pkg_resources.resource_string(
108 106 'vcsserver', '/'.join(
109 107 ('hook_utils', 'hook_templates', 'svn_post_commit_hook.py.tmpl')))
110 108 tmpl_pre = pkg_resources.resource_string(
111 109 'vcsserver', '/'.join(
112 110 ('hook_utils', 'hook_templates', 'svn_pre_commit_hook.py.tmpl')))
113 111
114 112 path = '' # not used for now
115 113 timestamp = datetime.datetime.utcnow().isoformat()
116 114
117 115 for h_type, template in [('pre', tmpl_pre), ('post', tmpl_post)]:
118 116 log.debug('Installing svn hook in repo %s', repo_path)
119 117 _hook_file = os.path.join(hooks_path, '%s-commit' % h_type)
120 118 _rhodecode_hook = check_rhodecode_hook(_hook_file)
121 119
122 120 if _rhodecode_hook or force_create:
123 121 log.debug('writing svn %s hook file at %s !', h_type, _hook_file)
124 122
125 123 try:
126 124 with open(_hook_file, 'wb') as f:
127 template = template.replace(
128 '_TMPL_', vcsserver.__version__)
129 template = template.replace('_DATE_', timestamp)
130 template = template.replace('_ENV_', executable)
131 template = template.replace('_PATH_', path)
125 template = template.replace(b'_TMPL_', safe_bytes(vcsserver.__version__))
126 template = template.replace(b'_DATE_', safe_bytes(timestamp))
127 template = template.replace(b'_ENV_', safe_bytes(executable))
128 template = template.replace(b'_PATH_', safe_bytes(path))
132 129
133 130 f.write(template)
134 131 os.chmod(_hook_file, 0o755)
135 except IOError:
132 except OSError:
136 133 log.exception('error writing hook file %s', _hook_file)
137 134 else:
138 135 log.debug('skipping writing hook file')
139 136
140 137 return True
141 138
142 139
143 140 def get_version_from_hook(hook_path):
144 version = ''
141 version = b''
145 142 hook_content = read_hook_content(hook_path)
146 matches = re.search(r'(?:RC_HOOK_VER)\s*=\s*(.*)', hook_content)
143 matches = re.search(rb'RC_HOOK_VER\s*=\s*(.*)', hook_content)
147 144 if matches:
148 145 try:
149 146 version = matches.groups()[0]
150 147 log.debug('got version %s from hooks.', version)
151 148 except Exception:
152 149 log.exception("Exception while reading the hook version.")
153 return version.replace("'", "")
150 return version.replace(b"'", b"")
154 151
155 152
156 153 def check_rhodecode_hook(hook_path):
157 154 """
158 155 Check if the hook was created by RhodeCode
159 156 """
160 157 if not os.path.exists(hook_path):
161 158 return True
162 159
163 160 log.debug('hook exists, checking if it is from RhodeCode')
164 161
165 162 version = get_version_from_hook(hook_path)
166 163 if version:
167 164 return True
168 165
169 166 return False
170 167
171 168
172 def read_hook_content(hook_path):
173 content = ''
169 def read_hook_content(hook_path) -> bytes:
170 content = b''
174 171 if os.path.isfile(hook_path):
175 172 with open(hook_path, 'rb') as f:
176 173 content = f.read()
177 174 return content
178 175
179 176
180 177 def get_git_pre_hook_version(repo_path, bare):
181 178 hooks_path = get_git_hooks_path(repo_path, bare)
182 179 _hook_file = os.path.join(hooks_path, 'pre-receive')
183 180 version = get_version_from_hook(_hook_file)
184 181 return version
185 182
186 183
187 184 def get_git_post_hook_version(repo_path, bare):
188 185 hooks_path = get_git_hooks_path(repo_path, bare)
189 186 _hook_file = os.path.join(hooks_path, 'post-receive')
190 187 version = get_version_from_hook(_hook_file)
191 188 return version
192 189
193 190
194 191 def get_svn_pre_hook_version(repo_path):
195 192 hooks_path = get_svn_hooks_path(repo_path)
196 193 _hook_file = os.path.join(hooks_path, 'pre-commit')
197 194 version = get_version_from_hook(_hook_file)
198 195 return version
199 196
200 197
201 198 def get_svn_post_hook_version(repo_path):
202 199 hooks_path = get_svn_hooks_path(repo_path)
203 200 _hook_file = os.path.join(hooks_path, 'post-commit')
204 201 version = get_version_from_hook(_hook_file)
205 202 return version
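
The version parsing now stays in bytes end to end. For example, given hook content containing RC_HOOK_VER = '5.0.0', get_version_from_hook returns b'5.0.0' with the quotes stripped; the core of it, mirroring the code above (the version string is illustrative):

    import re

    content = b"RC_HOOK_VER = '5.0.0'\n"   # as returned by read_hook_content()
    matches = re.search(rb'RC_HOOK_VER\s*=\s*(.*)', content)
    version = matches.groups()[0].replace(b"'", b"")
    assert version == b'5.0.0'
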
@@ -1,51 +1,51 b''
1 1 #!_ENV_
2 2 import os
3 3 import sys
4 4 path_adjust = [_PATH_]
5 5
6 6 if path_adjust:
7 7 sys.path = path_adjust
8 8
9 9 try:
10 10 from vcsserver import hooks
11 11 except ImportError:
12 12 if os.environ.get('RC_DEBUG_GIT_HOOK'):
13 13 import traceback
14 print traceback.format_exc()
14 print(traceback.format_exc())
15 15 hooks = None
16 16
17 17
18 18 # TIMESTAMP: _DATE_
19 19 RC_HOOK_VER = '_TMPL_'
20 20
21 21
22 22 def main():
23 23 if hooks is None:
24 24 # exit with success if we cannot import vcsserver.hooks !!
25 25 # this allows pushing to this repo even without rhodecode
26 26 sys.exit(0)
27 27
28 28 if os.environ.get('RC_SKIP_HOOKS') or os.environ.get('RC_SKIP_GIT_HOOKS'):
29 29 sys.exit(0)
30 30
31 31 repo_path = os.getcwd()
32 32 push_data = sys.stdin.readlines()
33 33 os.environ['RC_HOOK_VER'] = RC_HOOK_VER
34 34 # os.environ is modified here by a subprocess call that
35 35 # runs git and later git executes this hook.
36 36 # Environ gets some additional info from rhodecode system
37 37 # like IP or username from basic-auth
38 38 try:
39 39 result = hooks.git_post_receive(repo_path, push_data, os.environ)
40 40 sys.exit(result)
41 41 except Exception as error:
42 42 # TODO: johbo: Improve handling of this special case
43 43 if not getattr(error, '_vcs_kind', None) == 'repo_locked':
44 44 raise
45 print 'ERROR:', error
45 print(f'ERROR: {error}')
46 46 sys.exit(1)
47 47 sys.exit(0)
48 48
49 49
50 50 if __name__ == '__main__':
51 51 main()
@@ -1,51 +1,51 b''
1 1 #!_ENV_
2 2 import os
3 3 import sys
4 4 path_adjust = [_PATH_]
5 5
6 6 if path_adjust:
7 7 sys.path = path_adjust
8 8
9 9 try:
10 10 from vcsserver import hooks
11 11 except ImportError:
12 12 if os.environ.get('RC_DEBUG_GIT_HOOK'):
13 13 import traceback
14 print traceback.format_exc()
14 print(traceback.format_exc())
15 15 hooks = None
16 16
17 17
18 18 # TIMESTAMP: _DATE_
19 19 RC_HOOK_VER = '_TMPL_'
20 20
21 21
22 22 def main():
23 23 if hooks is None:
24 24 # exit with success if we cannot import vcsserver.hooks !!
25 25 # this allows pushing to this repo even without rhodecode
26 26 sys.exit(0)
27 27
28 28 if os.environ.get('RC_SKIP_HOOKS') or os.environ.get('RC_SKIP_GIT_HOOKS'):
29 29 sys.exit(0)
30 30
31 31 repo_path = os.getcwd()
32 32 push_data = sys.stdin.readlines()
33 33 os.environ['RC_HOOK_VER'] = RC_HOOK_VER
34 34 # os.environ is modified here by a subprocess call that
35 35 # runs git and later git executes this hook.
36 36 # Environ gets some additional info from rhodecode system
37 37 # like IP or username from basic-auth
38 38 try:
39 39 result = hooks.git_pre_receive(repo_path, push_data, os.environ)
40 40 sys.exit(result)
41 41 except Exception as error:
42 42 # TODO: johbo: Improve handling of this special case
43 43 if not getattr(error, '_vcs_kind', None) == 'repo_locked':
44 44 raise
45 print 'ERROR:', error
45 print(f'ERROR: {error}')
46 46 sys.exit(1)
47 47 sys.exit(0)
48 48
49 49
50 50 if __name__ == '__main__':
51 51 main()
@@ -1,50 +1,50 b''
1 1 #!_ENV_
2 2
3 3 import os
4 4 import sys
5 5 path_adjust = [_PATH_]
6 6
7 7 if path_adjust:
8 8 sys.path = path_adjust
9 9
10 10 try:
11 11 from vcsserver import hooks
12 12 except ImportError:
13 13 if os.environ.get('RC_DEBUG_SVN_HOOK'):
14 14 import traceback
15 print traceback.format_exc()
15 print(traceback.format_exc())
16 16 hooks = None
17 17
18 18
19 19 # TIMESTAMP: _DATE_
20 20 RC_HOOK_VER = '_TMPL_'
21 21
22 22
23 23 def main():
24 24 if hooks is None:
25 25 # exit with success if we cannot import vcsserver.hooks !!
26 26 # this allows pushing to this repo even without rhodecode
27 27 sys.exit(0)
28 28
29 29 if os.environ.get('RC_SKIP_HOOKS') or os.environ.get('RC_SKIP_SVN_HOOKS'):
30 30 sys.exit(0)
31 31 repo_path = os.getcwd()
32 32 push_data = sys.argv[1:]
33 33
34 34 os.environ['RC_HOOK_VER'] = RC_HOOK_VER
35 35
36 36 try:
37 37 result = hooks.svn_post_commit(repo_path, push_data, os.environ)
38 38 sys.exit(result)
39 39 except Exception as error:
40 40 # TODO: johbo: Improve handling of this special case
41 41 if not getattr(error, '_vcs_kind', None) == 'repo_locked':
42 42 raise
43 print 'ERROR:', error
43 print(f'ERROR: {error}')
44 44 sys.exit(1)
45 45 sys.exit(0)
46 46
47 47
48 48
49 49 if __name__ == '__main__':
50 50 main()
@@ -1,52 +1,52 b''
1 1 #!_ENV_
2 2
3 3 import os
4 4 import sys
5 5 path_adjust = [_PATH_]
6 6
7 7 if path_adjust:
8 8 sys.path = path_adjust
9 9
10 10 try:
11 11 from vcsserver import hooks
12 12 except ImportError:
13 13 if os.environ.get('RC_DEBUG_SVN_HOOK'):
14 14 import traceback
15 print traceback.format_exc()
15 print(traceback.format_exc())
16 16 hooks = None
17 17
18 18
19 19 # TIMESTAMP: _DATE_
20 20 RC_HOOK_VER = '_TMPL_'
21 21
22 22
23 23 def main():
24 24 if os.environ.get('SSH_READ_ONLY') == '1':
25 25 sys.stderr.write('Only read-only access is allowed')
26 26 sys.exit(1)
27 27
28 28 if hooks is None:
29 29 # exit with success if we cannot import vcsserver.hooks !!
30 30 # this allows pushing to this repo even without rhodecode
31 31 sys.exit(0)
32 32 if os.environ.get('RC_SKIP_HOOKS') or os.environ.get('RC_SKIP_SVN_HOOKS'):
33 33 sys.exit(0)
34 34 repo_path = os.getcwd()
35 35 push_data = sys.argv[1:]
36 36
37 37 os.environ['RC_HOOK_VER'] = RC_HOOK_VER
38 38
39 39 try:
40 40 result = hooks.svn_pre_commit(repo_path, push_data, os.environ)
41 41 sys.exit(result)
42 42 except Exception as error:
43 43 # TODO: johbo: Improve handling of this special case
44 44 if not getattr(error, '_vcs_kind', None) == 'repo_locked':
45 45 raise
46 print 'ERROR:', error
46 print(f'ERROR: {error}')
47 47 sys.exit(1)
48 48 sys.exit(0)
49 49
50 50
51 51 if __name__ == '__main__':
52 52 main()
@@ -1,729 +1,779 b''
1 # -*- coding: utf-8 -*-
2
3 1 # RhodeCode VCSServer provides access to different vcs backends via network.
4 # Copyright (C) 2014-2020 RhodeCode GmbH
2 # Copyright (C) 2014-2023 RhodeCode GmbH
5 3 #
6 4 # This program is free software; you can redistribute it and/or modify
7 5 # it under the terms of the GNU General Public License as published by
8 6 # the Free Software Foundation; either version 3 of the License, or
9 7 # (at your option) any later version.
10 8 #
11 9 # This program is distributed in the hope that it will be useful,
12 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 12 # GNU General Public License for more details.
15 13 #
16 14 # You should have received a copy of the GNU General Public License
17 15 # along with this program; if not, write to the Free Software Foundation,
18 16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 17
20 18 import io
21 19 import os
22 20 import sys
23 21 import logging
24 22 import collections
25 23 import importlib
26 24 import base64
25 import msgpack
26 import dataclasses
27 import pygit2
27 28
28 from httplib import HTTPConnection
29 import http.client
29 30
30 31
31 32 import mercurial.scmutil
32 33 import mercurial.node
33 import simplejson as json
34 34
35 from vcsserver.lib.rc_json import json
35 36 from vcsserver import exceptions, subprocessio, settings
37 from vcsserver.str_utils import ascii_str, safe_str
38 from vcsserver.remote.git import Repository
36 39
37 40 log = logging.getLogger(__name__)
38 41
39 42
40 43 class HooksHttpClient(object):
44 proto = 'msgpack.v1'
41 45 connection = None
42 46
43 47 def __init__(self, hooks_uri):
44 48 self.hooks_uri = hooks_uri
45 49
50 def __repr__(self):
51 return f'{self.__class__}(hook_uri={self.hooks_uri}, proto={self.proto})'
52
46 53 def __call__(self, method, extras):
47 connection = HTTPConnection(self.hooks_uri)
48 body = self._serialize(method, extras)
49 try:
50 connection.request('POST', '/', body)
51 except Exception:
52 log.error('Hooks calling Connection failed on %s', connection.__dict__)
53 raise
54 response = connection.getresponse()
55
56 response_data = response.read()
54 connection = http.client.HTTPConnection(self.hooks_uri)
55 # binary msgpack body
56 headers, body = self._serialize(method, extras)
57 log.debug('Doing a new hooks call using HTTPConnection to %s', self.hooks_uri)
57 58
58 59 try:
59 return json.loads(response_data)
60 except Exception:
61 log.exception('Failed to decode hook response json data. '
62 'response_code:%s, raw_data:%s',
63 response.status, response_data)
64 raise
60 try:
61 connection.request('POST', '/', body, headers)
62 except Exception as error:
63 log.error('Hooks calling Connection failed on %s, org error: %s', connection.__dict__, error)
64 raise
65 65
66 def _serialize(self, hook_name, extras):
66 response = connection.getresponse()
67 try:
68 return msgpack.load(response)
69 except Exception:
70 response_data = response.read()
71 log.exception('Failed to decode hook response json data. '
72 'response_code:%s, raw_data:%s',
73 response.status, response_data)
74 raise
75 finally:
76 connection.close()
77
78 @classmethod
79 def _serialize(cls, hook_name, extras):
67 80 data = {
68 81 'method': hook_name,
69 82 'extras': extras
70 83 }
71 return json.dumps(data)
84 headers = {
85 "rc-hooks-protocol": cls.proto,
86 "Connection": "keep-alive"
87 }
88 return headers, msgpack.packb(data)
72 89
73 90
74 91 class HooksDummyClient(object):
75 92 def __init__(self, hooks_module):
76 93 self._hooks_module = importlib.import_module(hooks_module)
77 94
78 95 def __call__(self, hook_name, extras):
79 96 with self._hooks_module.Hooks() as hooks:
80 97 return getattr(hooks, hook_name)(extras)
81 98
82 99
83 100 class HooksShadowRepoClient(object):
84 101
85 102 def __call__(self, hook_name, extras):
86 103 return {'output': '', 'status': 0}
87 104
88 105
89 106 class RemoteMessageWriter(object):
90 107 """Writer base class."""
91 108 def write(self, message):
92 109 raise NotImplementedError()
93 110
94 111
95 112 class HgMessageWriter(RemoteMessageWriter):
96 113 """Writer that knows how to send messages to mercurial clients."""
97 114
98 115 def __init__(self, ui):
99 116 self.ui = ui
100 117
101 def write(self, message):
118 def write(self, message: str):
102 119 # TODO: Check why the quiet flag is set by default.
103 120 old = self.ui.quiet
104 121 self.ui.quiet = False
105 122 self.ui.status(message.encode('utf-8'))
106 123 self.ui.quiet = old
107 124
108 125
109 126 class GitMessageWriter(RemoteMessageWriter):
110 127 """Writer that knows how to send messages to git clients."""
111 128
112 129 def __init__(self, stdout=None):
113 130 self.stdout = stdout or sys.stdout
114 131
115 def write(self, message):
116 self.stdout.write(message.encode('utf-8'))
132 def write(self, message: str):
133 self.stdout.write(message)
117 134
118 135
119 136 class SvnMessageWriter(RemoteMessageWriter):
120 137 """Writer that knows how to send messages to svn clients."""
121 138
122 139 def __init__(self, stderr=None):
123 140 # SVN needs data sent to stderr for back-to-client messaging
124 141 self.stderr = stderr or sys.stderr
125 142
126 143 def write(self, message):
127 144 self.stderr.write(message.encode('utf-8'))
128 145
129 146
130 147 def _handle_exception(result):
131 148 exception_class = result.get('exception')
132 149 exception_traceback = result.get('exception_traceback')
150 log.debug('Handling hook-call exception: %s', exception_class)
133 151
134 152 if exception_traceback:
135 153 log.error('Got traceback from remote call:%s', exception_traceback)
136 154
137 155 if exception_class == 'HTTPLockedRC':
138 156 raise exceptions.RepositoryLockedException()(*result['exception_args'])
139 157 elif exception_class == 'HTTPBranchProtected':
140 158 raise exceptions.RepositoryBranchProtectedException()(*result['exception_args'])
141 159 elif exception_class == 'RepositoryError':
142 160 raise exceptions.VcsException()(*result['exception_args'])
143 161 elif exception_class:
144 raise Exception('Got remote exception "%s" with args "%s"' %
145 (exception_class, result['exception_args']))
162 raise Exception(
163 f"""Got remote exception "{exception_class}" with args "{result['exception_args']}" """
164 )
146 165
147 166
148 167 def _get_hooks_client(extras):
149 168 hooks_uri = extras.get('hooks_uri')
150 169 is_shadow_repo = extras.get('is_shadow_repo')
170
151 171 if hooks_uri:
152 172 return HooksHttpClient(extras['hooks_uri'])
153 173 elif is_shadow_repo:
154 174 return HooksShadowRepoClient()
155 175 else:
156 176 return HooksDummyClient(extras['hooks_module'])
157 177
158 178
159 179 def _call_hook(hook_name, extras, writer):
160 180 hooks_client = _get_hooks_client(extras)
161 181 log.debug('Hooks, using client:%s', hooks_client)
162 182 result = hooks_client(hook_name, extras)
163 183 log.debug('Hooks got result: %s', result)
164
165 184 _handle_exception(result)
166 185 writer.write(result['output'])
167 186
168 187 return result['status']
169 188
170 189
171 190 def _extras_from_ui(ui):
172 hook_data = ui.config('rhodecode', 'RC_SCM_DATA')
191 hook_data = ui.config(b'rhodecode', b'RC_SCM_DATA')
173 192 if not hook_data:
174 193 # maybe it's inside environ ?
175 194 env_hook_data = os.environ.get('RC_SCM_DATA')
176 195 if env_hook_data:
177 196 hook_data = env_hook_data
178 197
179 198 extras = {}
180 199 if hook_data:
181 200 extras = json.loads(hook_data)
182 201 return extras
183 202
184 203
185 204 def _rev_range_hash(repo, node, check_heads=False):
186 205 from vcsserver.hgcompat import get_ctx
187 206
188 207 commits = []
189 208 revs = []
190 209 start = get_ctx(repo, node).rev()
191 210 end = len(repo)
192 211 for rev in range(start, end):
193 212 revs.append(rev)
194 213 ctx = get_ctx(repo, rev)
195 commit_id = mercurial.node.hex(ctx.node())
196 branch = ctx.branch()
214 commit_id = ascii_str(mercurial.node.hex(ctx.node()))
215 branch = safe_str(ctx.branch())
197 216 commits.append((commit_id, branch))
198 217
199 218 parent_heads = []
200 219 if check_heads:
201 220 parent_heads = _check_heads(repo, start, end, revs)
202 221 return commits, parent_heads
203 222
204 223
205 224 def _check_heads(repo, start, end, commits):
206 225 from vcsserver.hgcompat import get_ctx
207 226 changelog = repo.changelog
208 227 parents = set()
209 228
210 229 for new_rev in commits:
211 230 for p in changelog.parentrevs(new_rev):
212 231 if p == mercurial.node.nullrev:
213 232 continue
214 233 if p < start:
215 234 parents.add(p)
216 235
217 236 for p in parents:
218 237 branch = get_ctx(repo, p).branch()
219 238 # The heads descending from that parent, on the same branch
220 parent_heads = set([p])
221 reachable = set([p])
222 for x in xrange(p + 1, end):
239 parent_heads = {p}
240 reachable = {p}
241 for x in range(p + 1, end):
223 242 if get_ctx(repo, x).branch() != branch:
224 243 continue
225 244 for pp in changelog.parentrevs(x):
226 245 if pp in reachable:
227 246 reachable.add(x)
228 247 parent_heads.discard(pp)
229 248 parent_heads.add(x)
230 249 # More than one head? Suggest merging
231 250 if len(parent_heads) > 1:
232 251 return list(parent_heads)
233 252
234 253 return []
235 254
236 255
237 256 def _get_git_env():
238 257 env = {}
239 258 for k, v in os.environ.items():
240 259 if k.startswith('GIT'):
241 260 env[k] = v
242 261
243 262 # serialized version
244 263 return [(k, v) for k, v in env.items()]
245 264
246 265
247 266 def _get_hg_env(old_rev, new_rev, txnid, repo_path):
248 267 env = {}
249 268 for k, v in os.environ.items():
250 269 if k.startswith('HG'):
251 270 env[k] = v
252 271
253 272 env['HG_NODE'] = old_rev
254 273 env['HG_NODE_LAST'] = new_rev
255 274 env['HG_TXNID'] = txnid
256 275 env['HG_PENDING'] = repo_path
257 276
258 277 return [(k, v) for k, v in env.items()]
259 278
260 279
261 280 def repo_size(ui, repo, **kwargs):
262 281 extras = _extras_from_ui(ui)
263 282 return _call_hook('repo_size', extras, HgMessageWriter(ui))
264 283
265 284
266 285 def pre_pull(ui, repo, **kwargs):
267 286 extras = _extras_from_ui(ui)
268 287 return _call_hook('pre_pull', extras, HgMessageWriter(ui))
269 288
270 289
271 290 def pre_pull_ssh(ui, repo, **kwargs):
272 291 extras = _extras_from_ui(ui)
273 292 if extras and extras.get('SSH'):
274 293 return pre_pull(ui, repo, **kwargs)
275 294 return 0
276 295
277 296
278 297 def post_pull(ui, repo, **kwargs):
279 298 extras = _extras_from_ui(ui)
280 299 return _call_hook('post_pull', extras, HgMessageWriter(ui))
281 300
282 301
283 302 def post_pull_ssh(ui, repo, **kwargs):
284 303 extras = _extras_from_ui(ui)
285 304 if extras and extras.get('SSH'):
286 305 return post_pull(ui, repo, **kwargs)
287 306 return 0
288 307
289 308
290 309 def pre_push(ui, repo, node=None, **kwargs):
291 310 """
292 311 Mercurial pre_push hook
293 312 """
294 313 extras = _extras_from_ui(ui)
295 314 detect_force_push = extras.get('detect_force_push')
296 315
297 316 rev_data = []
298 if node and kwargs.get('hooktype') == 'pretxnchangegroup':
317 hook_type: str = safe_str(kwargs.get('hooktype'))
318
319 if node and hook_type == 'pretxnchangegroup':
299 320 branches = collections.defaultdict(list)
300 321 commits, _heads = _rev_range_hash(repo, node, check_heads=detect_force_push)
301 322 for commit_id, branch in commits:
302 323 branches[branch].append(commit_id)
303 324
304 325 for branch, commits in branches.items():
305 old_rev = kwargs.get('node_last') or commits[0]
326 old_rev = ascii_str(kwargs.get('node_last')) or commits[0]
306 327 rev_data.append({
307 328 'total_commits': len(commits),
308 329 'old_rev': old_rev,
309 330 'new_rev': commits[-1],
310 331 'ref': '',
311 332 'type': 'branch',
312 333 'name': branch,
313 334 })
314 335
315 336 for push_ref in rev_data:
316 337 push_ref['multiple_heads'] = _heads
317 338
318 339 repo_path = os.path.join(
319 340 extras.get('repo_store', ''), extras.get('repository', ''))
320 341 push_ref['hg_env'] = _get_hg_env(
321 342 old_rev=push_ref['old_rev'],
322 new_rev=push_ref['new_rev'], txnid=kwargs.get('txnid'),
343 new_rev=push_ref['new_rev'], txnid=ascii_str(kwargs.get('txnid')),
323 344 repo_path=repo_path)
324 345
325 extras['hook_type'] = kwargs.get('hooktype', 'pre_push')
346 extras['hook_type'] = hook_type or 'pre_push'
326 347 extras['commit_ids'] = rev_data
327 348
328 349 return _call_hook('pre_push', extras, HgMessageWriter(ui))
329 350
330 351
331 352 def pre_push_ssh(ui, repo, node=None, **kwargs):
332 353 extras = _extras_from_ui(ui)
333 354 if extras.get('SSH'):
334 355 return pre_push(ui, repo, node, **kwargs)
335 356
336 357 return 0
337 358
338 359
339 360 def pre_push_ssh_auth(ui, repo, node=None, **kwargs):
340 361 """
341 362 Mercurial pre_push hook for SSH
342 363 """
343 364 extras = _extras_from_ui(ui)
344 365 if extras.get('SSH'):
345 366 permission = extras['SSH_PERMISSIONS']
346 367
347 368 if 'repository.write' == permission or 'repository.admin' == permission:
348 369 return 0
349 370
350 371 # non-zero ret code
351 372 return 1
352 373
353 374 return 0
354 375
355 376
356 377 def post_push(ui, repo, node, **kwargs):
357 378 """
358 379 Mercurial post_push hook
359 380 """
360 381 extras = _extras_from_ui(ui)
361 382
362 383 commit_ids = []
363 384 branches = []
364 385 bookmarks = []
365 386 tags = []
387 hook_type: str = safe_str(kwargs.get('hooktype'))
366 388
367 389 commits, _heads = _rev_range_hash(repo, node)
368 390 for commit_id, branch in commits:
369 391 commit_ids.append(commit_id)
370 392 if branch not in branches:
371 393 branches.append(branch)
372 394
373 if hasattr(ui, '_rc_pushkey_branches'):
374 bookmarks = ui._rc_pushkey_branches
395 if hasattr(ui, '_rc_pushkey_bookmarks'):
396 bookmarks = ui._rc_pushkey_bookmarks
375 397
376 extras['hook_type'] = kwargs.get('hooktype', 'post_push')
398 extras['hook_type'] = hook_type or 'post_push'
377 399 extras['commit_ids'] = commit_ids
400
378 401 extras['new_refs'] = {
379 402 'branches': branches,
380 403 'bookmarks': bookmarks,
381 404 'tags': tags
382 405 }
383 406
384 407 return _call_hook('post_push', extras, HgMessageWriter(ui))
385 408
386 409
387 410 def post_push_ssh(ui, repo, node, **kwargs):
388 411 """
389 412 Mercurial post_push hook for SSH
390 413 """
391 414 if _extras_from_ui(ui).get('SSH'):
392 415 return post_push(ui, repo, node, **kwargs)
393 416 return 0
394 417
395 418
396 419 def key_push(ui, repo, **kwargs):
397 420 from vcsserver.hgcompat import get_ctx
398 if kwargs['new'] != '0' and kwargs['namespace'] == 'bookmarks':
421
422 if kwargs['new'] != b'0' and kwargs['namespace'] == b'bookmarks':
399 423 # store new bookmarks in our UI object propagated later to post_push
400 ui._rc_pushkey_branches = get_ctx(repo, kwargs['key']).bookmarks()
424 ui._rc_pushkey_bookmarks = get_ctx(repo, kwargs['key']).bookmarks()
401 425 return
402 426
403 427
404 428 # backward compat
405 429 log_pull_action = post_pull
406 430
407 431 # backward compat
408 432 log_push_action = post_push
409 433
410 434
411 435 def handle_git_pre_receive(unused_repo_path, unused_revs, unused_env):
412 436 """
413 437 Old hook name: keep here for backward compatibility.
414 438
415 439 This is only required when the installed git hooks are not upgraded.
416 440 """
417 441 pass
418 442
419 443
420 444 def handle_git_post_receive(unused_repo_path, unused_revs, unused_env):
421 445 """
422 446 Old hook name: keep here for backward compatibility.
423 447
424 448 This is only required when the installed git hooks are not upgraded.
425 449 """
426 450 pass
427 451
428 452
429 HookResponse = collections.namedtuple('HookResponse', ('status', 'output'))
453 @dataclasses.dataclass
454 class HookResponse:
455 status: int
456 output: str
430 457
431 458
432 def git_pre_pull(extras):
459 def git_pre_pull(extras) -> HookResponse:
433 460 """
434 461 Pre pull hook.
435 462
436 463 :param extras: dictionary containing the keys defined in simplevcs
437 464 :type extras: dict
438 465
439 466 :return: status code of the hook. 0 for success.
440 467 :rtype: int
441 468 """
469
442 470 if 'pull' not in extras['hooks']:
443 471 return HookResponse(0, '')
444 472
445 stdout = io.BytesIO()
473 stdout = io.StringIO()
446 474 try:
447 status = _call_hook('pre_pull', extras, GitMessageWriter(stdout))
475 status_code = _call_hook('pre_pull', extras, GitMessageWriter(stdout))
476
448 477 except Exception as error:
449 status = 128
450 stdout.write('ERROR: %s\n' % str(error))
478 log.exception('Failed to call pre_pull hook')
479 status_code = 128
480 stdout.write(f'ERROR: {error}\n')
451 481
452 return HookResponse(status, stdout.getvalue())
482 return HookResponse(status_code, stdout.getvalue())
453 483
454 484
455 def git_post_pull(extras):
485 def git_post_pull(extras) -> HookResponse:
456 486 """
457 487 Post pull hook.
458 488
459 489 :param extras: dictionary containing the keys defined in simplevcs
460 490 :type extras: dict
461 491
462 492 :return: status code of the hook. 0 for success.
463 493 :rtype: int
464 494 """
465 495 if 'pull' not in extras['hooks']:
466 496 return HookResponse(0, '')
467 497
468 stdout = io.BytesIO()
498 stdout = io.StringIO()
469 499 try:
470 500 status = _call_hook('post_pull', extras, GitMessageWriter(stdout))
471 501 except Exception as error:
472 502 status = 128
473 stdout.write('ERROR: %s\n' % error)
503 stdout.write(f'ERROR: {error}\n')
474 504
475 505 return HookResponse(status, stdout.getvalue())
476 506
477 507
478 508 def _parse_git_ref_lines(revision_lines):
479 509 rev_data = []
480 510 for revision_line in revision_lines or []:
481 511 old_rev, new_rev, ref = revision_line.strip().split(' ')
482 512 ref_data = ref.split('/', 2)
483 513 if ref_data[1] in ('tags', 'heads'):
484 514 rev_data.append({
485 515 # NOTE(marcink):
486 516 # we're unable to tell total_commits for git at this point
487 517 # but we set the variable for consistency with GIT
488 518 'total_commits': -1,
489 519 'old_rev': old_rev,
490 520 'new_rev': new_rev,
491 521 'ref': ref,
492 522 'type': ref_data[1],
493 523 'name': ref_data[2],
494 524 })
495 525 return rev_data
496 526
497 527
498 def git_pre_receive(unused_repo_path, revision_lines, env):
528 def git_pre_receive(unused_repo_path, revision_lines, env) -> int:
499 529 """
500 530 Pre push hook.
501 531
502 :param extras: dictionary containing the keys defined in simplevcs
503 :type extras: dict
504
505 532 :return: status code of the hook. 0 for success.
506 :rtype: int
507 533 """
508 534 extras = json.loads(env['RC_SCM_DATA'])
509 535 rev_data = _parse_git_ref_lines(revision_lines)
510 536 if 'push' not in extras['hooks']:
511 537 return 0
512 538 empty_commit_id = '0' * 40
513 539
514 540 detect_force_push = extras.get('detect_force_push')
515 541
516 542 for push_ref in rev_data:
517 543 # store our git-env which holds the temp store
518 544 push_ref['git_env'] = _get_git_env()
519 545 push_ref['pruned_sha'] = ''
520 546 if not detect_force_push:
521 547 # don't check for forced-push when we don't need to
522 548 continue
523 549
524 550 type_ = push_ref['type']
525 551 new_branch = push_ref['old_rev'] == empty_commit_id
526 552 delete_branch = push_ref['new_rev'] == empty_commit_id
527 553 if type_ == 'heads' and not (new_branch or delete_branch):
528 554 old_rev = push_ref['old_rev']
529 555 new_rev = push_ref['new_rev']
530 cmd = [settings.GIT_EXECUTABLE, 'rev-list', old_rev, '^{}'.format(new_rev)]
556 cmd = [settings.GIT_EXECUTABLE, 'rev-list', old_rev, f'^{new_rev}']
531 557 stdout, stderr = subprocessio.run_command(
532 558 cmd, env=os.environ.copy())
533 559 # means we're having some non-reachable objects, this forced push was used
534 560 if stdout:
535 561 push_ref['pruned_sha'] = stdout.splitlines()
536 562
537 563 extras['hook_type'] = 'pre_receive'
538 564 extras['commit_ids'] = rev_data
539 return _call_hook('pre_push', extras, GitMessageWriter())
565
566 stdout = sys.stdout
567 status_code = _call_hook('pre_push', extras, GitMessageWriter(stdout))
568
569 return status_code
540 570
541 571
542 def git_post_receive(unused_repo_path, revision_lines, env):
572 def git_post_receive(unused_repo_path, revision_lines, env) -> int:
543 573 """
544 574 Post push hook.
545 575
546 :param extras: dictionary containing the keys defined in simplevcs
547 :type extras: dict
548
549 576 :return: status code of the hook. 0 for success.
550 :rtype: int
551 577 """
552 578 extras = json.loads(env['RC_SCM_DATA'])
553 579 if 'push' not in extras['hooks']:
554 580 return 0
555 581
556 582 rev_data = _parse_git_ref_lines(revision_lines)
557 583
558 584 git_revs = []
559 585
560 586 # N.B.(skreft): it is ok to just call git, as git before calling a
561 587 # subcommand sets the PATH environment variable so that it points to the
562 588 # correct version of the git executable.
563 589 empty_commit_id = '0' * 40
564 590 branches = []
565 591 tags = []
566 592 for push_ref in rev_data:
567 593 type_ = push_ref['type']
568 594
569 595 if type_ == 'heads':
596 # starting new branch case
570 597 if push_ref['old_rev'] == empty_commit_id:
571 # starting new branch case
572 if push_ref['name'] not in branches:
573 branches.append(push_ref['name'])
598 push_ref_name = push_ref['name']
599
600 if push_ref_name not in branches:
601 branches.append(push_ref_name)
574 602
575 # Fix up head revision if needed
576 cmd = [settings.GIT_EXECUTABLE, 'show', 'HEAD']
577 try:
578 subprocessio.run_command(cmd, env=os.environ.copy())
579 except Exception:
580 cmd = [settings.GIT_EXECUTABLE, 'symbolic-ref', '"HEAD"',
581 '"refs/heads/%s"' % push_ref['name']]
582 print("Setting default branch to %s" % push_ref['name'])
583 subprocessio.run_command(cmd, env=os.environ.copy())
603 need_head_set = ''
604 with Repository(os.getcwd()) as repo:
605 try:
606 repo.head
607 except pygit2.GitError:
608 need_head_set = f'refs/heads/{push_ref_name}'
584 609
585 cmd = [settings.GIT_EXECUTABLE, 'for-each-ref',
586 '--format=%(refname)', 'refs/heads/*']
610 if need_head_set:
611 repo.set_head(need_head_set)
612 print(f"Setting default branch to {push_ref_name}")
613
614 cmd = [settings.GIT_EXECUTABLE, 'for-each-ref', '--format=%(refname)', 'refs/heads/*']
587 615 stdout, stderr = subprocessio.run_command(
588 616 cmd, env=os.environ.copy())
589 heads = stdout
617 heads = safe_str(stdout)
590 618 heads = heads.replace(push_ref['ref'], '')
591 619 heads = ' '.join(head for head
592 620 in heads.splitlines() if head) or '.'
593 621 cmd = [settings.GIT_EXECUTABLE, 'log', '--reverse',
594 622 '--pretty=format:%H', '--', push_ref['new_rev'],
595 623 '--not', heads]
596 624 stdout, stderr = subprocessio.run_command(
597 625 cmd, env=os.environ.copy())
598 git_revs.extend(stdout.splitlines())
626 git_revs.extend(list(map(ascii_str, stdout.splitlines())))
627
628 # delete branch case
599 629 elif push_ref['new_rev'] == empty_commit_id:
600 # delete branch case
601 630 git_revs.append('delete_branch=>%s' % push_ref['name'])
602 631 else:
603 632 if push_ref['name'] not in branches:
604 633 branches.append(push_ref['name'])
605 634
606 635 cmd = [settings.GIT_EXECUTABLE, 'log',
607 636 '{old_rev}..{new_rev}'.format(**push_ref),
608 637 '--reverse', '--pretty=format:%H']
609 638 stdout, stderr = subprocessio.run_command(
610 639 cmd, env=os.environ.copy())
611 git_revs.extend(stdout.splitlines())
640 # we get bytes from stdout, we need str to be consistent
641 log_revs = list(map(ascii_str, stdout.splitlines()))
642 git_revs.extend(log_revs)
643
644 # Pure pygit2 impl. but still 2-3x slower :/
645 # results = []
646 #
647 # with Repository(os.getcwd()) as repo:
648 # repo_new_rev = repo[push_ref['new_rev']]
649 # repo_old_rev = repo[push_ref['old_rev']]
650 # walker = repo.walk(repo_new_rev.id, pygit2.GIT_SORT_TOPOLOGICAL)
651 #
652 # for commit in walker:
653 # if commit.id == repo_old_rev.id:
654 # break
655 # results.append(commit.id.hex)
656 # # reverse the order, can't use GIT_SORT_REVERSE
657 # log_revs = results[::-1]
658
612 659 elif type_ == 'tags':
613 660 if push_ref['name'] not in tags:
614 661 tags.append(push_ref['name'])
615 662 git_revs.append('tag=>%s' % push_ref['name'])
616 663
617 664 extras['hook_type'] = 'post_receive'
618 665 extras['commit_ids'] = git_revs
619 666 extras['new_refs'] = {
620 667 'branches': branches,
621 668 'bookmarks': [],
622 669 'tags': tags,
623 670 }
624 671
672 stdout = sys.stdout
673
625 674 if 'repo_size' in extras['hooks']:
626 675 try:
627 _call_hook('repo_size', extras, GitMessageWriter())
628 except:
676 _call_hook('repo_size', extras, GitMessageWriter(stdout))
677 except Exception:
629 678 pass
630 679
631 return _call_hook('post_push', extras, GitMessageWriter())
680 status_code = _call_hook('post_push', extras, GitMessageWriter(stdout))
681 return status_code
632 682
633 683
634 684 def _get_extras_from_txn_id(path, txn_id):
635 685 extras = {}
636 686 try:
637 687 cmd = [settings.SVNLOOK_EXECUTABLE, 'pget',
638 688 '-t', txn_id,
639 689 '--revprop', path, 'rc-scm-extras']
640 690 stdout, stderr = subprocessio.run_command(
641 691 cmd, env=os.environ.copy())
642 692 extras = json.loads(base64.urlsafe_b64decode(stdout))
643 693 except Exception:
644 694 log.exception('Failed to extract extras info from txn_id')
645 695
646 696 return extras
647 697
648 698
649 699 def _get_extras_from_commit_id(commit_id, path):
650 700 extras = {}
651 701 try:
652 702 cmd = [settings.SVNLOOK_EXECUTABLE, 'pget',
653 703 '-r', commit_id,
654 704 '--revprop', path, 'rc-scm-extras']
655 705 stdout, stderr = subprocessio.run_command(
656 706 cmd, env=os.environ.copy())
657 707 extras = json.loads(base64.urlsafe_b64decode(stdout))
658 708 except Exception:
659 709 log.exception('Failed to extract extras info from commit_id')
660 710
661 711 return extras
662 712
663 713
664 714 def svn_pre_commit(repo_path, commit_data, env):
665 715 path, txn_id = commit_data
666 716 branches = []
667 717 tags = []
668 718
669 719 if env.get('RC_SCM_DATA'):
670 720 extras = json.loads(env['RC_SCM_DATA'])
671 721 else:
672 722 # fallback method to read from TXN-ID stored data
673 723 extras = _get_extras_from_txn_id(path, txn_id)
674 724 if not extras:
675 725 return 0
676 726
677 727 extras['hook_type'] = 'pre_commit'
678 728 extras['commit_ids'] = [txn_id]
679 729 extras['txn_id'] = txn_id
680 730 extras['new_refs'] = {
681 731 'total_commits': 1,
682 732 'branches': branches,
683 733 'bookmarks': [],
684 734 'tags': tags,
685 735 }
686 736
687 737 return _call_hook('pre_push', extras, SvnMessageWriter())
688 738
689 739
690 740 def svn_post_commit(repo_path, commit_data, env):
691 741 """
692 742 commit_data is path, rev, txn_id
693 743 """
694 744 if len(commit_data) == 3:
695 745 path, commit_id, txn_id = commit_data
696 746 elif len(commit_data) == 2:
697 747 log.error('Failed to extract txn_id from commit_data using legacy method. '
698 748 'Some functionality might be limited')
699 749 path, commit_id = commit_data
700 750 txn_id = None
701 751
702 752 branches = []
703 753 tags = []
704 754
705 755 if env.get('RC_SCM_DATA'):
706 756 extras = json.loads(env['RC_SCM_DATA'])
707 757 else:
708 758 # fallback method to read from TXN-ID stored data
709 759 extras = _get_extras_from_commit_id(commit_id, path)
710 760 if not extras:
711 761 return 0
712 762
713 763 extras['hook_type'] = 'post_commit'
714 764 extras['commit_ids'] = [commit_id]
715 765 extras['txn_id'] = txn_id
716 766 extras['new_refs'] = {
717 767 'branches': branches,
718 768 'bookmarks': [],
719 769 'tags': tags,
720 770 'total_commits': 1,
721 771 }
722 772
723 773 if 'repo_size' in extras['hooks']:
724 774 try:
725 775 _call_hook('repo_size', extras, SvnMessageWriter())
726 776 except Exception:
727 777 pass
728 778
729 779 return _call_hook('post_push', extras, SvnMessageWriter())
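For orientation, a hedged sketch of the wrapper script a Subversion repository could use to drive svn_post_commit above. Argument order follows svn's post-commit convention (REPOS REV, plus TXN-NAME on newer svn); the import path and exit-code handling are assumptions:

#!/usr/bin/env python3
import os
import sys

from vcsserver import hooks  # assumed import location

if __name__ == '__main__':
    repo_path = sys.argv[1]                               # REPOS
    commit_id = sys.argv[2]                               # REV
    txn_id = sys.argv[3] if len(sys.argv) > 3 else None   # TXN-NAME, if provided
    commit_data = (repo_path, commit_id, txn_id) if txn_id else (repo_path, commit_id)
    sys.exit(hooks.svn_post_commit(repo_path, commit_data, os.environ))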
@@ -1,723 +1,777 b''
1 1 # RhodeCode VCSServer provides access to different vcs backends via network.
2 # Copyright (C) 2014-2020 RhodeCode GmbH
2 # Copyright (C) 2014-2023 RhodeCode GmbH
3 3 #
4 4 # This program is free software; you can redistribute it and/or modify
5 5 # it under the terms of the GNU General Public License as published by
6 6 # the Free Software Foundation; either version 3 of the License, or
7 7 # (at your option) any later version.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU General Public License
15 15 # along with this program; if not, write to the Free Software Foundation,
16 16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 17
18 import io
18 19 import os
19 20 import sys
20 import base64
21 21 import locale
22 22 import logging
23 23 import uuid
24 24 import time
25 25 import wsgiref.util
26 26 import traceback
27 27 import tempfile
28 28 import psutil
29 29
30 30 from itertools import chain
31 from cStringIO import StringIO
32 31
33 import simplejson as json
34 32 import msgpack
33 import configparser
34
35 35 from pyramid.config import Configurator
36 36 from pyramid.wsgi import wsgiapp
37 from pyramid.compat import configparser
38 37 from pyramid.response import Response
38
39 from vcsserver.base import BytesEnvelope, BinaryEnvelope
40 from vcsserver.lib.rc_json import json
39 41 from vcsserver.config.settings_maker import SettingsMaker
40 from vcsserver.utils import safe_int
42 from vcsserver.str_utils import safe_int
41 43 from vcsserver.lib.statsd_client import StatsdClient
44 from vcsserver.tweens.request_wrapper import get_call_context, get_headers_call_context
42 45
43 46 log = logging.getLogger(__name__)
44 47
45 48 # Due to Mercurial/glibc 2.27 problems we need to detect whether locale settings
46 49 # are causing trouble, "fix" them if they do, and fall back to LC_ALL=C
47 50
48 51 try:
49 52 locale.setlocale(locale.LC_ALL, '')
50 53 except locale.Error as e:
51 54 log.error(
52 55 'LOCALE ERROR: failed to set LC_ALL, fallback to LC_ALL=C, org error: %s', e)
53 56 os.environ['LC_ALL'] = 'C'
54 57
55 58
56 59 import vcsserver
57 60 from vcsserver import remote_wsgi, scm_app, settings, hgpatches
58 61 from vcsserver.git_lfs.app import GIT_LFS_CONTENT_TYPE, GIT_LFS_PROTO_PAT
59 62 from vcsserver.echo_stub import remote_wsgi as remote_wsgi_stub
60 63 from vcsserver.echo_stub.echo_app import EchoApp
61 64 from vcsserver.exceptions import HTTPRepoLocked, HTTPRepoBranchProtected
62 65 from vcsserver.lib.exc_tracking import store_exception
63 66 from vcsserver.server import VcsServer
64 67
68 strict_vcs = True
69
70 git_import_err = None
65 71 try:
66 from vcsserver.git import GitFactory, GitRemote
67 except ImportError:
72 from vcsserver.remote.git import GitFactory, GitRemote
73 except ImportError as e:
68 74 GitFactory = None
69 75 GitRemote = None
76 git_import_err = e
77 if strict_vcs:
78 raise
70 79
80
81 hg_import_err = None
71 82 try:
72 from vcsserver.hg import MercurialFactory, HgRemote
73 except ImportError:
83 from vcsserver.remote.hg import MercurialFactory, HgRemote
84 except ImportError as e:
74 85 MercurialFactory = None
75 86 HgRemote = None
87 hg_import_err = e
88 if strict_vcs:
89 raise
76 90
91
92 svn_import_err = None
77 93 try:
78 from vcsserver.svn import SubversionFactory, SvnRemote
79 except ImportError:
94 from vcsserver.remote.svn import SubversionFactory, SvnRemote
95 except ImportError as e:
80 96 SubversionFactory = None
81 97 SvnRemote = None
98 svn_import_err = e
99 if strict_vcs:
100 raise
82 101
83 102
84 103 def _is_request_chunked(environ):
85 104 stream = environ.get('HTTP_TRANSFER_ENCODING', '') == 'chunked'
86 105 return stream
87 106
88 107
89 108 def log_max_fd():
90 109 try:
91 110 maxfd = psutil.Process().rlimit(psutil.RLIMIT_NOFILE)[1]
92 111 log.info('Max file descriptors value: %s', maxfd)
93 112 except Exception:
94 113 pass
95 114
96 115
97 116 class VCS(object):
98 117 def __init__(self, locale_conf=None, cache_config=None):
99 118 self.locale = locale_conf
100 119 self.cache_config = cache_config
101 120 self._configure_locale()
102 121
103 122 log_max_fd()
104 123
105 124 if GitFactory and GitRemote:
106 125 git_factory = GitFactory()
107 126 self._git_remote = GitRemote(git_factory)
108 127 else:
109 log.info("Git client import failed")
128 log.error("Git client import failed: %s", git_import_err)
110 129
111 130 if MercurialFactory and HgRemote:
112 131 hg_factory = MercurialFactory()
113 132 self._hg_remote = HgRemote(hg_factory)
114 133 else:
115 log.info("Mercurial client import failed")
134 log.error("Mercurial client import failed: %s", hg_import_err)
116 135
117 136 if SubversionFactory and SvnRemote:
118 137 svn_factory = SubversionFactory()
119 138
120 139 # hg factory is used for svn url validation
121 140 hg_factory = MercurialFactory()
122 141 self._svn_remote = SvnRemote(svn_factory, hg_factory=hg_factory)
123 142 else:
124 log.warning("Subversion client import failed")
143 log.error("Subversion client import failed: %s", svn_import_err)
125 144
126 145 self._vcsserver = VcsServer()
127 146
128 147 def _configure_locale(self):
129 148 if self.locale:
130 149 log.info('Setting locale `LC_ALL` to %s', self.locale)
131 150 else:
132 log.info(
133 'Configuring locale subsystem based on environment variables')
151 log.info('Configuring locale subsystem based on environment variables')
134 152 try:
135 153 # If self.locale is the empty string, then the locale
136 154 # module will use the environment variables. See the
137 155 # documentation of the package `locale`.
138 156 locale.setlocale(locale.LC_ALL, self.locale)
139 157
140 158 language_code, encoding = locale.getlocale()
141 159 log.info(
142 160 'Locale set to language code "%s" with encoding "%s".',
143 161 language_code, encoding)
144 162 except locale.Error:
145 log.exception(
146 'Cannot set locale, not configuring the locale system')
163 log.exception('Cannot set locale, not configuring the locale system')
147 164
148 165
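The glibc workaround above reduces to a probe-and-fallback pattern; a standalone sketch mirroring the module-level handler and _configure_locale:

import locale
import os

def ensure_usable_locale():
    # probe the environment-derived locale; newer glibc rejects broken combos
    try:
        locale.setlocale(locale.LC_ALL, '')
    except locale.Error:
        # fall back to the always-available C locale
        os.environ['LC_ALL'] = 'C'
        locale.setlocale(locale.LC_ALL, 'C')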
149 166 class WsgiProxy(object):
150 167 def __init__(self, wsgi):
151 168 self.wsgi = wsgi
152 169
153 170 def __call__(self, environ, start_response):
154 171 input_data = environ['wsgi.input'].read()
155 172 input_data = msgpack.unpackb(input_data)
156 173
157 174 error = None
158 175 try:
159 176 data, status, headers = self.wsgi.handle(
160 177 input_data['environment'], input_data['input_data'],
161 178 *input_data['args'], **input_data['kwargs'])
162 179 except Exception as e:
163 180 data, status, headers = [], None, None
164 181 error = {
165 182 'message': str(e),
166 183 '_vcs_kind': getattr(e, '_vcs_kind', None)
167 184 }
168 185
169 186 start_response(200, {})
170 187 return self._iterator(error, status, headers, data)
171 188
172 189 def _iterator(self, error, status, headers, data):
173 190 initial_data = [
174 191 error,
175 192 status,
176 193 headers,
177 194 ]
178 195
179 196 for d in chain(initial_data, data):
180 197 yield msgpack.packb(d)
181 198
182 199
183 200 def not_found(request):
184 201 return {'status': '404 NOT FOUND'}
185 202
186 203
187 204 class VCSViewPredicate(object):
188 205 def __init__(self, val, config):
189 206 self.remotes = val
190 207
191 208 def text(self):
192 return 'vcs view method = %s' % (self.remotes.keys(),)
209 return f'vcs view method = {list(self.remotes.keys())}'
193 210
194 211 phash = text
195 212
196 213 def __call__(self, context, request):
197 214 """
198 215 View predicate that returns true if given backend is supported by
199 216 defined remotes.
200 217 """
201 218 backend = request.matchdict.get('backend')
202 219 return backend in self.remotes
203 220
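For context, a trimmed sketch of how such a custom predicate is wired into Pyramid; the route, view, and remotes below are placeholders, not the real wiring (which follows in _configure):

from pyramid.config import Configurator

config = Configurator()
config.add_route('vcs', '/{backend}')
config.add_view_predicate('vcs_view', VCSViewPredicate)
# the view only matches when {backend} is one of the predicate's remote keys
config.add_view(lambda request: {'ok': True}, route_name='vcs',
                renderer='json', vcs_view={'git': object(), 'hg': object()})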
204 221
205 222 class HTTPApplication(object):
206 223 ALLOWED_EXCEPTIONS = ('KeyError', 'URLError')
207 224
208 225 remote_wsgi = remote_wsgi
209 226 _use_echo_app = False
210 227
211 228 def __init__(self, settings=None, global_config=None):
212 229
213 230 self.config = Configurator(settings=settings)
214 231 # Init our statsd at very start
215 232 self.config.registry.statsd = StatsdClient.statsd
233 self.config.registry.vcs_call_context = {}
216 234
217 235 self.global_config = global_config
218 236 self.config.include('vcsserver.lib.rc_cache')
237 self.config.include('vcsserver.lib.rc_cache.archive_cache')
219 238
220 239 settings_locale = settings.get('locale', '') or 'en_US.UTF-8'
221 240 vcs = VCS(locale_conf=settings_locale, cache_config=settings)
222 241 self._remotes = {
223 242 'hg': vcs._hg_remote,
224 243 'git': vcs._git_remote,
225 244 'svn': vcs._svn_remote,
226 245 'server': vcs._vcsserver,
227 246 }
228 247 if settings.get('dev.use_echo_app', 'false').lower() == 'true':
229 248 self._use_echo_app = True
230 249 log.warning("Using EchoApp for VCS operations.")
231 250 self.remote_wsgi = remote_wsgi_stub
232 251
233 252 self._configure_settings(global_config, settings)
234 253
235 254 self._configure()
236 255
237 256 def _configure_settings(self, global_config, app_settings):
238 257 """
239 258 Configure the settings module.
240 259 """
241 260 settings_merged = global_config.copy()
242 261 settings_merged.update(app_settings)
243 262
244 263 git_path = app_settings.get('git_path', None)
245 264 if git_path:
246 265 settings.GIT_EXECUTABLE = git_path
247 266 binary_dir = app_settings.get('core.binary_dir', None)
248 267 if binary_dir:
249 268 settings.BINARY_DIR = binary_dir
250 269
251 270 # Store the settings to make them available to other modules.
252 271 vcsserver.PYRAMID_SETTINGS = settings_merged
253 272 vcsserver.CONFIG = settings_merged
254 273
255 274 def _configure(self):
256 275 self.config.add_renderer(name='msgpack', factory=self._msgpack_renderer_factory)
257 276
258 277 self.config.add_route('service', '/_service')
259 278 self.config.add_route('status', '/status')
260 279 self.config.add_route('hg_proxy', '/proxy/hg')
261 280 self.config.add_route('git_proxy', '/proxy/git')
262 281
263 282 # rpc methods
264 283 self.config.add_route('vcs', '/{backend}')
265 284
266 285 # streaming rpc remote methods
267 286 self.config.add_route('vcs_stream', '/{backend}/stream')
268 287
269 288 # vcs operations clone/push as streaming
270 289 self.config.add_route('stream_git', '/stream/git/*repo_name')
271 290 self.config.add_route('stream_hg', '/stream/hg/*repo_name')
272 291
273 292 self.config.add_view(self.status_view, route_name='status', renderer='json')
274 293 self.config.add_view(self.service_view, route_name='service', renderer='msgpack')
275 294
276 295 self.config.add_view(self.hg_proxy(), route_name='hg_proxy')
277 296 self.config.add_view(self.git_proxy(), route_name='git_proxy')
278 297 self.config.add_view(self.vcs_view, route_name='vcs', renderer='msgpack',
279 298 vcs_view=self._remotes)
280 299 self.config.add_view(self.vcs_stream_view, route_name='vcs_stream',
281 300 vcs_view=self._remotes)
282 301
283 302 self.config.add_view(self.hg_stream(), route_name='stream_hg')
284 303 self.config.add_view(self.git_stream(), route_name='stream_git')
285 304
286 305 self.config.add_view_predicate('vcs_view', VCSViewPredicate)
287 306
288 307 self.config.add_notfound_view(not_found, renderer='json')
289 308
290 309 self.config.add_view(self.handle_vcs_exception, context=Exception)
291 310
292 311 self.config.add_tween(
293 312 'vcsserver.tweens.request_wrapper.RequestWrapperTween',
294 313 )
295 314 self.config.add_request_method(
296 315 'vcsserver.lib.request_counter.get_request_counter',
297 316 'request_count')
298 317
299 318 def wsgi_app(self):
300 319 return self.config.make_wsgi_app()
301 320
302 321 def _vcs_view_params(self, request):
303 322 remote = self._remotes[request.matchdict['backend']]
304 323 payload = msgpack.unpackb(request.body, use_list=True)
324
305 325 method = payload.get('method')
306 326 params = payload['params']
307 327 wire = params.get('wire')
308 328 args = params.get('args')
309 329 kwargs = params.get('kwargs')
310 330 context_uid = None
311 331
332 request.registry.vcs_call_context = {
333 'method': method,
334 'repo_name': payload.get('_repo_name'),
335 }
336
312 337 if wire:
313 338 try:
314 339 wire['context'] = context_uid = uuid.UUID(wire['context'])
315 340 except KeyError:
316 341 pass
317 342 args.insert(0, wire)
318 343 repo_state_uid = wire.get('repo_state_uid') if wire else None
319 344
320 345 # NOTE(marcink): trading complexity for slight performance
321 346 if log.isEnabledFor(logging.DEBUG):
322 no_args_methods = [
323
324 ]
325 if method in no_args_methods:
347 # also we SKIP printing out any of those methods' args since they may be excessive
348 just_args_methods = {
349 'commitctx': ('content', 'removed', 'updated'),
350 'commit': ('content', 'removed', 'updated')
351 }
352 if method in just_args_methods:
353 skip_args = just_args_methods[method]
326 354 call_args = ''
355 call_kwargs = {}
356 for k in kwargs:
357 if k in skip_args:
358 # replace our skip key with dummy
359 call_kwargs[k] = f'RemovedParam({k})'
360 else:
361 call_kwargs[k] = kwargs[k]
327 362 else:
328 363 call_args = args[1:]
364 call_kwargs = kwargs
329 365
330 366 log.debug('Method requested:`%s` with args:%s kwargs:%s context_uid: %s, repo_state_uid:%s',
331 method, call_args, kwargs, context_uid, repo_state_uid)
367 method, call_args, call_kwargs, context_uid, repo_state_uid)
332 368
333 369 statsd = request.registry.statsd
334 370 if statsd:
335 371 statsd.incr(
336 372 'vcsserver_method_total', tags=[
337 "method:{}".format(method),
373 f"method:{method}",
338 374 ])
339 375 return payload, remote, method, args, kwargs
340 376
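The parsing in _vcs_view_params implies a client payload of roughly the following shape; the method name and wire values are made up for illustration:

import uuid
import msgpack

payload = {
    'id': 'req-1',
    'method': 'ctx_branch',      # hypothetical remote method
    '_repo_name': 'some/repo',
    'params': {
        'wire': {'context': str(uuid.uuid4()), 'repo_state_uid': 'abc'},
        'args': [],
        'kwargs': {},
    },
}
body = msgpack.packb(payload)    # POSTed to the /{backend} route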
341 377 def vcs_view(self, request):
342 378
343 379 payload, remote, method, args, kwargs = self._vcs_view_params(request)
344 380 payload_id = payload.get('id')
345 381
346 382 try:
347 383 resp = getattr(remote, method)(*args, **kwargs)
348 384 except Exception as e:
349 385 exc_info = list(sys.exc_info())
350 386 exc_type, exc_value, exc_traceback = exc_info
351 387
352 388 org_exc = getattr(e, '_org_exc', None)
353 389 org_exc_name = None
354 390 org_exc_tb = ''
355 391 if org_exc:
356 392 org_exc_name = org_exc.__class__.__name__
357 393 org_exc_tb = getattr(e, '_org_exc_tb', '')
358 394 # replace our "faked" exception with our org
359 395 exc_info[0] = org_exc.__class__
360 396 exc_info[1] = org_exc
361 397
362 398 should_store_exc = True
363 399 if org_exc:
364 400 def get_exc_fqn(_exc_obj):
365 401 module_name = getattr(org_exc.__class__, '__module__', 'UNKNOWN')
366 402 return module_name + '.' + org_exc_name
367 403
368 404 exc_fqn = get_exc_fqn(org_exc)
369 405
370 406 if exc_fqn in ['mercurial.error.RepoLookupError',
371 407 'vcsserver.exceptions.RefNotFoundException']:
372 408 should_store_exc = False
373 409
374 410 if should_store_exc:
375 411 store_exception(id(exc_info), exc_info, request_path=request.path)
376 412
377 413 tb_info = ''.join(
378 414 traceback.format_exception(exc_type, exc_value, exc_traceback))
379 415
380 416 type_ = e.__class__.__name__
381 417 if type_ not in self.ALLOWED_EXCEPTIONS:
382 418 type_ = None
383 419
384 420 resp = {
385 421 'id': payload_id,
386 422 'error': {
387 'message': e.message,
423 'message': str(e),
388 424 'traceback': tb_info,
389 425 'org_exc': org_exc_name,
390 426 'org_exc_tb': org_exc_tb,
391 427 'type': type_
392 428 }
393 429 }
394 430
395 431 try:
396 432 resp['error']['_vcs_kind'] = getattr(e, '_vcs_kind', None)
397 433 except AttributeError:
398 434 pass
399 435 else:
400 436 resp = {
401 437 'id': payload_id,
402 438 'result': resp
403 439 }
404
440 log.debug('Serving data for method %s', method)
405 441 return resp
406 442
407 443 def vcs_stream_view(self, request):
408 444 payload, remote, method, args, kwargs = self._vcs_view_params(request)
409 445 # this method has a stream: marker we remove it here
410 446 method = method.split('stream:')[-1]
411 447 chunk_size = safe_int(payload.get('chunk_size')) or 4096
412 448
413 449 try:
414 450 resp = getattr(remote, method)(*args, **kwargs)
415 451 except Exception as e:
416 452 raise
417 453
418 454 def get_chunked_data(method_resp):
419 stream = StringIO(method_resp)
455 stream = io.BytesIO(method_resp)
420 456 while 1:
421 457 chunk = stream.read(chunk_size)
422 458 if not chunk:
423 459 break
424 460 yield chunk
425 461
426 462 response = Response(app_iter=get_chunked_data(resp))
427 463 response.content_type = 'application/octet-stream'
428 464
429 465 return response
430 466
431 467 def status_view(self, request):
432 468 import vcsserver
433 469 return {'status': 'OK', 'vcsserver_version': vcsserver.__version__,
434 470 'pid': os.getpid()}
435 471
436 472 def service_view(self, request):
437 473 import vcsserver
438 474
439 475 payload = msgpack.unpackb(request.body, use_list=True)
440 476 server_config, app_config = {}, {}
441 477
442 478 try:
443 479 path = self.global_config['__file__']
444 480 config = configparser.RawConfigParser()
445 481
446 482 config.read(path)
447 483
448 484 if config.has_section('server:main'):
449 485 server_config = dict(config.items('server:main'))
450 486 if config.has_section('app:main'):
451 487 app_config = dict(config.items('app:main'))
452 488
453 489 except Exception:
454 490 log.exception('Failed to read .ini file for display')
455 491
456 environ = os.environ.items()
492 environ = list(os.environ.items())
457 493
458 494 resp = {
459 495 'id': payload.get('id'),
460 496 'result': dict(
461 497 version=vcsserver.__version__,
462 498 config=server_config,
463 499 app_config=app_config,
464 500 environ=environ,
465 501 payload=payload,
466 502 )
467 503 }
468 504 return resp
469 505
470 506 def _msgpack_renderer_factory(self, info):
507
471 508 def _render(value, system):
509 bin_type = False
510 res = value.get('result')
511 if isinstance(res, BytesEnvelope):
512 log.debug('Result is wrapped in BytesEnvelope type')
513 bin_type = True
514 elif isinstance(res, BinaryEnvelope):
515 log.debug('Result is wrapped in BinaryEnvelope type')
516 value['result'] = res.val
517 bin_type = True
518
472 519 request = system.get('request')
473 520 if request is not None:
474 521 response = request.response
475 522 ct = response.content_type
476 523 if ct == response.default_content_type:
477 524 response.content_type = 'application/x-msgpack'
478 return msgpack.packb(value)
525 if bin_type:
526 response.content_type = 'application/x-msgpack-bin'
527
528 return msgpack.packb(value, use_bin_type=bin_type)
479 529 return _render
480 530
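A small round-trip sketch of the renderer's binary mode; the stand-in envelope class only mimics the shape of vcsserver.base.BytesEnvelope:

import msgpack

class DemoBytesEnvelope(bytes):  # stand-in for BytesEnvelope
    pass

value = {'id': 'req-1', 'result': DemoBytesEnvelope(b'\x00raw')}
packed = msgpack.packb(value, use_bin_type=True)   # what _render would emit
assert msgpack.unpackb(packed, raw=False)['result'] == b'\x00raw'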
481 531 def set_env_from_config(self, environ, config):
482 532 dict_conf = {}
483 533 try:
484 534 for elem in config:
485 535 if elem[0] == 'rhodecode':
486 536 dict_conf = json.loads(elem[2])
487 537 break
488 538 except Exception:
489 539 log.exception('Failed to fetch SCM CONFIG')
490 540 return
491 541
492 542 username = dict_conf.get('username')
493 543 if username:
494 544 environ['REMOTE_USER'] = username
495 545 # mercurial specific, some extension api rely on this
496 546 environ['HGUSER'] = username
497 547
498 548 ip = dict_conf.get('ip')
499 549 if ip:
500 550 environ['REMOTE_HOST'] = ip
501 551
502 552 if _is_request_chunked(environ):
503 553 # set the compatibility flag for webob
504 554 environ['wsgi.input_terminated'] = True
505 555
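The config iterated by set_env_from_config is a sequence of 3-tuples whose 'rhodecode' entry carries a JSON blob; an illustrative instance with invented values:

config = [
    ('rhodecode', 'RC_SCM_DATA', '{"username": "admin", "ip": "10.0.0.5"}'),
]
environ = {}
# after set_env_from_config(environ, config) one would expect:
#   environ['REMOTE_USER'] == 'admin' and environ['HGUSER'] == 'admin'
#   environ['REMOTE_HOST'] == '10.0.0.5'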
506 556 def hg_proxy(self):
507 557 @wsgiapp
508 558 def _hg_proxy(environ, start_response):
509 559 app = WsgiProxy(self.remote_wsgi.HgRemoteWsgi())
510 560 return app(environ, start_response)
511 561 return _hg_proxy
512 562
513 563 def git_proxy(self):
514 564 @wsgiapp
515 565 def _git_proxy(environ, start_response):
516 566 app = WsgiProxy(self.remote_wsgi.GitRemoteWsgi())
517 567 return app(environ, start_response)
518 568 return _git_proxy
519 569
520 570 def hg_stream(self):
521 571 if self._use_echo_app:
522 572 @wsgiapp
523 573 def _hg_stream(environ, start_response):
524 574 app = EchoApp('fake_path', 'fake_name', None)
525 575 return app(environ, start_response)
526 576 return _hg_stream
527 577 else:
528 578 @wsgiapp
529 579 def _hg_stream(environ, start_response):
530 580 log.debug('http-app: handling hg stream')
531 repo_path = environ['HTTP_X_RC_REPO_PATH']
532 repo_name = environ['HTTP_X_RC_REPO_NAME']
533 packed_config = base64.b64decode(
534 environ['HTTP_X_RC_REPO_CONFIG'])
535 config = msgpack.unpackb(packed_config)
581 call_context = get_headers_call_context(environ)
582
583 repo_path = call_context['repo_path']
584 repo_name = call_context['repo_name']
585 config = call_context['repo_config']
586
536 587 app = scm_app.create_hg_wsgi_app(
537 588 repo_path, repo_name, config)
538 589
539 590 # Consistent path information for hgweb
540 environ['PATH_INFO'] = environ['HTTP_X_RC_PATH_INFO']
591 environ['PATH_INFO'] = call_context['path_info']
541 592 environ['REPO_NAME'] = repo_name
542 593 self.set_env_from_config(environ, config)
543 594
544 595 log.debug('http-app: starting app handler '
545 596 'with %s and process request', app)
546 597 return app(environ, ResponseFilter(start_response))
547 598 return _hg_stream
548 599
549 600 def git_stream(self):
550 601 if self._use_echo_app:
551 602 @wsgiapp
552 603 def _git_stream(environ, start_response):
553 604 app = EchoApp('fake_path', 'fake_name', None)
554 605 return app(environ, start_response)
555 606 return _git_stream
556 607 else:
557 608 @wsgiapp
558 609 def _git_stream(environ, start_response):
559 610 log.debug('http-app: handling git stream')
560 repo_path = environ['HTTP_X_RC_REPO_PATH']
561 repo_name = environ['HTTP_X_RC_REPO_NAME']
562 packed_config = base64.b64decode(
563 environ['HTTP_X_RC_REPO_CONFIG'])
564 config = msgpack.unpackb(packed_config)
611
612 call_context = get_headers_call_context(environ)
565 613
566 environ['PATH_INFO'] = environ['HTTP_X_RC_PATH_INFO']
614 repo_path = call_context['repo_path']
615 repo_name = call_context['repo_name']
616 config = call_context['repo_config']
617
618 environ['PATH_INFO'] = call_context['path_info']
567 619 self.set_env_from_config(environ, config)
568 620
569 621 content_type = environ.get('CONTENT_TYPE', '')
570 622
571 623 path = environ['PATH_INFO']
572 624 is_lfs_request = GIT_LFS_CONTENT_TYPE in content_type
573 625 log.debug(
574 626 'LFS: Detecting if request `%s` is LFS server path based '
575 627 'on content type:`%s`, is_lfs:%s',
576 628 path, content_type, is_lfs_request)
577 629
578 630 if not is_lfs_request:
579 631 # fallback detection by path
580 632 if GIT_LFS_PROTO_PAT.match(path):
581 633 is_lfs_request = True
582 634 log.debug(
583 635 'LFS: fallback detection by path of: `%s`, is_lfs:%s',
584 636 path, is_lfs_request)
585 637
586 638 if is_lfs_request:
587 639 app = scm_app.create_git_lfs_wsgi_app(
588 640 repo_path, repo_name, config)
589 641 else:
590 642 app = scm_app.create_git_wsgi_app(
591 643 repo_path, repo_name, config)
592 644
593 645 log.debug('http-app: starting app handler '
594 646 'with %s and process request', app)
595 647
596 648 return app(environ, start_response)
597 649
598 650 return _git_stream
599 651
600 652 def handle_vcs_exception(self, exception, request):
601 653 _vcs_kind = getattr(exception, '_vcs_kind', '')
654
602 655 if _vcs_kind == 'repo_locked':
603 # Get custom repo-locked status code if present.
604 status_code = request.headers.get('X-RC-Locked-Status-Code')
656 headers_call_context = get_headers_call_context(request.environ)
657 status_code = safe_int(headers_call_context['locked_status_code'])
658
605 659 return HTTPRepoLocked(
606 title=exception.message, status_code=status_code)
660 title=str(exception), status_code=status_code, headers=[('X-Rc-Locked', '1')])
607 661
608 662 elif _vcs_kind == 'repo_branch_protected':
609 663 # Get custom repo-branch-protected status code if present.
610 return HTTPRepoBranchProtected(title=exception.message)
664 return HTTPRepoBranchProtected(
665 title=str(exception), headers=[('X-Rc-Branch-Protection', '1')])
611 666
612 667 exc_info = request.exc_info
613 668 store_exception(id(exc_info), exc_info)
614 669
615 670 traceback_info = 'unavailable'
616 671 if request.exc_info:
617 672 exc_type, exc_value, exc_tb = request.exc_info
618 673 traceback_info = ''.join(traceback.format_exception(exc_type, exc_value, exc_tb))
619 674
620 675 log.error(
621 676 'error occurred handling this request for path: %s, \n tb: %s',
622 677 request.path, traceback_info)
623 678
624 679 statsd = request.registry.statsd
625 680 if statsd:
626 exc_type = "{}.{}".format(exception.__class__.__module__, exception.__class__.__name__)
681 exc_type = f"{exception.__class__.__module__}.{exception.__class__.__name__}"
627 682 statsd.incr('vcsserver_exception_total',
628 tags=["type:{}".format(exc_type)])
683 tags=[f"type:{exc_type}"])
629 684 raise exception
630 685
631 686
632 687 class ResponseFilter(object):
633 688
634 689 def __init__(self, start_response):
635 690 self._start_response = start_response
636 691
637 692 def __call__(self, status, response_headers, exc_info=None):
638 693 headers = tuple(
639 694 (h, v) for h, v in response_headers
640 695 if not wsgiref.util.is_hop_by_hop(h))
641 696 return self._start_response(status, headers, exc_info)
642 697
643 698
644 699 def sanitize_settings_and_apply_defaults(global_config, settings):
645 700 global_settings_maker = SettingsMaker(global_config)
646 701 settings_maker = SettingsMaker(settings)
647 702
648 703 settings_maker.make_setting('logging.autoconfigure', False, parser='bool')
649 704
650 705 logging_conf = os.path.join(os.path.dirname(global_config.get('__file__')), 'logging.ini')
651 706 settings_maker.enable_logging(logging_conf)
652 707
653 708 # Default includes, possible to change as a user
654 709 pyramid_includes = settings_maker.make_setting('pyramid.includes', [], parser='list:newline')
655 710 log.debug("Using the following pyramid.includes: %s", pyramid_includes)
656 711
657 712 settings_maker.make_setting('__file__', global_config.get('__file__'))
658 713
659 714 settings_maker.make_setting('pyramid.default_locale_name', 'en')
660 715 settings_maker.make_setting('locale', 'en_US.UTF-8')
661 716
662 717 settings_maker.make_setting('core.binary_dir', '')
663 718
664 719 temp_store = tempfile.gettempdir()
665 720 default_cache_dir = os.path.join(temp_store, 'rc_cache')
666 721 # save default, cache dir, and use it for all backends later.
667 722 default_cache_dir = settings_maker.make_setting(
668 723 'cache_dir',
669 724 default=default_cache_dir, default_when_empty=True,
670 725 parser='dir:ensured')
671 726
672 727 # exception store cache
673 728 settings_maker.make_setting(
674 729 'exception_tracker.store_path',
675 730 default=os.path.join(default_cache_dir, 'exc_store'), default_when_empty=True,
676 731 parser='dir:ensured'
677 732 )
678 733
679 734 # repo_object cache defaults
680 735 settings_maker.make_setting(
681 736 'rc_cache.repo_object.backend',
682 737 default='dogpile.cache.rc.file_namespace',
683 738 parser='string')
684 739 settings_maker.make_setting(
685 740 'rc_cache.repo_object.expiration_time',
686 741 default=30 * 24 * 60 * 60, # 30days
687 742 parser='int')
688 743 settings_maker.make_setting(
689 744 'rc_cache.repo_object.arguments.filename',
690 745 default=os.path.join(default_cache_dir, 'vcsserver_cache_repo_object.db'),
691 746 parser='string')
692 747
693 748 # statsd
694 749 settings_maker.make_setting('statsd.enabled', False, parser='bool')
695 750 settings_maker.make_setting('statsd.statsd_host', 'statsd-exporter', parser='string')
696 751 settings_maker.make_setting('statsd.statsd_port', 9125, parser='int')
697 752 settings_maker.make_setting('statsd.statsd_prefix', '')
698 753 settings_maker.make_setting('statsd.statsd_ipv6', False, parser='bool')
699 754
700 755 settings_maker.env_expand()
701 756
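A minimal sketch of exercising the sanitizer standalone; the ini path is invented, and the empty cache_dir illustrates the default_when_empty behaviour:

global_config = {'__file__': '/etc/rhodecode/vcsserver.ini'}  # assumed path
settings = {'cache_dir': '', 'statsd.enabled': 'false'}
sanitize_settings_and_apply_defaults(global_config, settings)
# cache_dir now points at <tempdir>/rc_cache, and statsd.enabled is a bool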
702 757
703 758 def main(global_config, **settings):
704 759 start_time = time.time()
705 760 log.info('Pyramid app config starting')
706 761
707 762 if MercurialFactory:
708 763 hgpatches.patch_largefiles_capabilities()
709 764 hgpatches.patch_subrepo_type_mapping()
710 765
711 766 # Fill in and sanitize the defaults & do ENV expansion
712 767 sanitize_settings_and_apply_defaults(global_config, settings)
713 768
714 769 # init and bootstrap StatsdClient
715 770 StatsdClient.setup(settings)
716 771
717 772 pyramid_app = HTTPApplication(settings=settings, global_config=global_config).wsgi_app()
718 773 total_time = time.time() - start_time
719 log.info('Pyramid app `%s` created and configured in %.2fs',
720 getattr(pyramid_app, 'func_name', 'pyramid_app'), total_time)
774 log.info('Pyramid app created and configured in %.2fs', total_time)
721 775 return pyramid_app
722 776
723 777
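A hedged sketch of booting the WSGI app that main returns; the waitress server, port, and ini path are assumptions rather than what deployments necessarily use:

from waitress import serve  # any WSGI server would do

app = main({'__file__': '/etc/rhodecode/vcsserver.ini'},
            **{'locale': 'en_US.UTF-8'})
serve(app, host='127.0.0.1', port=9900)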
@@ -1,16 +1,16 b''
1 1 # RhodeCode VCSServer provides access to different vcs backends via network.
2 # Copyright (C) 2014-2020 RhodeCode GmbH
2 # Copyright (C) 2014-2023 RhodeCode GmbH
3 3 #
4 4 # This program is free software; you can redistribute it and/or modify
5 5 # it under the terms of the GNU General Public License as published by
6 6 # the Free Software Foundation; either version 3 of the License, or
7 7 # (at your option) any later version.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU General Public License
15 15 # along with this program; if not, write to the Free Software Foundation,
16 16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
@@ -1,26 +1,26 b''
1 1 # RhodeCode VCSServer provides access to different vcs backends via network.
2 # Copyright (C) 2014-2020 RhodeCode GmbH
2 # Copyright (C) 2014-2023 RhodeCode GmbH
3 3 #
4 4 # This program is free software; you can redistribute it and/or modify
5 5 # it under the terms of the GNU General Public License as published by
6 6 # the Free Software Foundation; either version 3 of the License, or
7 7 # (at your option) any later version.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU General Public License
15 15 # along with this program; if not, write to the Free Software Foundation,
16 16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 17
18 18 # This package contains non rhodecode licensed packages that are
19 19 # vendored for various reasons
20 20
21 21 import os
22 22 import sys
23 23
24 24 vendor_dir = os.path.abspath(os.path.dirname(__file__))
25 25
26 26 sys.path.append(vendor_dir)
@@ -1,243 +1,243 b''
1 1 '''
2 2 This library is provided to allow standard python logging
3 3 to output log data as JSON formatted strings
4 4 '''
5 5 import logging
6 6 import json
7 7 import re
8 8 from datetime import date, datetime, time, tzinfo, timedelta
9 9 import traceback
10 10 import importlib
11 11
12 12 from inspect import istraceback
13 13
14 14 from collections import OrderedDict
15 15
16 16
17 17 def _inject_req_id(record, *args, **kwargs):
18 18 return record
19 19
20 20
21 21 ExceptionAwareFormatter = logging.Formatter
22 22
23 23
24 24 ZERO = timedelta(0)
25 25 HOUR = timedelta(hours=1)
26 26
27 27
28 28 class UTC(tzinfo):
29 29 """UTC"""
30 30
31 31 def utcoffset(self, dt):
32 32 return ZERO
33 33
34 34 def tzname(self, dt):
35 35 return "UTC"
36 36
37 37 def dst(self, dt):
38 38 return ZERO
39 39
40 40 utc = UTC()
41 41
42 42
43 43 # skip natural LogRecord attributes
44 44 # http://docs.python.org/library/logging.html#logrecord-attributes
45 45 RESERVED_ATTRS = (
46 46 'args', 'asctime', 'created', 'exc_info', 'exc_text', 'filename',
47 47 'funcName', 'levelname', 'levelno', 'lineno', 'module',
48 48 'msecs', 'message', 'msg', 'name', 'pathname', 'process',
49 49 'processName', 'relativeCreated', 'stack_info', 'thread', 'threadName')
50 50
51 51
52 52 def merge_record_extra(record, target, reserved):
53 53 """
54 54 Merges extra attributes from LogRecord object into target dictionary
55 55
56 56 :param record: logging.LogRecord
57 57 :param target: dict to update
58 58 :param reserved: dict or list with reserved keys to skip
59 59 """
60 60 for key, value in record.__dict__.items():
61 61 # this allows to have numeric keys
62 62 if (key not in reserved
63 63 and not (hasattr(key, "startswith")
64 64 and key.startswith('_'))):
65 65 target[key] = value
66 66 return target
67 67
68 68
69 69 class JsonEncoder(json.JSONEncoder):
70 70 """
71 71 A custom encoder extending the default JSONEncoder
72 72 """
73 73
74 74 def default(self, obj):
75 75 if isinstance(obj, (date, datetime, time)):
76 76 return self.format_datetime_obj(obj)
77 77
78 78 elif istraceback(obj):
79 79 return ''.join(traceback.format_tb(obj)).strip()
80 80
81 81 elif type(obj) == Exception \
82 82 or isinstance(obj, Exception) \
83 83 or type(obj) == type:
84 84 return str(obj)
85 85
86 86 try:
87 return super(JsonEncoder, self).default(obj)
87 return super().default(obj)
88 88
89 89 except TypeError:
90 90 try:
91 91 return str(obj)
92 92
93 93 except Exception:
94 94 return None
95 95
96 96 def format_datetime_obj(self, obj):
97 97 return obj.isoformat()
98 98
99 99
100 100 class JsonFormatter(ExceptionAwareFormatter):
101 101 """
102 102 A custom formatter to format logging records as json strings.
103 103 Extra values will be formatted as str() if not supported by
104 104 json default encoder
105 105 """
106 106
107 107 def __init__(self, *args, **kwargs):
108 108 """
109 109 :param json_default: a function for encoding non-standard objects
110 110 as outlined in http://docs.python.org/2/library/json.html
111 111 :param json_encoder: optional custom encoder
112 112 :param json_serializer: a :meth:`json.dumps`-compatible callable
113 113 that will be used to serialize the log record.
114 114 :param json_indent: an optional :meth:`json.dumps`-compatible numeric value
115 115 that will be used to customize the indent of the output json.
116 116 :param prefix: an optional string prefix added at the beginning of
117 117 the formatted string
118 118 :param json_indent: indent parameter for json.dumps
119 119 :param json_ensure_ascii: ensure_ascii parameter for json.dumps
120 120 :param reserved_attrs: an optional list of fields that will be skipped when
121 121 outputting json log record. Defaults to all log record attributes:
122 122 http://docs.python.org/library/logging.html#logrecord-attributes
123 123 :param timestamp: an optional string/boolean field to add a timestamp when
124 124 outputting the json log record. If string is passed, timestamp will be added
125 125 to log record using string as key. If True boolean is passed, timestamp key
126 126 will be "timestamp". Defaults to False/off.
127 127 """
128 128 self.json_default = self._str_to_fn(kwargs.pop("json_default", None))
129 129 self.json_encoder = self._str_to_fn(kwargs.pop("json_encoder", None))
130 130 self.json_serializer = self._str_to_fn(kwargs.pop("json_serializer", json.dumps))
131 131 self.json_indent = kwargs.pop("json_indent", None)
132 132 self.json_ensure_ascii = kwargs.pop("json_ensure_ascii", True)
133 133 self.prefix = kwargs.pop("prefix", "")
134 134 reserved_attrs = kwargs.pop("reserved_attrs", RESERVED_ATTRS)
135 self.reserved_attrs = dict(zip(reserved_attrs, reserved_attrs))
135 self.reserved_attrs = dict(list(zip(reserved_attrs, reserved_attrs)))
136 136 self.timestamp = kwargs.pop("timestamp", True)
137 137
138 138 # super(JsonFormatter, self).__init__(*args, **kwargs)
139 139 logging.Formatter.__init__(self, *args, **kwargs)
140 140 if not self.json_encoder and not self.json_default:
141 141 self.json_encoder = JsonEncoder
142 142
143 143 self._required_fields = self.parse()
144 self._skip_fields = dict(zip(self._required_fields,
145 self._required_fields))
144 self._skip_fields = dict(list(zip(self._required_fields,
145 self._required_fields)))
146 146 self._skip_fields.update(self.reserved_attrs)
147 147
148 148 def _str_to_fn(self, fn_as_str):
149 149 """
150 150 If the argument is not a string, return whatever was passed in.
151 151 Parses a string such as package.module.function, imports the module
152 152 and returns the function.
153 153
154 154 :param fn_as_str: The string to parse. If not a string, return it.
155 155 """
156 156 if not isinstance(fn_as_str, str):
157 157 return fn_as_str
158 158
159 159 path, _, function = fn_as_str.rpartition('.')
160 160 module = importlib.import_module(path)
161 161 return getattr(module, function)
162 162
163 163 def parse(self):
164 164 """
165 165 Parses format string looking for substitutions
166 166
167 167 This method is responsible for returning a list of fields (as strings)
168 168 to include in all log messages.
169 169 """
170 170 standard_formatters = re.compile(r'\((.+?)\)', re.IGNORECASE)
171 171 return standard_formatters.findall(self._fmt)
172 172
173 173 def add_fields(self, log_record, record, message_dict):
174 174 """
175 175 Override this method to implement custom logic for adding fields.
176 176 """
177 177 for field in self._required_fields:
178 178 log_record[field] = record.__dict__.get(field)
179 179 log_record.update(message_dict)
180 180 merge_record_extra(record, log_record, reserved=self._skip_fields)
181 181
182 182 if self.timestamp:
183 183 key = self.timestamp if type(self.timestamp) == str else 'timestamp'
184 184 log_record[key] = datetime.fromtimestamp(record.created, tz=utc)
185 185
186 186 def process_log_record(self, log_record):
187 187 """
188 188 Override this method to implement custom logic
189 189 on the possibly ordered dictionary.
190 190 """
191 191 return log_record
192 192
193 193 def jsonify_log_record(self, log_record):
194 194 """Returns a json string of the log record."""
195 195 return self.json_serializer(log_record,
196 196 default=self.json_default,
197 197 cls=self.json_encoder,
198 198 indent=self.json_indent,
199 199 ensure_ascii=self.json_ensure_ascii)
200 200
201 201 def serialize_log_record(self, log_record):
202 202 """Returns the final representation of the log record."""
203 return "%s%s" % (self.prefix, self.jsonify_log_record(log_record))
203 return "{}{}".format(self.prefix, self.jsonify_log_record(log_record))
204 204
205 205 def format(self, record):
206 206 """Formats a log record and serializes to json"""
207 207 message_dict = {}
208 208 # FIXME: logging.LogRecord.msg and logging.LogRecord.message in typeshed
209 209 # are always type of str. We shouldn't need to override that.
210 210 if isinstance(record.msg, dict):
211 211 message_dict = record.msg
212 212 record.message = None
213 213 else:
214 214 record.message = record.getMessage()
215 215 # only format time if needed
216 216 if "asctime" in self._required_fields:
217 217 record.asctime = self.formatTime(record, self.datefmt)
218 218
219 219 # Display formatted exception, but allow overriding it in the
220 220 # user-supplied dict.
221 221 if record.exc_info and not message_dict.get('exc_info'):
222 222 message_dict['exc_info'] = self.formatException(record.exc_info)
223 223 if not message_dict.get('exc_info') and record.exc_text:
224 224 message_dict['exc_info'] = record.exc_text
225 225 # Display formatted record of stack frames
226 226 # default format is a string returned from :func:`traceback.print_stack`
227 227 try:
228 228 if record.stack_info and not message_dict.get('stack_info'):
229 229 message_dict['stack_info'] = self.formatStack(record.stack_info)
230 230 except AttributeError:
231 231 # Python2.7 doesn't have stack_info.
232 232 pass
233 233
234 234 try:
235 235 log_record = OrderedDict()
236 236 except NameError:
237 237 log_record = {}
238 238
239 239 _inject_req_id(record, with_prefix=False)
240 240 self.add_fields(log_record, record, message_dict)
241 241 log_record = self.process_log_record(log_record)
242 242
243 243 return self.serialize_log_record(log_record)
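A usage sketch for the formatter above; the format string and the extra field are illustrative:

import logging

handler = logging.StreamHandler()
handler.setFormatter(JsonFormatter('%(levelname)s %(name)s %(message)s'))
logger = logging.getLogger('demo')
logger.addHandler(handler)
logger.setLevel(logging.INFO)
logger.info('request served', extra={'request_id': 'abc123'})
# emits one JSON object per record, e.g.
# {"levelname": "INFO", "name": "demo", "message": "request served",
#  "request_id": "abc123", "timestamp": "2023-01-01T00:00:00+00:00"}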
@@ -1,390 +1,384 b''
1 1 import sys
2 2 import threading
3 3 import weakref
4 4 from base64 import b64encode
5 5 from logging import getLogger
6 6 from os import urandom
7 7
8 8 from redis import StrictRedis
9 9
10 10 __version__ = '3.7.0'
11 11
12 12 loggers = {
13 13 k: getLogger("vcsserver." + ".".join((__name__, k)))
14 14 for k in [
15 15 "acquire",
16 16 "refresh.thread.start",
17 17 "refresh.thread.stop",
18 18 "refresh.thread.exit",
19 19 "refresh.start",
20 20 "refresh.shutdown",
21 21 "refresh.exit",
22 22 "release",
23 23 ]
24 24 }
25 25
26 PY3 = sys.version_info[0] == 3
27
28 if PY3:
29 text_type = str
30 binary_type = bytes
31 else:
32 text_type = unicode # noqa
33 binary_type = str
26 text_type = str
27 binary_type = bytes
34 28
35 29
36 30 # Check if the id match. If not, return an error code.
37 31 UNLOCK_SCRIPT = b"""
38 32 if redis.call("get", KEYS[1]) ~= ARGV[1] then
39 33 return 1
40 34 else
41 35 redis.call("del", KEYS[2])
42 36 redis.call("lpush", KEYS[2], 1)
43 37 redis.call("pexpire", KEYS[2], ARGV[2])
44 38 redis.call("del", KEYS[1])
45 39 return 0
46 40 end
47 41 """
48 42
49 43 # Covers both cases when key doesn't exist and doesn't equal to lock's id
50 44 EXTEND_SCRIPT = b"""
51 45 if redis.call("get", KEYS[1]) ~= ARGV[1] then
52 46 return 1
53 47 elseif redis.call("ttl", KEYS[1]) < 0 then
54 48 return 2
55 49 else
56 50 redis.call("expire", KEYS[1], ARGV[2])
57 51 return 0
58 52 end
59 53 """
60 54
61 55 RESET_SCRIPT = b"""
62 56 redis.call('del', KEYS[2])
63 57 redis.call('lpush', KEYS[2], 1)
64 58 redis.call('pexpire', KEYS[2], ARGV[2])
65 59 return redis.call('del', KEYS[1])
66 60 """
67 61
68 62 RESET_ALL_SCRIPT = b"""
69 63 local locks = redis.call('keys', 'lock:*')
70 64 local signal
71 65 for _, lock in pairs(locks) do
72 66 signal = 'lock-signal:' .. string.sub(lock, 6)
73 67 redis.call('del', signal)
74 68 redis.call('lpush', signal, 1)
75 69 redis.call('expire', signal, 1)
76 70 redis.call('del', lock)
77 71 end
78 72 return #locks
79 73 """
80 74
81 75
82 76 class AlreadyAcquired(RuntimeError):
83 77 pass
84 78
85 79
86 80 class NotAcquired(RuntimeError):
87 81 pass
88 82
89 83
90 84 class AlreadyStarted(RuntimeError):
91 85 pass
92 86
93 87
94 88 class TimeoutNotUsable(RuntimeError):
95 89 pass
96 90
97 91
98 92 class InvalidTimeout(RuntimeError):
99 93 pass
100 94
101 95
102 96 class TimeoutTooLarge(RuntimeError):
103 97 pass
104 98
105 99
106 100 class NotExpirable(RuntimeError):
107 101 pass
108 102
109 103
110 104 class Lock(object):
111 105 """
112 106 A Lock context manager implemented via redis SETNX/BLPOP.
113 107 """
114 108 unlock_script = None
115 109 extend_script = None
116 110 reset_script = None
117 111 reset_all_script = None
118 112
119 113 def __init__(self, redis_client, name, expire=None, id=None, auto_renewal=False, strict=True, signal_expire=1000):
120 114 """
121 115 :param redis_client:
122 116 An instance of :class:`~StrictRedis`.
123 117 :param name:
124 118 The name (redis key) the lock should have.
125 119 :param expire:
126 120 The lock expiry time in seconds. If left at the default (None)
127 121 the lock will not expire.
128 122 :param id:
129 123 The ID (redis value) the lock should have. A random value is
130 124 generated when left at the default.
131 125
132 126 Note that if you specify this then the lock is marked as "held". Acquires
133 127 won't be possible.
134 128 :param auto_renewal:
135 129 If set to ``True``, Lock will automatically renew the lock so that it
136 130 doesn't expire for as long as the lock is held (acquire() called
137 131 or running in a context manager).
138 132
139 133 Implementation note: Renewal will happen using a daemon thread with
140 134 an interval of ``expire*2/3``. If wishing to use a different renewal
141 135 time, subclass Lock, call ``super().__init__()`` then set
142 136 ``self._lock_renewal_interval`` to your desired interval.
143 137 :param strict:
144 138 If set ``True`` then the ``redis_client`` needs to be an instance of ``redis.StrictRedis``.
145 139 :param signal_expire:
146 140 Advanced option to override signal list expiration in milliseconds. Increase it for very slow clients. Default: ``1000``.
147 141 """
148 142 if strict and not isinstance(redis_client, StrictRedis):
149 143 raise ValueError("redis_client must be instance of StrictRedis. "
150 144 "Use strict=False if you know what you're doing.")
151 145 if auto_renewal and expire is None:
152 146 raise ValueError("Expire may not be None when auto_renewal is set")
153 147
154 148 self._client = redis_client
155 149
156 150 if expire:
157 151 expire = int(expire)
158 152 if expire < 0:
159 153 raise ValueError("A negative expire is not acceptable.")
160 154 else:
161 155 expire = None
162 156 self._expire = expire
163 157
164 158 self._signal_expire = signal_expire
165 159 if id is None:
166 160 self._id = b64encode(urandom(18)).decode('ascii')
167 161 elif isinstance(id, binary_type):
168 162 try:
169 163 self._id = id.decode('ascii')
170 164 except UnicodeDecodeError:
171 165 self._id = b64encode(id).decode('ascii')
172 166 elif isinstance(id, text_type):
173 167 self._id = id
174 168 else:
175 169 raise TypeError("Incorrect type for `id`. Must be bytes/str not %s." % type(id))
176 170 self._name = 'lock:' + name
177 171 self._signal = 'lock-signal:' + name
178 172 self._lock_renewal_interval = (float(expire) * 2 / 3
179 173 if auto_renewal
180 174 else None)
181 175 self._lock_renewal_thread = None
182 176
183 177 self.register_scripts(redis_client)
184 178
185 179 @classmethod
186 180 def register_scripts(cls, redis_client):
187 181 global reset_all_script
188 182 if reset_all_script is None:
189 183 reset_all_script = redis_client.register_script(RESET_ALL_SCRIPT)
190 184 cls.unlock_script = redis_client.register_script(UNLOCK_SCRIPT)
191 185 cls.extend_script = redis_client.register_script(EXTEND_SCRIPT)
192 186 cls.reset_script = redis_client.register_script(RESET_SCRIPT)
193 187 cls.reset_all_script = redis_client.register_script(RESET_ALL_SCRIPT)
194 188
195 189 @property
196 190 def _held(self):
197 191 return self.id == self.get_owner_id()
198 192
199 193 def reset(self):
200 194 """
201 195 Forcibly deletes the lock. Use this with care.
202 196 """
203 197 self.reset_script(client=self._client, keys=(self._name, self._signal), args=(self.id, self._signal_expire))
204 198
205 199 @property
206 200 def id(self):
207 201 return self._id
208 202
209 203 def get_owner_id(self):
210 204 owner_id = self._client.get(self._name)
211 205 if isinstance(owner_id, binary_type):
212 206 owner_id = owner_id.decode('ascii', 'replace')
213 207 return owner_id
214 208
215 209 def acquire(self, blocking=True, timeout=None):
216 210 """
217 211 :param blocking:
218 212 Boolean value specifying whether lock should be blocking or not.
219 213 :param timeout:
220 214 An integer value specifying the maximum number of seconds to block.
221 215 """
222 216 logger = loggers["acquire"]
223 217
224 218 logger.debug("Getting blocking: %s acquire on %r ...", blocking, self._name)
225 219
226 220 if self._held:
227 221 owner_id = self.get_owner_id()
228 raise AlreadyAcquired("Already acquired from this Lock instance. Lock id: {}".format(owner_id))
222 raise AlreadyAcquired(f"Already acquired from this Lock instance. Lock id: {owner_id}")
229 223
230 224 if not blocking and timeout is not None:
231 225 raise TimeoutNotUsable("Timeout cannot be used if blocking=False")
232 226
233 227 if timeout:
234 228 timeout = int(timeout)
235 229 if timeout < 0:
236 230 raise InvalidTimeout("Timeout (%d) cannot be less than or equal to 0" % timeout)
237 231
238 232 if self._expire and not self._lock_renewal_interval and timeout > self._expire:
239 233 raise TimeoutTooLarge("Timeout (%d) cannot be greater than expire (%d)" % (timeout, self._expire))
240 234
241 235 busy = True
242 236 blpop_timeout = timeout or self._expire or 0
243 237 timed_out = False
244 238 while busy:
245 239 busy = not self._client.set(self._name, self._id, nx=True, ex=self._expire)
246 240 if busy:
247 241 if timed_out:
248 242 return False
249 243 elif blocking:
250 244 timed_out = not self._client.blpop(self._signal, blpop_timeout) and timeout
251 245 else:
252 246 logger.warning("Failed to get %r.", self._name)
253 247 return False
254 248
255 249 logger.debug("Got lock for %r.", self._name)
256 250 if self._lock_renewal_interval is not None:
257 251 self._start_lock_renewer()
258 252 return True
259 253
260 254 def extend(self, expire=None):
261 255 """Extends expiration time of the lock.
262 256
263 257 :param expire:
264 258 New expiration time. If ``None`` - `expire` provided during
265 259 lock initialization will be taken.
266 260 """
267 261 if expire:
268 262 expire = int(expire)
269 263 if expire < 0:
270 264 raise ValueError("A negative expire is not acceptable.")
271 265 elif self._expire is not None:
272 266 expire = self._expire
273 267 else:
274 268 raise TypeError(
275 269 "To extend a lock 'expire' must be provided as an "
276 270 "argument to extend() method or at initialization time."
277 271 )
278 272
279 273 error = self.extend_script(client=self._client, keys=(self._name, self._signal), args=(self._id, expire))
280 274 if error == 1:
281 275 raise NotAcquired("Lock %s is not acquired or it already expired." % self._name)
282 276 elif error == 2:
283 277 raise NotExpirable("Lock %s has no assigned expiration time" % self._name)
284 278 elif error:
285 279 raise RuntimeError("Unsupported error code %s from EXTEND script" % error)
286 280
287 281 @staticmethod
288 282 def _lock_renewer(lockref, interval, stop):
289 283 """
290 284 Renew the lock key in redis every `interval` seconds for as long
291 285 as `self._lock_renewal_thread.should_exit` is False.
292 286 """
293 287 while not stop.wait(timeout=interval):
294 288 loggers["refresh.thread.start"].debug("Refreshing lock")
295 289 lock = lockref()
296 290 if lock is None:
297 291 loggers["refresh.thread.stop"].debug(
298 292 "The lock no longer exists, stopping lock refreshing"
299 293 )
300 294 break
301 295 lock.extend(expire=lock._expire)
302 296 del lock
303 297 loggers["refresh.thread.exit"].debug("Exit requested, stopping lock refreshing")
304 298
305 299 def _start_lock_renewer(self):
306 300 """
307 301 Starts the lock refresher thread.
308 302 """
309 303 if self._lock_renewal_thread is not None:
310 304 raise AlreadyStarted("Lock refresh thread already started")
311 305
312 306 loggers["refresh.start"].debug(
313 307 "Starting thread to refresh lock every %s seconds",
314 308 self._lock_renewal_interval
315 309 )
316 310 self._lock_renewal_stop = threading.Event()
317 311 self._lock_renewal_thread = threading.Thread(
318 312 group=None,
319 313 target=self._lock_renewer,
320 314 kwargs={'lockref': weakref.ref(self),
321 315 'interval': self._lock_renewal_interval,
322 316 'stop': self._lock_renewal_stop}
323 317 )
324 318 self._lock_renewal_thread.setDaemon(True)
325 319 self._lock_renewal_thread.start()
326 320
327 321 def _stop_lock_renewer(self):
328 322 """
329 323 Stop the lock renewer.
330 324
331 325 This signals the renewal thread and waits for its exit.
332 326 """
333 327 if self._lock_renewal_thread is None or not self._lock_renewal_thread.is_alive():
334 328 return
335 329 loggers["refresh.shutdown"].debug("Signalling the lock refresher to stop")
336 330 self._lock_renewal_stop.set()
337 331 self._lock_renewal_thread.join()
338 332 self._lock_renewal_thread = None
339 333 loggers["refresh.exit"].debug("Lock refresher has stopped")
340 334
341 335 def __enter__(self):
342 336 acquired = self.acquire(blocking=True)
343 337 assert acquired, "Lock wasn't acquired, but blocking=True"
344 338 return self
345 339
346 340 def __exit__(self, exc_type=None, exc_value=None, traceback=None):
347 341 self.release()
348 342
349 343 def release(self):
350 344 """Releases the lock, that was acquired with the same object.
351 345
352 346 .. note::
353 347
354 348 If you want to release a lock that you acquired in a different place you have two choices:
355 349
356 350 * Use ``Lock("name", id=id_from_other_place).release()``
357 351 * Use ``Lock("name").reset()``
358 352 """
359 353 if self._lock_renewal_thread is not None:
360 354 self._stop_lock_renewer()
361 355 loggers["release"].debug("Releasing %r.", self._name)
362 356 error = self.unlock_script(client=self._client, keys=(self._name, self._signal), args=(self._id, self._signal_expire))
363 357 if error == 1:
364 358 raise NotAcquired("Lock %s is not acquired or it already expired." % self._name)
365 359 elif error:
366 360 raise RuntimeError("Unsupported error code %s from EXTEND script." % error)
367 361
368 362 def locked(self):
369 363 """
370 364 Return true if the lock is acquired.
371 365
372 366 Checks whether a lock with the same name already exists. This method returns true even if
373 367 the lock has a different id.
374 368 """
375 369 return self._client.exists(self._name) == 1
376 370
377 371
378 372 reset_all_script = None
379 373
380 374
381 375 def reset_all(redis_client):
382 376 """
383 377 Forcibly deletes all locks if any remain (for example after a crash). Use this with care.
384 378
385 379 :param redis_client:
386 380 An instance of :class:`~StrictRedis`.
387 381 """
388 382 Lock.register_scripts(redis_client)
389 383
390 384 reset_all_script(client=redis_client) # noqa
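A usage sketch for the Lock class above; the redis URL and lock name are assumptions:

from redis import StrictRedis

client = StrictRedis.from_url('redis://localhost:6379/0')
with Lock(client, 'repo:some/repo', expire=60, auto_renewal=True):
    pass  # critical section: a daemon thread re-extends the key every ~40s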
@@ -1,52 +1,50 b''
1 from __future__ import absolute_import, division, unicode_literals
2
3 1 import logging
4 2
5 3 from .stream import TCPStatsClient, UnixSocketStatsClient # noqa
6 4 from .udp import StatsClient # noqa
7 5
8 6 HOST = 'localhost'
9 7 PORT = 8125
10 8 IPV6 = False
11 9 PREFIX = None
12 10 MAXUDPSIZE = 512
13 11
14 12 log = logging.getLogger('rhodecode.statsd')
15 13
16 14
17 15 def statsd_config(config, prefix='statsd.'):
18 16 _config = {}
19 17 for key in config.keys():
20 18 if key.startswith(prefix):
21 19 _config[key[len(prefix):]] = config[key]
22 20 return _config
23 21
24 22
25 23 def client_from_config(configuration, prefix='statsd.', **kwargs):
26 24 from pyramid.settings import asbool
27 25
28 26 _config = statsd_config(configuration, prefix)
29 27 statsd_enabled = asbool(_config.pop('enabled', False))
30 28 if not statsd_enabled:
31 29 log.debug('statsd client not enabled by statsd.enabled = flag, skipping...')
32 30 return
33 31
34 32 host = _config.pop('statsd_host', HOST)
35 33 port = _config.pop('statsd_port', PORT)
36 34 prefix = _config.pop('statsd_prefix', PREFIX)
37 35 maxudpsize = _config.pop('statsd_maxudpsize', MAXUDPSIZE)
38 36 ipv6 = asbool(_config.pop('statsd_ipv6', IPV6))
39 37 log.debug('configured statsd client %s:%s', host, port)
40 38
41 39 try:
42 40 client = StatsClient(
43 41 host=host, port=port, prefix=prefix, maxudpsize=maxudpsize, ipv6=ipv6)
44 42 except Exception:
45 43 log.exception('StatsD is enabled, but failed to connect to statsd server, fallback: disable statsd')
46 44 client = None
47 45
48 46 return client
49 47
50 48
51 49 def get_statsd_client(request):
52 50 return client_from_config(request.registry.settings)
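An illustrative settings dict for client_from_config; the keys follow the 'statsd.' prefix stripped by statsd_config above, and the values are invented:

configuration = {
    'statsd.enabled': 'true',
    'statsd.statsd_host': '127.0.0.1',
    'statsd.statsd_port': '8125',
    'statsd.statsd_prefix': 'vcsserver',
}
client = client_from_config(configuration)
if client is not None:  # None when disabled or when the connect attempt failed
    client.incr('demo_counter')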
@@ -1,156 +1,154 b''
1 from __future__ import absolute_import, division, unicode_literals
2
3 1 import re
4 2 import random
5 3 from collections import deque
6 4 from datetime import timedelta
7 5 from repoze.lru import lru_cache
8 6
9 7 from .timer import Timer
10 8
11 9 TAG_INVALID_CHARS_RE = re.compile(
12 10 r"[^\w\d_\-:/\.]",
13 11 #re.UNICODE
14 12 )
15 13 TAG_INVALID_CHARS_SUBS = "_"
16 14
17 15 # we save and expose methods called by statsd for discovery
18 16 buckets_dict = {
19 17
20 18 }
21 19
22 20
23 21 @lru_cache(maxsize=500)
24 22 def _normalize_tags_with_cache(tag_list):
25 23 return [TAG_INVALID_CHARS_RE.sub(TAG_INVALID_CHARS_SUBS, tag) for tag in tag_list]
26 24
27 25
28 26 def normalize_tags(tag_list):
29 27 # We have to turn our input tag list into a non-mutable tuple for it to
30 28 # be hashable (and thus usable) by the @lru_cache decorator.
31 29 return _normalize_tags_with_cache(tuple(tag_list))
32 30
33 31
34 32 class StatsClientBase(object):
35 33 """A Base class for various statsd clients."""
36 34
37 35 def close(self):
38 36 """Used to close and clean up any underlying resources."""
39 37 raise NotImplementedError()
40 38
41 39 def _send(self):
42 40 raise NotImplementedError()
43 41
44 42 def pipeline(self):
45 43 raise NotImplementedError()
46 44
47 45 def timer(self, stat, rate=1, tags=None, auto_send=True):
48 46 """
49 47 statsd = StatsdClient.statsd
50 48 with statsd.timer('bucket_name', auto_send=True) as tmr:
51 49 # This block will be timed.
52 for i in xrange(0, 100000):
50 for i in range(0, 100000):
53 51 i ** 2
54 52 # you can access time here...
55 53 elapsed_ms = tmr.ms
56 54 """
57 55 return Timer(self, stat, rate, tags, auto_send=auto_send)
58 56
59 57 def timing(self, stat, delta, rate=1, tags=None, use_decimals=True):
60 58 """
61 59 Send new timing information.
62 60
63 61 `delta` can be either a number of milliseconds or a timedelta.
64 62 """
65 63 if isinstance(delta, timedelta):
66 64 # Convert timedelta to number of milliseconds.
67 65 delta = delta.total_seconds() * 1000.
68 66 if use_decimals:
69 67 fmt = '%0.6f|ms'
70 68 else:
71 69 fmt = '%s|ms'
72 70 self._send_stat(stat, fmt % delta, rate, tags)
73 71
74 72 def incr(self, stat, count=1, rate=1, tags=None):
75 73 """Increment a stat by `count`."""
76 74 self._send_stat(stat, '%s|c' % count, rate, tags)
77 75
78 76 def decr(self, stat, count=1, rate=1, tags=None):
79 77 """Decrement a stat by `count`."""
80 78 self.incr(stat, -count, rate, tags)
81 79
82 80 def gauge(self, stat, value, rate=1, delta=False, tags=None):
83 81 """Set a gauge value."""
84 82 if value < 0 and not delta:
85 83 if rate < 1:
86 84 if random.random() > rate:
87 85 return
88 86 with self.pipeline() as pipe:
89 87 pipe._send_stat(stat, '0|g', 1)
90 88 pipe._send_stat(stat, '%s|g' % value, 1)
91 89 else:
92 90 prefix = '+' if delta and value >= 0 else ''
93 91 self._send_stat(stat, '%s%s|g' % (prefix, value), rate, tags)
94 92
95 93 def set(self, stat, value, rate=1):
96 94 """Set a set value."""
97 95 self._send_stat(stat, '%s|s' % value, rate)
98 96
99 97 def histogram(self, stat, value, rate=1, tags=None):
100 98 """Set a histogram"""
101 99 self._send_stat(stat, '%s|h' % value, rate, tags)
102 100
103 101 def _send_stat(self, stat, value, rate, tags=None):
104 102 self._after(self._prepare(stat, value, rate, tags))
105 103
106 104 def _prepare(self, stat, value, rate, tags=None):
107 105 global buckets_dict
108 106 buckets_dict[stat] = 1
109 107
110 108 if rate < 1:
111 109 if random.random() > rate:
112 110 return
113 111 value = '%s|@%s' % (value, rate)
114 112
115 113 if self._prefix:
116 114 stat = '%s.%s' % (self._prefix, stat)
117 115
118 116 res = '%s:%s%s' % (
119 117 stat,
120 118 value,
121 119 ("|#" + ",".join(normalize_tags(tags))) if tags else "",
122 120 )
123 121 return res
124 122
125 123 def _after(self, data):
126 124 if data:
127 125 self._send(data)
128 126
129 127
130 128 class PipelineBase(StatsClientBase):
131 129
132 130 def __init__(self, client):
133 131 self._client = client
134 132 self._prefix = client._prefix
135 133 self._stats = deque()
136 134
137 135 def _send(self):
138 136 raise NotImplementedError()
139 137
140 138 def _after(self, data):
141 139 if data is not None:
142 140 self._stats.append(data)
143 141
144 142 def __enter__(self):
145 143 return self
146 144
147 145 def __exit__(self, typ, value, tb):
148 146 self.send()
149 147
150 148 def send(self):
151 149 if not self._stats:
152 150 return
153 151 self._send()
154 152
155 153 def pipeline(self):
156 154 return self.__class__(self)
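
A short usage sketch of the pipeline protocol defined above, assuming `client` is an already-configured concrete StatsClient: `_after()` queues each prepared stat in the deque, and leaving the `with` block flushes them in one send().

    with client.pipeline() as pipe:
        pipe.incr('jobs.started')             # queued, not sent yet
        pipe.timing('jobs.duration', 12.5)    # milliseconds
        pipe.gauge('jobs.queue_depth', 42)
    # __exit__ calls send(), emitting all buffered stats at once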
@@ -1,75 +1,73 b''
1 from __future__ import absolute_import, division, unicode_literals
2
3 1 import socket
4 2
5 3 from .base import StatsClientBase, PipelineBase
6 4
7 5
8 6 class StreamPipeline(PipelineBase):
9 7 def _send(self):
10 8 self._client._after('\n'.join(self._stats))
11 9 self._stats.clear()
12 10
13 11
14 12 class StreamClientBase(StatsClientBase):
15 13 def connect(self):
16 14 raise NotImplementedError()
17 15
18 16 def close(self):
19 17 if self._sock and hasattr(self._sock, 'close'):
20 18 self._sock.close()
21 19 self._sock = None
22 20
23 21 def reconnect(self):
24 22 self.close()
25 23 self.connect()
26 24
27 25 def pipeline(self):
28 26 return StreamPipeline(self)
29 27
30 28 def _send(self, data):
31 29 """Send data to statsd."""
32 30 if not self._sock:
33 31 self.connect()
34 32 self._do_send(data)
35 33
36 34 def _do_send(self, data):
37 35 self._sock.sendall(data.encode('ascii') + b'\n')
38 36
39 37
40 38 class TCPStatsClient(StreamClientBase):
41 39 """TCP version of StatsClient."""
42 40
43 41 def __init__(self, host='localhost', port=8125, prefix=None,
44 42 timeout=None, ipv6=False):
45 43 """Create a new client."""
46 44 self._host = host
47 45 self._port = port
48 46 self._ipv6 = ipv6
49 47 self._timeout = timeout
50 48 self._prefix = prefix
51 49 self._sock = None
52 50
53 51 def connect(self):
54 52 fam = socket.AF_INET6 if self._ipv6 else socket.AF_INET
55 53 family, _, _, _, addr = socket.getaddrinfo(
56 54 self._host, self._port, fam, socket.SOCK_STREAM)[0]
57 55 self._sock = socket.socket(family, socket.SOCK_STREAM)
58 56 self._sock.settimeout(self._timeout)
59 57 self._sock.connect(addr)
60 58
61 59
62 60 class UnixSocketStatsClient(StreamClientBase):
63 61 """Unix domain socket version of StatsClient."""
64 62
65 63 def __init__(self, socket_path, prefix=None, timeout=None):
66 64 """Create a new client."""
67 65 self._socket_path = socket_path
68 66 self._timeout = timeout
69 67 self._prefix = prefix
70 68 self._sock = None
71 69
72 70 def connect(self):
73 71 self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
74 72 self._sock.settimeout(self._timeout)
75 73 self._sock.connect(self._socket_path)
@@ -1,75 +1,66 b''
1 from __future__ import absolute_import, division, unicode_literals
2
3 1 import functools
4
5 # Use timer that's not susceptible to time of day adjustments.
6 try:
7 # perf_counter is only present on Py3.3+
8 from time import perf_counter as time_now
9 except ImportError:
10 # fall back to using time
11 from time import time as time_now
2 from time import perf_counter as time_now
12 3
13 4
14 5 def safe_wraps(wrapper, *args, **kwargs):
15 6 """Safely wraps partial functions."""
16 7 while isinstance(wrapper, functools.partial):
17 8 wrapper = wrapper.func
18 9 return functools.wraps(wrapper, *args, **kwargs)
19 10
20 11
21 12 class Timer(object):
22 13 """A context manager/decorator for statsd.timing()."""
23 14
24 15 def __init__(self, client, stat, rate=1, tags=None, use_decimals=True, auto_send=True):
25 16 self.client = client
26 17 self.stat = stat
27 18 self.rate = rate
28 19 self.tags = tags
29 20 self.ms = None
30 21 self._sent = False
31 22 self._start_time = None
32 23 self.use_decimals = use_decimals
33 24 self.auto_send = auto_send
34 25
35 26 def __call__(self, f):
36 27 """Thread-safe timing function decorator."""
37 28 @safe_wraps(f)
38 29 def _wrapped(*args, **kwargs):
39 30 start_time = time_now()
40 31 try:
41 32 return f(*args, **kwargs)
42 33 finally:
43 34 elapsed_time_ms = 1000.0 * (time_now() - start_time)
44 35 self.client.timing(self.stat, elapsed_time_ms, self.rate, self.tags, self.use_decimals)
45 36 self._sent = True
46 37 return _wrapped
47 38
48 39 def __enter__(self):
49 40 return self.start()
50 41
51 42 def __exit__(self, typ, value, tb):
52 43 self.stop(send=self.auto_send)
53 44
54 45 def start(self):
55 46 self.ms = None
56 47 self._sent = False
57 48 self._start_time = time_now()
58 49 return self
59 50
60 51 def stop(self, send=True):
61 52 if self._start_time is None:
62 53 raise RuntimeError('Timer has not started.')
63 54 dt = time_now() - self._start_time
64 55 self.ms = 1000.0 * dt # Convert to milliseconds.
65 56 if send:
66 57 self.send()
67 58 return self
68 59
69 60 def send(self):
70 61 if self.ms is None:
71 62 raise RuntimeError('No data recorded.')
72 63 if self._sent:
73 64 raise RuntimeError('Already sent data.')
74 65 self._sent = True
75 66 self.client.timing(self.stat, self.ms, self.rate, self.tags, self.use_decimals)
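
To make the three Timer entry points concrete, a sketch assuming `statsd` is a configured client; the function and bucket names are placeholders:

    @statsd.timer('render_page')            # decorator: times every call
    def render_page():
        ...

    with statsd.timer('db_query') as tmr:   # context manager, auto_send=True
        run_query()
    print(tmr.ms)                           # elapsed milliseconds, set by stop()

    t = statsd.timer('batch_job').start()   # manual control
    process_batch()
    t.stop(send=False)                      # records t.ms without sending
    t.send()                                # emits exactly once; a second send() raises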
@@ -1,55 +1,53 b''
1 from __future__ import absolute_import, division, unicode_literals
2
3 1 import socket
4 2
5 3 from .base import StatsClientBase, PipelineBase
6 4
7 5
8 6 class Pipeline(PipelineBase):
9 7
10 8 def __init__(self, client):
11 super(Pipeline, self).__init__(client)
9 super().__init__(client)
12 10 self._maxudpsize = client._maxudpsize
13 11
14 12 def _send(self):
15 13 data = self._stats.popleft()
16 14 while self._stats:
17 15 # Use popleft to preserve the order of the stats.
18 16 stat = self._stats.popleft()
19 17 if len(stat) + len(data) + 1 >= self._maxudpsize:
20 18 self._client._after(data)
21 19 data = stat
22 20 else:
23 21 data += '\n' + stat
24 22 self._client._after(data)
25 23
26 24
27 25 class StatsClient(StatsClientBase):
28 26 """A client for statsd."""
29 27
30 28 def __init__(self, host='localhost', port=8125, prefix=None,
31 29 maxudpsize=512, ipv6=False):
32 30 """Create a new client."""
33 31 fam = socket.AF_INET6 if ipv6 else socket.AF_INET
34 32 family, _, _, _, addr = socket.getaddrinfo(
35 33 host, port, fam, socket.SOCK_DGRAM)[0]
36 34 self._addr = addr
37 35 self._sock = socket.socket(family, socket.SOCK_DGRAM)
38 36 self._prefix = prefix
39 37 self._maxudpsize = maxudpsize
40 38
41 39 def _send(self, data):
42 40 """Send data to statsd."""
43 41 try:
44 42 self._sock.sendto(data.encode('ascii'), self._addr)
45 43 except (socket.error, RuntimeError):
46 44 # No time for love, Dr. Jones!
47 45 pass
48 46
49 47 def close(self):
50 48 if self._sock and hasattr(self._sock, 'close'):
51 49 self._sock.close()
52 50 self._sock = None
53 51
54 52 def pipeline(self):
55 53 return Pipeline(self)
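
A small sketch of the UDP client above; the Pipeline._send loop packs queued stats into datagrams no larger than maxudpsize, so large batches are split across packets. Host, port, and prefix are assumed values:

    client = StatsClient(host='localhost', port=8125, prefix='vcsserver', maxudpsize=512)
    client.incr('hits')                     # one datagram: 'vcsserver.hits:1|c'

    with client.pipeline() as pipe:
        for n in range(100):
            pipe.incr('bucket_%s' % n)      # flushed on exit in <=512-byte datagrams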
@@ -1,175 +1,172 b''
1 # -*- coding: utf-8 -*-
2
3 1 # RhodeCode VCSServer provides access to different vcs backends via network.
4 # Copyright (C) 2014-2020 RhodeCode GmbH
2 # Copyright (C) 2014-2023 RhodeCode GmbH
5 3 #
6 4 # This program is free software; you can redistribute it and/or modify
7 5 # it under the terms of the GNU General Public License as published by
8 6 # the Free Software Foundation; either version 3 of the License, or
9 7 # (at your option) any later version.
10 8 #
11 9 # This program is distributed in the hope that it will be useful,
12 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 12 # GNU General Public License for more details.
15 13 #
16 14 # You should have received a copy of the GNU General Public License
17 15 # along with this program; if not, write to the Free Software Foundation,
18 16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 17
20 18
21 19 import os
22 20 import time
23 21 import datetime
24 22 import msgpack
25 23 import logging
26 24 import traceback
27 25 import tempfile
28 26
29 from pyramid import compat
30
31 27 log = logging.getLogger(__name__)
32 28
33 29 # NOTE: Any changes should be synced with exc_tracking at rhodecode.lib.exc_tracking
34 30 global_prefix = 'vcsserver'
35 31 exc_store_dir_name = 'rc_exception_store_v1'
36 32
37 33
38 34 def exc_serialize(exc_id, tb, exc_type):
39 35
40 36 data = {
41 37 'version': 'v1',
42 38 'exc_id': exc_id,
43 39 'exc_utc_date': datetime.datetime.utcnow().isoformat(),
44 40 'exc_timestamp': repr(time.time()),
45 41 'exc_message': tb,
46 42 'exc_type': exc_type,
47 43 }
48 44 return msgpack.packb(data), data
49 45
50 46
51 47 def exc_unserialize(tb):
52 48 return msgpack.unpackb(tb)
53 49
54 50
55 51 def get_exc_store():
56 52 """
57 53     Get the exception store, creating it if it does not exist
58 54 """
59 55 import vcsserver as app
60 56
61 57 exc_store_dir = app.CONFIG.get('exception_tracker.store_path', '') or tempfile.gettempdir()
62 58 _exc_store_path = os.path.join(exc_store_dir, exc_store_dir_name)
63 59
64 60 _exc_store_path = os.path.abspath(_exc_store_path)
65 61 if not os.path.isdir(_exc_store_path):
66 62 os.makedirs(_exc_store_path)
67 63 log.debug('Initializing exceptions store at %s', _exc_store_path)
68 64 return _exc_store_path
69 65
70 66
71 67 def _store_exception(exc_id, exc_info, prefix, request_path=''):
72 68 exc_type, exc_value, exc_traceback = exc_info
73 69
74 70 tb = ''.join(traceback.format_exception(
75 71 exc_type, exc_value, exc_traceback, None))
76 72
77 73 detailed_tb = getattr(exc_value, '_org_exc_tb', None)
78 74
79 75 if detailed_tb:
80 if isinstance(detailed_tb, compat.string_types):
76 remote_tb = detailed_tb
77 if isinstance(detailed_tb, str):
81 78 remote_tb = [detailed_tb]
82 79
83 80 tb += (
84 81 '\n+++ BEG SOURCE EXCEPTION +++\n\n'
85 82 '{}\n'
86 83 '+++ END SOURCE EXCEPTION +++\n'
87 84 ''.format('\n'.join(remote_tb))
88 85 )
89 86
90 87 # Avoid that remote_tb also appears in the frame
91 88 del remote_tb
92 89
93 90 exc_type_name = exc_type.__name__
94 91 exc_store_path = get_exc_store()
95 92 exc_data, org_data = exc_serialize(exc_id, tb, exc_type_name)
96 93 exc_pref_id = '{}_{}_{}'.format(exc_id, prefix, org_data['exc_timestamp'])
97 94 if not os.path.isdir(exc_store_path):
98 95 os.makedirs(exc_store_path)
99 96 stored_exc_path = os.path.join(exc_store_path, exc_pref_id)
100 97 with open(stored_exc_path, 'wb') as f:
101 98 f.write(exc_data)
102 99 log.debug('Stored generated exception %s as: %s', exc_id, stored_exc_path)
103 100
104 101 log.error(
105 102 'error occurred handling this request.\n'
106 103 'Path: `%s`, tb: %s',
107 104 request_path, tb)
108 105
109 106
110 107 def store_exception(exc_id, exc_info, prefix=global_prefix, request_path=''):
111 108 """
112 109 Example usage::
113 110
114 111 exc_info = sys.exc_info()
115 112 store_exception(id(exc_info), exc_info)
116 113 """
117 114
118 115 try:
119 116 _store_exception(exc_id=exc_id, exc_info=exc_info, prefix=prefix,
120 117 request_path=request_path)
121 118 except Exception:
122 119 log.exception('Failed to store exception `%s` information', exc_id)
123 120         # this must never raise; letting it propagate would crash the server badly.
124 121 pass
125 122
126 123
127 124 def _find_exc_file(exc_id, prefix=global_prefix):
128 125 exc_store_path = get_exc_store()
129 126 if prefix:
130 exc_id = '{}_{}'.format(exc_id, prefix)
127 exc_id = f'{exc_id}_{prefix}'
131 128 else:
132 129 # search without a prefix
133 exc_id = '{}'.format(exc_id)
130 exc_id = f'{exc_id}'
134 131
135 132     # search the store for a filename starting with the exc_id pattern built above
136 133 for fname in os.listdir(exc_store_path):
137 134 if fname.startswith(exc_id):
138 135 exc_id = os.path.join(exc_store_path, fname)
139 136 break
140 137 continue
141 138 else:
142 139 exc_id = None
143 140
144 141 return exc_id
145 142
146 143
147 144 def _read_exception(exc_id, prefix):
148 145 exc_id_file_path = _find_exc_file(exc_id=exc_id, prefix=prefix)
149 146 if exc_id_file_path:
150 147 with open(exc_id_file_path, 'rb') as f:
151 148 return exc_unserialize(f.read())
152 149 else:
153 150 log.debug('Exception File `%s` not found', exc_id_file_path)
154 151 return None
155 152
156 153
157 154 def read_exception(exc_id, prefix=global_prefix):
158 155 try:
159 156 return _read_exception(exc_id=exc_id, prefix=prefix)
160 157 except Exception:
161 158 log.exception('Failed to read exception `%s` information', exc_id)
162 159         # this must never raise; letting it propagate would crash the server badly.
163 160 return None
164 161
165 162
166 163 def delete_exception(exc_id, prefix=global_prefix):
167 164 try:
168 165 exc_id_file_path = _find_exc_file(exc_id, prefix=prefix)
169 166 if exc_id_file_path:
170 167 os.remove(exc_id_file_path)
171 168
172 169 except Exception:
173 170 log.exception('Failed to remove exception `%s` information', exc_id)
174 171         # this must never raise; letting it propagate would crash the server badly.
175 172 pass
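
End to end, the helpers above round-trip an exception through a msgpack file in the store; a minimal sketch:

    import sys

    try:
        1 / 0
    except ZeroDivisionError:
        exc_info = sys.exc_info()
        exc_id = id(exc_info)               # same id scheme as the docstring above
        store_exception(exc_id, exc_info, request_path='/demo')

    data = read_exception(exc_id)           # dict with version/exc_id/exc_message/exc_type...
    print(data['exc_type'])                 # 'ZeroDivisionError' (str keys with modern msgpack)
    delete_exception(exc_id)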
@@ -1,65 +1,63 b''
1 # -*- coding: utf-8 -*-
2
3 1 # RhodeCode VCSServer provides access to different vcs backends via network.
4 # Copyright (C) 2014-2020 RhodeCode GmbH
2 # Copyright (C) 2014-2023 RhodeCode GmbH
5 3 #
6 4 # This program is free software; you can redistribute it and/or modify
7 5 # it under the terms of the GNU General Public License as published by
8 6 # the Free Software Foundation; either version 3 of the License, or
9 7 # (at your option) any later version.
10 8 #
11 9 # This program is distributed in the hope that it will be useful,
12 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 12 # GNU General Public License for more details.
15 13 #
16 14 # You should have received a copy of the GNU General Public License
17 15 # along with this program; if not, write to the Free Software Foundation,
18 16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 17
20 18
21 19 import logging
22 20
23 21 from repoze.lru import LRUCache
24 22
25 from vcsserver.utils import safe_str
23 from vcsserver.str_utils import safe_str
26 24
27 25 log = logging.getLogger(__name__)
28 26
29 27
30 28 class LRUDict(LRUCache):
31 29 """
32 30 Wrapper to provide partial dict access
33 31 """
34 32
35 33 def __setitem__(self, key, value):
36 34 return self.put(key, value)
37 35
38 36 def __getitem__(self, key):
39 37 return self.get(key)
40 38
41 39 def __contains__(self, key):
42 40 return bool(self.get(key))
43 41
44 42 def __delitem__(self, key):
45 43 del self.data[key]
46 44
47 45 def keys(self):
48 return self.data.keys()
46 return list(self.data.keys())
49 47
50 48
51 49 class LRUDictDebug(LRUDict):
52 50 """
53 51 Wrapper to provide some debug options
54 52 """
55 53 def _report_keys(self):
56 elems_cnt = '%s/%s' % (len(self.keys()), self.size)
54 elems_cnt = f'{len(list(self.keys()))}/{self.size}'
57 55 # trick for pformat print it more nicely
58 56 fmt = '\n'
59 57 for cnt, elem in enumerate(self.keys()):
60 fmt += '%s - %s\n' % (cnt+1, safe_str(elem))
58 fmt += f'{cnt+1} - {safe_str(elem)}\n'
61 59 log.debug('current LRU keys (%s):%s', elems_cnt, fmt)
62 60
63 61 def __getitem__(self, key):
64 62 self._report_keys()
65 63 return self.get(key)
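
A quick sketch of the LRUDict semantics; note that `__contains__` above is `bool(self.get(key))`, so stored falsy values (0, '', None) appear absent:

    cache = LRUDict(2)            # capacity: two entries
    cache['a'] = 1
    cache['b'] = 2
    cache['c'] = 3                # an older entry is evicted to stay at capacity
    print(list(cache.keys()))

    cache['zero'] = 0
    print('zero' in cache)        # False: a quirk of the falsy-value check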
@@ -1,79 +1,112 b''
1 1 # RhodeCode VCSServer provides access to different vcs backends via network.
2 # Copyright (C) 2014-2020 RhodeCode GmbH
2 # Copyright (C) 2014-2023 RhodeCode GmbH
3 3 #
4 4 # This program is free software; you can redistribute it and/or modify
5 5 # it under the terms of the GNU General Public License as published by
6 6 # the Free Software Foundation; either version 3 of the License, or
7 7 # (at your option) any later version.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU General Public License
15 15 # along with this program; if not, write to the Free Software Foundation,
16 16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 17
18 18 import logging
19 import threading
20
19 21 from dogpile.cache import register_backend
20 22
23 from . import region_meta
24 from .utils import (
25 backend_key_generator,
26 clear_cache_namespace,
27 get_default_cache_settings,
28 get_or_create_region,
29 make_region,
30 str2bool,
31 )
32
33 module_name = 'vcsserver'
34
21 35 register_backend(
22 "dogpile.cache.rc.memory_lru", "vcsserver.lib.rc_cache.backends",
36 "dogpile.cache.rc.memory_lru", f"{module_name}.lib.rc_cache.backends",
23 37 "LRUMemoryBackend")
24 38
25 39 register_backend(
26 "dogpile.cache.rc.file_namespace", "vcsserver.lib.rc_cache.backends",
40 "dogpile.cache.rc.file_namespace", f"{module_name}.lib.rc_cache.backends",
27 41 "FileNamespaceBackend")
28 42
29 43 register_backend(
30 "dogpile.cache.rc.redis", "vcsserver.lib.rc_cache.backends",
44 "dogpile.cache.rc.redis", f"{module_name}.lib.rc_cache.backends",
31 45 "RedisPickleBackend")
32 46
33 47 register_backend(
34 "dogpile.cache.rc.redis_msgpack", "vcsserver.lib.rc_cache.backends",
48 "dogpile.cache.rc.redis_msgpack", f"{module_name}.lib.rc_cache.backends",
35 49 "RedisMsgPackBackend")
36 50
37 51
38 52 log = logging.getLogger(__name__)
39 53
40 from . import region_meta
41 from .utils import (
42 get_default_cache_settings, backend_key_generator, get_or_create_region,
43 clear_cache_namespace, make_region)
54
55 CLEAR_DELETE = 'delete'
56 CLEAR_INVALIDATE = 'invalidate'
57
58
59 def async_creation_runner(cache, somekey, creator, mutex):
60
61 def runner():
62 try:
63 value = creator()
64 cache.set(somekey, value)
65 finally:
66 mutex.release()
67
68 thread = threading.Thread(target=runner)
69 thread.start()
44 70
45 71
46 72 def configure_dogpile_cache(settings):
47 73 cache_dir = settings.get('cache_dir')
48 74 if cache_dir:
49 75 region_meta.dogpile_config_defaults['cache_dir'] = cache_dir
50 76
51 77 rc_cache_data = get_default_cache_settings(settings, prefixes=['rc_cache.'])
52 78
53 79 # inspect available namespaces
54 80 avail_regions = set()
55 81 for key in rc_cache_data.keys():
56 82 namespace_name = key.split('.', 1)[0]
57 83 if namespace_name in avail_regions:
58 84 continue
59 85
60 86 avail_regions.add(namespace_name)
61 87 log.debug('dogpile: found following cache regions: %s', namespace_name)
62 88
63 89 new_region = make_region(
64 90 name=namespace_name,
65 function_key_generator=None
91 function_key_generator=None,
92 async_creation_runner=None
66 93 )
67 94
68 new_region.configure_from_config(settings, 'rc_cache.{}.'.format(namespace_name))
95 new_region.configure_from_config(settings, f'rc_cache.{namespace_name}.')
69 96 new_region.function_key_generator = backend_key_generator(new_region.actual_backend)
97
98 async_creator = str2bool(settings.pop(f'rc_cache.{namespace_name}.async_creator', 'false'))
99 if async_creator:
100 log.debug('configuring region %s with async creator', new_region)
101 new_region.async_creation_runner = async_creation_runner
102
70 103 if log.isEnabledFor(logging.DEBUG):
71 region_args = dict(backend=new_region.actual_backend.__class__,
104 region_args = dict(backend=new_region.actual_backend,
72 105 region_invalidator=new_region.region_invalidator.__class__)
73 106 log.debug('dogpile: registering a new region `%s` %s', namespace_name, region_args)
74 107
75 108 region_meta.dogpile_cache_regions[namespace_name] = new_region
76 109
77 110
78 111 def includeme(config):
79 112 configure_dogpile_cache(config.registry.settings)
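
Putting registration and configuration together: configure_dogpile_cache consumes `rc_cache.<region>.*` keys via configure_from_config. A sketch with an assumed `repo_object` region and illustrative paths:

    settings = {
        'cache_dir': '/tmp/rc_cache',
        'rc_cache.repo_object.backend': 'dogpile.cache.rc.file_namespace',
        'rc_cache.repo_object.expiration_time': '3600',
        'rc_cache.repo_object.arguments.filename': '/tmp/rc_cache/repo_object.db',
        'rc_cache.repo_object.async_creator': 'true',   # opts into async_creation_runner
    }
    configure_dogpile_cache(settings)
    region = region_meta.dogpile_cache_regions['repo_object']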
@@ -1,329 +1,267 b''
1 1 # RhodeCode VCSServer provides access to different vcs backends via network.
2 # Copyright (C) 2014-2020 RhodeCode GmbH
2 # Copyright (C) 2014-2023 RhodeCode GmbH
3 3 #
4 4 # This program is free software; you can redistribute it and/or modify
5 5 # it under the terms of the GNU General Public License as published by
6 6 # the Free Software Foundation; either version 3 of the License, or
7 7 # (at your option) any later version.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU General Public License
15 15 # along with this program; if not, write to the Free Software Foundation,
16 16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 17
18 import time
19 18 import errno
19 import fcntl
20 import functools
20 21 import logging
22 import os
23 import pickle
24 #import time
21 25
26 #import gevent
22 27 import msgpack
23 28 import redis
24 29
25 from dogpile.cache.api import CachedValue
26 from dogpile.cache.backends import memory as memory_backend
30 flock_org = fcntl.flock
31 from typing import Union
32
33 from dogpile.cache.api import Deserializer, Serializer
27 34 from dogpile.cache.backends import file as file_backend
35 from dogpile.cache.backends import memory as memory_backend
28 36 from dogpile.cache.backends import redis as redis_backend
29 from dogpile.cache.backends.file import NO_VALUE, compat, FileLock
37 from dogpile.cache.backends.file import FileLock
30 38 from dogpile.cache.util import memoized_property
31 39
32 from pyramid.settings import asbool
33
34 40 from vcsserver.lib.memory_lru_dict import LRUDict, LRUDictDebug
35 from vcsserver.utils import safe_str, safe_unicode
36
41 from vcsserver.str_utils import safe_bytes, safe_str
42 from vcsserver.type_utils import str2bool
37 43
38 44 _default_max_size = 1024
39 45
40 46 log = logging.getLogger(__name__)
41 47
42 48
43 49 class LRUMemoryBackend(memory_backend.MemoryBackend):
44 50 key_prefix = 'lru_mem_backend'
45 51 pickle_values = False
46 52
47 53 def __init__(self, arguments):
48 max_size = arguments.pop('max_size', _default_max_size)
54 self.max_size = arguments.pop('max_size', _default_max_size)
49 55
50 56 LRUDictClass = LRUDict
51 57 if arguments.pop('log_key_count', None):
52 58 LRUDictClass = LRUDictDebug
53 59
54 arguments['cache_dict'] = LRUDictClass(max_size)
55 super(LRUMemoryBackend, self).__init__(arguments)
60 arguments['cache_dict'] = LRUDictClass(self.max_size)
61 super().__init__(arguments)
62
63 def __repr__(self):
64 return f'{self.__class__}(maxsize=`{self.max_size}`)'
65
66 def __str__(self):
67 return self.__repr__()
56 68
57 69 def delete(self, key):
58 70 try:
59 71 del self._cache[key]
60 72 except KeyError:
61 73 # we don't care if key isn't there at deletion
62 74 pass
63 75
64 76 def delete_multi(self, keys):
65 77 for key in keys:
66 78 self.delete(key)
67 79
68 80
69 class PickleSerializer(object):
70
71 def _dumps(self, value, safe=False):
72 try:
73 return compat.pickle.dumps(value)
74 except Exception:
75 if safe:
76 return NO_VALUE
77 else:
78 raise
79
80 def _loads(self, value, safe=True):
81 try:
82 return compat.pickle.loads(value)
83 except Exception:
84 if safe:
85 return NO_VALUE
86 else:
87 raise
81 class PickleSerializer:
82 serializer: None | Serializer = staticmethod( # type: ignore
83 functools.partial(pickle.dumps, protocol=pickle.HIGHEST_PROTOCOL)
84 )
85 deserializer: None | Deserializer = staticmethod( # type: ignore
86 functools.partial(pickle.loads)
87 )
88 88
89 89
90 90 class MsgPackSerializer(object):
91
92 def _dumps(self, value, safe=False):
93 try:
94 return msgpack.packb(value)
95 except Exception:
96 if safe:
97 return NO_VALUE
98 else:
99 raise
100
101 def _loads(self, value, safe=True):
102 """
103 pickle maintained the `CachedValue` wrapper of the tuple
104 msgpack does not, so it must be added back in.
105 """
106 try:
107 value = msgpack.unpackb(value, use_list=False)
108 return CachedValue(*value)
109 except Exception:
110 if safe:
111 return NO_VALUE
112 else:
113 raise
114
115
116 import fcntl
117 flock_org = fcntl.flock
91 serializer: None | Serializer = staticmethod( # type: ignore
92 msgpack.packb
93 )
94 deserializer: None | Deserializer = staticmethod( # type: ignore
95 functools.partial(msgpack.unpackb, use_list=False)
96 )
118 97
119 98
120 99 class CustomLockFactory(FileLock):
121 100
122 101 pass
123 102
124 103
125 104 class FileNamespaceBackend(PickleSerializer, file_backend.DBMBackend):
126 105 key_prefix = 'file_backend'
127 106
128 107 def __init__(self, arguments):
129 108 arguments['lock_factory'] = CustomLockFactory
130 109 db_file = arguments.get('filename')
131 110
132 log.debug('initialing %s DB in %s', self.__class__.__name__, db_file)
111         log.debug('initializing cache-backend=%s db in %s', self.__class__.__name__, db_file)
112 db_file_dir = os.path.dirname(db_file)
113 if not os.path.isdir(db_file_dir):
114 os.makedirs(db_file_dir)
115
133 116 try:
134 super(FileNamespaceBackend, self).__init__(arguments)
117 super().__init__(arguments)
135 118 except Exception:
136 119 log.exception('Failed to initialize db at: %s', db_file)
137 120 raise
138 121
139 122 def __repr__(self):
140 return '{} `{}`'.format(self.__class__, self.filename)
123 return f'{self.__class__}(file=`{self.filename}`)'
124
125 def __str__(self):
126 return self.__repr__()
141 127
142 def list_keys(self, prefix=''):
143 prefix = '{}:{}'.format(self.key_prefix, prefix)
128 def _get_keys_pattern(self, prefix: bytes = b''):
129 return b'%b:%b' % (safe_bytes(self.key_prefix), safe_bytes(prefix))
144 130
145 def cond(v):
131 def list_keys(self, prefix: bytes = b''):
132 prefix = self._get_keys_pattern(prefix)
133
134 def cond(dbm_key: bytes):
146 135 if not prefix:
147 136 return True
148 137
149 if v.startswith(prefix):
138 if dbm_key.startswith(prefix):
150 139 return True
151 140 return False
152 141
153 142 with self._dbm_file(True) as dbm:
154 143 try:
155 return filter(cond, dbm.keys())
144 return list(filter(cond, dbm.keys()))
156 145 except Exception:
157 146 log.error('Failed to fetch DBM keys from DB: %s', self.get_store())
158 147 raise
159 148
160 149 def get_store(self):
161 150 return self.filename
162 151
163 def _dbm_get(self, key):
164 with self._dbm_file(False) as dbm:
165 if hasattr(dbm, 'get'):
166 value = dbm.get(key, NO_VALUE)
167 else:
168 # gdbm objects lack a .get method
169 try:
170 value = dbm[key]
171 except KeyError:
172 value = NO_VALUE
173 if value is not NO_VALUE:
174 value = self._loads(value)
175 return value
176
177 def get(self, key):
178 try:
179 return self._dbm_get(key)
180 except Exception:
181 log.error('Failed to fetch DBM key %s from DB: %s', key, self.get_store())
182 raise
183
184 def set(self, key, value):
185 with self._dbm_file(True) as dbm:
186 dbm[key] = self._dumps(value)
187
188 def set_multi(self, mapping):
189 with self._dbm_file(True) as dbm:
190 for key, value in mapping.items():
191 dbm[key] = self._dumps(value)
192
193 152
194 153 class BaseRedisBackend(redis_backend.RedisBackend):
195 154 key_prefix = ''
196 155
197 156 def __init__(self, arguments):
198 super(BaseRedisBackend, self).__init__(arguments)
157 self.db_conn = arguments.get('host', '') or arguments.get('url', '') or 'redis-host'
158 super().__init__(arguments)
159
199 160 self._lock_timeout = self.lock_timeout
200 self._lock_auto_renewal = asbool(arguments.pop("lock_auto_renewal", True))
161 self._lock_auto_renewal = str2bool(arguments.pop("lock_auto_renewal", True))
201 162
202 163 if self._lock_auto_renewal and not self._lock_timeout:
203 164 # set default timeout for auto_renewal
204 165 self._lock_timeout = 30
205 166
167 def __repr__(self):
168 return f'{self.__class__}(conn=`{self.db_conn}`)'
169
170 def __str__(self):
171 return self.__repr__()
172
206 173 def _create_client(self):
207 174 args = {}
208 175
209 176 if self.url is not None:
210 177 args.update(url=self.url)
211 178
212 179 else:
213 180 args.update(
214 181 host=self.host, password=self.password,
215 182 port=self.port, db=self.db
216 183 )
217 184
218 185 connection_pool = redis.ConnectionPool(**args)
219
220 return redis.StrictRedis(connection_pool=connection_pool)
186 self.writer_client = redis.StrictRedis(
187 connection_pool=connection_pool
188 )
189 self.reader_client = self.writer_client
221 190
222 def list_keys(self, prefix=''):
223 prefix = '{}:{}*'.format(self.key_prefix, prefix)
224 return self.client.keys(prefix)
191 def _get_keys_pattern(self, prefix: bytes = b''):
192 return b'%b:%b*' % (safe_bytes(self.key_prefix), safe_bytes(prefix))
193
194 def list_keys(self, prefix: bytes = b''):
195 prefix = self._get_keys_pattern(prefix)
196 return self.reader_client.keys(prefix)
225 197
226 198 def get_store(self):
227 return self.client.connection_pool
228
229 def get(self, key):
230 value = self.client.get(key)
231 if value is None:
232 return NO_VALUE
233 return self._loads(value)
234
235 def get_multi(self, keys):
236 if not keys:
237 return []
238 values = self.client.mget(keys)
239 loads = self._loads
240 return [
241 loads(v) if v is not None else NO_VALUE
242 for v in values]
243
244 def set(self, key, value):
245 if self.redis_expiration_time:
246 self.client.setex(key, self.redis_expiration_time,
247 self._dumps(value))
248 else:
249 self.client.set(key, self._dumps(value))
250
251 def set_multi(self, mapping):
252 dumps = self._dumps
253 mapping = dict(
254 (k, dumps(v))
255 for k, v in mapping.items()
256 )
257
258 if not self.redis_expiration_time:
259 self.client.mset(mapping)
260 else:
261 pipe = self.client.pipeline()
262 for key, value in mapping.items():
263 pipe.setex(key, self.redis_expiration_time, value)
264 pipe.execute()
199 return self.reader_client.connection_pool
265 200
266 201 def get_mutex(self, key):
267 202 if self.distributed_lock:
268 lock_key = u'_lock_{0}'.format(safe_unicode(key))
269 return get_mutex_lock(self.client, lock_key, self._lock_timeout,
270 auto_renewal=self._lock_auto_renewal)
203 lock_key = f'_lock_{safe_str(key)}'
204 return get_mutex_lock(
205 self.writer_client, lock_key,
206 self._lock_timeout,
207 auto_renewal=self._lock_auto_renewal
208 )
271 209 else:
272 210 return None
273 211
274 212
275 213 class RedisPickleBackend(PickleSerializer, BaseRedisBackend):
276 214 key_prefix = 'redis_pickle_backend'
277 215 pass
278 216
279 217
280 218 class RedisMsgPackBackend(MsgPackSerializer, BaseRedisBackend):
281 219 key_prefix = 'redis_msgpack_backend'
282 220 pass
283 221
284 222
285 223 def get_mutex_lock(client, lock_key, lock_timeout, auto_renewal=False):
286 import redis_lock
224 from vcsserver.lib._vendor import redis_lock
287 225
288 226 class _RedisLockWrapper(object):
289 227 """LockWrapper for redis_lock"""
290 228
291 229 @classmethod
292 230 def get_lock(cls):
293 231 return redis_lock.Lock(
294 232 redis_client=client,
295 233 name=lock_key,
296 234 expire=lock_timeout,
297 235 auto_renewal=auto_renewal,
298 236 strict=True,
299 237 )
300 238
301 239 def __repr__(self):
302 return "{}:{}".format(self.__class__.__name__, lock_key)
240 return f"{self.__class__.__name__}:{lock_key}"
303 241
304 242 def __str__(self):
305 return "{}:{}".format(self.__class__.__name__, lock_key)
243 return f"{self.__class__.__name__}:{lock_key}"
306 244
307 245 def __init__(self):
308 246 self.lock = self.get_lock()
309 247 self.lock_key = lock_key
310 248
311 249 def acquire(self, wait=True):
312 250 log.debug('Trying to acquire Redis lock for key %s', self.lock_key)
313 251 try:
314 252 acquired = self.lock.acquire(wait)
315 253 log.debug('Got lock for key %s, %s', self.lock_key, acquired)
316 254 return acquired
317 255 except redis_lock.AlreadyAcquired:
318 256 return False
319 257 except redis_lock.AlreadyStarted:
320 258 # refresh thread exists, but it also means we acquired the lock
321 259 return True
322 260
323 261 def release(self):
324 262 try:
325 263 self.lock.release()
326 264 except redis_lock.NotAcquired:
327 265 pass
328 266
329 267 return _RedisLockWrapper()
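
The wrapper returned by get_mutex_lock exposes the acquire/release interface dogpile expects from a mutex; a sketch assuming a reachable local Redis:

    import redis

    client = redis.StrictRedis(host='localhost', port=6379)   # assumed instance
    mutex = get_mutex_lock(client, '_lock_repo1', lock_timeout=30, auto_renewal=True)

    if mutex.acquire(wait=True):
        try:
            pass   # critical section: one process regenerates the cached value
        finally:
            mutex.release()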
@@ -1,26 +1,26 b''
1 1 # RhodeCode VCSServer provides access to different vcs backends via network.
2 # Copyright (C) 2014-2020 RhodeCode GmbH
2 # Copyright (C) 2014-2023 RhodeCode GmbH
3 3 #
4 4 # This program is free software; you can redistribute it and/or modify
5 5 # it under the terms of the GNU General Public License as published by
6 6 # the Free Software Foundation; either version 3 of the License, or
7 7 # (at your option) any later version.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU General Public License
15 15 # along with this program; if not, write to the Free Software Foundation,
16 16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 17
18 18 import os
19 19 import tempfile
20 20
21 21 dogpile_config_defaults = {
22 22 'cache_dir': os.path.join(tempfile.gettempdir(), 'rc_cache')
23 23 }
24 24
25 25 # GLOBAL TO STORE ALL REGISTERED REGIONS
26 26 dogpile_cache_regions = {}
@@ -1,263 +1,247 b''
1 1 # RhodeCode VCSServer provides access to different vcs backends via network.
2 # Copyright (C) 2014-2020 RhodeCode GmbH
2 # Copyright (C) 2014-2023 RhodeCode GmbH
3 3 #
4 4 # This program is free software; you can redistribute it and/or modify
5 5 # it under the terms of the GNU General Public License as published by
6 6 # the Free Software Foundation; either version 3 of the License, or
7 7 # (at your option) any later version.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU General Public License
15 15 # along with this program; if not, write to the Free Software Foundation,
16 16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 17
18 import os
19 import time
18 import functools
20 19 import logging
21 import functools
20 import os
21 import threading
22 import time
22 23
24 import decorator
23 25 from dogpile.cache import CacheRegion
24 from dogpile.cache.util import compat
26
25 27
26 from vcsserver.utils import safe_str, sha1
28 from vcsserver.utils import sha1
29 from vcsserver.str_utils import safe_bytes
30 from vcsserver.type_utils import str2bool
27 31
28 from vcsserver.lib.rc_cache import region_meta
32 from . import region_meta
29 33
30 34 log = logging.getLogger(__name__)
31 35
32 36
33 37 class RhodeCodeCacheRegion(CacheRegion):
34 38
39 def __repr__(self):
40 return f'{self.__class__}(name={self.name})'
41
35 42 def conditional_cache_on_arguments(
36 43 self, namespace=None,
37 44 expiration_time=None,
38 45 should_cache_fn=None,
39 to_str=compat.string_type,
46 to_str=str,
40 47 function_key_generator=None,
41 48 condition=True):
42 49 """
43 50 Custom conditional decorator, that will not touch any dogpile internals if
44 condition isn't meet. This works a bit different than should_cache_fn
51 condition isn't meet. This works a bit different from should_cache_fn
45 52 And it's faster in cases we don't ever want to compute cached values
46 53 """
47 expiration_time_is_callable = compat.callable(expiration_time)
54 expiration_time_is_callable = callable(expiration_time)
55 if not namespace:
56 namespace = getattr(self, '_default_namespace', None)
48 57
49 58 if function_key_generator is None:
50 59 function_key_generator = self.function_key_generator
51 60
52 # workaround for py2 and cython problems, this block should be removed
53 # once we've migrated to py3
54 if 'cython' == 'cython':
55 def decorator(fn):
56 if to_str is compat.string_type:
57 # backwards compatible
58 key_generator = function_key_generator(namespace, fn)
59 else:
60 key_generator = function_key_generator(namespace, fn, to_str=to_str)
61
62 @functools.wraps(fn)
63 def decorate(*arg, **kw):
64 key = key_generator(*arg, **kw)
65
66 @functools.wraps(fn)
67 def creator():
68 return fn(*arg, **kw)
69
70 if not condition:
71 return creator()
72
73 timeout = expiration_time() if expiration_time_is_callable \
74 else expiration_time
75
76 return self.get_or_create(key, creator, timeout, should_cache_fn)
77
78 def invalidate(*arg, **kw):
79 key = key_generator(*arg, **kw)
80 self.delete(key)
81
82 def set_(value, *arg, **kw):
83 key = key_generator(*arg, **kw)
84 self.set(key, value)
85
86 def get(*arg, **kw):
87 key = key_generator(*arg, **kw)
88 return self.get(key)
89
90 def refresh(*arg, **kw):
91 key = key_generator(*arg, **kw)
92 value = fn(*arg, **kw)
93 self.set(key, value)
94 return value
95
96 decorate.set = set_
97 decorate.invalidate = invalidate
98 decorate.refresh = refresh
99 decorate.get = get
100 decorate.original = fn
101 decorate.key_generator = key_generator
102 decorate.__wrapped__ = fn
103
104 return decorate
105 return decorator
106
107 def get_or_create_for_user_func(key_generator, user_func, *arg, **kw):
61 def get_or_create_for_user_func(func_key_generator, user_func, *arg, **kw):
108 62
109 63 if not condition:
110 log.debug('Calling un-cached method:%s', user_func.func_name)
64 log.debug('Calling un-cached method:%s', user_func.__name__)
111 65 start = time.time()
112 66 result = user_func(*arg, **kw)
113 67 total = time.time() - start
114 log.debug('un-cached method:%s took %.4fs', user_func.func_name, total)
68 log.debug('un-cached method:%s took %.4fs', user_func.__name__, total)
115 69 return result
116 70
117 key = key_generator(*arg, **kw)
71 key = func_key_generator(*arg, **kw)
118 72
119 73 timeout = expiration_time() if expiration_time_is_callable \
120 74 else expiration_time
121 75
122 log.debug('Calling cached method:`%s`', user_func.func_name)
76 log.debug('Calling cached method:`%s`', user_func.__name__)
123 77 return self.get_or_create(key, user_func, timeout, should_cache_fn, (arg, kw))
124 78
125 79 def cache_decorator(user_func):
126 if to_str is compat.string_type:
80 if to_str is str:
127 81 # backwards compatible
128 82 key_generator = function_key_generator(namespace, user_func)
129 83 else:
130 84 key_generator = function_key_generator(namespace, user_func, to_str=to_str)
131 85
132 86 def refresh(*arg, **kw):
133 87 """
134 88 Like invalidate, but regenerates the value instead
135 89 """
136 90 key = key_generator(*arg, **kw)
137 91 value = user_func(*arg, **kw)
138 92 self.set(key, value)
139 93 return value
140 94
141 95 def invalidate(*arg, **kw):
142 96 key = key_generator(*arg, **kw)
143 97 self.delete(key)
144 98
145 99 def set_(value, *arg, **kw):
146 100 key = key_generator(*arg, **kw)
147 101 self.set(key, value)
148 102
149 103 def get(*arg, **kw):
150 104 key = key_generator(*arg, **kw)
151 105 return self.get(key)
152 106
153 107 user_func.set = set_
154 108 user_func.invalidate = invalidate
155 109 user_func.get = get
156 110 user_func.refresh = refresh
157 111 user_func.key_generator = key_generator
158 112 user_func.original = user_func
159 113
160 114 # Use `decorate` to preserve the signature of :param:`user_func`.
161 115 return decorator.decorate(user_func, functools.partial(
162 116 get_or_create_for_user_func, key_generator))
163 117
164 118 return cache_decorator
165 119
166 120
167 121 def make_region(*arg, **kw):
168 122 return RhodeCodeCacheRegion(*arg, **kw)
169 123
170 124
171 125 def get_default_cache_settings(settings, prefixes=None):
172 126 prefixes = prefixes or []
173 127 cache_settings = {}
174 128 for key in settings.keys():
175 129 for prefix in prefixes:
176 130 if key.startswith(prefix):
177 131 name = key.split(prefix)[1].strip()
178 132 val = settings[key]
179 if isinstance(val, compat.string_types):
133 if isinstance(val, str):
180 134 val = val.strip()
181 135 cache_settings[name] = val
182 136 return cache_settings
183 137
184 138
185 139 def compute_key_from_params(*args):
186 140 """
187 141 Helper to compute key from given params to be used in cache manager
188 142 """
189 return sha1("_".join(map(safe_str, args)))
143 return sha1(safe_bytes("_".join(map(str, args))))
144
145
146 def custom_key_generator(backend, namespace, fn):
147 func_name = fn.__name__
148
149 def generate_key(*args):
150 backend_pref = getattr(backend, 'key_prefix', None) or 'backend_prefix'
151 namespace_pref = namespace or 'default_namespace'
152 arg_key = compute_key_from_params(*args)
153 final_key = f"{backend_pref}:{namespace_pref}:{func_name}_{arg_key}"
154
155 return final_key
156
157 return generate_key
190 158
191 159
192 160 def backend_key_generator(backend):
193 161 """
194 162 Special wrapper that also sends over the backend to the key generator
195 163 """
196 164 def wrapper(namespace, fn):
197 return key_generator(backend, namespace, fn)
165 return custom_key_generator(backend, namespace, fn)
198 166 return wrapper
199 167
200 168
201 def key_generator(backend, namespace, fn):
202 fname = fn.__name__
169 def get_or_create_region(region_name, region_namespace: str | None = None, use_async_runner=False):
170 from .backends import FileNamespaceBackend
171 from . import async_creation_runner
203 172
204 def generate_key(*args):
205 backend_prefix = getattr(backend, 'key_prefix', None) or 'backend_prefix'
206 namespace_pref = namespace or 'default_namespace'
207 arg_key = compute_key_from_params(*args)
208 final_key = "{}:{}:{}_{}".format(backend_prefix, namespace_pref, fname, arg_key)
209
210 return final_key
211
212 return generate_key
213
214
215 def get_or_create_region(region_name, region_namespace=None):
216 from vcsserver.lib.rc_cache.backends import FileNamespaceBackend
217 173 region_obj = region_meta.dogpile_cache_regions.get(region_name)
218 174 if not region_obj:
219 raise EnvironmentError(
220 'Region `{}` not in configured: {}.'.format(
221 region_name, region_meta.dogpile_cache_regions.keys()))
175 reg_keys = list(region_meta.dogpile_cache_regions.keys())
176         raise OSError(f'Region `{region_name}` not found among configured regions: {reg_keys}.')
177
178 region_uid_name = f'{region_name}:{region_namespace}'
222 179
223 region_uid_name = '{}:{}'.format(region_name, region_namespace)
224 180 if isinstance(region_obj.actual_backend, FileNamespaceBackend):
181 if not region_namespace:
182             raise ValueError(f'{FileNamespaceBackend} requires the region_namespace param to be specified')
183
225 184 region_exist = region_meta.dogpile_cache_regions.get(region_namespace)
226 185 if region_exist:
227 186 log.debug('Using already configured region: %s', region_namespace)
228 187 return region_exist
229 cache_dir = region_meta.dogpile_config_defaults['cache_dir']
188
230 189 expiration_time = region_obj.expiration_time
231 190
232 if not os.path.isdir(cache_dir):
233 os.makedirs(cache_dir)
191 cache_dir = region_meta.dogpile_config_defaults['cache_dir']
192 namespace_cache_dir = cache_dir
193
194 # we default the namespace_cache_dir to our default cache dir.
195 # however if this backend is configured with filename= param, we prioritize that
196     # so all caches within that particular region, even namespaced ones, end up in the same path
197 if region_obj.actual_backend.filename:
198 namespace_cache_dir = os.path.dirname(region_obj.actual_backend.filename)
199
200 if not os.path.isdir(namespace_cache_dir):
201 os.makedirs(namespace_cache_dir)
234 202 new_region = make_region(
235 203 name=region_uid_name,
236 204 function_key_generator=backend_key_generator(region_obj.actual_backend)
237 205 )
206
238 207 namespace_filename = os.path.join(
239 cache_dir, "{}.cache.dbm".format(region_namespace))
208 namespace_cache_dir, f"{region_name}_{region_namespace}.cache_db")
240 209 # special type that allows 1db per namespace
241 210 new_region.configure(
242 211 backend='dogpile.cache.rc.file_namespace',
243 212 expiration_time=expiration_time,
244 213 arguments={"filename": namespace_filename}
245 214 )
246 215
247 216 # create and save in region caches
248 217 log.debug('configuring new region: %s', region_uid_name)
249 218 region_obj = region_meta.dogpile_cache_regions[region_namespace] = new_region
250 219
220 region_obj._default_namespace = region_namespace
221 if use_async_runner:
222 region_obj.async_creation_runner = async_creation_runner
251 223 return region_obj
252 224
253 225
254 def clear_cache_namespace(cache_region, cache_namespace_uid, invalidate=False):
255 region = get_or_create_region(cache_region, cache_namespace_uid)
256 cache_keys = region.backend.list_keys(prefix=cache_namespace_uid)
257 num_delete_keys = len(cache_keys)
258 if invalidate:
259 region.invalidate(hard=False)
260 else:
261 if num_delete_keys:
262 region.delete_multi(cache_keys)
263 return num_delete_keys
226 def clear_cache_namespace(cache_region: str | RhodeCodeCacheRegion, cache_namespace_uid: str, method: str):
227 from . import CLEAR_DELETE, CLEAR_INVALIDATE
228
229 if not isinstance(cache_region, RhodeCodeCacheRegion):
230 cache_region = get_or_create_region(cache_region, cache_namespace_uid)
231 log.debug('clearing cache region: %s with method=%s', cache_region, method)
232
233 num_affected_keys = None
234
235 if method == CLEAR_INVALIDATE:
236         # NOTE: The CacheRegion.invalidate() method's default mode of
237 # operation is to set a timestamp local to this CacheRegion in this Python process only.
238 # It does not impact other Python processes or regions as the timestamp is only stored locally in memory.
239 cache_region.invalidate(hard=True)
240
241 if method == CLEAR_DELETE:
242 cache_keys = cache_region.backend.list_keys(prefix=cache_namespace_uid)
243 num_affected_keys = len(cache_keys)
244 if num_affected_keys:
245 cache_region.delete_multi(cache_keys)
246
247 return num_affected_keys
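
A sketch of the decorator and the helpers it attaches, assuming a `repo_object` region was configured as above; the file-namespace backend requires a region_namespace:

    region = get_or_create_region('repo_object', region_namespace='repo_id_1')

    @region.conditional_cache_on_arguments(condition=True)
    def heavy_compute(repo_id):
        return repo_id * 2              # placeholder for an expensive call

    heavy_compute(21)                   # computed once, then served from cache
    heavy_compute.invalidate(21)        # helpers attached by cache_decorator
    heavy_compute.refresh(21)           # recompute and store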
@@ -1,27 +1,25 b''
1 # -*- coding: utf-8 -*-
2
3 1 # RhodeCode VCSServer provides access to different vcs backends via network.
4 # Copyright (C) 2014-2020 RhodeCode GmbH
2 # Copyright (C) 2014-2023 RhodeCode GmbH
5 3 #
6 4 # This program is free software; you can redistribute it and/or modify
7 5 # it under the terms of the GNU General Public License as published by
8 6 # the Free Software Foundation; either version 3 of the License, or
9 7 # (at your option) any later version.
10 8 #
11 9 # This program is distributed in the hope that it will be useful,
12 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 12 # GNU General Public License for more details.
15 13 #
16 14 # You should have received a copy of the GNU General Public License
17 15 # along with this program; if not, write to the Free Software Foundation,
18 16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 17
20 18
21 19 counter = 0
22 20
23 21
24 22 def get_request_counter(request):
25 23 global counter
26 24 counter += 1
27 25 return counter
@@ -1,49 +1,70 b''
1 # RhodeCode VCSServer provides access to different vcs backends via network.
2 # Copyright (C) 2014-2023 RhodeCode GmbH
3 #
4 # This program is free software; you can redistribute it and/or modify
5 # it under the terms of the GNU General Public License as published by
6 # the Free Software Foundation; either version 3 of the License, or
7 # (at your option) any later version.
8 #
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU General Public License for more details.
13 #
14 # You should have received a copy of the GNU General Public License
15 # along with this program; if not, write to the Free Software Foundation,
16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17
1 18 from vcsserver.lib._vendor.statsd import client_from_config
2 19
3 20
4 21 class StatsdClientNotInitialised(Exception):
5 22 pass
6 23
7 24
8 25 class _Singleton(type):
9 26 """A metaclass that creates a Singleton base class when called."""
10 27
11 28 _instances = {}
12 29
13 30 def __call__(cls, *args, **kwargs):
14 31 if cls not in cls._instances:
15 cls._instances[cls] = super(_Singleton, cls).__call__(*args, **kwargs)
32 cls._instances[cls] = super().__call__(*args, **kwargs)
16 33 return cls._instances[cls]
17 34
18 35
19 36 class Singleton(_Singleton("SingletonMeta", (object,), {})):
20 37 pass
21 38
22 39
23 40 class StatsdClientClass(Singleton):
24 41 setup_run = False
25 42 statsd_client = None
26 43 statsd = None
44 strict_mode_init = False
27 45
28 46 def __getattribute__(self, name):
29 47
30 48 if name.startswith("statsd"):
31 49 if self.setup_run:
32 return super(StatsdClientClass, self).__getattribute__(name)
50 return super().__getattribute__(name)
33 51 else:
52 if self.strict_mode_init:
53 raise StatsdClientNotInitialised(f"requested key was {name}")
34 54 return None
35 #raise StatsdClientNotInitialised("requested key was %s" % name)
36 55
37 return super(StatsdClientClass, self).__getattribute__(name)
56 return super().__getattribute__(name)
38 57
39 58 def setup(self, settings):
40 59 """
41 60 Initialize the client
42 61 """
62         self.strict_mode_init = settings.pop('statsd_strict_init', False)
63
43 64 statsd = client_from_config(settings)
44 65 self.statsd = statsd
45 66 self.statsd_client = statsd
46 67 self.setup_run = True
47 68
48 69
49 70 StatsdClient = StatsdClientClass()
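
A brief sketch of the singleton lifecycle: attribute access before setup() yields None (or raises when strict_mode_init is enabled), and setup() builds the vendored client. The settings values are assumptions:

    StatsdClient.statsd                  # None: setup has not run yet

    StatsdClient.setup({
        'statsd.enabled': 'true',
        'statsd.statsd_host': '127.0.0.1',
        'statsd.statsd_port': '8125',
    })

    if StatsdClient.statsd:
        StatsdClient.statsd.incr('vcsserver_started')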
@@ -1,386 +1,417 b''
1 1 # RhodeCode VCSServer provides access to different vcs backends via network.
2 # Copyright (C) 2014-2020 RhodeCode GmbH
2 # Copyright (C) 2014-2023 RhodeCode GmbH
3 3 #
4 4 # This program is free software; you can redistribute it and/or modify
5 5 # it under the terms of the GNU General Public License as published by
6 6 # the Free Software Foundation; either version 3 of the License, or
7 7 # (at your option) any later version.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU General Public License
15 15 # along with this program; if not, write to the Free Software Foundation,
16 16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 17
18 18 """Handles the Git smart protocol."""
19 19
20 20 import os
21 21 import socket
22 22 import logging
23 23
24 import simplejson as json
25 24 import dulwich.protocol
25 from dulwich.protocol import CAPABILITY_SIDE_BAND, CAPABILITY_SIDE_BAND_64K
26 26 from webob import Request, Response, exc
27 27
28 from vcsserver.lib.rc_json import json
28 29 from vcsserver import hooks, subprocessio
30 from vcsserver.str_utils import ascii_bytes
29 31
30 32
31 33 log = logging.getLogger(__name__)
32 34
33 35
34 36 class FileWrapper(object):
35 37     """File wrapper that limits how much data can be read from it."""
36 38
37 39 def __init__(self, fd, content_length):
38 40 self.fd = fd
39 41 self.content_length = content_length
40 42 self.remain = content_length
41 43
42 44 def read(self, size):
43 45 if size <= self.remain:
44 46 try:
45 47 data = self.fd.read(size)
46 48 except socket.error:
47 49 raise IOError(self)
48 50 self.remain -= size
49 51 elif self.remain:
50 52 data = self.fd.read(self.remain)
51 53 self.remain = 0
52 54 else:
53 55 data = None
54 56 return data
55 57
56 58 def __repr__(self):
57 return '<FileWrapper %s len: %s, read: %s>' % (
59 return '<FileWrapper {} len: {}, read: {}>'.format(
58 60 self.fd, self.content_length, self.content_length - self.remain
59 61 )
60 62
61 63
62 64 class GitRepository(object):
63 65 """WSGI app for handling Git smart protocol endpoints."""
64 66
65 git_folder_signature = frozenset(
66 ('config', 'head', 'info', 'objects', 'refs'))
67 git_folder_signature = frozenset(('config', 'head', 'info', 'objects', 'refs'))
67 68 commands = frozenset(('git-upload-pack', 'git-receive-pack'))
68 valid_accepts = frozenset(('application/x-%s-result' %
69 c for c in commands))
69 valid_accepts = frozenset(f'application/x-{c}-result' for c in commands)
70 70
71 71 # The last bytes are the SHA1 of the first 12 bytes.
72 72 EMPTY_PACK = (
73 'PACK\x00\x00\x00\x02\x00\x00\x00\x00' +
74 '\x02\x9d\x08\x82;\xd8\xa8\xea\xb5\x10\xadj\xc7\\\x82<\xfd>\xd3\x1e'
73 b'PACK\x00\x00\x00\x02\x00\x00\x00\x00\x02\x9d\x08' +
74 b'\x82;\xd8\xa8\xea\xb5\x10\xadj\xc7\\\x82<\xfd>\xd3\x1e'
75 75 )
76 SIDE_BAND_CAPS = frozenset(('side-band', 'side-band-64k'))
76 FLUSH_PACKET = b"0000"
77 77
78 def __init__(self, repo_name, content_path, git_path, update_server_info,
79 extras):
78 SIDE_BAND_CAPS = frozenset((CAPABILITY_SIDE_BAND, CAPABILITY_SIDE_BAND_64K))
79
80 def __init__(self, repo_name, content_path, git_path, update_server_info, extras):
80 81 files = frozenset(f.lower() for f in os.listdir(content_path))
81 82 valid_dir_signature = self.git_folder_signature.issubset(files)
82 83
83 84 if not valid_dir_signature:
84 raise OSError('%s missing git signature' % content_path)
85 raise OSError(f'{content_path} missing git signature')
85 86
86 87 self.content_path = content_path
87 88 self.repo_name = repo_name
88 89 self.extras = extras
89 90 self.git_path = git_path
90 91 self.update_server_info = update_server_info
91 92
92 93 def _get_fixedpath(self, path):
93 94 """
94 95 Small fix for repo_path
95 96
96 97 :param path:
97 98 """
98 99 path = path.split(self.repo_name, 1)[-1]
99 100 if path.startswith('.git'):
100 101 # for bare repos we still get the .git prefix inside, we skip it
101 102 # here, and remove from the service command
102 103 path = path[4:]
103 104
104 105 return path.strip('/')
105 106
106 107 def inforefs(self, request, unused_environ):
107 108 """
108 109 WSGI Response producer for HTTP GET Git Smart
109 110 HTTP /info/refs request.
110 111 """
111 112
112 113 git_command = request.GET.get('service')
113 114 if git_command not in self.commands:
114 115 log.debug('command %s not allowed', git_command)
115 116 return exc.HTTPForbidden()
116 117
117 118 # please, resist the urge to add '\n' to git capture and increment
118 119 # line count by 1.
119 120 # per git docs (Documentation/technical/http-protocol.txt#L214), \n is
120 121 # part of the protocol.
121 122 # The code in Git client not only does NOT need '\n', but actually
122 123 # blows up if you sprinkle "flush" (0000) as "0001\n".
123 124 # It reads binary, per number of bytes specified.
124 125 # if you do add '\n' as part of data, count it.
125 126 server_advert = '# service=%s\n' % git_command
126 packet_len = str(hex(len(server_advert) + 4)[2:].rjust(4, '0')).lower()
127 packet_len = hex(len(server_advert) + 4)[2:].rjust(4, '0').lower()
127 128 try:
128 129 gitenv = dict(os.environ)
129 130 # forget all configs
130 131 gitenv['RC_SCM_DATA'] = json.dumps(self.extras)
131 132 command = [self.git_path, git_command[4:], '--stateless-rpc',
132 133 '--advertise-refs', self.content_path]
133 134 out = subprocessio.SubprocessIOChunker(
134 135 command,
135 136 env=gitenv,
136 starting_values=[packet_len + server_advert + '0000'],
137 starting_values=[ascii_bytes(packet_len + server_advert) + self.FLUSH_PACKET],
137 138 shell=False
138 139 )
139 except EnvironmentError:
140 except OSError:
140 141 log.exception('Error processing command')
141 142 raise exc.HTTPExpectationFailed()
142 143
143 144 resp = Response()
144 resp.content_type = 'application/x-%s-advertisement' % str(git_command)
145 resp.content_type = f'application/x-{git_command}-advertisement'
145 146 resp.charset = None
146 147 resp.app_iter = out
147 148
148 149 return resp
149 150
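The advertisement is framed as a git pkt-line: four hex digits encoding the length of the payload plus the 4-byte header itself, then the payload, then a flush packet. A worked example for git-upload-pack:

    server_advert = '# service=git-upload-pack\n'  # 26 bytes
    packet_len = hex(len(server_advert) + 4)[2:].rjust(4, '0')
    assert packet_len == '001e'                    # 26 + 4 == 30 == 0x1e
    # the client receives: b'001e# service=git-upload-pack\n0000'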
150 151 def _get_want_capabilities(self, request):
151 152 """Read the capabilities found in the first want line of the request."""
152 153 pos = request.body_file_seekable.tell()
153 154 first_line = request.body_file_seekable.readline()
154 155 request.body_file_seekable.seek(pos)
155 156
156 157 return frozenset(
157 158 dulwich.protocol.extract_want_line_capabilities(first_line)[1])
158 159
159 160 def _build_failed_pre_pull_response(self, capabilities, pre_pull_messages):
160 161 """
161 162 Construct a response with an empty PACK file.
162 163
163 164 We use an empty PACK file, as that would trigger the failure of the pull
164 165 or clone command.
165 166
166 167 We also print a message in the error output explaining why the command
167 168 was aborted.
168 169
169 If aditionally, the user is accepting messages we send them the output
170 Additionally, if the client accepts messages, we send it the output
170 171 of the pre-pull hook.
171 172
172 173 Note that for clients that do not support side-band we just send them the
173 174 empty PACK file.
174 175 """
176
175 177 if self.SIDE_BAND_CAPS.intersection(capabilities):
176 178 response = []
177 179 proto = dulwich.protocol.Protocol(None, response.append)
178 proto.write_pkt_line('NAK\n')
179 self._write_sideband_to_proto(pre_pull_messages, proto,
180 capabilities)
180 proto.write_pkt_line(dulwich.protocol.NAK_LINE)
181
182 self._write_sideband_to_proto(proto, ascii_bytes(pre_pull_messages, allow_bytes=True), capabilities)
181 183 # N.B.(skreft): Do not change the sideband channel to 3, as that
182 184 # produces a fatal error in the client:
183 185 # fatal: error in sideband demultiplexer
184 proto.write_sideband(2, 'Pre pull hook failed: aborting\n')
185 proto.write_sideband(1, self.EMPTY_PACK)
186 proto.write_sideband(
187 dulwich.protocol.SIDE_BAND_CHANNEL_PROGRESS,
188 ascii_bytes('Pre pull hook failed: aborting\n', allow_bytes=True))
189 proto.write_sideband(
190 dulwich.protocol.SIDE_BAND_CHANNEL_DATA,
191 ascii_bytes(self.EMPTY_PACK, allow_bytes=True))
186 192
187 # writes 0000
193 # writes the b"0000" flush packet by default
188 194 proto.write_pkt_line(None)
189 195
190 196 return response
191 197 else:
192 return [self.EMPTY_PACK]
198 return [ascii_bytes(self.EMPTY_PACK, allow_bytes=True)]
199
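Each side-band packet is an ordinary pkt-line whose first payload byte selects a channel (1 = pack data, 2 = progress, 3 = fatal error). A small sketch with dulwich's Protocol, mirroring what the method above emits:

    import dulwich.protocol

    chunks = []
    proto = dulwich.protocol.Protocol(None, chunks.append)
    proto.write_pkt_line(b'NAK\n')          # b'0008NAK\n'
    proto.write_sideband(2, b'aborting\n')  # b'000e\x02aborting\n'
    proto.write_pkt_line(None)              # b'0000' flush packet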
200 def _build_post_pull_response(self, response, capabilities, start_message, end_message):
201 """
202 Given a streamed response we inject the post-pull messages.
203
204 We only inject the messages if the client supports sideband, and the
205 response has the format:
206 0008NAK\n...0000
207
208 Note that we do not check the no-progress capability: git sends it by
209 default, which would effectively block all messages.
210 """
211
212 if not self.SIDE_BAND_CAPS.intersection(capabilities):
213 return response
214
215 if not start_message and not end_message:
216 return response
217
218 try:
219 iter(response)
220 # response is iterable, we can continue
221 except TypeError:
222 raise TypeError(f'response must be an iterator: got {type(response)}')
223 if isinstance(response, (list, tuple)):
224 raise TypeError(f'response must be an iterator: got {type(response)}')
225
226 def injected_response():
193 227
194 def _write_sideband_to_proto(self, data, proto, capabilities):
228 do_loop = 1
229 header_injected = 0
230 next_item = None
231 has_item = False
232 item = b''
233
234 while do_loop:
235
236 try:
237 next_item = next(response)
238 except StopIteration:
239 do_loop = 0
240
241 if has_item:
242 # last item! alter it now
243 if do_loop == 0 and item.endswith(self.FLUSH_PACKET):
244 new_response = [item[:-4]]
245 new_response.extend(self._get_messages(end_message, capabilities))
246 new_response.append(self.FLUSH_PACKET)
247 item = b''.join(new_response)
248
249 yield item
250
251 has_item = True
252 item = next_item
253
254 # alter item if it's the initial chunk
255 if not header_injected and item.startswith(b'0008NAK\n'):
256 new_response = [b'0008NAK\n']
257 new_response.extend(self._get_messages(start_message, capabilities))
258 new_response.append(item[8:])
259 item = b''.join(new_response)
260 header_injected = 1
261
262 return injected_response()
263
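The net effect on the stream, sketched on hypothetical chunks: start messages are spliced in right after the initial b'0008NAK\n' header, end messages just before the final b'0000' flush packet, and everything in between passes through untouched:

    # in:  b'0008NAK\n<pack...>'              ...  b'<pack...>0000'
    # out: b'0008NAK\n<start msgs><pack...>'  ...  b'<pack...><end msgs>0000'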
264 def _write_sideband_to_proto(self, proto, data, capabilities):
195 265 """
196 Write the data to the proto's sideband number 2.
266 Write the data to the proto's sideband channel 2 (SIDE_BAND_CHANNEL_PROGRESS).
197 267
198 268 We do not use dulwich's write_sideband directly as it only supports
199 269 side-band-64k.
200 270 """
201 271 if not data:
202 272 return
203 273
204 274 # N.B.(skreft): The values below are explained in the pack protocol
205 275 # documentation, section Packfile Data.
206 276 # https://github.com/git/git/blob/master/Documentation/technical/pack-protocol.txt
207 if 'side-band-64k' in capabilities:
277 if CAPABILITY_SIDE_BAND_64K in capabilities:
208 278 chunk_size = 65515
209 elif 'side-band' in capabilities:
279 elif CAPABILITY_SIDE_BAND in capabilities:
210 280 chunk_size = 995
211 281 else:
212 282 return
213 283
214 chunker = (
215 data[i:i + chunk_size] for i in xrange(0, len(data), chunk_size))
284 chunker = (data[i:i + chunk_size] for i in range(0, len(data), chunk_size))
216 285
217 286 for chunk in chunker:
218 proto.write_sideband(2, chunk)
287 proto.write_sideband(dulwich.protocol.SIDE_BAND_CHANNEL_PROGRESS, ascii_bytes(chunk, allow_bytes=True))
219 288
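The chunk sizes follow from the pkt-line limits in the pack protocol: side-band-64k packets may be at most 65520 bytes and plain side-band packets at most 1000 bytes, each carrying a 4-byte length header and a 1-byte channel indicator:

    assert 65520 - 4 - 1 == 65515  # side-band-64k data bytes per packet
    assert 1000 - 4 - 1 == 995     # side-band data bytes per packet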
220 289 def _get_messages(self, data, capabilities):
221 290 """Return a list with packets for sending data in sideband number 2."""
222 291 response = []
223 292 proto = dulwich.protocol.Protocol(None, response.append)
224 293
225 self._write_sideband_to_proto(data, proto, capabilities)
294 self._write_sideband_to_proto(proto, data, capabilities)
226 295
227 296 return response
228 297
229 def _inject_messages_to_response(self, response, capabilities,
230 start_messages, end_messages):
231 """
232 Given a list response we inject the pre/post-pull messages.
233
234 We only inject the messages if the client supports sideband, and the
235 response has the format:
236 0008NAK\n...0000
237
238 Note that we do not check the no-progress capability as by default, git
239 sends it, which effectively would block all messages.
240 """
241 if not self.SIDE_BAND_CAPS.intersection(capabilities):
242 return response
243
244 if not start_messages and not end_messages:
245 return response
246
247 # make a list out of response if it's an iterator
248 # so we can investigate it for message injection.
249 if hasattr(response, '__iter__'):
250 response = list(response)
251
252 if (not response[0].startswith('0008NAK\n') or
253 not response[-1].endswith('0000')):
254 return response
255
256 new_response = ['0008NAK\n']
257 new_response.extend(self._get_messages(start_messages, capabilities))
258 if len(response) == 1:
259 new_response.append(response[0][8:-4])
260 else:
261 new_response.append(response[0][8:])
262 new_response.extend(response[1:-1])
263 new_response.append(response[-1][:-4])
264 new_response.extend(self._get_messages(end_messages, capabilities))
265 new_response.append('0000')
266
267 return new_response
268
269 298 def backend(self, request, environ):
270 299 """
271 300 WSGI Response producer for HTTP POST Git Smart HTTP requests.
272 301 Reads commands and data from HTTP POST's body.
273 302 returns an iterator obj with contents of git command's
274 303 response to stdout
275 304 """
276 305 # TODO(skreft): think how we could detect an HTTPLockedException, as
277 306 # we probably want to have the same mechanism used by mercurial and
278 307 # simplevcs.
279 308 # For that we would need to parse the output of the command looking for
280 309 # some signs of the HTTPLockedError, parse the data and reraise it in
281 310 # pygrack. However, that would interfere with the streaming.
282 311 #
283 312 # Now the output of a blocked push is:
284 313 # Pushing to http://test_regular:test12@127.0.0.1:5001/vcs_test_git
285 314 # POST git-receive-pack (1047 bytes)
286 315 # remote: ERROR: Repository `vcs_test_git` locked by user `test_admin`. Reason:`lock_auto`
287 316 # To http://test_regular:test12@127.0.0.1:5001/vcs_test_git
288 317 # ! [remote rejected] master -> master (pre-receive hook declined)
289 318 # error: failed to push some refs to 'http://test_regular:test12@127.0.0.1:5001/vcs_test_git'
290 319
291 320 git_command = self._get_fixedpath(request.path_info)
292 321 if git_command not in self.commands:
293 322 log.debug('command %s not allowed', git_command)
294 323 return exc.HTTPForbidden()
295 324
296 325 capabilities = None
297 326 if git_command == 'git-upload-pack':
298 327 capabilities = self._get_want_capabilities(request)
299 328
300 329 if 'CONTENT_LENGTH' in environ:
301 330 inputstream = FileWrapper(request.body_file_seekable,
302 331 request.content_length)
303 332 else:
304 333 inputstream = request.body_file_seekable
305 334
306 335 resp = Response()
307 resp.content_type = ('application/x-%s-result' %
308 git_command.encode('utf8'))
336 resp.content_type = f'application/x-{git_command}-result'
309 337 resp.charset = None
310 338
311 339 pre_pull_messages = ''
340 # upload-pack serves clone/fetch/pull
312 341 if git_command == 'git-upload-pack':
313 status, pre_pull_messages = hooks.git_pre_pull(self.extras)
314 if status != 0:
342 hook_response = hooks.git_pre_pull(self.extras)
343 if hook_response.status != 0:
344 pre_pull_messages = hook_response.output
315 345 resp.app_iter = self._build_failed_pre_pull_response(
316 346 capabilities, pre_pull_messages)
317 347 return resp
318 348
319 349 gitenv = dict(os.environ)
320 350 # forget all configs
321 351 gitenv['GIT_CONFIG_NOGLOBAL'] = '1'
322 352 gitenv['RC_SCM_DATA'] = json.dumps(self.extras)
323 353 cmd = [self.git_path, git_command[4:], '--stateless-rpc',
324 354 self.content_path]
325 355 log.debug('handling cmd %s', cmd)
326 356
327 357 out = subprocessio.SubprocessIOChunker(
328 358 cmd,
329 inputstream=inputstream,
359 input_stream=inputstream,
330 360 env=gitenv,
331 361 cwd=self.content_path,
332 362 shell=False,
333 363 fail_on_stderr=False,
334 364 fail_on_return_code=False
335 365 )
336 366
337 367 if self.update_server_info and git_command == 'git-receive-pack':
338 368 # We need to fully consume the iterator here, as the
339 369 # update-server-info command needs to be run after the push.
340 370 out = list(out)
341 371
342 372 # Updating refs manually after each push.
343 373 # This is required as some clients are exposing Git repos internally
344 374 # with the dumb protocol.
345 375 cmd = [self.git_path, 'update-server-info']
346 376 log.debug('handling cmd %s', cmd)
347 377 output = subprocessio.SubprocessIOChunker(
348 378 cmd,
349 inputstream=inputstream,
379 input_stream=inputstream,
350 380 env=gitenv,
351 381 cwd=self.content_path,
352 382 shell=False,
353 383 fail_on_stderr=False,
354 384 fail_on_return_code=False
355 385 )
356 386 # Consume all the output so the subprocess finishes
357 387 for _ in output:
358 388 pass
359 389
390 # upload-pack serves clone/fetch/pull
360 391 if git_command == 'git-upload-pack':
361 unused_status, post_pull_messages = hooks.git_post_pull(self.extras)
362 resp.app_iter = self._inject_messages_to_response(
363 out, capabilities, pre_pull_messages, post_pull_messages)
392 hook_response = hooks.git_post_pull(self.extras)
393 post_pull_messages = hook_response.output
394 resp.app_iter = self._build_post_pull_response(out, capabilities, pre_pull_messages, post_pull_messages)
364 395 else:
365 396 resp.app_iter = out
366 397
367 398 return resp
368 399
369 400 def __call__(self, environ, start_response):
370 401 request = Request(environ)
371 402 _path = self._get_fixedpath(request.path_info)
372 403 if _path.startswith('info/refs'):
373 404 app = self.inforefs
374 405 else:
375 406 app = self.backend
376 407
377 408 try:
378 409 resp = app(request, environ)
379 410 except exc.HTTPException as error:
380 411 log.exception('HTTP Error')
381 412 resp = error
382 413 except Exception:
383 414 log.exception('Unknown error')
384 415 resp = exc.HTTPInternalServerError()
385 416
386 417 return resp(environ, start_response)
This diff has been collapsed as it changes many lines (688 lines changed).
@@ -1,1281 +1,1463 b''
1 1 # RhodeCode VCSServer provides access to different vcs backends via network.
2 # Copyright (C) 2014-2020 RhodeCode GmbH
2 # Copyright (C) 2014-2023 RhodeCode GmbH
3 3 #
4 4 # This program is free software; you can redistribute it and/or modify
5 5 # it under the terms of the GNU General Public License as published by
6 6 # the Free Software Foundation; either version 3 of the License, or
7 7 # (at your option) any later version.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU General Public License
15 15 # along with this program; if not, write to the Free Software Foundation,
16 16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 17
18 18 import collections
19 19 import logging
20 20 import os
21 import posixpath as vcspath
22 21 import re
23 22 import stat
24 23 import traceback
25 import urllib
26 import urllib2
24 import urllib.request
25 import urllib.parse
26 import urllib.error
27 27 from functools import wraps
28 28
29 29 import more_itertools
30 30 import pygit2
31 31 from pygit2 import Repository as LibGit2Repo
32 32 from pygit2 import index as LibGit2Index
33 33 from dulwich import index, objects
34 from dulwich.client import HttpGitClient, LocalGitClient
34 from dulwich.client import HttpGitClient, LocalGitClient, FetchPackResult
35 35 from dulwich.errors import (
36 36 NotGitRepository, ChecksumMismatch, WrongObjectException,
37 37 MissingCommitError, ObjectMissing, HangupException,
38 38 UnexpectedCommandError)
39 39 from dulwich.repo import Repo as DulwichRepo
40 40 from dulwich.server import update_server_info
41 41
42 42 from vcsserver import exceptions, settings, subprocessio
43 from vcsserver.utils import safe_str, safe_int, safe_unicode
44 from vcsserver.base import RepoFactory, obfuscate_qs, ArchiveNode, archive_repo
43 from vcsserver.str_utils import safe_str, safe_int, safe_bytes, ascii_bytes
44 from vcsserver.base import RepoFactory, obfuscate_qs, ArchiveNode, store_archive_in_cache, BytesEnvelope, BinaryEnvelope
45 45 from vcsserver.hgcompat import (
46 46 hg_url as url_parser, httpbasicauthhandler, httpdigestauthhandler)
47 47 from vcsserver.git_lfs.lib import LFSOidStore
48 48 from vcsserver.vcs_base import RemoteBase
49 49
50 50 DIR_STAT = stat.S_IFDIR
51 51 FILE_MODE = stat.S_IFMT
52 52 GIT_LINK = objects.S_IFGITLINK
53 PEELED_REF_MARKER = '^{}'
54
53 PEELED_REF_MARKER = b'^{}'
54 HEAD_MARKER = b'HEAD'
55 55
56 56 log = logging.getLogger(__name__)
57 57
58 58
59 def str_to_dulwich(value):
60 """
61 Dulwich 0.10.1a requires `unicode` objects to be passed in.
62 """
63 return value.decode(settings.WIRE_ENCODING)
64
65
66 59 def reraise_safe_exceptions(func):
67 60 """Converts Dulwich exceptions to something neutral."""
68 61
69 62 @wraps(func)
70 63 def wrapper(*args, **kwargs):
71 64 try:
72 65 return func(*args, **kwargs)
73 66 except (ChecksumMismatch, WrongObjectException, MissingCommitError, ObjectMissing,) as e:
74 67 exc = exceptions.LookupException(org_exc=e)
75 68 raise exc(safe_str(e))
76 69 except (HangupException, UnexpectedCommandError) as e:
77 70 exc = exceptions.VcsException(org_exc=e)
78 71 raise exc(safe_str(e))
79 except Exception as e:
80 # NOTE(marcink): becuase of how dulwich handles some exceptions
72 except Exception:
73 # NOTE(marcink): because of how dulwich handles some exceptions
81 74 # (KeyError on empty repos), we cannot track this and catch all
82 75 # exceptions, it's an exceptions from other handlers
83 76 #if not hasattr(e, '_vcs_kind'):
84 77 #log.exception("Unhandled exception in git remote call")
85 78 #raise_from_original(exceptions.UnhandledException)
86 79 raise
87 80 return wrapper
88 81
89 82
90 83 class Repo(DulwichRepo):
91 84 """
92 85 A wrapper for dulwich Repo class.
93 86
94 87 Since dulwich is sometimes keeping .idx file descriptors open, it leads to
95 88 "Too many open files" error. We need to close all opened file descriptors
96 89 once the repo object is destroyed.
97 90 """
98 91 def __del__(self):
99 92 if hasattr(self, 'object_store'):
100 93 self.close()
101 94
102 95
103 96 class Repository(LibGit2Repo):
104 97
105 98 def __enter__(self):
106 99 return self
107 100
108 101 def __exit__(self, exc_type, exc_val, exc_tb):
109 102 self.free()
110 103
111 104
112 105 class GitFactory(RepoFactory):
113 106 repo_type = 'git'
114 107
115 108 def _create_repo(self, wire, create, use_libgit2=False):
116 109 if use_libgit2:
117 return Repository(wire['path'])
110 repo = Repository(safe_bytes(wire['path']))
118 111 else:
119 repo_path = str_to_dulwich(wire['path'])
120 return Repo(repo_path)
112 # dulwich mode
113 repo_path = safe_str(wire['path'], to_encoding=settings.WIRE_ENCODING)
114 repo = Repo(repo_path)
115
116 log.debug('repository created: got GIT object: %s', repo)
117 return repo
121 118
122 119 def repo(self, wire, create=False, use_libgit2=False):
123 120 """
124 121 Get a repository instance for the given path.
125 122 """
126 123 return self._create_repo(wire, create, use_libgit2)
127 124
128 125 def repo_libgit2(self, wire):
129 126 return self.repo(wire, use_libgit2=True)
130 127
131 128
129 def create_signature_from_string(author_str, **kwargs):
130 """
131 Creates a pygit2.Signature object from a string of the format 'Name <email>'.
132
133 :param author_str: String of the format 'Name <email>'
134 :return: pygit2.Signature object
135 """
136 match = re.match(r'^(.+) <(.+)>$', author_str)
137 if match is None:
138 raise ValueError(f"Invalid format: {author_str}")
139
140 name, email = match.groups()
141 return pygit2.Signature(name, email, **kwargs)
142
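Example usage, with an illustrative author string and explicit time/offset keyword arguments passed through to pygit2.Signature:

    sig = create_signature_from_string(
        'Jane Doe <jane@example.com>',
        time=1700000000,  # commit time as a Unix timestamp
        offset=120,       # timezone offset in minutes (UTC+2)
    )
    assert (sig.name, sig.email) == ('Jane Doe', 'jane@example.com')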
143
144 def get_obfuscated_url(url_obj):
145 url_obj.passwd = b'*****' if url_obj.passwd else url_obj.passwd
146 url_obj.query = obfuscate_qs(url_obj.query)
147 obfuscated_uri = str(url_obj)
148 return obfuscated_uri
149
150
132 151 class GitRemote(RemoteBase):
133 152
134 153 def __init__(self, factory):
135 154 self._factory = factory
136 155 self._bulk_methods = {
137 156 "date": self.date,
138 157 "author": self.author,
139 158 "branch": self.branch,
140 159 "message": self.message,
141 160 "parents": self.parents,
142 161 "_commit": self.revision,
143 162 }
163 self._bulk_file_methods = {
164 "size": self.get_node_size,
165 "data": self.get_node_data,
166 "flags": self.get_node_flags,
167 "is_binary": self.get_node_is_binary,
168 "md5": self.md5_hash
169 }
144 170
145 171 def _wire_to_config(self, wire):
146 172 if 'config' in wire:
147 return dict([(x[0] + '_' + x[1], x[2]) for x in wire['config']])
173 return {x[0] + '_' + x[1]: x[2] for x in wire['config']}
148 174 return {}
149 175
150 176 def _remote_conf(self, config):
151 177 params = [
152 178 '-c', 'core.askpass=""',
153 179 ]
154 180 ssl_cert_dir = config.get('vcs_ssl_dir')
155 181 if ssl_cert_dir:
156 params.extend(['-c', 'http.sslCAinfo={}'.format(ssl_cert_dir)])
182 params.extend(['-c', f'http.sslCAinfo={ssl_cert_dir}'])
157 183 return params
158 184
159 185 @reraise_safe_exceptions
160 186 def discover_git_version(self):
161 187 stdout, _ = self.run_git_command(
162 188 {}, ['--version'], _bare=True, _safe=True)
163 prefix = 'git version'
189 prefix = b'git version'
164 190 if stdout.startswith(prefix):
165 191 stdout = stdout[len(prefix):]
166 return stdout.strip()
192 return safe_str(stdout.strip())
167 193
168 194 @reraise_safe_exceptions
169 195 def is_empty(self, wire):
170 196 repo_init = self._factory.repo_libgit2(wire)
171 197 with repo_init as repo:
172 198
173 199 try:
174 200 has_head = repo.head.name
175 201 if has_head:
176 202 return False
177 203
178 204 # NOTE(marcink): check again using more expensive method
179 205 return repo.is_empty
180 206 except Exception:
181 207 pass
182 208
183 209 return True
184 210
185 211 @reraise_safe_exceptions
186 212 def assert_correct_path(self, wire):
187 213 cache_on, context_uid, repo_id = self._cache_on(wire)
188 214 region = self._region(wire)
215
189 216 @region.conditional_cache_on_arguments(condition=cache_on)
190 def _assert_correct_path(_context_uid, _repo_id):
191 try:
192 repo_init = self._factory.repo_libgit2(wire)
193 with repo_init as repo:
194 pass
195 except pygit2.GitError:
196 path = wire.get('path')
197 tb = traceback.format_exc()
198 log.debug("Invalid Git path `%s`, tb: %s", path, tb)
217 def _assert_correct_path(_context_uid, _repo_id, fast_check):
218 if fast_check:
219 path = safe_str(wire['path'])
220 if pygit2.discover_repository(path):
221 return True
199 222 return False
223 else:
224 try:
225 repo_init = self._factory.repo_libgit2(wire)
226 with repo_init:
227 pass
228 except pygit2.GitError:
229 path = wire.get('path')
230 tb = traceback.format_exc()
231 log.debug("Invalid Git path `%s`, tb: %s", path, tb)
232 return False
233 return True
200 234
201 return True
202 return _assert_correct_path(context_uid, repo_id)
235 return _assert_correct_path(context_uid, repo_id, True)
203 236
204 237 @reraise_safe_exceptions
205 238 def bare(self, wire):
206 239 repo_init = self._factory.repo_libgit2(wire)
207 240 with repo_init as repo:
208 241 return repo.is_bare
209 242
210 243 @reraise_safe_exceptions
244 def get_node_data(self, wire, commit_id, path):
245 repo_init = self._factory.repo_libgit2(wire)
246 with repo_init as repo:
247 commit = repo[commit_id]
248 blob_obj = commit.tree[path]
249
250 if blob_obj.type != pygit2.GIT_OBJ_BLOB:
251 raise exceptions.LookupException()(
252 f'Tree for commit_id:{commit_id} is not a blob: {blob_obj.type_str}')
253
254 return BytesEnvelope(blob_obj.data)
255
256 @reraise_safe_exceptions
257 def get_node_size(self, wire, commit_id, path):
258 repo_init = self._factory.repo_libgit2(wire)
259 with repo_init as repo:
260 commit = repo[commit_id]
261 blob_obj = commit.tree[path]
262
263 if blob_obj.type != pygit2.GIT_OBJ_BLOB:
264 raise exceptions.LookupException()(
265 f'Tree for commit_id:{commit_id} is not a blob: {blob_obj.type_str}')
266
267 return blob_obj.size
268
269 @reraise_safe_exceptions
270 def get_node_flags(self, wire, commit_id, path):
271 repo_init = self._factory.repo_libgit2(wire)
272 with repo_init as repo:
273 commit = repo[commit_id]
274 blob_obj = commit.tree[path]
275
276 if blob_obj.type != pygit2.GIT_OBJ_BLOB:
277 raise exceptions.LookupException()(
278 f'Tree for commit_id:{commit_id} is not a blob: {blob_obj.type_str}')
279
280 return blob_obj.filemode
281
282 @reraise_safe_exceptions
283 def get_node_is_binary(self, wire, commit_id, path):
284 repo_init = self._factory.repo_libgit2(wire)
285 with repo_init as repo:
286 commit = repo[commit_id]
287 blob_obj = commit.tree[path]
288
289 if blob_obj.type != pygit2.GIT_OBJ_BLOB:
290 raise exceptions.LookupException()(
291 f'Tree for commit_id:{commit_id} is not a blob: {blob_obj.type_str}')
292
293 return blob_obj.is_binary
294
295 @reraise_safe_exceptions
211 296 def blob_as_pretty_string(self, wire, sha):
212 297 repo_init = self._factory.repo_libgit2(wire)
213 298 with repo_init as repo:
214 299 blob_obj = repo[sha]
215 blob = blob_obj.data
216 return blob
300 return BytesEnvelope(blob_obj.data)
217 301
218 302 @reraise_safe_exceptions
219 303 def blob_raw_length(self, wire, sha):
220 304 cache_on, context_uid, repo_id = self._cache_on(wire)
221 305 region = self._region(wire)
306
222 307 @region.conditional_cache_on_arguments(condition=cache_on)
223 308 def _blob_raw_length(_repo_id, _sha):
224 309
225 310 repo_init = self._factory.repo_libgit2(wire)
226 311 with repo_init as repo:
227 312 blob = repo[sha]
228 313 return blob.size
229 314
230 315 return _blob_raw_length(repo_id, sha)
231 316
232 317 def _parse_lfs_pointer(self, raw_content):
318 spec_string = b'version https://git-lfs.github.com/spec'
319 if raw_content and raw_content.startswith(spec_string):
233 320
234 spec_string = 'version https://git-lfs.github.com/spec'
235 if raw_content and raw_content.startswith(spec_string):
236 pattern = re.compile(r"""
321 pattern = re.compile(rb"""
237 322 (?:\n)?
238 323 ^version[ ]https://git-lfs\.github\.com/spec/(?P<spec_ver>v\d+)\n
239 324 ^oid[ ] sha256:(?P<oid_hash>[0-9a-f]{64})\n
240 325 ^size[ ](?P<oid_size>[0-9]+)\n
241 326 (?:\n)?
242 327 """, re.VERBOSE | re.MULTILINE)
243 328 match = pattern.match(raw_content)
244 329 if match:
245 330 return match.groupdict()
246 331
247 332 return {}
248 333
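A Git LFS pointer is a small text blob; for a hypothetical all-'a' oid the parser extracts the spec version, hash and size as bytes:

    raw = (
        b'version https://git-lfs.github.com/spec/v1\n'
        b'oid sha256:' + b'a' * 64 + b'\n'
        b'size 12345\n'
    )
    # _parse_lfs_pointer(raw) returns:
    # {'spec_ver': b'v1', 'oid_hash': b'aaa...a', 'oid_size': b'12345'}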
249 334 @reraise_safe_exceptions
250 335 def is_large_file(self, wire, commit_id):
251 336 cache_on, context_uid, repo_id = self._cache_on(wire)
337 region = self._region(wire)
252 338
253 region = self._region(wire)
254 339 @region.conditional_cache_on_arguments(condition=cache_on)
255 340 def _is_large_file(_repo_id, _sha):
256 341 repo_init = self._factory.repo_libgit2(wire)
257 342 with repo_init as repo:
258 343 blob = repo[commit_id]
259 344 if blob.is_binary:
260 345 return {}
261 346
262 347 return self._parse_lfs_pointer(blob.data)
263 348
264 349 return _is_large_file(repo_id, commit_id)
265 350
266 351 @reraise_safe_exceptions
267 352 def is_binary(self, wire, tree_id):
268 353 cache_on, context_uid, repo_id = self._cache_on(wire)
354 region = self._region(wire)
269 355
270 region = self._region(wire)
271 356 @region.conditional_cache_on_arguments(condition=cache_on)
272 357 def _is_binary(_repo_id, _tree_id):
273 358 repo_init = self._factory.repo_libgit2(wire)
274 359 with repo_init as repo:
275 360 blob_obj = repo[tree_id]
276 361 return blob_obj.is_binary
277 362
278 363 return _is_binary(repo_id, tree_id)
279 364
280 365 @reraise_safe_exceptions
366 def md5_hash(self, wire, commit_id, path):
367 cache_on, context_uid, repo_id = self._cache_on(wire)
368 region = self._region(wire)
369
370 @region.conditional_cache_on_arguments(condition=cache_on)
371 def _md5_hash(_repo_id, _commit_id, _path):
372 repo_init = self._factory.repo_libgit2(wire)
373 with repo_init as repo:
374 commit = repo[_commit_id]
375 blob_obj = commit.tree[_path]
376
377 if blob_obj.type != pygit2.GIT_OBJ_BLOB:
378 raise exceptions.LookupException()(
379 f'Tree for commit_id:{_commit_id} is not a blob: {blob_obj.type_str}')
380
381 return ''
382
383 return _md5_hash(repo_id, commit_id, path)
384
385 @reraise_safe_exceptions
281 386 def in_largefiles_store(self, wire, oid):
282 387 conf = self._wire_to_config(wire)
283 388 repo_init = self._factory.repo_libgit2(wire)
284 389 with repo_init as repo:
285 390 repo_name = repo.path
286 391
287 392 store_location = conf.get('vcs_git_lfs_store_location')
288 393 if store_location:
289 394
290 395 store = LFSOidStore(
291 396 oid=oid, repo=repo_name, store_location=store_location)
292 397 return store.has_oid()
293 398
294 399 return False
295 400
296 401 @reraise_safe_exceptions
297 402 def store_path(self, wire, oid):
298 403 conf = self._wire_to_config(wire)
299 404 repo_init = self._factory.repo_libgit2(wire)
300 405 with repo_init as repo:
301 406 repo_name = repo.path
302 407
303 408 store_location = conf.get('vcs_git_lfs_store_location')
304 409 if store_location:
305 410 store = LFSOidStore(
306 411 oid=oid, repo=repo_name, store_location=store_location)
307 412 return store.oid_path
308 raise ValueError('Unable to fetch oid with path {}'.format(oid))
413 raise ValueError(f'Unable to fetch oid with path {oid}')
309 414
310 415 @reraise_safe_exceptions
311 416 def bulk_request(self, wire, rev, pre_load):
312 417 cache_on, context_uid, repo_id = self._cache_on(wire)
313 418 region = self._region(wire)
419
314 420 @region.conditional_cache_on_arguments(condition=cache_on)
315 421 def _bulk_request(_repo_id, _rev, _pre_load):
316 422 result = {}
317 423 for attr in pre_load:
318 424 try:
319 425 method = self._bulk_methods[attr]
426 wire.update({'cache': False}) # disable cache for bulk calls so we don't double cache
320 427 args = [wire, rev]
321 428 result[attr] = method(*args)
322 429 except KeyError as e:
323 raise exceptions.VcsException(e)(
324 "Unknown bulk attribute: %s" % attr)
430 raise exceptions.VcsException(e)(f"Unknown bulk attribute: {attr}")
325 431 return result
326 432
327 433 return _bulk_request(repo_id, rev, sorted(pre_load))
328 434
329 def _build_opener(self, url):
435 @reraise_safe_exceptions
436 def bulk_file_request(self, wire, commit_id, path, pre_load):
437 cache_on, context_uid, repo_id = self._cache_on(wire)
438 region = self._region(wire)
439
440 @region.conditional_cache_on_arguments(condition=cache_on)
441 def _bulk_file_request(_repo_id, _commit_id, _path, _pre_load):
442 result = {}
443 for attr in pre_load:
444 try:
445 method = self._bulk_file_methods[attr]
446 wire.update({'cache': False}) # disable cache for bulk calls so we don't double cache
447 result[attr] = method(wire, _commit_id, _path)
448 except KeyError as e:
449 raise exceptions.VcsException(e)(f'Unknown bulk attribute: "{attr}"')
450 return BinaryEnvelope(result)
451
452 return _bulk_file_request(repo_id, commit_id, path, sorted(pre_load))
453
454 def _build_opener(self, url: str):
330 455 handlers = []
331 url_obj = url_parser(url)
332 _, authinfo = url_obj.authinfo()
456 url_obj = url_parser(safe_bytes(url))
457 authinfo = url_obj.authinfo()[1]
333 458
334 459 if authinfo:
335 460 # create a password manager
336 passmgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
461 passmgr = urllib.request.HTTPPasswordMgrWithDefaultRealm()
337 462 passmgr.add_password(*authinfo)
338 463
339 464 handlers.extend((httpbasicauthhandler(passmgr),
340 465 httpdigestauthhandler(passmgr)))
341 466
342 return urllib2.build_opener(*handlers)
343
344 def _type_id_to_name(self, type_id):
345 return {
346 1: b'commit',
347 2: b'tree',
348 3: b'blob',
349 4: b'tag'
350 }[type_id]
467 return urllib.request.build_opener(*handlers)
351 468
352 469 @reraise_safe_exceptions
353 470 def check_url(self, url, config):
354 url_obj = url_parser(url)
355 test_uri, _ = url_obj.authinfo()
356 url_obj.passwd = '*****' if url_obj.passwd else url_obj.passwd
357 url_obj.query = obfuscate_qs(url_obj.query)
358 cleaned_uri = str(url_obj)
359 log.info("Checking URL for remote cloning/import: %s", cleaned_uri)
471 url_obj = url_parser(safe_bytes(url))
472
473 test_uri = safe_str(url_obj.authinfo()[0])
474 obfuscated_uri = get_obfuscated_url(url_obj)
475
476 log.info("Checking URL for remote cloning/import: %s", obfuscated_uri)
360 477
361 478 if not test_uri.endswith('info/refs'):
362 479 test_uri = test_uri.rstrip('/') + '/info/refs'
363 480
364 o = self._build_opener(url)
481 o = self._build_opener(test_uri)
365 482 o.addheaders = [('User-Agent', 'git/1.7.8.0')] # fake some git
366 483
367 484 q = {"service": 'git-upload-pack'}
368 qs = '?%s' % urllib.urlencode(q)
369 cu = "%s%s" % (test_uri, qs)
370 req = urllib2.Request(cu, None, {})
485 qs = '?%s' % urllib.parse.urlencode(q)
486 cu = f"{test_uri}{qs}"
487 req = urllib.request.Request(cu, None, {})
371 488
372 489 try:
373 log.debug("Trying to open URL %s", cleaned_uri)
490 log.debug("Trying to open URL %s", obfuscated_uri)
374 491 resp = o.open(req)
375 492 if resp.code != 200:
376 493 raise exceptions.URLError()('Return Code is not 200')
377 494 except Exception as e:
378 log.warning("URL cannot be opened: %s", cleaned_uri, exc_info=True)
495 log.warning("URL cannot be opened: %s", obfuscated_uri, exc_info=True)
379 496 # means it cannot be cloned
380 raise exceptions.URLError(e)("[%s] org_exc: %s" % (cleaned_uri, e))
497 raise exceptions.URLError(e)(f"[{obfuscated_uri}] org_exc: {e}")
381 498
382 499 # now detect if it's proper git repo
383 gitdata = resp.read()
384 if 'service=git-upload-pack' in gitdata:
500 gitdata: bytes = resp.read()
501
502 if b'service=git-upload-pack' in gitdata:
385 503 pass
386 elif re.findall(r'[0-9a-fA-F]{40}\s+refs', gitdata):
504 elif re.findall(br'[0-9a-fA-F]{40}\s+refs', gitdata):
387 505 # old style git can return some other format !
388 506 pass
389 507 else:
390 raise exceptions.URLError()(
391 "url [%s] does not look like an git" % (cleaned_uri,))
508 e = None
509 raise exceptions.URLError(e)(
510 "url [%s] does not look like an hg repo org_exc: %s"
511 % (obfuscated_uri, e))
392 512
393 513 return True
394 514
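The probe hits the smart-HTTP discovery endpoint; for an illustrative remote the final URL looks like:

    # https://example.com/repo.git/info/refs?service=git-upload-pack
    # a smart server answers with a body containing b'service=git-upload-pack';
    # older servers may return raw '<sha> <ref>' lines instead, which the
    # fallback regex accepts.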
395 515 @reraise_safe_exceptions
396 516 def clone(self, wire, url, deferred, valid_refs, update_after_clone):
397 517 # TODO(marcink): deprecate this method. Last i checked we don't use it anymore
398 518 remote_refs = self.pull(wire, url, apply_refs=False)
399 519 repo = self._factory.repo(wire)
400 520 if isinstance(valid_refs, list):
401 521 valid_refs = tuple(valid_refs)
402 522
403 523 for k in remote_refs:
404 524 # only parse heads/tags and skip so called deferred tags
405 525 if k.startswith(valid_refs) and not k.endswith(deferred):
406 526 repo[k] = remote_refs[k]
407 527
408 528 if update_after_clone:
409 529 # we want to checkout HEAD
410 530 repo["HEAD"] = remote_refs["HEAD"]
411 531 index.build_index_from_tree(repo.path, repo.index_path(),
412 532 repo.object_store, repo["HEAD"].tree)
413 533
414 534 @reraise_safe_exceptions
415 535 def branch(self, wire, commit_id):
416 536 cache_on, context_uid, repo_id = self._cache_on(wire)
417 537 region = self._region(wire)
538
418 539 @region.conditional_cache_on_arguments(condition=cache_on)
419 540 def _branch(_context_uid, _repo_id, _commit_id):
420 541 regex = re.compile('^refs/heads')
421 542
422 543 def filter_with(ref):
423 544 return regex.match(ref[0]) and ref[1] == _commit_id
424 545
425 branches = filter(filter_with, self.get_refs(wire).items())
546 branches = list(filter(filter_with, list(self.get_refs(wire).items())))
426 547 return [x[0].split('refs/heads/')[-1] for x in branches]
427 548
428 549 return _branch(context_uid, repo_id, commit_id)
429 550
430 551 @reraise_safe_exceptions
431 552 def commit_branches(self, wire, commit_id):
432 553 cache_on, context_uid, repo_id = self._cache_on(wire)
433 554 region = self._region(wire)
555
434 556 @region.conditional_cache_on_arguments(condition=cache_on)
435 557 def _commit_branches(_context_uid, _repo_id, _commit_id):
436 558 repo_init = self._factory.repo_libgit2(wire)
437 559 with repo_init as repo:
438 560 branches = [x for x in repo.branches.with_commit(_commit_id)]
439 561 return branches
440 562
441 563 return _commit_branches(context_uid, repo_id, commit_id)
442 564
443 565 @reraise_safe_exceptions
444 566 def add_object(self, wire, content):
445 567 repo_init = self._factory.repo_libgit2(wire)
446 568 with repo_init as repo:
447 569 blob = objects.Blob()
448 570 blob.set_raw_string(content)
449 571 repo.object_store.add_object(blob)
450 572 return blob.id
451 573
452 # TODO: this is quite complex, check if that can be simplified
574 @reraise_safe_exceptions
575 def create_commit(self, wire, author, committer, message, branch, new_tree_id, date_args: list[int, int] = None):
576 repo_init = self._factory.repo_libgit2(wire)
577 with repo_init as repo:
578
579 if date_args:
580 current_time, offset = date_args
581
582 kw = {
583 'time': current_time,
584 'offset': offset
585 }
586 author = create_signature_from_string(author, **kw)
587 committer = create_signature_from_string(committer, **kw)
588
589 tree = new_tree_id
590 if isinstance(tree, (bytes, str)):
591 # validate this tree is in the repo...
592 tree = repo[safe_str(tree)].id
593
594 parents = []
595 # ensure we COMMIT on top of given branch head
596 # check if this repo has ANY branches, otherwise it's a new branch case we need to make
597 if branch in repo.branches.local:
598 parents += [repo.branches[branch].target]
599 elif [x for x in repo.branches.local]:
600 parents += [repo.head.target]
601 #else:
602 # in case we want to commit on new branch we create it on top of HEAD
603 #repo.branches.local.create(branch, repo.revparse_single('HEAD'))
604
605 # # Create a new commit
606 commit_oid = repo.create_commit(
607 f'refs/heads/{branch}', # the name of the reference to update
608 author, # the author of the commit
609 committer, # the committer of the commit
610 message, # the commit message
611 tree, # the tree produced by the index
612 parents # list of parents for the new commit, usually just one,
613 )
614
615 new_commit_id = safe_str(commit_oid)
616
617 return new_commit_id
618
453 619 @reraise_safe_exceptions
454 620 def commit(self, wire, commit_data, branch, commit_tree, updated, removed):
455 # Defines the root tree
456 class _Root(object):
457 def __repr__(self):
458 return 'ROOT TREE'
459 ROOT = _Root()
460 621
461 repo = self._factory.repo(wire)
462 object_store = repo.object_store
463
464 # Create tree and populates it with blobs
465
466 if commit_tree and repo[commit_tree]:
467 git_commit = repo[commit_data['parents'][0]]
468 commit_tree = repo[git_commit.tree] # root tree
469 else:
470 commit_tree = objects.Tree()
471
472 for node in updated:
473 # Compute subdirs if needed
474 dirpath, nodename = vcspath.split(node['path'])
475 dirnames = map(safe_str, dirpath and dirpath.split('/') or [])
476 parent = commit_tree
477 ancestors = [('', parent)]
622 def mode2pygit(mode):
623 """
624 git blobs only support a few filemodes: 644, 755 and symlink (120000)
478 625
479 # Tries to dig for the deepest existing tree
480 while dirnames:
481 curdir = dirnames.pop(0)
482 try:
483 dir_id = parent[curdir][1]
484 except KeyError:
485 # put curdir back into dirnames and stops
486 dirnames.insert(0, curdir)
487 break
488 else:
489 # If found, updates parent
490 parent = repo[dir_id]
491 ancestors.append((curdir, parent))
492 # Now parent is deepest existing tree and we need to create
493 # subtrees for dirnames (in reverse order)
494 # [this only applies for nodes from added]
495 new_trees = []
626 0o100755 -> 33261
627 0o100644 -> 33188
628 """
629 return {
630 0o100644: pygit2.GIT_FILEMODE_BLOB,
631 0o100755: pygit2.GIT_FILEMODE_BLOB_EXECUTABLE,
632 0o120000: pygit2.GIT_FILEMODE_LINK
633 }.get(mode) or pygit2.GIT_FILEMODE_BLOB
496 634
497 blob = objects.Blob.from_string(node['content'])
635 repo_init = self._factory.repo_libgit2(wire)
636 with repo_init as repo:
637 repo_index = repo.index
498 638
499 if dirnames:
500 # If there are trees which should be created we need to build
501 # them now (in reverse order)
502 reversed_dirnames = list(reversed(dirnames))
503 curtree = objects.Tree()
504 curtree[node['node_path']] = node['mode'], blob.id
505 new_trees.append(curtree)
506 for dirname in reversed_dirnames[:-1]:
507 newtree = objects.Tree()
508 newtree[dirname] = (DIR_STAT, curtree.id)
509 new_trees.append(newtree)
510 curtree = newtree
511 parent[reversed_dirnames[-1]] = (DIR_STAT, curtree.id)
512 else:
513 parent.add(name=node['node_path'], mode=node['mode'], hexsha=blob.id)
639 for pathspec in updated:
640 blob_id = repo.create_blob(pathspec['content'])
641 ie = pygit2.IndexEntry(pathspec['path'], blob_id, mode2pygit(pathspec['mode']))
642 repo_index.add(ie)
514 643
515 new_trees.append(parent)
516 # Update ancestors
517 reversed_ancestors = reversed(
518 [(a[1], b[1], b[0]) for a, b in zip(ancestors, ancestors[1:])])
519 for parent, tree, path in reversed_ancestors:
520 parent[path] = (DIR_STAT, tree.id)
521 object_store.add_object(tree)
644 for pathspec in removed:
645 repo_index.remove(pathspec)
522 646
523 object_store.add_object(blob)
524 for tree in new_trees:
525 object_store.add_object(tree)
647 # Write changes to the index
648 repo_index.write()
649
650 # Create a tree from the updated index
651 commit_tree = repo_index.write_tree()
652
653 new_tree_id = commit_tree
526 654
527 for node_path in removed:
528 paths = node_path.split('/')
529 tree = commit_tree # start with top-level
530 trees = [{'tree': tree, 'path': ROOT}]
531 # Traverse deep into the forest...
532 # resolve final tree by iterating the path.
533 # e.g a/b/c.txt will get
534 # - root as tree then
535 # - 'a' as tree,
536 # - 'b' as tree,
537 # - stop at c as blob.
538 for path in paths:
539 try:
540 obj = repo[tree[path][1]]
541 if isinstance(obj, objects.Tree):
542 trees.append({'tree': obj, 'path': path})
543 tree = obj
544 except KeyError:
545 break
546 #PROBLEM:
547 """
548 We're not editing same reference tree object
549 """
550 # Cut down the blob and all rotten trees on the way back...
551 for path, tree_data in reversed(zip(paths, trees)):
552 tree = tree_data['tree']
553 tree.__delitem__(path)
554 # This operation edits the tree, we need to mark new commit back
655 author = commit_data['author']
656 committer = commit_data['committer']
657 message = commit_data['message']
658
659 date_args = [int(commit_data['commit_time']), int(commit_data['commit_timezone'])]
555 660
556 if len(tree) > 0:
557 # This tree still has elements - don't remove it or any
558 # of it's parents
559 break
560
561 object_store.add_object(commit_tree)
661 new_commit_id = self.create_commit(wire, author, committer, message, branch,
662 new_tree_id, date_args=date_args)
562 663
563 # Create commit
564 commit = objects.Commit()
565 commit.tree = commit_tree.id
566 for k, v in commit_data.items():
567 setattr(commit, k, v)
568 object_store.add_object(commit)
664 # libgit2: ensure the branch exists
665 self.create_branch(wire, branch, new_commit_id)
569 666
570 self.create_branch(wire, branch, commit.id)
667 # libgit2, set new ref to this created commit
668 self.set_refs(wire, f'refs/heads/{branch}', new_commit_id)
571 669
572 # dulwich set-ref
573 ref = 'refs/heads/%s' % branch
574 repo.refs[ref] = commit.id
575
576 return commit.id
670 return new_commit_id
577 671
578 672 @reraise_safe_exceptions
579 673 def pull(self, wire, url, apply_refs=True, refs=None, update_after=False):
580 674 if url != 'default' and '://' not in url:
581 675 client = LocalGitClient(url)
582 676 else:
583 url_obj = url_parser(url)
677 url_obj = url_parser(safe_bytes(url))
584 678 o = self._build_opener(url)
585 url, _ = url_obj.authinfo()
679 url = url_obj.authinfo()[0]
586 680 client = HttpGitClient(base_url=url, opener=o)
587 681 repo = self._factory.repo(wire)
588 682
589 683 determine_wants = repo.object_store.determine_wants_all
590 684 if refs:
591 def determine_wants_requested(references):
592 return [references[r] for r in references if r in refs]
685 refs = [ascii_bytes(x) for x in refs]
686
687 def determine_wants_requested(remote_refs):
688 determined = []
689 for ref_name, ref_hash in remote_refs.items():
690 bytes_ref_name = safe_bytes(ref_name)
691
692 if bytes_ref_name in refs:
693 bytes_ref_hash = safe_bytes(ref_hash)
694 determined.append(bytes_ref_hash)
695 return determined
696
697 # swap with our custom requested wants
593 698 determine_wants = determine_wants_requested
594 699
595 700 try:
596 701 remote_refs = client.fetch(
597 702 path=url, target=repo, determine_wants=determine_wants)
703
598 704 except NotGitRepository as e:
599 705 log.warning(
600 706 'Trying to fetch from "%s" failed, not a Git repository.', url)
601 707 # Exception can contain unicode which we convert
602 708 raise exceptions.AbortException(e)(repr(e))
603 709
604 710 # mikhail: client.fetch() returns all the remote refs, but fetches only
605 711 # refs filtered by `determine_wants` function. We need to filter result
606 712 # as well
607 713 if refs:
608 714 remote_refs = {k: remote_refs[k] for k in remote_refs if k in refs}
609 715
610 716 if apply_refs:
611 717 # TODO: johbo: Needs proper test coverage with a git repository
612 718 # that contains a tag object, so that we would end up with
613 719 # a peeled ref at this point.
614 720 for k in remote_refs:
615 721 if k.endswith(PEELED_REF_MARKER):
616 722 log.debug("Skipping peeled reference %s", k)
617 723 continue
618 724 repo[k] = remote_refs[k]
619 725
620 726 if refs and not update_after:
621 727 # mikhail: explicitly set the head to the last ref.
622 repo["HEAD"] = remote_refs[refs[-1]]
728 repo[HEAD_MARKER] = remote_refs[refs[-1]]
623 729
624 730 if update_after:
625 # we want to checkout HEAD
626 repo["HEAD"] = remote_refs["HEAD"]
731 # we want to check out HEAD
732 repo[HEAD_MARKER] = remote_refs[HEAD_MARKER]
627 733 index.build_index_from_tree(repo.path, repo.index_path(),
628 repo.object_store, repo["HEAD"].tree)
734 repo.object_store, repo[HEAD_MARKER].tree)
735
736 if isinstance(remote_refs, FetchPackResult):
737 return remote_refs.refs
629 738 return remote_refs
630 739
631 740 @reraise_safe_exceptions
632 741 def sync_fetch(self, wire, url, refs=None, all_refs=False):
633 repo = self._factory.repo(wire)
742 self._factory.repo(wire)
634 743 if refs and not isinstance(refs, (list, tuple)):
635 744 refs = [refs]
636 745
637 746 config = self._wire_to_config(wire)
638 747 # get all remote refs we'll use to fetch later
639 748 cmd = ['ls-remote']
640 749 if not all_refs:
641 750 cmd += ['--heads', '--tags']
642 751 cmd += [url]
643 752 output, __ = self.run_git_command(
644 753 wire, cmd, fail_on_stderr=False,
645 754 _copts=self._remote_conf(config),
646 755 extra_env={'GIT_TERMINAL_PROMPT': '0'})
647 756
648 757 remote_refs = collections.OrderedDict()
649 758 fetch_refs = []
650 759
651 760 for ref_line in output.splitlines():
652 sha, ref = ref_line.split('\t')
761 sha, ref = ref_line.split(b'\t')
653 762 sha = sha.strip()
654 763 if ref in remote_refs:
655 764 # duplicate, skip
656 765 continue
657 766 if ref.endswith(PEELED_REF_MARKER):
658 767 log.debug("Skipping peeled reference %s", ref)
659 768 continue
660 769 # don't sync HEAD
661 if ref in ['HEAD']:
770 if ref in [HEAD_MARKER]:
662 771 continue
663 772
664 773 remote_refs[ref] = sha
665 774
666 775 if refs and sha in refs:
667 776 # we filter fetch using our specified refs
668 fetch_refs.append('{}:{}'.format(ref, ref))
777 fetch_refs.append(f'{safe_str(ref)}:{safe_str(ref)}')
669 778 elif not refs:
670 fetch_refs.append('{}:{}'.format(ref, ref))
779 fetch_refs.append(f'{safe_str(ref)}:{safe_str(ref)}')
671 780 log.debug('Finished obtaining fetch refs, total: %s', len(fetch_refs))
672 781
673 782 if fetch_refs:
674 783 for chunk in more_itertools.chunked(fetch_refs, 1024 * 4):
675 784 fetch_refs_chunks = list(chunk)
676 785 log.debug('Fetching %s refs from import url', len(fetch_refs_chunks))
677 _out, _err = self.run_git_command(
786 self.run_git_command(
678 787 wire, ['fetch', url, '--force', '--prune', '--'] + fetch_refs_chunks,
679 788 fail_on_stderr=False,
680 789 _copts=self._remote_conf(config),
681 790 extra_env={'GIT_TERMINAL_PROMPT': '0'})
682 791
683 792 return remote_refs
684 793
685 794 @reraise_safe_exceptions
686 795 def sync_push(self, wire, url, refs=None):
687 796 if not self.check_url(url, wire):
688 797 return
689 798 config = self._wire_to_config(wire)
690 799 self._factory.repo(wire)
691 800 self.run_git_command(
692 801 wire, ['push', url, '--mirror'], fail_on_stderr=False,
693 802 _copts=self._remote_conf(config),
694 803 extra_env={'GIT_TERMINAL_PROMPT': '0'})
695 804
696 805 @reraise_safe_exceptions
697 806 def get_remote_refs(self, wire, url):
698 807 repo = Repo(url)
699 808 return repo.get_refs()
700 809
701 810 @reraise_safe_exceptions
702 811 def get_description(self, wire):
703 812 repo = self._factory.repo(wire)
704 813 return repo.get_description()
705 814
706 815 @reraise_safe_exceptions
707 816 def get_missing_revs(self, wire, rev1, rev2, path2):
708 817 repo = self._factory.repo(wire)
709 818 LocalGitClient(thin_packs=False).fetch(path2, repo)
710 819
711 820 wire_remote = wire.copy()
712 821 wire_remote['path'] = path2
713 822 repo_remote = self._factory.repo(wire_remote)
714 LocalGitClient(thin_packs=False).fetch(wire["path"], repo_remote)
823 LocalGitClient(thin_packs=False).fetch(path2, repo_remote)
715 824
716 825 revs = [
717 826 x.commit.id
718 for x in repo_remote.get_walker(include=[rev2], exclude=[rev1])]
827 for x in repo_remote.get_walker(include=[safe_bytes(rev2)], exclude=[safe_bytes(rev1)])]
719 828 return revs
720 829
721 830 @reraise_safe_exceptions
722 831 def get_object(self, wire, sha, maybe_unreachable=False):
723 832 cache_on, context_uid, repo_id = self._cache_on(wire)
724 833 region = self._region(wire)
834
725 835 @region.conditional_cache_on_arguments(condition=cache_on)
726 836 def _get_object(_context_uid, _repo_id, _sha):
727 837 repo_init = self._factory.repo_libgit2(wire)
728 838 with repo_init as repo:
729 839
730 840 missing_commit_err = 'Commit {} does not exist for `{}`'.format(sha, wire['path'])
731 841 try:
732 842 commit = repo.revparse_single(sha)
733 843 except KeyError:
734 844 # NOTE(marcink): KeyError doesn't give us any meaningful information
735 845 # here, we instead give something more explicit
736 846 e = exceptions.RefNotFoundException('SHA: %s not found', sha)
737 847 raise exceptions.LookupException(e)(missing_commit_err)
738 848 except ValueError as e:
739 849 raise exceptions.LookupException(e)(missing_commit_err)
740 850
741 851 is_tag = False
742 852 if isinstance(commit, pygit2.Tag):
743 853 commit = repo.get(commit.target)
744 854 is_tag = True
745 855
746 856 check_dangling = True
747 857 if is_tag:
748 858 check_dangling = False
749 859
750 860 if check_dangling and maybe_unreachable:
751 861 check_dangling = False
752 862
753 863 # we used a reference and it parsed means we're not having a dangling commit
754 864 if sha != commit.hex:
755 865 check_dangling = False
756 866
757 867 if check_dangling:
758 868 # check for dangling commit
759 869 for branch in repo.branches.with_commit(commit.hex):
760 870 if branch:
761 871 break
762 872 else:
763 873 # NOTE(marcink): Empty error doesn't give us any meaningful information
764 874 # here, we instead give something more explicit
765 875 e = exceptions.RefNotFoundException('SHA: %s not found in branches', sha)
766 876 raise exceptions.LookupException(e)(missing_commit_err)
767 877
768 878 commit_id = commit.hex
769 type_id = commit.type
879 type_str = commit.type_str
770 880
771 881 return {
772 882 'id': commit_id,
773 'type': self._type_id_to_name(type_id),
883 'type': type_str,
774 884 'commit_id': commit_id,
775 885 'idx': 0
776 886 }
777 887
778 888 return _get_object(context_uid, repo_id, sha)
779 889
780 890 @reraise_safe_exceptions
781 891 def get_refs(self, wire):
782 892 cache_on, context_uid, repo_id = self._cache_on(wire)
783 893 region = self._region(wire)
894
784 895 @region.conditional_cache_on_arguments(condition=cache_on)
785 896 def _get_refs(_context_uid, _repo_id):
786 897
787 898 repo_init = self._factory.repo_libgit2(wire)
788 899 with repo_init as repo:
789 900 regex = re.compile('^refs/(heads|tags)/')
790 901 return {x.name: x.target.hex for x in
791 filter(lambda ref: regex.match(ref.name) ,repo.listall_reference_objects())}
902 [ref for ref in repo.listall_reference_objects() if regex.match(ref.name)]}
792 903
793 904 return _get_refs(context_uid, repo_id)
794 905
795 906 @reraise_safe_exceptions
796 907 def get_branch_pointers(self, wire):
797 908 cache_on, context_uid, repo_id = self._cache_on(wire)
798 909 region = self._region(wire)
910
799 911 @region.conditional_cache_on_arguments(condition=cache_on)
800 912 def _get_branch_pointers(_context_uid, _repo_id):
801 913
802 914 repo_init = self._factory.repo_libgit2(wire)
803 915 regex = re.compile('^refs/heads')
804 916 with repo_init as repo:
805 branches = filter(lambda ref: regex.match(ref.name), repo.listall_reference_objects())
917 branches = [ref for ref in repo.listall_reference_objects() if regex.match(ref.name)]
806 918 return {x.target.hex: x.shorthand for x in branches}
807 919
808 920 return _get_branch_pointers(context_uid, repo_id)
809 921
810 922 @reraise_safe_exceptions
811 923 def head(self, wire, show_exc=True):
812 924 cache_on, context_uid, repo_id = self._cache_on(wire)
813 925 region = self._region(wire)
926
814 927 @region.conditional_cache_on_arguments(condition=cache_on)
815 928 def _head(_context_uid, _repo_id, _show_exc):
816 929 repo_init = self._factory.repo_libgit2(wire)
817 930 with repo_init as repo:
818 931 try:
819 932 return repo.head.peel().hex
820 933 except Exception:
821 934 if show_exc:
822 935 raise
823 936 return _head(context_uid, repo_id, show_exc)
824 937
825 938 @reraise_safe_exceptions
826 939 def init(self, wire):
827 repo_path = str_to_dulwich(wire['path'])
940 repo_path = safe_str(wire['path'])
828 941 self.repo = Repo.init(repo_path)
829 942
830 943 @reraise_safe_exceptions
831 944 def init_bare(self, wire):
832 repo_path = str_to_dulwich(wire['path'])
945 repo_path = safe_str(wire['path'])
833 946 self.repo = Repo.init_bare(repo_path)
834 947
835 948 @reraise_safe_exceptions
836 949 def revision(self, wire, rev):
837 950
838 951 cache_on, context_uid, repo_id = self._cache_on(wire)
839 952 region = self._region(wire)
953
840 954 @region.conditional_cache_on_arguments(condition=cache_on)
841 955 def _revision(_context_uid, _repo_id, _rev):
842 956 repo_init = self._factory.repo_libgit2(wire)
843 957 with repo_init as repo:
844 958 commit = repo[rev]
845 959 obj_data = {
846 960 'id': commit.id.hex,
847 961 }
848 962 # tree objects itself don't have tree_id attribute
849 963 if hasattr(commit, 'tree_id'):
850 964 obj_data['tree'] = commit.tree_id.hex
851 965
852 966 return obj_data
853 967 return _revision(context_uid, repo_id, rev)
854 968
855 969 @reraise_safe_exceptions
856 970 def date(self, wire, commit_id):
857 971 cache_on, context_uid, repo_id = self._cache_on(wire)
858 972 region = self._region(wire)
973
859 974 @region.conditional_cache_on_arguments(condition=cache_on)
860 975 def _date(_repo_id, _commit_id):
861 976 repo_init = self._factory.repo_libgit2(wire)
862 977 with repo_init as repo:
863 978 commit = repo[commit_id]
864 979
865 980 if hasattr(commit, 'commit_time'):
866 981 commit_time, commit_time_offset = commit.commit_time, commit.commit_time_offset
867 982 else:
868 983 commit = commit.get_object()
869 984 commit_time, commit_time_offset = commit.commit_time, commit.commit_time_offset
870 985
871 986 # TODO(marcink): check dulwich difference of offset vs timezone
872 987 return [commit_time, commit_time_offset]
873 988 return _date(repo_id, commit_id)
874 989
875 990 @reraise_safe_exceptions
876 991 def author(self, wire, commit_id):
877 992 cache_on, context_uid, repo_id = self._cache_on(wire)
878 993 region = self._region(wire)
994
879 995 @region.conditional_cache_on_arguments(condition=cache_on)
880 996 def _author(_repo_id, _commit_id):
881 997 repo_init = self._factory.repo_libgit2(wire)
882 998 with repo_init as repo:
883 999 commit = repo[commit_id]
884 1000
885 1001 if hasattr(commit, 'author'):
886 1002 author = commit.author
887 1003 else:
888 1004 author = commit.get_object().author
889 1005
890 1006 if author.email:
891 return u"{} <{}>".format(author.name, author.email)
1007 return f"{author.name} <{author.email}>"
892 1008
893 1009 try:
894 return u"{}".format(author.name)
1010 return f"{author.name}"
895 1011 except Exception:
896 return u"{}".format(safe_unicode(author.raw_name))
1012 return f"{safe_str(author.raw_name)}"
897 1013
898 1014 return _author(repo_id, commit_id)
899 1015
900 1016 @reraise_safe_exceptions
901 1017 def message(self, wire, commit_id):
902 1018 cache_on, context_uid, repo_id = self._cache_on(wire)
903 1019 region = self._region(wire)
1020
904 1021 @region.conditional_cache_on_arguments(condition=cache_on)
905 1022 def _message(_repo_id, _commit_id):
906 1023 repo_init = self._factory.repo_libgit2(wire)
907 1024 with repo_init as repo:
908 1025 commit = repo[commit_id]
909 1026 return commit.message
910 1027 return _message(repo_id, commit_id)
911 1028
912 1029 @reraise_safe_exceptions
913 1030 def parents(self, wire, commit_id):
914 1031 cache_on, context_uid, repo_id = self._cache_on(wire)
915 1032 region = self._region(wire)
1033
916 1034 @region.conditional_cache_on_arguments(condition=cache_on)
917 1035 def _parents(_repo_id, _commit_id):
918 1036 repo_init = self._factory.repo_libgit2(wire)
919 1037 with repo_init as repo:
920 1038 commit = repo[commit_id]
921 1039 if hasattr(commit, 'parent_ids'):
922 1040 parent_ids = commit.parent_ids
923 1041 else:
924 1042 parent_ids = commit.get_object().parent_ids
925 1043
926 1044 return [x.hex for x in parent_ids]
927 1045 return _parents(repo_id, commit_id)
928 1046
929 1047 @reraise_safe_exceptions
930 1048 def children(self, wire, commit_id):
931 1049 cache_on, context_uid, repo_id = self._cache_on(wire)
932 1050 region = self._region(wire)
1051
1052 head = self.head(wire)
1053
933 1054 @region.conditional_cache_on_arguments(condition=cache_on)
934 1055 def _children(_repo_id, _commit_id):
1056
935 1057 output, __ = self.run_git_command(
936 wire, ['rev-list', '--all', '--children'])
1058 wire, ['rev-list', '--all', '--children', f'{commit_id}^..{head}'])
937 1059
938 1060 child_ids = []
939 pat = re.compile(r'^%s' % commit_id)
940 for l in output.splitlines():
941 if pat.match(l):
942 found_ids = l.split(' ')[1:]
1061 pat = re.compile(fr'^{commit_id}')
1062 for line in output.splitlines():
1063 line = safe_str(line)
1064 if pat.match(line):
1065 found_ids = line.split(' ')[1:]
943 1066 child_ids.extend(found_ids)
1067 break
944 1068
945 1069 return child_ids
946 1070 return _children(repo_id, commit_id)
947 1071
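# Illustrative sketch (not from this changeset): how the `rev-list --children`
# output above is parsed. Each output line is "<commit> <child> <child> ...";
# the sample bytes below are made up.
import re

def parse_children(output: bytes, commit_id: str) -> list:
    child_ids = []
    pat = re.compile(fr'^{commit_id}'.encode())
    for line in output.splitlines():
        if pat.match(line):
            child_ids.extend(line.split(b' ')[1:])  # shas after the first are the children
            break
    return child_ids

sample = b'aaa111 bbb222 ccc333\nbbb222 ccc333\n'
assert parse_children(sample, 'aaa111') == [b'bbb222', b'ccc333']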
948 1072 @reraise_safe_exceptions
949 1073 def set_refs(self, wire, key, value):
950 1074 repo_init = self._factory.repo_libgit2(wire)
951 1075 with repo_init as repo:
952 1076 repo.references.create(key, value, force=True)
953 1077
954 1078 @reraise_safe_exceptions
955 1079 def create_branch(self, wire, branch_name, commit_id, force=False):
956 1080 repo_init = self._factory.repo_libgit2(wire)
957 1081 with repo_init as repo:
958 commit = repo[commit_id]
1082 if commit_id:
1083 commit = repo[commit_id]
1084 else:
1085 # if commit is not given just use the HEAD
1086 commit = repo[repo.head.target]  # repo.head is a property, not callable; resolve it to a commit
959 1087
960 1088 if force:
961 1089 repo.branches.local.create(branch_name, commit, force=force)
962 1090 elif not repo.branches.get(branch_name):
963 1091 # create only if that branch doesn't already exist
964 1092 repo.branches.local.create(branch_name, commit, force=force)
965 1093
966 1094 @reraise_safe_exceptions
967 1095 def remove_ref(self, wire, key):
968 1096 repo_init = self._factory.repo_libgit2(wire)
969 1097 with repo_init as repo:
970 1098 repo.references.delete(key)
971 1099
972 1100 @reraise_safe_exceptions
973 1101 def tag_remove(self, wire, tag_name):
974 1102 repo_init = self._factory.repo_libgit2(wire)
975 1103 with repo_init as repo:
976 key = 'refs/tags/{}'.format(tag_name)
1104 key = f'refs/tags/{tag_name}'
977 1105 repo.references.delete(key)
978 1106
979 1107 @reraise_safe_exceptions
980 1108 def tree_changes(self, wire, source_id, target_id):
981 # TODO(marcink): remove this seems it's only used by tests
982 1109 repo = self._factory.repo(wire)
1110 # source can be empty
1111 source_id = safe_bytes(source_id if source_id else b'')
1112 target_id = safe_bytes(target_id)
1113
983 1114 source = repo[source_id].tree if source_id else None
984 1115 target = repo[target_id].tree
985 1116 result = repo.object_store.tree_changes(source, target)
986 return list(result)
1117
1118 added = set()
1119 modified = set()
1120 deleted = set()
1121 for (old_path, new_path), (_, _), (_, _) in list(result):
1122 if new_path and old_path:
1123 modified.add(new_path)
1124 elif new_path and not old_path:
1125 added.add(new_path)
1126 elif not new_path and old_path:
1127 deleted.add(old_path)
1128
1129 return list(added), list(modified), list(deleted)
987 1130
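# Illustrative sketch (not from this changeset): dulwich's tree_changes yields
# ((old_path, new_path), (old_mode, new_mode), (old_sha, new_sha)) tuples; the
# classification above turns them into added/modified/deleted sets. The change
# tuples below are made up.
changes = [
    ((None, b'new.txt'), (None, 0o100644), (None, b'sha1')),       # added
    ((b'a.txt', b'a.txt'), (0o100644, 0o100644), (b's1', b's2')),  # modified
    ((b'gone.txt', None), (0o100644, None), (b's3', None)),        # deleted
]
added, modified, deleted = set(), set(), set()
for (old_path, new_path), _modes, _shas in changes:
    if new_path and old_path:
        modified.add(new_path)
    elif new_path and not old_path:
        added.add(new_path)
    elif not new_path and old_path:
        deleted.add(old_path)
assert (added, modified, deleted) == ({b'new.txt'}, {b'a.txt'}, {b'gone.txt'})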
988 1131 @reraise_safe_exceptions
989 1132 def tree_and_type_for_path(self, wire, commit_id, path):
990 1133
991 1134 cache_on, context_uid, repo_id = self._cache_on(wire)
992 1135 region = self._region(wire)
1136
993 1137 @region.conditional_cache_on_arguments(condition=cache_on)
994 1138 def _tree_and_type_for_path(_context_uid, _repo_id, _commit_id, _path):
995 1139 repo_init = self._factory.repo_libgit2(wire)
996 1140
997 1141 with repo_init as repo:
998 1142 commit = repo[commit_id]
999 1143 try:
1000 1144 tree = commit.tree[path]
1001 1145 except KeyError:
1002 1146 return None, None, None
1003 1147
1004 return tree.id.hex, tree.type, tree.filemode
1148 return tree.id.hex, tree.type_str, tree.filemode
1005 1149 return _tree_and_type_for_path(context_uid, repo_id, commit_id, path)
1006 1150
1007 1151 @reraise_safe_exceptions
1008 1152 def tree_items(self, wire, tree_id):
1009 1153 cache_on, context_uid, repo_id = self._cache_on(wire)
1010 1154 region = self._region(wire)
1155
1011 1156 @region.conditional_cache_on_arguments(condition=cache_on)
1012 1157 def _tree_items(_repo_id, _tree_id):
1013 1158
1014 1159 repo_init = self._factory.repo_libgit2(wire)
1015 1160 with repo_init as repo:
1016 1161 try:
1017 1162 tree = repo[tree_id]
1018 1163 except KeyError:
1019 raise ObjectMissing('No tree with id: {}'.format(tree_id))
1164 raise ObjectMissing(f'No tree with id: {tree_id}')
1020 1165
1021 1166 result = []
1022 1167 for item in tree:
1023 1168 item_sha = item.hex
1024 1169 item_mode = item.filemode
1025 item_type = item.type
1170 item_type = item.type_str
1026 1171
1027 1172 if item_type == 'commit':
1028 1173 # NOTE(marcink): we translate submodules to 'link' for backward compat
1029 1174 item_type = 'link'
1030 1175
1031 1176 result.append((item.name, item_mode, item_sha, item_type))
1032 1177 return result
1033 1178 return _tree_items(repo_id, tree_id)
1034 1179
1035 1180 @reraise_safe_exceptions
1036 1181 def diff_2(self, wire, commit_id_1, commit_id_2, file_filter, opt_ignorews, context):
1037 1182 """
1038 1183 Old version that uses subprocess to call diff
1039 1184 """
1040 1185
1041 1186 flags = [
1042 1187 '-U%s' % context, '--patch',
1043 1188 '--binary',
1044 1189 '--find-renames',
1045 1190 '--no-indent-heuristic',
1046 1191 # '--indent-heuristic',
1047 1192 #'--full-index',
1048 1193 #'--abbrev=40'
1049 1194 ]
1050 1195
1051 1196 if opt_ignorews:
1052 1197 flags.append('--ignore-all-space')
1053 1198
1054 1199 if commit_id_1 == self.EMPTY_COMMIT:
1055 1200 cmd = ['show'] + flags + [commit_id_2]
1056 1201 else:
1057 1202 cmd = ['diff'] + flags + [commit_id_1, commit_id_2]
1058 1203
1059 1204 if file_filter:
1060 1205 cmd.extend(['--', file_filter])
1061 1206
1062 1207 diff, __ = self.run_git_command(wire, cmd)
1063 1208 # If we used 'show' command, strip first few lines (until actual diff
1064 1209 # starts)
1065 1210 if commit_id_1 == self.EMPTY_COMMIT:
1066 1211 lines = diff.splitlines()
1067 1212 x = 0
1068 1213 for line in lines:
1069 if line.startswith('diff'):
1214 if line.startswith(b'diff'):
1070 1215 break
1071 1216 x += 1
1072 1217 # Append a trailing newline, just like the 'diff' command does
1073 1218 diff = b'\n'.join(lines[x:]) + b'\n'  # lines are bytes here
1074 1219 return diff
1075 1220
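# Illustrative sketch (not from this changeset): the git command assembled by
# diff_2 for a non-empty source commit; the commit ids and path are made up.
context = 3
flags = [f'-U{context}', '--patch', '--binary', '--find-renames', '--no-indent-heuristic']
cmd = ['git', 'diff'] + flags + ['1111111', '2222222', '--', 'some/file.py']
print(' '.join(cmd))
# -> git diff -U3 --patch --binary --find-renames --no-indent-heuristic 1111111 2222222 -- some/file.py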
1076 1221 @reraise_safe_exceptions
1077 1222 def diff(self, wire, commit_id_1, commit_id_2, file_filter, opt_ignorews, context):
1078 1223 repo_init = self._factory.repo_libgit2(wire)
1224
1079 1225 with repo_init as repo:
1080 1226 swap = True
1081 1227 flags = 0
1082 1228 flags |= pygit2.GIT_DIFF_SHOW_BINARY
1083 1229
1084 1230 if opt_ignorews:
1085 1231 flags |= pygit2.GIT_DIFF_IGNORE_WHITESPACE
1086 1232
1087 1233 if commit_id_1 == self.EMPTY_COMMIT:
1088 1234 comm1 = repo[commit_id_2]
1089 1235 diff_obj = comm1.tree.diff_to_tree(
1090 1236 flags=flags, context_lines=context, swap=swap)
1091 1237
1092 1238 else:
1093 1239 comm1 = repo[commit_id_2]
1094 1240 comm2 = repo[commit_id_1]
1095 1241 diff_obj = comm1.tree.diff_to_tree(
1096 1242 comm2.tree, flags=flags, context_lines=context, swap=swap)
1097 1243 similar_flags = 0
1098 1244 similar_flags |= pygit2.GIT_DIFF_FIND_RENAMES
1099 1245 diff_obj.find_similar(flags=similar_flags)
1100 1246
1101 1247 if file_filter:
1102 1248 for p in diff_obj:
1103 1249 if p.delta.old_file.path == file_filter:
1104 return p.patch or ''
1250 return BytesEnvelope(p.data) or BytesEnvelope(b'')
1105 # no matching path == no diff
1106 return ''
1107 return diff_obj.patch or ''
1252 return BytesEnvelope(b'')
1253
1254 return BytesEnvelope(safe_bytes(diff_obj.patch)) or BytesEnvelope(b'')
1108 1255
1109 1256 @reraise_safe_exceptions
1110 1257 def node_history(self, wire, commit_id, path, limit):
1111 1258 cache_on, context_uid, repo_id = self._cache_on(wire)
1112 1259 region = self._region(wire)
1260
1113 1261 @region.conditional_cache_on_arguments(condition=cache_on)
1114 1262 def _node_history(_context_uid, _repo_id, _commit_id, _path, _limit):
1115 1263 # optimize for n==1, rev-list is much faster for that use-case
1116 1264 if limit == 1:
1117 1265 cmd = ['rev-list', '-1', commit_id, '--', path]
1118 1266 else:
1119 1267 cmd = ['log']
1120 1268 if limit:
1121 1269 cmd.extend(['-n', str(safe_int(limit, 0))])
1122 1270 cmd.extend(['--pretty=format: %H', '-s', commit_id, '--', path])
1123 1271
1124 1272 output, __ = self.run_git_command(wire, cmd)
1125 commit_ids = re.findall(r'[0-9a-fA-F]{40}', output)
1273 commit_ids = re.findall(rb'[0-9a-fA-F]{40}', output)
1126 1274
1127 1275 return [x for x in commit_ids]
1128 1276 return _node_history(context_uid, repo_id, commit_id, path, limit)
1129 1277
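# Illustrative sketch (not from this changeset): the two command shapes built
# by node_history, for a made-up commit id and path.
commit_id, path, limit = 'abc123', 'docs/readme.rst', 5
fast_cmd = ['rev-list', '-1', commit_id, '--', path]  # limit == 1 fast path
full_cmd = ['log', '-n', str(limit), '--pretty=format: %H', '-s', commit_id, '--', path]
print(fast_cmd)
print(full_cmd)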
1130 1278 @reraise_safe_exceptions
1131 def node_annotate(self, wire, commit_id, path):
1132
1279 def node_annotate_legacy(self, wire, commit_id, path):
1280 # note: replaced by pygit2 implementation
1133 1281 cmd = ['blame', '-l', '--root', '-r', commit_id, '--', path]
1134 1282 # -l ==> outputs long shas (and we need all 40 characters)
1135 1283 # --root ==> doesn't put '^' character for boundaries
1136 1284 # -r commit_id ==> blames for the given commit
1137 1285 output, __ = self.run_git_command(wire, cmd)
1138 1286
1139 1287 result = []
1140 for i, blame_line in enumerate(output.split('\n')[:-1]):
1288 for i, blame_line in enumerate(output.splitlines()):  # splitlines() already drops the trailing newline
1141 1289 line_no = i + 1
1142 commit_id, line = re.split(r' ', blame_line, 1)
1143 result.append((line_no, commit_id, line))
1290 blame_commit_id, line = re.split(rb' ', blame_line, 1)
1291 result.append((line_no, blame_commit_id, line))
1292
1144 1293 return result
1145 1294
1146 1295 @reraise_safe_exceptions
1296 def node_annotate(self, wire, commit_id, path):
1297
1298 result_libgit = []
1299 repo_init = self._factory.repo_libgit2(wire)
1300 with repo_init as repo:
1301 commit = repo[commit_id]
1302 blame_obj = repo.blame(path, newest_commit=commit_id)
1303 for i, line in enumerate(commit.tree[path].data.splitlines()):
1304 line_no = i + 1
1305 hunk = blame_obj.for_line(line_no)
1306 blame_commit_id = hunk.final_commit_id.hex
1307
1308 result_libgit.append((line_no, blame_commit_id, line))
1309
1310 return result_libgit
1311
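# Illustrative sketch (not from this changeset): minimal standalone use of the
# pygit2 blame API relied on above, assuming a local repo at /tmp/repo with a
# file 'README' on its current HEAD (both hypothetical).
import pygit2

repo = pygit2.Repository('/tmp/repo')
head_commit = repo[repo.head.target]
blame = repo.blame('README', newest_commit=head_commit.id)
for line_no, line in enumerate(head_commit.tree['README'].data.splitlines(), 1):
    hunk = blame.for_line(line_no)  # blame hunk covering this line
    print(line_no, hunk.final_commit_id.hex, line)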
1312 @reraise_safe_exceptions
1147 1313 def update_server_info(self, wire):
1148 1314 repo = self._factory.repo(wire)
1149 1315 update_server_info(repo)
1150 1316
1151 1317 @reraise_safe_exceptions
1152 1318 def get_all_commit_ids(self, wire):
1153 1319
1154 1320 cache_on, context_uid, repo_id = self._cache_on(wire)
1155 1321 region = self._region(wire)
1322
1156 1323 @region.conditional_cache_on_arguments(condition=cache_on)
1157 1324 def _get_all_commit_ids(_context_uid, _repo_id):
1158 1325
1159 1326 cmd = ['rev-list', '--reverse', '--date-order', '--branches', '--tags']
1160 1327 try:
1161 1328 output, __ = self.run_git_command(wire, cmd)
1162 1329 return output.splitlines()
1163 1330 except Exception:
1164 1331 # Can be raised for empty repositories
1165 1332 return []
1333
1334 @region.conditional_cache_on_arguments(condition=cache_on)
1335 def _get_all_commit_ids_pygit2(_context_uid, _repo_id):
1336 repo_init = self._factory.repo_libgit2(wire)
1337 from pygit2 import GIT_SORT_REVERSE, GIT_SORT_TIME, GIT_BRANCH_ALL
1338 results = []
1339 with repo_init as repo:
1340 for commit in repo.walk(repo.head.target, GIT_SORT_TIME | GIT_BRANCH_ALL | GIT_SORT_REVERSE):
1341 results.append(commit.id.hex)
1342 return results
1166 1343 return _get_all_commit_ids(context_uid, repo_id)
1167 1344
1168 1345 @reraise_safe_exceptions
1169 1346 def run_git_command(self, wire, cmd, **opts):
1170 1347 path = wire.get('path', None)
1171 1348
1172 1349 if path and os.path.isdir(path):
1173 1350 opts['cwd'] = path
1174 1351
1175 1352 if '_bare' in opts:
1176 1353 _copts = []
1177 1354 del opts['_bare']
1178 1355 else:
1179 _copts = ['-c', 'core.quotepath=false', ]
1356 _copts = ['-c', 'core.quotepath=false',]
1180 1357 safe_call = False
1181 1358 if '_safe' in opts:
1182 1359 # no exc on failure
1183 1360 del opts['_safe']
1184 1361 safe_call = True
1185 1362
1186 1363 if '_copts' in opts:
1187 1364 _copts.extend(opts['_copts'] or [])
1188 1365 del opts['_copts']
1189 1366
1190 1367 gitenv = os.environ.copy()
1191 1368 gitenv.update(opts.pop('extra_env', {}))
1192 1369 # GIT_DIR must be removed from the environment, otherwise git may resolve the wrong repository
1193 1370 if 'GIT_DIR' in gitenv:
1194 1371 del gitenv['GIT_DIR']
1195 1372 gitenv['GIT_CONFIG_NOGLOBAL'] = '1'
1196 1373 gitenv['GIT_DISCOVERY_ACROSS_FILESYSTEM'] = '1'
1197 1374
1198 1375 cmd = [settings.GIT_EXECUTABLE] + _copts + cmd
1199 1376 _opts = {'env': gitenv, 'shell': False}
1200 1377
1201 1378 proc = None
1202 1379 try:
1203 1380 _opts.update(opts)
1204 1381 proc = subprocessio.SubprocessIOChunker(cmd, **_opts)
1205 1382
1206 return ''.join(proc), ''.join(proc.error)
1207 except (EnvironmentError, OSError) as err:
1208 cmd = ' '.join(cmd) # human friendly CMD
1383 return b''.join(proc), b''.join(proc.stderr)
1384 except OSError as err:
1385 cmd = ' '.join(map(safe_str, cmd)) # human friendly CMD
1209 1386 tb_err = ("Couldn't run git command (%s).\n"
1210 1387 "Original error was:%s\n"
1211 1388 "Call options:%s\n"
1212 1389 % (cmd, err, _opts))
1213 1390 log.exception(tb_err)
1214 1391 if safe_call:
1215 1392 return b'', err  # keep the bytes return type consistent with the success path
1216 1393 else:
1217 1394 raise exceptions.VcsException()(tb_err)
1218 1395 finally:
1219 1396 if proc:
1220 1397 proc.close()
1221 1398
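# Illustrative sketch (not from this changeset): the environment scrubbing done
# above, standalone. GIT_DIR is dropped and global config disabled so the
# spawned git only sees the repository passed via cwd.
import os
import subprocess

gitenv = os.environ.copy()
gitenv.pop('GIT_DIR', None)  # same effect as the `del gitenv['GIT_DIR']` above
gitenv['GIT_CONFIG_NOGLOBAL'] = '1'
gitenv['GIT_DISCOVERY_ACROSS_FILESYSTEM'] = '1'
out = subprocess.run(['git', '-c', 'core.quotepath=false', 'version'],
                     env=gitenv, capture_output=True)
print(out.stdout)  # bytes, matching run_git_command's return type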
1222 1399 @reraise_safe_exceptions
1223 1400 def install_hooks(self, wire, force=False):
1224 1401 from vcsserver.hook_utils import install_git_hooks
1225 1402 bare = self.bare(wire)
1226 1403 path = wire['path']
1404 binary_dir = settings.BINARY_DIR
1405 if binary_dir:
1406 executable = os.path.join(binary_dir, 'python3')  # NOTE: resolved python3 path, not yet wired into install_git_hooks
1227 1407 return install_git_hooks(path, bare, force_create=force)
1228 1408
1229 1409 @reraise_safe_exceptions
1230 1410 def get_hooks_info(self, wire):
1231 1411 from vcsserver.hook_utils import (
1232 1412 get_git_pre_hook_version, get_git_post_hook_version)
1233 1413 bare = self.bare(wire)
1234 1414 path = wire['path']
1235 1415 return {
1236 1416 'pre_version': get_git_pre_hook_version(path, bare),
1237 1417 'post_version': get_git_post_hook_version(path, bare),
1238 1418 }
1239 1419
1240 1420 @reraise_safe_exceptions
1241 1421 def set_head_ref(self, wire, head_name):
1242 1422 log.debug('Setting refs/head to `%s`', head_name)
1243 cmd = ['symbolic-ref', '"HEAD"', '"refs/heads/%s"' % head_name]
1244 output, __ = self.run_git_command(wire, cmd)
1245 return [head_name] + output.splitlines()
1423 repo_init = self._factory.repo_libgit2(wire)
1424 with repo_init as repo:
1425 repo.set_head(f'refs/heads/{head_name}')
1426
1427 return [head_name] + [f'set HEAD to refs/heads/{head_name}']
1246 1428
1247 1429 @reraise_safe_exceptions
1248 def archive_repo(self, wire, archive_dest_path, kind, mtime, archive_at_path,
1249 archive_dir_name, commit_id):
1430 def archive_repo(self, wire, archive_name_key, kind, mtime, archive_at_path,
1431 archive_dir_name, commit_id, cache_config):
1250 1432
1251 1433 def file_walker(_commit_id, path):
1252 1434 repo_init = self._factory.repo_libgit2(wire)
1253 1435
1254 1436 with repo_init as repo:
1255 1437 commit = repo[commit_id]
1256 1438
1257 1439 if path in ['', '/']:
1258 1440 tree = commit.tree
1259 1441 else:
1260 1442 tree = commit.tree[path.rstrip('/')]
1261 1443 tree_id = tree.id.hex
1262 1444 try:
1263 1445 tree = repo[tree_id]
1264 1446 except KeyError:
1265 raise ObjectMissing('No tree with id: {}'.format(tree_id))
1447 raise ObjectMissing(f'No tree with id: {tree_id}')
1266 1448
1267 1449 index = LibGit2Index.Index()
1268 1450 index.read_tree(tree)
1269 1451 file_iter = index
1270 1452
1271 for fn in file_iter:
1272 file_path = fn.path
1273 mode = fn.mode
1453 for file_node in file_iter:
1454 file_path = file_node.path
1455 mode = file_node.mode
1274 1456 is_link = stat.S_ISLNK(mode)
1275 1457 if mode == pygit2.GIT_FILEMODE_COMMIT:
1276 1458 log.debug('Skipping path %s as a commit node', file_path)
1277 1459 continue
1278 yield ArchiveNode(file_path, mode, is_link, repo[fn.hex].read_raw)
1460 yield ArchiveNode(file_path, mode, is_link, repo[file_node.hex].read_raw)
1279 1461
1280 return archive_repo(file_walker, archive_dest_path, kind, mtime, archive_at_path,
1281 archive_dir_name, commit_id)
1462 return store_archive_in_cache(
1463 file_walker, archive_name_key, kind, mtime, archive_at_path, archive_dir_name, commit_id, cache_config=cache_config)
@@ -1,1047 +1,1159 b''
1 1 # RhodeCode VCSServer provides access to different vcs backends via network.
2 # Copyright (C) 2014-2020 RhodeCode GmbH
2 # Copyright (C) 2014-2023 RhodeCode GmbH
3 3 #
4 4 # This program is free software; you can redistribute it and/or modify
5 5 # it under the terms of the GNU General Public License as published by
6 6 # the Free Software Foundation; either version 3 of the License, or
7 7 # (at your option) any later version.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU General Public License
15 15 # along with this program; if not, write to the Free Software Foundation,
16 16 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 import functools
17 import binascii
18 18 import io
19 19 import logging
20 import os
21 20 import stat
22 import urllib
23 import urllib2
21 import urllib.request
22 import urllib.parse
24 23 import traceback
24 import hashlib
25 25
26 26 from hgext import largefiles, rebase, purge
27 from hgext.strip import strip as hgext_strip
27
28 28 from mercurial import commands
29 29 from mercurial import unionrepo
30 30 from mercurial import verify
31 31 from mercurial import repair
32 32
33 33 import vcsserver
34 34 from vcsserver import exceptions
35 from vcsserver.base import RepoFactory, obfuscate_qs, raise_from_original, archive_repo, ArchiveNode
35 from vcsserver.base import RepoFactory, obfuscate_qs, raise_from_original, store_archive_in_cache, ArchiveNode, BytesEnvelope, \
36 BinaryEnvelope
36 37 from vcsserver.hgcompat import (
37 38 archival, bin, clone, config as hgconfig, diffopts, hex, get_ctx,
38 39 hg_url as url_parser, httpbasicauthhandler, httpdigestauthhandler,
39 40 makepeer, instance, match, memctx, exchange, memfilectx, nullrev, hg_merge,
40 41 patch, peer, revrange, ui, hg_tag, Abort, LookupError, RepoError,
41 42 RepoLookupError, InterventionRequired, RequirementError,
42 alwaysmatcher, patternmatcher, hgutil)
43 alwaysmatcher, patternmatcher, hgutil, hgext_strip)
44 from vcsserver.str_utils import ascii_bytes, ascii_str, safe_str, safe_bytes
43 45 from vcsserver.vcs_base import RemoteBase
46 from vcsserver.config import hooks as hooks_config
47
44 48
45 49 log = logging.getLogger(__name__)
46 50
47 51
48 52 def make_ui_from_config(repo_config):
49 53
50 54 class LoggingUI(ui.ui):
55
51 56 def status(self, *msg, **opts):
52 log.info(' '.join(msg).rstrip('\n'))
53 super(LoggingUI, self).status(*msg, **opts)
57 str_msg = map(safe_str, msg)
58 log.info(' '.join(str_msg).rstrip('\n'))
59 #super(LoggingUI, self).status(*msg, **opts)
54 60
55 61 def warn(self, *msg, **opts):
56 log.warn(' '.join(msg).rstrip('\n'))
57 super(LoggingUI, self).warn(*msg, **opts)
62 str_msg = map(safe_str, msg)
63 log.warning('ui_logger:'+' '.join(str_msg).rstrip('\n'))
64 #super(LoggingUI, self).warn(*msg, **opts)
58 65
59 66 def error(self, *msg, **opts):
60 log.error(' '.join(msg).rstrip('\n'))
61 super(LoggingUI, self).error(*msg, **opts)
67 str_msg = map(safe_str, msg)
68 log.error('ui_logger:'+' '.join(str_msg).rstrip('\n'))
69 #super(LoggingUI, self).error(*msg, **opts)
62 70
63 71 def note(self, *msg, **opts):
64 log.info(' '.join(msg).rstrip('\n'))
65 super(LoggingUI, self).note(*msg, **opts)
72 str_msg = map(safe_str, msg)
73 log.info('ui_logger:'+' '.join(str_msg).rstrip('\n'))
74 #super(LoggingUI, self).note(*msg, **opts)
66 75
67 76 def debug(self, *msg, **opts):
68 log.debug(' '.join(msg).rstrip('\n'))
69 super(LoggingUI, self).debug(*msg, **opts)
77 str_msg = map(safe_str, msg)
78 log.debug('ui_logger:'+' '.join(str_msg).rstrip('\n'))
79 #super(LoggingUI, self).debug(*msg, **opts)
70 80
71 81 baseui = LoggingUI()
72 82
73 83 # clean the baseui object
74 84 baseui._ocfg = hgconfig.config()
75 85 baseui._ucfg = hgconfig.config()
76 86 baseui._tcfg = hgconfig.config()
77 87
78 88 for section, option, value in repo_config:
79 baseui.setconfig(section, option, value)
89 baseui.setconfig(ascii_bytes(section), ascii_bytes(option), ascii_bytes(value))
80 90
81 91 # make our hgweb quiet so it doesn't print output
82 baseui.setconfig('ui', 'quiet', 'true')
92 baseui.setconfig(b'ui', b'quiet', b'true')
83 93
84 baseui.setconfig('ui', 'paginate', 'never')
94 baseui.setconfig(b'ui', b'paginate', b'never')
85 95 # for better Error reporting of Mercurial
86 baseui.setconfig('ui', 'message-output', 'stderr')
96 baseui.setconfig(b'ui', b'message-output', b'stderr')
87 97
88 98 # force mercurial to only use 1 thread, otherwise it may try to set a
89 99 # signal in a non-main thread, thus generating a ValueError.
90 baseui.setconfig('worker', 'numcpus', 1)
100 baseui.setconfig(b'worker', b'numcpus', 1)
91 101
92 102 # If there is no config for the largefiles extension, we explicitly disable
93 103 # it here. This overrides settings from repositories hgrc file. Recent
94 104 # mercurial versions enable largefiles in hgrc on clone from largefile
95 105 # repo.
96 if not baseui.hasconfig('extensions', 'largefiles'):
106 if not baseui.hasconfig(b'extensions', b'largefiles'):
97 107 log.debug('Explicitly disable largefiles extension for repo.')
98 baseui.setconfig('extensions', 'largefiles', '!')
108 baseui.setconfig(b'extensions', b'largefiles', b'!')
99 109
100 110 return baseui
101 111
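# Illustrative sketch (not from this changeset): repo_config is an iterable of
# (section, option, value) strings which make_ui_from_config encodes to ascii
# bytes (via ascii_bytes) before handing them to Mercurial. A made-up config:
repo_config = [
    ('ui', 'username', 'RhodeCode'),
    ('extensions', 'largefiles', '!'),  # explicit disable, mirroring the default above
]
for section, option, value in repo_config:
    print(section.encode('ascii'), option.encode('ascii'), value.encode('ascii'))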
102 112
103 113 def reraise_safe_exceptions(func):
104 114 """Decorator for converting mercurial exceptions to something neutral."""
105 115
106 116 def wrapper(*args, **kwargs):
107 117 try:
108 118 return func(*args, **kwargs)
109 119 except (Abort, InterventionRequired) as e:
110 raise_from_original(exceptions.AbortException(e))
120 raise_from_original(exceptions.AbortException(e), e)
111 121 except RepoLookupError as e:
112 raise_from_original(exceptions.LookupException(e))
122 raise_from_original(exceptions.LookupException(e), e)
113 123 except RequirementError as e:
114 raise_from_original(exceptions.RequirementException(e))
124 raise_from_original(exceptions.RequirementException(e), e)
115 125 except RepoError as e:
116 raise_from_original(exceptions.VcsException(e))
126 raise_from_original(exceptions.VcsException(e), e)
117 127 except LookupError as e:
118 raise_from_original(exceptions.LookupException(e))
128 raise_from_original(exceptions.LookupException(e), e)
119 129 except Exception as e:
120 130 if not hasattr(e, '_vcs_kind'):
121 131 log.exception("Unhandled exception in hg remote call")
122 raise_from_original(exceptions.UnhandledException(e))
132 raise_from_original(exceptions.UnhandledException(e), e)
123 133
124 134 raise
125 135 return wrapper
126 136
127 137
128 138 class MercurialFactory(RepoFactory):
129 139 repo_type = 'hg'
130 140
131 141 def _create_config(self, config, hooks=True):
132 142 if not hooks:
133 hooks_to_clean = frozenset((
134 'changegroup.repo_size', 'preoutgoing.pre_pull',
135 'outgoing.pull_logger', 'prechangegroup.pre_push'))
143
144 hooks_to_clean = {
145
146 hooks_config.HOOK_REPO_SIZE,
147 hooks_config.HOOK_PRE_PULL,
148 hooks_config.HOOK_PULL,
149
150 hooks_config.HOOK_PRE_PUSH,
151 # TODO: what about PRETXT, this was disabled in pre 5.0.0
152 hooks_config.HOOK_PRETX_PUSH,
153
154 }
136 155 new_config = []
137 156 for section, option, value in config:
138 157 if section == 'hooks' and option in hooks_to_clean:
139 158 continue
140 159 new_config.append((section, option, value))
141 160 config = new_config
142 161
143 162 baseui = make_ui_from_config(config)
144 163 return baseui
145 164
146 165 def _create_repo(self, wire, create):
147 166 baseui = self._create_config(wire["config"])
148 return instance(baseui, wire["path"], create)
167 repo = instance(baseui, safe_bytes(wire["path"]), create)
168 log.debug('repository created: got HG object: %s', repo)
169 return repo
149 170
150 171 def repo(self, wire, create=False):
151 172 """
152 173 Get a repository instance for the given path.
153 174 """
154 175 return self._create_repo(wire, create)
155 176
156 177
157 178 def patch_ui_message_output(baseui):
158 baseui.setconfig('ui', 'quiet', 'false')
179 baseui.setconfig(b'ui', b'quiet', b'false')
159 180 output = io.BytesIO()
160 181
161 182 def write(data, **unused_kwargs):
162 183 output.write(data)
163 184
164 185 baseui.status = write
165 186 baseui.write = write
166 187 baseui.warn = write
167 188 baseui.debug = write
168 189
169 190 return baseui, output
170 191
171 192
193 def get_obfuscated_url(url_obj):
194 url_obj.passwd = b'*****' if url_obj.passwd else url_obj.passwd
195 url_obj.query = obfuscate_qs(url_obj.query)
196 obfuscated_uri = str(url_obj)
197 return obfuscated_uri
198
199
200 def normalize_url_for_hg(url: str):
201 _proto = None
202
203 if '+' in url[:url.find('://')]:
204 _proto = url[0:url.find('+')]
205 url = url[url.find('+') + 1:]
206 return url, _proto
207
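# Illustrative sketch (not from this changeset): how normalize_url_for_hg
# splits a scheme-prefixed clone URL into (url, proto).
def normalize(url):
    _proto = None
    if '+' in url[:url.find('://')]:
        _proto = url[0:url.find('+')]
        url = url[url.find('+') + 1:]
    return url, _proto

assert normalize('svn+http://example.com/repo') == ('http://example.com/repo', 'svn')
assert normalize('http://example.com/repo') == ('http://example.com/repo', None)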
208
172 209 class HgRemote(RemoteBase):
173 210
174 211 def __init__(self, factory):
175 212 self._factory = factory
176 213 self._bulk_methods = {
177 214 "affected_files": self.ctx_files,
178 215 "author": self.ctx_user,
179 216 "branch": self.ctx_branch,
180 217 "children": self.ctx_children,
181 218 "date": self.ctx_date,
182 219 "message": self.ctx_description,
183 220 "parents": self.ctx_parents,
184 221 "status": self.ctx_status,
185 222 "obsolete": self.ctx_obsolete,
186 223 "phase": self.ctx_phase,
187 224 "hidden": self.ctx_hidden,
188 225 "_file_paths": self.ctx_list,
189 226 }
227 self._bulk_file_methods = {
228 "size": self.fctx_size,
229 "data": self.fctx_node_data,
230 "flags": self.fctx_flags,
231 "is_binary": self.is_binary,
232 "md5": self.md5_hash,
233 }
190 234
191 235 def _get_ctx(self, repo, ref):
192 236 return get_ctx(repo, ref)
193 237
194 238 @reraise_safe_exceptions
195 239 def discover_hg_version(self):
196 240 from mercurial import util
197 return util.version()
241 return safe_str(util.version())
198 242
199 243 @reraise_safe_exceptions
200 244 def is_empty(self, wire):
201 245 repo = self._factory.repo(wire)
202 246
203 247 try:
204 248 return len(repo) == 0
205 249 except Exception:
206 250 log.exception("failed to read object_store")
207 251 return False
208 252
209 253 @reraise_safe_exceptions
210 254 def bookmarks(self, wire):
211 255 cache_on, context_uid, repo_id = self._cache_on(wire)
212 256 region = self._region(wire)
257
213 258 @region.conditional_cache_on_arguments(condition=cache_on)
214 259 def _bookmarks(_context_uid, _repo_id):
215 260 repo = self._factory.repo(wire)
216 return dict(repo._bookmarks)
261 return {safe_str(name): ascii_str(hex(sha)) for name, sha in repo._bookmarks.items()}
217 262
218 263 return _bookmarks(context_uid, repo_id)
219 264
220 265 @reraise_safe_exceptions
221 266 def branches(self, wire, normal, closed):
222 267 cache_on, context_uid, repo_id = self._cache_on(wire)
223 268 region = self._region(wire)
269
224 270 @region.conditional_cache_on_arguments(condition=cache_on)
225 271 def _branches(_context_uid, _repo_id, _normal, _closed):
226 272 repo = self._factory.repo(wire)
227 273 iter_branches = repo.branchmap().iterbranches()
228 274 bt = {}
229 for branch_name, _heads, tip, is_closed in iter_branches:
275 for branch_name, _heads, tip_node, is_closed in iter_branches:
230 276 if normal and not is_closed:
231 bt[branch_name] = tip
277 bt[safe_str(branch_name)] = ascii_str(hex(tip_node))
232 278 if closed and is_closed:
233 bt[branch_name] = tip
279 bt[safe_str(branch_name)] = ascii_str(hex(tip_node))
234 280
235 281 return bt
236 282
237 283 return _branches(context_uid, repo_id, normal, closed)
238 284
239 285 @reraise_safe_exceptions
240 286 def bulk_request(self, wire, commit_id, pre_load):
241 287 cache_on, context_uid, repo_id = self._cache_on(wire)
242 288 region = self._region(wire)
289
243 290 @region.conditional_cache_on_arguments(condition=cache_on)
244 291 def _bulk_request(_repo_id, _commit_id, _pre_load):
245 292 result = {}
246 293 for attr in pre_load:
247 294 try:
248 295 method = self._bulk_methods[attr]
296 wire.update({'cache': False}) # disable cache for bulk calls so we don't double cache
249 297 result[attr] = method(wire, commit_id)
250 298 except KeyError as e:
251 299 raise exceptions.VcsException(e)(
252 300 'Unknown bulk attribute: "%s"' % attr)
253 301 return result
254 302
255 303 return _bulk_request(repo_id, commit_id, sorted(pre_load))
256 304
257 305 @reraise_safe_exceptions
258 306 def ctx_branch(self, wire, commit_id):
259 307 cache_on, context_uid, repo_id = self._cache_on(wire)
260 308 region = self._region(wire)
309
261 310 @region.conditional_cache_on_arguments(condition=cache_on)
262 311 def _ctx_branch(_repo_id, _commit_id):
263 312 repo = self._factory.repo(wire)
264 313 ctx = self._get_ctx(repo, commit_id)
265 314 return ctx.branch()
266 315 return _ctx_branch(repo_id, commit_id)
267 316
268 317 @reraise_safe_exceptions
269 318 def ctx_date(self, wire, commit_id):
270 319 cache_on, context_uid, repo_id = self._cache_on(wire)
271 320 region = self._region(wire)
321
272 322 @region.conditional_cache_on_arguments(condition=cache_on)
273 323 def _ctx_date(_repo_id, _commit_id):
274 324 repo = self._factory.repo(wire)
275 325 ctx = self._get_ctx(repo, commit_id)
276 326 return ctx.date()
277 327 return _ctx_date(repo_id, commit_id)
278 328
279 329 @reraise_safe_exceptions
280 330 def ctx_description(self, wire, revision):
281 331 repo = self._factory.repo(wire)
282 332 ctx = self._get_ctx(repo, revision)
283 333 return ctx.description()
284 334
285 335 @reraise_safe_exceptions
286 336 def ctx_files(self, wire, commit_id):
287 337 cache_on, context_uid, repo_id = self._cache_on(wire)
288 338 region = self._region(wire)
339
289 340 @region.conditional_cache_on_arguments(condition=cache_on)
290 341 def _ctx_files(_repo_id, _commit_id):
291 342 repo = self._factory.repo(wire)
292 343 ctx = self._get_ctx(repo, commit_id)
293 344 return ctx.files()
294 345
295 346 return _ctx_files(repo_id, commit_id)
296 347
297 348 @reraise_safe_exceptions
298 349 def ctx_list(self, path, revision):
299 350 repo = self._factory.repo(path)
300 351 ctx = self._get_ctx(repo, revision)
301 352 return list(ctx)
302 353
303 354 @reraise_safe_exceptions
304 355 def ctx_parents(self, wire, commit_id):
305 356 cache_on, context_uid, repo_id = self._cache_on(wire)
306 357 region = self._region(wire)
358
307 359 @region.conditional_cache_on_arguments(condition=cache_on)
308 360 def _ctx_parents(_repo_id, _commit_id):
309 361 repo = self._factory.repo(wire)
310 362 ctx = self._get_ctx(repo, commit_id)
311 363 return [parent.hex() for parent in ctx.parents()
312 364 if not (parent.hidden() or parent.obsolete())]
313 365
314 366 return _ctx_parents(repo_id, commit_id)
315 367
316 368 @reraise_safe_exceptions
317 369 def ctx_children(self, wire, commit_id):
318 370 cache_on, context_uid, repo_id = self._cache_on(wire)
319 371 region = self._region(wire)
372
320 373 @region.conditional_cache_on_arguments(condition=cache_on)
321 374 def _ctx_children(_repo_id, _commit_id):
322 375 repo = self._factory.repo(wire)
323 376 ctx = self._get_ctx(repo, commit_id)
324 377 return [child.hex() for child in ctx.children()
325 378 if not (child.hidden() or child.obsolete())]
326 379
327 380 return _ctx_children(repo_id, commit_id)
328 381
329 382 @reraise_safe_exceptions
330 383 def ctx_phase(self, wire, commit_id):
331 384 cache_on, context_uid, repo_id = self._cache_on(wire)
332 385 region = self._region(wire)
386
333 387 @region.conditional_cache_on_arguments(condition=cache_on)
334 388 def _ctx_phase(_context_uid, _repo_id, _commit_id):
335 389 repo = self._factory.repo(wire)
336 390 ctx = self._get_ctx(repo, commit_id)
337 391 # public=0, draft=1, secret=3
338 392 return ctx.phase()
339 393 return _ctx_phase(context_uid, repo_id, commit_id)
340 394
341 395 @reraise_safe_exceptions
342 396 def ctx_obsolete(self, wire, commit_id):
343 397 cache_on, context_uid, repo_id = self._cache_on(wire)
344 398 region = self._region(wire)
399
345 400 @region.conditional_cache_on_arguments(condition=cache_on)
346 401 def _ctx_obsolete(_context_uid, _repo_id, _commit_id):
347 402 repo = self._factory.repo(wire)
348 403 ctx = self._get_ctx(repo, commit_id)
349 404 return ctx.obsolete()
350 405 return _ctx_obsolete(context_uid, repo_id, commit_id)
351 406
352 407 @reraise_safe_exceptions
353 408 def ctx_hidden(self, wire, commit_id):
354 409 cache_on, context_uid, repo_id = self._cache_on(wire)
355 410 region = self._region(wire)
411
356 412 @region.conditional_cache_on_arguments(condition=cache_on)
357 413 def _ctx_hidden(_context_uid, _repo_id, _commit_id):
358 414 repo = self._factory.repo(wire)
359 415 ctx = self._get_ctx(repo, commit_id)
360 416 return ctx.hidden()
361 417 return _ctx_hidden(context_uid, repo_id, commit_id)
362 418
363 419 @reraise_safe_exceptions
364 420 def ctx_substate(self, wire, revision):
365 421 repo = self._factory.repo(wire)
366 422 ctx = self._get_ctx(repo, revision)
367 423 return ctx.substate
368 424
369 425 @reraise_safe_exceptions
370 426 def ctx_status(self, wire, revision):
371 427 repo = self._factory.repo(wire)
372 428 ctx = self._get_ctx(repo, revision)
373 429 status = repo[ctx.p1().node()].status(other=ctx.node())
374 430 # object of status (odd, custom named tuple in mercurial) is not
375 431 # correctly serializable, we make it a list, as the underling
376 432 # API expects this to be a list
377 433 return list(status)
378 434
379 435 @reraise_safe_exceptions
380 436 def ctx_user(self, wire, revision):
381 437 repo = self._factory.repo(wire)
382 438 ctx = self._get_ctx(repo, revision)
383 439 return ctx.user()
384 440
385 441 @reraise_safe_exceptions
386 442 def check_url(self, url, config):
387 _proto = None
388 if '+' in url[:url.find('://')]:
389 _proto = url[0:url.find('+')]
390 url = url[url.find('+') + 1:]
443 url, _proto = normalize_url_for_hg(url)
444 url_obj = url_parser(safe_bytes(url))
445
446 test_uri = safe_str(url_obj.authinfo()[0])
447 authinfo = url_obj.authinfo()[1]
448 obfuscated_uri = get_obfuscated_url(url_obj)
449 log.info("Checking URL for remote cloning/import: %s", obfuscated_uri)
450
391 451 handlers = []
392 url_obj = url_parser(url)
393 test_uri, authinfo = url_obj.authinfo()
394 url_obj.passwd = '*****' if url_obj.passwd else url_obj.passwd
395 url_obj.query = obfuscate_qs(url_obj.query)
396
397 cleaned_uri = str(url_obj)
398 log.info("Checking URL for remote cloning/import: %s", cleaned_uri)
399
400 452 if authinfo:
401 453 # create a password manager
402 passmgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
454 passmgr = urllib.request.HTTPPasswordMgrWithDefaultRealm()
403 455 passmgr.add_password(*authinfo)
404 456
405 457 handlers.extend((httpbasicauthhandler(passmgr),
406 458 httpdigestauthhandler(passmgr)))
407 459
408 o = urllib2.build_opener(*handlers)
460 o = urllib.request.build_opener(*handlers)
409 461 o.addheaders = [('Content-Type', 'application/mercurial-0.1'),
410 462 ('Accept', 'application/mercurial-0.1')]
411 463
412 464 q = {"cmd": 'between'}
413 q.update({'pairs': "%s-%s" % ('0' * 40, '0' * 40)})
414 qs = '?%s' % urllib.urlencode(q)
415 cu = "%s%s" % (test_uri, qs)
416 req = urllib2.Request(cu, None, {})
465 q.update({'pairs': "{}-{}".format('0' * 40, '0' * 40)})
466 qs = '?%s' % urllib.parse.urlencode(q)
467 cu = f"{test_uri}{qs}"
468 req = urllib.request.Request(cu, None, {})
417 469
418 470 try:
419 log.debug("Trying to open URL %s", cleaned_uri)
471 log.debug("Trying to open URL %s", obfuscated_uri)
420 472 resp = o.open(req)
421 473 if resp.code != 200:
422 474 raise exceptions.URLError()('Return Code is not 200')
423 475 except Exception as e:
424 log.warning("URL cannot be opened: %s", cleaned_uri, exc_info=True)
476 log.warning("URL cannot be opened: %s", obfuscated_uri, exc_info=True)
425 477 # means it cannot be cloned
426 raise exceptions.URLError(e)("[%s] org_exc: %s" % (cleaned_uri, e))
478 raise exceptions.URLError(e)(f"[{obfuscated_uri}] org_exc: {e}")
427 479
428 480 # now check if it's a proper hg repo, but don't do it for svn
429 481 try:
430 482 if _proto == 'svn':
431 483 pass
432 484 else:
433 485 # check for pure hg repos
434 486 log.debug(
435 "Verifying if URL is a Mercurial repository: %s",
436 cleaned_uri)
487 "Verifying if URL is a Mercurial repository: %s", obfuscated_uri)
437 488 ui = make_ui_from_config(config)
438 peer_checker = makepeer(ui, url)
439 peer_checker.lookup('tip')
489 peer_checker = makepeer(ui, safe_bytes(url))
490 peer_checker.lookup(b'tip')
440 491 except Exception as e:
441 492 log.warning("URL is not a valid Mercurial repository: %s",
442 cleaned_uri)
493 obfuscated_uri)
443 494 raise exceptions.URLError(e)(
444 495 "url [%s] does not look like an hg repo org_exc: %s"
445 % (cleaned_uri, e))
496 % (obfuscated_uri, e))
446 497
447 log.info("URL is a valid Mercurial repository: %s", cleaned_uri)
498 log.info("URL is a valid Mercurial repository: %s", obfuscated_uri)
448 499 return True
449 500
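# Illustrative sketch (not from this changeset): the hg 'between' handshake URL
# built in check_url, for a made-up test_uri.
import urllib.parse

test_uri = 'http://example.com/repo'
q = {'cmd': 'between', 'pairs': '{}-{}'.format('0' * 40, '0' * 40)}
cu = f"{test_uri}?{urllib.parse.urlencode(q)}"
print(cu)  # http://example.com/repo?cmd=between&pairs=0000...-0000...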
450 501 @reraise_safe_exceptions
451 502 def diff(self, wire, commit_id_1, commit_id_2, file_filter, opt_git, opt_ignorews, context):
452 503 repo = self._factory.repo(wire)
453 504
454 505 if file_filter:
455 match_filter = match(file_filter[0], '', [file_filter[1]])
506 # unpack the file-filter
507 repo_path, node_path = file_filter
508 match_filter = match(safe_bytes(repo_path), b'', [safe_bytes(node_path)])
456 509 else:
457 510 match_filter = file_filter
458 511 opts = diffopts(git=opt_git, ignorews=opt_ignorews, context=context, showfunc=1)
459 512
460 513 try:
461 return "".join(patch.diff(
462 repo, node1=commit_id_1, node2=commit_id_2, match=match_filter, opts=opts))
514 diff_iter = patch.diff(
515 repo, node1=commit_id_1, node2=commit_id_2, match=match_filter, opts=opts)
516 return BytesEnvelope(b"".join(diff_iter))
463 517 except RepoLookupError as e:
464 518 raise exceptions.LookupException(e)()
465 519
466 520 @reraise_safe_exceptions
467 521 def node_history(self, wire, revision, path, limit):
468 522 cache_on, context_uid, repo_id = self._cache_on(wire)
469 523 region = self._region(wire)
524
470 525 @region.conditional_cache_on_arguments(condition=cache_on)
471 526 def _node_history(_context_uid, _repo_id, _revision, _path, _limit):
472 527 repo = self._factory.repo(wire)
473 528
474 529 ctx = self._get_ctx(repo, revision)
475 fctx = ctx.filectx(path)
530 fctx = ctx.filectx(safe_bytes(path))
476 531
477 532 def history_iter():
478 533 limit_rev = fctx.rev()
479 534 for obj in reversed(list(fctx.filelog())):
480 535 obj = fctx.filectx(obj)
481 536 ctx = obj.changectx()
482 537 if ctx.hidden() or ctx.obsolete():
483 538 continue
484 539
485 540 if limit_rev >= obj.rev():
486 541 yield obj
487 542
488 543 history = []
489 544 for cnt, obj in enumerate(history_iter()):
490 545 if limit and cnt >= limit:
491 546 break
492 547 history.append(hex(obj.node()))
493 548
494 549 return [x for x in history]
495 550 return _node_history(context_uid, repo_id, revision, path, limit)
496 551
497 552 @reraise_safe_exceptions
498 553 def node_history_untill(self, wire, revision, path, limit):
499 554 cache_on, context_uid, repo_id = self._cache_on(wire)
500 555 region = self._region(wire)
556
501 557 @region.conditional_cache_on_arguments(condition=cache_on)
502 558 def _node_history_until(_context_uid, _repo_id, _revision, _path, _limit):
503 559 repo = self._factory.repo(wire)
504 560 ctx = self._get_ctx(repo, revision)
505 fctx = ctx.filectx(path)
561 fctx = ctx.filectx(safe_bytes(path))
506 562
507 563 file_log = list(fctx.filelog())
508 564 if limit:
509 565 # Limit to the last n items
510 566 file_log = file_log[-limit:]
511 567
512 568 return [hex(fctx.filectx(cs).node()) for cs in reversed(file_log)]
513 569 return _node_history_until(context_uid, repo_id, revision, path, limit)
514 570
515 571 @reraise_safe_exceptions
572 def bulk_file_request(self, wire, commit_id, path, pre_load):
573 cache_on, context_uid, repo_id = self._cache_on(wire)
574 region = self._region(wire)
575
576 @region.conditional_cache_on_arguments(condition=cache_on)
577 def _bulk_file_request(_repo_id, _commit_id, _path, _pre_load):
578 result = {}
579 for attr in pre_load:
580 try:
581 method = self._bulk_file_methods[attr]
582 wire.update({'cache': False}) # disable cache for bulk calls so we don't double cache
583 result[attr] = method(wire, _commit_id, _path)
584 except KeyError as e:
585 raise exceptions.VcsException(e)(f'Unknown bulk attribute: "{attr}"')
586 return BinaryEnvelope(result)
587
588 return _bulk_file_request(repo_id, commit_id, path, sorted(pre_load))
589
590 @reraise_safe_exceptions
516 591 def fctx_annotate(self, wire, revision, path):
517 592 repo = self._factory.repo(wire)
518 593 ctx = self._get_ctx(repo, revision)
519 fctx = ctx.filectx(path)
594 fctx = ctx.filectx(safe_bytes(path))
520 595
521 596 result = []
522 597 for i, annotate_obj in enumerate(fctx.annotate(), 1):
523 598 ln_no = i
524 599 sha = hex(annotate_obj.fctx.node())
525 600 content = annotate_obj.text
526 601 result.append((ln_no, sha, content))
527 602 return result
528 603
529 604 @reraise_safe_exceptions
530 605 def fctx_node_data(self, wire, revision, path):
531 606 repo = self._factory.repo(wire)
532 607 ctx = self._get_ctx(repo, revision)
533 fctx = ctx.filectx(path)
534 return fctx.data()
608 fctx = ctx.filectx(safe_bytes(path))
609 return BytesEnvelope(fctx.data())
535 610
536 611 @reraise_safe_exceptions
537 612 def fctx_flags(self, wire, commit_id, path):
538 613 cache_on, context_uid, repo_id = self._cache_on(wire)
539 614 region = self._region(wire)
615
540 616 @region.conditional_cache_on_arguments(condition=cache_on)
541 617 def _fctx_flags(_repo_id, _commit_id, _path):
542 618 repo = self._factory.repo(wire)
543 619 ctx = self._get_ctx(repo, commit_id)
544 fctx = ctx.filectx(path)
620 fctx = ctx.filectx(safe_bytes(path))
545 621 return fctx.flags()
546 622
547 623 return _fctx_flags(repo_id, commit_id, path)
548 624
549 625 @reraise_safe_exceptions
550 626 def fctx_size(self, wire, commit_id, path):
551 627 cache_on, context_uid, repo_id = self._cache_on(wire)
552 628 region = self._region(wire)
629
553 630 @region.conditional_cache_on_arguments(condition=cache_on)
554 631 def _fctx_size(_repo_id, _revision, _path):
555 632 repo = self._factory.repo(wire)
556 633 ctx = self._get_ctx(repo, commit_id)
557 fctx = ctx.filectx(path)
634 fctx = ctx.filectx(safe_bytes(path))
558 635 return fctx.size()
559 636 return _fctx_size(repo_id, commit_id, path)
560 637
561 638 @reraise_safe_exceptions
562 639 def get_all_commit_ids(self, wire, name):
563 640 cache_on, context_uid, repo_id = self._cache_on(wire)
564 641 region = self._region(wire)
642
565 643 @region.conditional_cache_on_arguments(condition=cache_on)
566 644 def _get_all_commit_ids(_context_uid, _repo_id, _name):
567 645 repo = self._factory.repo(wire)
568 repo = repo.filtered(name)
569 revs = map(lambda x: hex(x[7]), repo.changelog.index)
646 revs = [ascii_str(repo[x].hex()) for x in repo.filtered(b'visible').changelog.revs()]
570 647 return revs
571 648 return _get_all_commit_ids(context_uid, repo_id, name)
572 649
573 650 @reraise_safe_exceptions
574 651 def get_config_value(self, wire, section, name, untrusted=False):
575 652 repo = self._factory.repo(wire)
576 return repo.ui.config(section, name, untrusted=untrusted)
653 return repo.ui.config(ascii_bytes(section), ascii_bytes(name), untrusted=untrusted)
577 654
578 655 @reraise_safe_exceptions
579 656 def is_large_file(self, wire, commit_id, path):
580 657 cache_on, context_uid, repo_id = self._cache_on(wire)
581 658 region = self._region(wire)
659
582 660 @region.conditional_cache_on_arguments(condition=cache_on)
583 661 def _is_large_file(_context_uid, _repo_id, _commit_id, _path):
584 return largefiles.lfutil.isstandin(path)
662 return largefiles.lfutil.isstandin(safe_bytes(path))
585 663
586 664 return _is_large_file(context_uid, repo_id, commit_id, path)
587 665
588 666 @reraise_safe_exceptions
589 667 def is_binary(self, wire, revision, path):
590 668 cache_on, context_uid, repo_id = self._cache_on(wire)
669 region = self._region(wire)
591 670
592 region = self._region(wire)
593 671 @region.conditional_cache_on_arguments(condition=cache_on)
594 672 def _is_binary(_repo_id, _sha, _path):
595 673 repo = self._factory.repo(wire)
596 674 ctx = self._get_ctx(repo, revision)
597 fctx = ctx.filectx(path)
675 fctx = ctx.filectx(safe_bytes(path))
598 676 return fctx.isbinary()
599 677
600 678 return _is_binary(repo_id, revision, path)
601 679
602 680 @reraise_safe_exceptions
681 def md5_hash(self, wire, revision, path):
682 cache_on, context_uid, repo_id = self._cache_on(wire)
683 region = self._region(wire)
684
685 @region.conditional_cache_on_arguments(condition=cache_on)
686 def _md5_hash(_repo_id, _sha, _path):
687 repo = self._factory.repo(wire)
688 ctx = self._get_ctx(repo, revision)
689 fctx = ctx.filectx(safe_bytes(path))
690 return hashlib.md5(fctx.data()).hexdigest()
691
692 return _md5_hash(repo_id, revision, path)
693
694 @reraise_safe_exceptions
603 695 def in_largefiles_store(self, wire, sha):
604 696 repo = self._factory.repo(wire)
605 697 return largefiles.lfutil.instore(repo, sha)
606 698
607 699 @reraise_safe_exceptions
608 700 def in_user_cache(self, wire, sha):
609 701 repo = self._factory.repo(wire)
610 702 return largefiles.lfutil.inusercache(repo.ui, sha)
611 703
612 704 @reraise_safe_exceptions
613 705 def store_path(self, wire, sha):
614 706 repo = self._factory.repo(wire)
615 707 return largefiles.lfutil.storepath(repo, sha)
616 708
617 709 @reraise_safe_exceptions
618 710 def link(self, wire, sha, path):
619 711 repo = self._factory.repo(wire)
620 712 largefiles.lfutil.link(
621 713 largefiles.lfutil.usercachepath(repo.ui, sha), path)
622 714
623 715 @reraise_safe_exceptions
624 716 def localrepository(self, wire, create=False):
625 717 self._factory.repo(wire, create=create)
626 718
627 719 @reraise_safe_exceptions
628 720 def lookup(self, wire, revision, both):
629 721 cache_on, context_uid, repo_id = self._cache_on(wire)
722 region = self._region(wire)
630 723
631 region = self._region(wire)
632 724 @region.conditional_cache_on_arguments(condition=cache_on)
633 725 def _lookup(_context_uid, _repo_id, _revision, _both):
634
635 726 repo = self._factory.repo(wire)
636 727 rev = _revision
637 728 if isinstance(rev, int):
638 729 # NOTE(marcink):
639 730 # since Mercurial doesn't support negative indexes properly
640 731 # we need to shift accordingly by one to get proper index, e.g
641 732 # repo[-1] => repo[-2]
642 733 # repo[0] => repo[-1]
643 734 if rev <= 0:
644 735 rev = rev + -1
645 736 try:
646 737 ctx = self._get_ctx(repo, rev)
647 except (TypeError, RepoLookupError) as e:
738 except (TypeError, RepoLookupError, binascii.Error) as e:
648 739 e._org_exc_tb = traceback.format_exc()
649 740 raise exceptions.LookupException(e)(rev)
650 741 except LookupError as e:
651 742 e._org_exc_tb = traceback.format_exc()
652 743 raise exceptions.LookupException(e)(e.name)
653 744
654 745 if not both:
655 746 return ctx.hex()
656 747
657 748 ctx = repo[ctx.hex()]
658 749 return ctx.hex(), ctx.rev()
659 750
660 751 return _lookup(context_uid, repo_id, revision, both)
661 752
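# Illustrative sketch (not from this changeset): the negative-index shift used
# in _lookup above. Mercurial doesn't support negative indexes properly, so
# rev <= 0 is shifted by one before the context lookup.
def shift_rev(rev):
    if isinstance(rev, int) and rev <= 0:
        rev = rev + -1
    return rev

assert shift_rev(0) == -1    # repo[0]  => repo[-1]
assert shift_rev(-1) == -2   # repo[-1] => repo[-2]
assert shift_rev(5) == 5     # positive indexes pass through unchanged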
662 753 @reraise_safe_exceptions
663 754 def sync_push(self, wire, url):
664 755 if not self.check_url(url, wire['config']):
665 756 return
666 757
667 758 repo = self._factory.repo(wire)
668 759
669 760 # Disable any prompts for this repo
670 repo.ui.setconfig('ui', 'interactive', 'off', '-y')
761 repo.ui.setconfig(b'ui', b'interactive', b'off', b'-y')
671 762
672 bookmarks = dict(repo._bookmarks).keys()
673 remote = peer(repo, {}, url)
763 bookmarks = list(dict(repo._bookmarks).keys())
764 remote = peer(repo, {}, safe_bytes(url))
674 765 # Disable any prompts for this remote
675 remote.ui.setconfig('ui', 'interactive', 'off', '-y')
766 remote.ui.setconfig(b'ui', b'interactive', b'off', b'-y')
676 767
677 768 return exchange.push(
678 769 repo, remote, newbranch=True, bookmarks=bookmarks).cgresult
679 770
680 771 @reraise_safe_exceptions
681 772 def revision(self, wire, rev):
682 773 repo = self._factory.repo(wire)
683 774 ctx = self._get_ctx(repo, rev)
684 775 return ctx.rev()
685 776
686 777 @reraise_safe_exceptions
687 778 def rev_range(self, wire, commit_filter):
688 779 cache_on, context_uid, repo_id = self._cache_on(wire)
780 region = self._region(wire)
689 781
690 region = self._region(wire)
691 782 @region.conditional_cache_on_arguments(condition=cache_on)
692 783 def _rev_range(_context_uid, _repo_id, _filter):
693 784 repo = self._factory.repo(wire)
694 revisions = [rev for rev in revrange(repo, commit_filter)]
785 revisions = [
786 ascii_str(repo[rev].hex())
787 for rev in revrange(repo, list(map(ascii_bytes, commit_filter)))
788 ]
695 789 return revisions
696 790
697 791 return _rev_range(context_uid, repo_id, sorted(commit_filter))
698 792
699 793 @reraise_safe_exceptions
700 794 def rev_range_hash(self, wire, node):
701 795 repo = self._factory.repo(wire)
702 796
703 797 def get_revs(repo, rev_opt):
704 798 if rev_opt:
705 799 revs = revrange(repo, rev_opt)
706 800 if len(revs) == 0:
707 801 return (nullrev, nullrev)
708 802 return max(revs), min(revs)
709 803 else:
710 804 return len(repo) - 1, 0
711 805
712 806 stop, start = get_revs(repo, [node + ':'])
713 revs = [hex(repo[r].node()) for r in xrange(start, stop + 1)]
807 revs = [ascii_str(repo[r].hex()) for r in range(start, stop + 1)]
714 808 return revs
715 809
716 810 @reraise_safe_exceptions
717 811 def revs_from_revspec(self, wire, rev_spec, *args, **kwargs):
718 other_path = kwargs.pop('other_path', None)
812 org_path = safe_bytes(wire["path"])
813 other_path = safe_bytes(kwargs.pop('other_path', ''))
719 814
720 815 # case when we want to compare two independent repositories
721 816 if other_path and other_path != wire["path"]:
722 817 baseui = self._factory._create_config(wire["config"])
723 repo = unionrepo.makeunionrepository(baseui, other_path, wire["path"])
818 repo = unionrepo.makeunionrepository(baseui, other_path, org_path)
724 819 else:
725 820 repo = self._factory.repo(wire)
726 821 return list(repo.revs(rev_spec, *args))
727 822
728 823 @reraise_safe_exceptions
729 824 def verify(self, wire,):
730 825 repo = self._factory.repo(wire)
731 826 baseui = self._factory._create_config(wire['config'])
732 827
733 828 baseui, output = patch_ui_message_output(baseui)
734 829
735 830 repo.ui = baseui
736 831 verify.verify(repo)
737 832 return output.getvalue()
738 833
739 834 @reraise_safe_exceptions
740 835 def hg_update_cache(self, wire,):
741 836 repo = self._factory.repo(wire)
742 837 baseui = self._factory._create_config(wire['config'])
743 838 baseui, output = patch_ui_message_output(baseui)
744 839
745 840 repo.ui = baseui
746 841 with repo.wlock(), repo.lock():
747 842 repo.updatecaches(full=True)
748 843
749 844 return output.getvalue()
750 845
751 846 @reraise_safe_exceptions
752 847 def hg_rebuild_fn_cache(self, wire,):
753 848 repo = self._factory.repo(wire)
754 849 baseui = self._factory._create_config(wire['config'])
755 850 baseui, output = patch_ui_message_output(baseui)
756 851
757 852 repo.ui = baseui
758 853
759 854 repair.rebuildfncache(baseui, repo)
760 855
761 856 return output.getvalue()
762 857
763 858 @reraise_safe_exceptions
764 859 def tags(self, wire):
765 860 cache_on, context_uid, repo_id = self._cache_on(wire)
766 861 region = self._region(wire)
862
767 863 @region.conditional_cache_on_arguments(condition=cache_on)
768 864 def _tags(_context_uid, _repo_id):
769 865 repo = self._factory.repo(wire)
770 return repo.tags()
866 return {safe_str(name): ascii_str(hex(sha)) for name, sha in repo.tags().items()}
771 867
772 868 return _tags(context_uid, repo_id)
773 869
774 870 @reraise_safe_exceptions
775 def update(self, wire, node=None, clean=False):
871 def update(self, wire, node='', clean=False):
776 872 repo = self._factory.repo(wire)
777 873 baseui = self._factory._create_config(wire['config'])
874 node = safe_bytes(node)
875
778 876 commands.update(baseui, repo, node=node, clean=clean)
779 877
780 878 @reraise_safe_exceptions
781 879 def identify(self, wire):
782 880 repo = self._factory.repo(wire)
783 881 baseui = self._factory._create_config(wire['config'])
784 882 output = io.BytesIO()
785 883 baseui.write = output.write
786 884 # This is required to get a full node id
787 885 baseui.debugflag = True
788 886 commands.identify(baseui, repo, id=True)
789 887
790 888 return output.getvalue()
791 889
792 890 @reraise_safe_exceptions
793 891 def heads(self, wire, branch=None):
794 892 repo = self._factory.repo(wire)
795 893 baseui = self._factory._create_config(wire['config'])
796 894 output = io.BytesIO()
797 895
798 896 def write(data, **unused_kwargs):
799 897 output.write(data)
800 898
801 899 baseui.write = write
802 900 if branch:
803 args = [branch]
901 args = [safe_bytes(branch)]
804 902 else:
805 903 args = []
806 commands.heads(baseui, repo, template='{node} ', *args)
904 commands.heads(baseui, repo, template=b'{node} ', *args)
807 905
808 906 return output.getvalue()
809 907
810 908 @reraise_safe_exceptions
811 909 def ancestor(self, wire, revision1, revision2):
812 910 repo = self._factory.repo(wire)
813 911 changelog = repo.changelog
814 912 lookup = repo.lookup
815 a = changelog.ancestor(lookup(revision1), lookup(revision2))
913 a = changelog.ancestor(lookup(safe_bytes(revision1)), lookup(safe_bytes(revision2)))
816 914 return hex(a)
817 915
818 916 @reraise_safe_exceptions
819 917 def clone(self, wire, source, dest, update_after_clone=False, hooks=True):
820 918 baseui = self._factory._create_config(wire["config"], hooks=hooks)
821 clone(baseui, source, dest, noupdate=not update_after_clone)
919 clone(baseui, safe_bytes(source), safe_bytes(dest), noupdate=not update_after_clone)
822 920
823 921 @reraise_safe_exceptions
824 922 def commitctx(self, wire, message, parents, commit_time, commit_timezone, user, files, extra, removed, updated):
825 923
826 924 repo = self._factory.repo(wire)
827 925 baseui = self._factory._create_config(wire['config'])
828 publishing = baseui.configbool('phases', 'publish')
829 if publishing:
830 new_commit = 'public'
831 else:
832 new_commit = 'draft'
926 publishing = baseui.configbool(b'phases', b'publish')
833 927
834 def _filectxfn(_repo, ctx, path):
928 def _filectxfn(_repo, ctx, path: bytes):
835 929 """
836 930 Marks given path as added/changed/removed in a given _repo. This is
837 931 for internal mercurial commit function.
838 932 """
839 933
840 934 # check if this path is removed
841 if path in removed:
935 if safe_str(path) in removed:
842 936 # returning None is a way to mark node for removal
843 937 return None
844 938
845 939 # check if this path is added
846 940 for node in updated:
847 if node['path'] == path:
941 if safe_bytes(node['path']) == path:
848 942 return memfilectx(
849 943 _repo,
850 944 changectx=ctx,
851 path=node['path'],
852 data=node['content'],
945 path=safe_bytes(node['path']),
946 data=safe_bytes(node['content']),
853 947 islink=False,
854 948 isexec=bool(node['mode'] & stat.S_IXUSR),
855 949 copysource=False)
950 abort_exc = exceptions.AbortException()
951 raise abort_exc(f"Given path hasn't been marked as added, changed or removed ({path})")
856 952
857 raise exceptions.AbortException()(
858 "Given path haven't been marked as added, "
859 "changed or removed (%s)" % path)
860
861 with repo.ui.configoverride({('phases', 'new-commit'): new_commit}):
862
953 if publishing:
954 new_commit_phase = b'public'
955 else:
956 new_commit_phase = b'draft'
957 with repo.ui.configoverride({(b'phases', b'new-commit'): new_commit_phase}):
958 kwargs = {safe_bytes(k): safe_bytes(v) for k, v in extra.items()}
863 959 commit_ctx = memctx(
864 960 repo=repo,
865 961 parents=parents,
866 text=message,
867 files=files,
962 text=safe_bytes(message),
963 files=[safe_bytes(x) for x in files],
868 964 filectxfn=_filectxfn,
869 user=user,
965 user=safe_bytes(user),
870 966 date=(commit_time, commit_timezone),
871 extra=extra)
967 extra=kwargs)
872 968
873 969 n = repo.commitctx(commit_ctx)
874 970 new_id = hex(n)
875 971
876 972 return new_id
877 973
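
The _filectxfn callback above implements memctx's per-path contract: return None to delete a path, return a memfilectx to add or change it, and raise to abort. A dependency-free sketch of just that dispatch, with plain dicts standing in for memfilectx (names are illustrative):

    removed = {'docs/old.rst'}
    updated = [{'path': 'docs/new.rst', 'content': 'hello', 'mode': 0o100644}]

    def resolve(path: bytes):
        # None marks the node for removal, exactly as in _filectxfn above.
        if path.decode('utf-8') in removed:
            return None
        for node in updated:
            if node['path'].encode('utf-8') == path:
                return node  # the real callback builds a memfilectx here
        raise LookupError(f"path not marked as added/changed/removed: {path!r}")

    assert resolve(b'docs/old.rst') is None
    assert resolve(b'docs/new.rst')['content'] == 'hello'
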
878 974 @reraise_safe_exceptions
879 975 def pull(self, wire, url, commit_ids=None):
880 976 repo = self._factory.repo(wire)
881 977 # Disable any prompts for this repo
882 repo.ui.setconfig('ui', 'interactive', 'off', '-y')
978 repo.ui.setconfig(b'ui', b'interactive', b'off', b'-y')
883 979
884 remote = peer(repo, {}, url)
980 remote = peer(repo, {}, safe_bytes(url))
885 981 # Disable any prompts for this remote
886 remote.ui.setconfig('ui', 'interactive', 'off', '-y')
982 remote.ui.setconfig(b'ui', b'interactive', b'off', b'-y')
887 983
888 984 if commit_ids:
889 985 commit_ids = [bin(commit_id) for commit_id in commit_ids]
890 986
891 987 return exchange.pull(
892 988 repo, remote, heads=commit_ids, force=None).cgresult
893 989
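
exchange.pull() takes heads as 20-byte binary node ids, which is why the hex commit ids from the wire are run through bin() first. Emulated here with binascii.unhexlify as a stand-in assumption (mercurial.node.bin performs the same conversion):

    from binascii import unhexlify as bin_  # stand-in for mercurial.node.bin

    hex_id = '1234567890abcdef1234567890abcdef12345678'  # 40 hex chars
    node = bin_(hex_id)
    assert len(node) == 20  # the binary form exchange.pull() expects
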
894 990 @reraise_safe_exceptions
895 def pull_cmd(self, wire, source, bookmark=None, branch=None, revision=None, hooks=True):
991 def pull_cmd(self, wire, source, bookmark='', branch='', revision='', hooks=True):
896 992 repo = self._factory.repo(wire)
897 993 baseui = self._factory._create_config(wire['config'], hooks=hooks)
898 994
995 source = safe_bytes(source)
996
899 997 # Mercurial internally has a lot of logic that checks ONLY whether an
900 998 # option is defined; we only pass options that are actually set
901 999 opts = {}
1000
902 1001 if bookmark:
903 opts['bookmark'] = bookmark
1002 opts['bookmark'] = [safe_bytes(x) for x in bookmark] \
1003 if isinstance(bookmark, list) else safe_bytes(bookmark)
1004
904 1005 if branch:
905 opts['branch'] = branch
1006 opts['branch'] = [safe_bytes(x) for x in branch] \
1007 if isinstance(branch, list) else safe_bytes(branch)
1008
906 1009 if revision:
907 opts['rev'] = revision
1010 opts['rev'] = [safe_bytes(x) for x in revision] \
1011 if isinstance(revision, list) else safe_bytes(revision)
908 1012
909 1013 commands.pull(baseui, repo, source, **opts)
910 1014
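
The bookmark/branch/rev options above may arrive as a single string or as a list of strings, and each shape has to be encoded element-wise. A hypothetical helper capturing that inline pattern (to_bytes_opt is not a vcsserver function):

    def to_bytes_opt(value, encoding='utf-8'):
        # Encode a scalar, or every element of a list, preserving the shape
        # Mercurial expects for repeatable command-line options.
        if isinstance(value, list):
            return [v.encode(encoding) if isinstance(v, str) else v for v in value]
        return value.encode(encoding) if isinstance(value, str) else value

    assert to_bytes_opt('default') == b'default'
    assert to_bytes_opt(['stable', 'default']) == [b'stable', b'default']
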
911 1015 @reraise_safe_exceptions
912 def push(self, wire, revisions, dest_path, hooks=True, push_branches=False):
1016 def push(self, wire, revisions, dest_path, hooks: bool = True, push_branches: bool = False):
913 1017 repo = self._factory.repo(wire)
914 1018 baseui = self._factory._create_config(wire['config'], hooks=hooks)
915 commands.push(baseui, repo, dest=dest_path, rev=revisions,
1019
1020 revisions = [safe_bytes(x) for x in revisions] \
1021 if isinstance(revisions, list) else safe_bytes(revisions)
1022
1023 commands.push(baseui, repo, safe_bytes(dest_path),
1024 rev=revisions,
916 1025 new_branch=push_branches)
917 1026
918 1027 @reraise_safe_exceptions
919 1028 def strip(self, wire, revision, update, backup):
920 1029 repo = self._factory.repo(wire)
921 1030 ctx = self._get_ctx(repo, revision)
922 hgext_strip(
1031 hgext_strip.strip(
923 1032 repo.baseui, repo, ctx.node(), update=update, backup=backup)
924 1033
925 1034 @reraise_safe_exceptions
926 1035 def get_unresolved_files(self, wire):
927 1036 repo = self._factory.repo(wire)
928 1037
929 1038 log.debug('Calculating unresolved files for repo: %s', repo)
930 1039 output = io.BytesIO()
931 1040
932 1041 def write(data, **unused_kwargs):
933 1042 output.write(data)
934 1043
935 1044 baseui = self._factory._create_config(wire['config'])
936 1045 baseui.write = write
937 1046
938 1047 commands.resolve(baseui, repo, list=True)
939 1048 unresolved = output.getvalue().splitlines(False)
940 1049 return unresolved
941 1050
942 1051 @reraise_safe_exceptions
943 1052 def merge(self, wire, revision):
944 1053 repo = self._factory.repo(wire)
945 1054 baseui = self._factory._create_config(wire['config'])
946 repo.ui.setconfig('ui', 'merge', 'internal:dump')
1055 repo.ui.setconfig(b'ui', b'merge', b'internal:dump')
947 1056
948 1057 # If subrepositories are used, Mercurial prompts the user in
949 1058 # case of merge conflicts or different subrepository sources. By
950 1059 # setting the interactive flag to `False`, Mercurial doesn't prompt the
951 1060 # user but instead uses a default value.
952 repo.ui.setconfig('ui', 'interactive', False)
953 commands.merge(baseui, repo, rev=revision)
1061 repo.ui.setconfig(b'ui', b'interactive', False)
1062 commands.merge(baseui, repo, rev=safe_bytes(revision))
954 1063
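
Both merge() and merge_state() pin the merge tool to internal:dump, which never resolves conflicts itself but leaves .local/.other/.base copies beside each conflicted file, and switch prompts off so subrepo questions fall back to defaults. The setup as a hedged sketch (repo is assumed to be an already-opened Mercurial localrepository):

    def make_non_interactive(repo):
        # internal:dump writes the three merge inputs next to the conflicted
        # file instead of attempting a resolution.
        repo.ui.setconfig(b'ui', b'merge', b'internal:dump')
        # Never block on a prompt; Mercurial substitutes the default answer.
        repo.ui.setconfig(b'ui', b'interactive', False)
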
955 1064 @reraise_safe_exceptions
956 1065 def merge_state(self, wire):
957 1066 repo = self._factory.repo(wire)
958 repo.ui.setconfig('ui', 'merge', 'internal:dump')
1067 repo.ui.setconfig(b'ui', b'merge', b'internal:dump')
959 1068
960 1069 # If subrepositories are used, Mercurial prompts the user in
961 1070 # case of merge conflicts or different subrepository sources. By
962 1071 # setting the interactive flag to `False`, Mercurial doesn't prompt the
963 1072 # user but instead uses a default value.
964 repo.ui.setconfig('ui', 'interactive', False)
1073 repo.ui.setconfig(b'ui', b'interactive', False)
965 1074 ms = hg_merge.mergestate(repo)
966 1075 return [x for x in ms.unresolved()]
967 1076
968 1077 @reraise_safe_exceptions
969 1078 def commit(self, wire, message, username, close_branch=False):
970 1079 repo = self._factory.repo(wire)
971 1080 baseui = self._factory._create_config(wire['config'])
972 repo.ui.setconfig('ui', 'username', username)
973 commands.commit(baseui, repo, message=message, close_branch=close_branch)
1081 repo.ui.setconfig(b'ui', b'username', safe_bytes(username))
1082 commands.commit(baseui, repo, message=safe_bytes(message), close_branch=close_branch)
974 1083
975 1084 @reraise_safe_exceptions
976 def rebase(self, wire, source=None, dest=None, abort=False):
1085 def rebase(self, wire, source='', dest='', abort=False):
977 1086 repo = self._factory.repo(wire)
978 1087 baseui = self._factory._create_config(wire['config'])
979 repo.ui.setconfig('ui', 'merge', 'internal:dump')
1088 repo.ui.setconfig(b'ui', b'merge', b'internal:dump')
980 1089 # If subrepositories are used, Mercurial prompts the user in
981 1090 # case of merge conflicts or different subrepository sources. By
982 1091 # setting the interactive flag to `False`, Mercurial doesn't prompt the
983 1092 # user but instead uses a default value.
984 repo.ui.setconfig('ui', 'interactive', False)
985 rebase.rebase(baseui, repo, base=source, dest=dest, abort=abort, keep=not abort)
1093 repo.ui.setconfig(b'ui', b'interactive', False)
1094
1095 rebase.rebase(baseui, repo, base=safe_bytes(source or ''), dest=safe_bytes(dest or ''),
1096 abort=abort, keep=not abort)
986 1097
987 1098 @reraise_safe_exceptions
988 1099 def tag(self, wire, name, revision, message, local, user, tag_time, tag_timezone):
989 1100 repo = self._factory.repo(wire)
990 1101 ctx = self._get_ctx(repo, revision)
991 1102 node = ctx.node()
992 1103
993 1104 date = (tag_time, tag_timezone)
994 1105 try:
995 hg_tag.tag(repo, name, node, message, local, user, date)
1106 hg_tag.tag(repo, safe_bytes(name), node, safe_bytes(message), local, safe_bytes(user), date)
996 1107 except Abort as e:
997 1108 log.exception("Tag operation aborted")
998 1109 # Exception can contain unicode which we convert
999 1110 raise exceptions.AbortException(e)(repr(e))
1000 1111
1001 1112 @reraise_safe_exceptions
1002 def bookmark(self, wire, bookmark, revision=None):
1113 def bookmark(self, wire, bookmark, revision=''):
1003 1114 repo = self._factory.repo(wire)
1004 1115 baseui = self._factory._create_config(wire['config'])
1005 commands.bookmark(baseui, repo, bookmark, rev=revision, force=True)
1116 revision = revision or ''
1117 commands.bookmark(baseui, repo, safe_bytes(bookmark), rev=safe_bytes(revision), force=True)
1006 1118
1007 1119 @reraise_safe_exceptions
1008 1120 def install_hooks(self, wire, force=False):
1009 1121 # we don't need any special hooks for Mercurial
1010 1122 pass
1011 1123
1012 1124 @reraise_safe_exceptions
1013 1125 def get_hooks_info(self, wire):
1014 1126 return {
1015 1127 'pre_version': vcsserver.__version__,
1016 1128 'post_version': vcsserver.__version__,
1017 1129 }
1018 1130
1019 1131 @reraise_safe_exceptions
1020 1132 def set_head_ref(self, wire, head_name):
1021 1133 pass
1022 1134
1023 1135 @reraise_safe_exceptions
1024 def archive_repo(self, wire, archive_dest_path, kind, mtime, archive_at_path,
1025 archive_dir_name, commit_id):
1136 def archive_repo(self, wire, archive_name_key, kind, mtime, archive_at_path,
1137 archive_dir_name, commit_id, cache_config):
1026 1138
1027 1139 def file_walker(_commit_id, path):
1028 1140 repo = self._factory.repo(wire)
1029 1141 ctx = repo[_commit_id]
1030 1142 is_root = path in ['', '/']
1031 1143 if is_root:
1032 1144 matcher = alwaysmatcher(badfn=None)
1033 1145 else:
1034 1146 matcher = patternmatcher('', [(b'glob', path+'/**', b'')], badfn=None)
1035 1147 file_iter = ctx.manifest().walk(matcher)
1036 1148
1037 1149 for fn in file_iter:
1038 1150 file_path = fn
1039 1151 flags = ctx.flags(fn)
1040 1152 mode = 0o755 if b'x' in flags else 0o644
1041 1153 is_link = b'l' in flags
1042 1154
1043 1155 yield ArchiveNode(file_path, mode, is_link, ctx[fn].data)
1044 1156
1045 return archive_repo(file_walker, archive_dest_path, kind, mtime, archive_at_path,
1046 archive_dir_name, commit_id)
1157 return store_archive_in_cache(
1158 file_walker, archive_name_key, kind, mtime, archive_at_path, archive_dir_name, commit_id, cache_config=cache_config)
1047 1159
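
archive_repo() streams the manifest walk as ArchiveNode records for store_archive_in_cache to serialize. A stand-in sketch of the flag-to-mode mapping (the namedtuple fields are assumed from the call site above, not vcsserver's actual definition):

    from collections import namedtuple

    ArchiveNode = namedtuple('ArchiveNode', ['path', 'mode', 'is_link', 'data'])

    def to_archive_node(path, flags, data_fn):
        # 'x' in the manifest flags means executable; 'l' means symlink.
        mode = 0o755 if b'x' in flags else 0o644
        return ArchiveNode(path, mode, b'l' in flags, data_fn)

    node = to_archive_node(b'bin/run.sh', b'x', lambda: b'#!/bin/sh\n')
    assert node.mode == 0o755 and not node.is_link
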
1 NO CONTENT: file renamed from vcsserver/svn.py to vcsserver/remote/svn.py