# RhodeCode VCSServer provides access to different vcs backends via network.
# Copyright (C) 2014-2020 RhodeCode GmbH
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import os
import sys
import tempfile
import traceback
import logging
import urllib.parse

from vcsserver.lib.rc_cache.archive_cache import get_archival_cache_store
from vcsserver.lib.rc_cache import region_meta

from vcsserver import exceptions
from vcsserver.exceptions import NoContentException
from vcsserver.hgcompat import archival
from vcsserver.str_utils import safe_bytes

log = logging.getLogger(__name__)


class RepoFactory(object):
    """
    Utility to create instances of repository

    It provides internal caching of the `repo` object based on
    the :term:`call context`.
    """
    repo_type = None

    def __init__(self):
        self._cache_region = region_meta.dogpile_cache_regions['repo_object']

    def _create_config(self, path, config):
        config = {}
        return config

    def _create_repo(self, wire, create):
        raise NotImplementedError()

    def repo(self, wire, create=False):
        raise NotImplementedError()
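
# Subclasses set `repo_type` and implement `_create_repo`/`repo`; a minimal
# illustrative sketch (names hypothetical, not a real backend):
#
#   class EchoRepoFactory(RepoFactory):
#       repo_type = 'echo'
#
#       def _create_repo(self, wire, create):
#           return EchoRepo(wire['path'])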


def obfuscate_qs(query_string):
    """
    Return a log-safe version of the query string, masking the values of
    sensitive keys such as `auth_token` and `api_key`.
    """
    if query_string is None:
        return None

    parsed = []
    for k, v in urllib.parse.parse_qsl(query_string, keep_blank_values=True):
        if k in ['auth_token', 'api_key']:
            v = "*****"
        parsed.append((k, v))

    return '&'.join(f'{k}={v}' if v else k for k, v in parsed)
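
# Illustrative behaviour of obfuscate_qs (non-sensitive keys pass through;
# blank values keep the key but drop the '='):
#   obfuscate_qs('page=1&auth_token=secret&empty=')
#   -> 'page=1&auth_token=*****&empty'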


def raise_from_original(new_type, org_exc: Exception):
    """
    Raise a new exception type with original args and traceback.
    """
    exc_type, exc_value, exc_traceback = sys.exc_info()
    new_exc = new_type(*exc_value.args)

    # store the original exception and traceback on the new exc
    new_exc._org_exc = org_exc
    new_exc._org_exc_tb = traceback.format_tb(exc_traceback)

    try:
        raise new_exc.with_traceback(exc_traceback)
    finally:
        del exc_traceback
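
# Typical call site (illustrative; vcsserver exception factories return the
# exception class that is passed here as `new_type`):
#   except Exception as e:
#       raise_from_original(exceptions.LookupException(e), e)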


class ArchiveNode(object):
    def __init__(self, path, mode, is_link, raw_bytes):
        self.path = path
        self.mode = mode
        self.is_link = is_link
        # `raw_bytes` is a zero-argument callable returning the file content;
        # it is invoked lazily when the node is written into the archive.
        self.raw_bytes = raw_bytes


def store_archive_in_cache(node_walker, archive_key, kind, mtime, archive_at_path, archive_dir_name,
                           commit_id, write_metadata=True, extra_metadata=None, cache_config=None):
    """
    Store a generated archive in a dedicated backend store. Diskcache is used here.

    :param node_walker: a generator returning nodes to add to the archive
    :param archive_key: key under which the generated archive is stored
    :param kind: archive kind (tgz, tbz2 or zip)
    :param mtime: time of creation
    :param archive_at_path: default '/'; the path at which the archive was started.
        If this is not '/' it means it's a partial archive
    :param archive_dir_name: name of the top-level directory inside the archive
    :param commit_id: commit sha of the revision the archive was created at
    :param write_metadata: whether to add a `.archival.txt` metadata file to the archive
    :param extra_metadata: extra key/value metadata stored alongside commit_id and mtime
    :param cache_config: configuration for the archive cache store

    node_walker should be a file walker, for example:

        def node_walker(commit_id, path):
            for file_info in files:
                yield ArchiveNode(fn, mode, is_link, ctx[fn].data)
    """
    extra_metadata = extra_metadata or {}

    d_cache = get_archival_cache_store(config=cache_config)

    if archive_key in d_cache:
        with d_cache as d_cache_reader:
            reader, tag = d_cache_reader.get(archive_key, read=True, tag=True, retry=True)
            return reader.name

    archive_tmp_path = safe_bytes(tempfile.mkstemp()[1])
    log.debug('Creating new temp archive in %s', archive_tmp_path)

    if kind == "tgz":
        archiver = archival.tarit(archive_tmp_path, mtime, b"gz")
    elif kind == "tbz2":
        archiver = archival.tarit(archive_tmp_path, mtime, b"bz2")
    elif kind == 'zip':
        archiver = archival.zipit(archive_tmp_path, mtime)
    else:
        raise exceptions.ArchiveException()(
            f'Remote does not support: "{kind}" archive type.')

    for f in node_walker(commit_id, archive_at_path):
        f_path = os.path.join(safe_bytes(archive_dir_name), safe_bytes(f.path).lstrip(b'/'))
        try:
            archiver.addfile(f_path, f.mode, f.is_link, f.raw_bytes())
        except NoContentException:
            # NOTE(marcink): this is a special case for SVN so we can create "empty"
            # directories which aren't supported by archiver
            archiver.addfile(os.path.join(f_path, b'.dir'), f.mode, f.is_link, b'')

    if write_metadata:
        metadata = {
            'commit_id': commit_id,
            'mtime': mtime,
        }
        metadata.update(extra_metadata)

        meta = [safe_bytes(f"{f_name}:{value}") for f_name, value in metadata.items()]
        f_path = os.path.join(safe_bytes(archive_dir_name), b'.archival.txt')
        archiver.addfile(f_path, 0o644, False, b'\n'.join(meta))

    archiver.done()

    # ensure set & get are atomic
    with d_cache.transact():

        with open(archive_tmp_path, 'rb') as archive_file:
            add_result = d_cache.set(archive_key, archive_file, read=True, tag='db-name', retry=True)
        if not add_result:
            log.error('Failed to store cache for key=%s', archive_key)

        os.remove(archive_tmp_path)

        reader, tag = d_cache.get(archive_key, read=True, tag=True, retry=True)
        if not reader:
            raise AssertionError(f'empty reader on key={archive_key} added={add_result}')

        return reader.name
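
# Illustrative call (names and values hypothetical):
#   archive_path = store_archive_in_cache(
#       walker, archive_key='my-repo-deadbeef.tgz', kind='tgz', mtime=1700000000,
#       archive_at_path='/', archive_dir_name='my-repo', commit_id='deadbeef',
#       cache_config=vcsserver_config)
#   # `archive_path` points at the file kept by the diskcache backend.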


class BinaryEnvelope(object):
    def __init__(self, val):
        self.val = val


class BytesEnvelope(bytes):
    def __new__(cls, content):
        if isinstance(content, bytes):
            return super().__new__(cls, content)
        else:
            raise TypeError('Content must be bytes.')


class BinaryBytesEnvelope(BytesEnvelope):
    pass
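
# Illustrative behaviour of the envelope types (BytesEnvelope subclasses bytes,
# so instances act like plain bytes while staying type-distinguishable):
#   BytesEnvelope(b'payload')   # ok, behaves like b'payload'
#   BytesEnvelope('payload')    # raises TypeError('Content must be bytes.')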