diff --git a/rhodecode/lib/base.py b/rhodecode/lib/base.py
--- a/rhodecode/lib/base.py
+++ b/rhodecode/lib/base.py
@@ -25,13 +25,14 @@ controllers
 
 import logging
 import socket
+import base64
 
 import markupsafe
 import ipaddress
+import paste.httpheaders
 
 from paste.auth.basic import AuthBasicAuthenticator
 from paste.httpexceptions import HTTPUnauthorized, HTTPForbidden, get_exception
-from paste.httpheaders import WWW_AUTHENTICATE, AUTHORIZATION
 
 import rhodecode
 from rhodecode.authentication.base import VCS_TYPE
@@ -40,8 +41,10 @@ from rhodecode.lib import helpers as h
 from rhodecode.lib.auth import AuthUser, CookieStoreWrapper
 from rhodecode.lib.exceptions import UserCreationError
 from rhodecode.lib.utils import (password_changed, get_enabled_hook_classes)
-from rhodecode.lib.utils2 import (
-    str2bool, safe_unicode, AttributeDict, safe_int, sha1, aslist, safe_str)
+from rhodecode.lib.utils2 import AttributeDict
+from rhodecode.lib.str_utils import ascii_bytes, safe_int, safe_str
+from rhodecode.lib.type_utils import aslist, str2bool
+from rhodecode.lib.hash_utils import sha1
 from rhodecode.model.db import Repository, User, ChangesetComment, UserBookmark
 from rhodecode.model.notification import NotificationModel
 from rhodecode.model.settings import VcsSettingsModel, SettingsModel
@@ -108,18 +111,20 @@ def get_ip_addr(environ):
     proxy_key = 'HTTP_X_REAL_IP'
     proxy_key2 = 'HTTP_X_FORWARDED_FOR'
     def_key = 'REMOTE_ADDR'
-    _filters = lambda x: _filter_port(_filter_proxy(x))
+
+    def ip_filters(ip_):
+        return _filter_port(_filter_proxy(ip_))
 
     ip = environ.get(proxy_key)
     if ip:
-        return _filters(ip)
+        return ip_filters(ip)
 
     ip = environ.get(proxy_key2)
     if ip:
-        return _filters(ip)
+        return ip_filters(ip)
 
     ip = environ.get(def_key, '0.0.0.0')
-    return _filters(ip)
+    return ip_filters(ip)
 
 
 def get_server_ip_addr(environ, log_errors=True):
@@ -138,13 +143,6 @@ def get_server_port(environ):
     return environ.get('SERVER_PORT')
 
 
-def get_access_path(environ):
-    path = environ.get('PATH_INFO')
-    org_req = environ.get('pylons.original_request')
-    if org_req:
-        path = org_req.environ.get('PATH_INFO')
-    return path
-
-
 def get_user_agent(environ):
     return environ.get('HTTP_USER_AGENT')
 
@@ -210,6 +208,7 @@ class BasicAuth(AuthBasicAuthenticator):
 
     def __init__(self, realm, authfunc, registry, auth_http_code=None,
                  initial_call_detection=False, acl_repo_name=None, rc_realm=''):
+        super(BasicAuth, self).__init__(realm=realm, authfunc=authfunc)
         self.realm = realm
         self.rc_realm = rc_realm
         self.initial_call = initial_call_detection
@@ -218,36 +217,40 @@ class BasicAuth(AuthBasicAuthenticator):
         self.acl_repo_name = acl_repo_name
         self._rc_auth_http_code = auth_http_code
 
-    def _get_response_from_code(self, http_code):
+    def _get_response_from_code(self, http_code, fallback):
         try:
             return get_exception(safe_int(http_code))
         except Exception:
-            log.exception('Failed to fetch response for code %s', http_code)
-            return HTTPForbidden
+            log.exception('Failed to fetch response class for code %s, using fallback: %s', http_code, fallback)
+            return fallback
 
     def get_rc_realm(self):
        return safe_str(self.rc_realm)
 
     def build_authentication(self):
-        head = WWW_AUTHENTICATE.tuples('Basic realm="%s"' % self.realm)
+        header = [('WWW-Authenticate', f'Basic realm="{self.realm}"')]
+
+        # NOTE: the initial_call detection seems to be not working/not needed with latest Mercurial;
+        # investigate if we still need it.
         if self._rc_auth_http_code and not self.initial_call:
             # return alternative HTTP code if alternative http return code
             # is specified in RhodeCode config, but ONLY if it's not the
             # FIRST call
-            custom_response_klass = self._get_response_from_code(
-                self._rc_auth_http_code)
-            return custom_response_klass(headers=head)
-        return HTTPUnauthorized(headers=head)
+            custom_response_klass = self._get_response_from_code(self._rc_auth_http_code, fallback=HTTPUnauthorized)
+            log.debug('Using custom response class: %s', custom_response_klass)
+            return custom_response_klass(headers=header)
+        return HTTPUnauthorized(headers=header)
 
     def authenticate(self, environ):
-        authorization = AUTHORIZATION(environ)
+        authorization = paste.httpheaders.AUTHORIZATION(environ)
         if not authorization:
             return self.build_authentication()
-        (authmeth, auth) = authorization.split(' ', 1)
-        if 'basic' != authmeth.lower():
+        (auth_meth, auth_creds_b64) = authorization.split(' ', 1)
+        if 'basic' != auth_meth.lower():
             return self.build_authentication()
-        auth = auth.strip().decode('base64')
-        _parts = auth.split(':', 1)
+
+        credentials = safe_str(base64.b64decode(auth_creds_b64.strip()))
+        _parts = credentials.split(':', 1)
         if len(_parts) == 2:
             username, password = _parts
             auth_data = self.authfunc(
@@ -267,17 +270,11 @@ class BasicAuth(AuthBasicAuthenticator):
 
 def calculate_version_hash(config):
     return sha1(
-        config.get('beaker.session.secret', '') +
-        rhodecode.__version__)[:8]
+        ascii_bytes(config.get('beaker.session.secret', '')) + ascii_bytes(rhodecode.__version__)
+    )[:8]
 
 
 def get_current_lang(request):
-    # NOTE(marcink): remove after pyramid move
-    try:
-        return translation.get_lang()[0]
-    except:
-        pass
-
     return getattr(request, '_LOCALE_', request.locale_name)
 
 
@@ -360,9 +357,9 @@ def attach_context_attributes(context, r
     context.rhodecode_instanceid = config.get('instance_id')
 
     context.visual.cut_off_limit_diff = safe_int(
-        config.get('cut_off_limit_diff'))
+        config.get('cut_off_limit_diff'), default=0)
     context.visual.cut_off_limit_file = safe_int(
-        config.get('cut_off_limit_file'))
+        config.get('cut_off_limit_file'), default=0)
 
     context.license = AttributeDict({})
     context.license.hide_license_info = str2bool(
@@ -395,7 +392,7 @@ def attach_context_attributes(context, r
 
     # session settings per user
-    for k, v in request.session.items():
+    for k, v in list(request.session.items()):
         pref = 'rc_user_session_attr.'
         if k and k.startswith(pref):
             k = k[len(pref):]
@@ -437,7 +434,7 @@ def attach_context_attributes(context, r
     csrf_token = auth.get_csrf_token(session=request.session)
     context.csrf_token = csrf_token
-    context.backends = rhodecode.BACKENDS.keys()
+    context.backends = list(rhodecode.BACKENDS.keys())
 
     unread_count = 0
     user_bookmark_list = []
@@ -580,7 +577,7 @@ def bootstrap_config(request, registry_n
     config.include('pyramid_mako')
     config.include('rhodecode.lib.rc_beaker')
     config.include('rhodecode.lib.rc_cache')
-
+    config.include('rhodecode.lib.rc_cache.archive_cache')
     add_events_routes(config)
 
     return config
@@ -607,4 +604,3 @@ def bootstrap_request(**kwargs):
     request.session = ThinSession()
 
     return request
-
diff --git a/rhodecode/lib/caching_query.py b/rhodecode/lib/caching_query.py
--- a/rhodecode/lib/caching_query.py
+++ b/rhodecode/lib/caching_query.py
@@ -73,7 +73,6 @@ class ORMCache:
         event.listen(session_factory, "do_orm_execute", self._do_orm_execute)
 
     def _do_orm_execute(self, orm_context):
-
         for opt in orm_context.user_defined_options:
             if isinstance(opt, RelationshipCache):
                 opt = opt._process_orm_context(orm_context)
@@ -83,6 +82,10 @@ class ORMCache:
 
             if isinstance(opt, FromCache):
                 dogpile_region = self.cache_regions[opt.region]
+
+                if dogpile_region.expiration_time <= 0:
+                    # don't cache when the region has a 0 (or negative) expiration time
+                    continue
+
                 if opt.cache_key:
                     our_cache_key = f'SQL_CACHE_{opt.cache_key}'
                 else:
diff --git a/rhodecode/lib/channelstream.py b/rhodecode/lib/channelstream.py
--- a/rhodecode/lib/channelstream.py
+++ b/rhodecode/lib/channelstream.py
@@ -25,7 +25,6 @@ import requests
 import datetime
 
 from dogpile.util.readwrite_lock import ReadWriteMutex
-from pyramid.threadlocal import get_current_registry
 
 import rhodecode.lib.helpers as h
 from rhodecode.lib.auth import HasRepoPermissionAny
@@ -236,6 +235,7 @@ def get_connection_validators(registry):
 
 def get_channelstream_config(registry=None):
     if not registry:
+        from pyramid.threadlocal import get_current_registry
         registry = get_current_registry()
 
     rhodecode_plugins = getattr(registry, 'rhodecode_plugins', {})
diff --git a/rhodecode/lib/codeblocks.py b/rhodecode/lib/codeblocks.py
--- a/rhodecode/lib/codeblocks.py
+++ b/rhodecode/lib/codeblocks.py
@@ -20,7 +20,7 @@
 
 import logging
 import difflib
-from itertools import groupby
+import itertools
 
 from pygments import lex
 from pygments.formatters.html import _get_ttype_class as pygment_token_class
@@ -29,9 +29,10 @@ from pygments.lexers import get_lexer_by
 
 from rhodecode.lib.helpers import (
     get_lexer_for_filenode, html_escape, get_custom_lexer)
-from rhodecode.lib.utils2 import AttributeDict, StrictAttributeDict, safe_unicode
+from rhodecode.lib.str_utils import safe_str
+from rhodecode.lib.utils2 import AttributeDict, StrictAttributeDict
 from rhodecode.lib.vcs.nodes import FileNode
-from rhodecode.lib.vcs.exceptions import VCSError, NodeDoesNotExistError
+from rhodecode.lib.vcs.exceptions import NodeDoesNotExistError
 from rhodecode.lib.diff_match_patch import diff_match_patch
 from rhodecode.lib.diffs import LimitedDiffContainer, DEL_FILENODE, BIN_FILENODE
 
@@ -46,9 +47,9 @@ log = logging.getLogger(__name__)
 
 def filenode_as_lines_tokens(filenode, lexer=None):
     org_lexer = lexer
     lexer = lexer or get_lexer_for_filenode(filenode)
-    log.debug('Generating file node pygment tokens for %s, %s, org_lexer:%s',
+    log.debug('Generating file node pygment tokens for %s, file=`%s`, org_lexer:%s',
               lexer, filenode, org_lexer)
-    content = filenode.content
+    content = filenode.str_content
     tokens = tokenize_string(content, lexer)
     lines = split_token_stream(tokens, content)
     rv = list(lines)
@@ -65,10 +66,15 @@ def tokenize_string(content, lexer):
     lexer.stripnl = False
     lexer.ensurenl = False
 
+    # pygments needs to operate on str
+    str_content = safe_str(content)
+
     if isinstance(lexer, TextLexer):
-        lexed = [(Token.Text, content)]
+        # we convert content here to str because pygments does that while tokenizing,
+        # if we DON'T get a lexer for an unknown file type
+        lexed = [(Token.Text, str_content)]
     else:
-        lexed = lex(content, lexer)
+        lexed = lex(str_content, lexer)
 
     for token_type, token_text in lexed:
         yield pygment_token_class(token_type), token_text
@@ -84,7 +90,10 @@ def split_token_stream(tokens, content):
     """
     token_buffer = []
+
     for token_class, token_text in tokens:
+
+        # token_text should be str
         parts = token_text.split('\n')
         for part in parts[:-1]:
             token_buffer.append((token_class, part))
@@ -97,7 +106,7 @@
         yield token_buffer
     elif content:
         # this is a special case, we have the content, but tokenization didn't produce
-        # any results. THis can happen if know file extensions like .css have some bogus
+        # any results. This can happen if known file extensions like .css have some bogus
         # unicode content without any newline characters
         yield [(pygment_token_class(Token.Text), content)]
@@ -144,7 +153,7 @@ def filenode_as_annotated_lines_tokens(f
         for line_no, tokens in enumerate(filenode_as_lines_tokens(filenode), 1))
 
-    grouped_annotations_lines = groupby(annotations_lines, lambda x: x[0])
+    grouped_annotations_lines = itertools.groupby(annotations_lines, lambda x: x[0])
 
     for annotation, group in grouped_annotations_lines:
         yield (
@@ -158,14 +167,14 @@ def render_tokenstream(tokenstream):
     for token_class, token_ops_texts in rollup_tokenstream(tokenstream):
 
         if token_class:
-            result.append('<span class="%s">' % token_class)
+            result.append(f'<span class="{token_class}">')
         else:
             result.append('<span>')
 
         for op_tag, token_text in token_ops_texts:
 
             if op_tag:
-                result.append('<%s>' % op_tag)
+                result.append(f'<{op_tag}>')
 
             # NOTE(marcink): in some cases of mixed encodings, we might run into
             # troubles in the html_escape, in this case we say unicode force on token_text
@@ -173,7 +182,7 @@
             try:
                 escaped_text = html_escape(token_text)
             except TypeError:
-                escaped_text = html_escape(safe_unicode(token_text))
+                escaped_text = html_escape(safe_str(token_text))
 
             # TODO: dan: investigate showing hidden characters like space/nl/tab
             # escaped_text = escaped_text.replace(' ', '&nbsp;')
@@ -183,7 +192,7 @@
             result.append(escaped_text)
 
             if op_tag:
-                result.append('</%s>' % op_tag)
+                result.append(f'</{op_tag}>')
 
     result.append('</span>')
 
@@ -235,12 +244,13 @@ def rollup_tokenstream(tokenstream):
         tokenstream = ((t[0], '', t[1]) for t in tokenstream)
 
     result = []
-    for token_class, op_list in groupby(tokenstream, lambda t: t[0]):
+    for token_class, op_list in itertools.groupby(tokenstream, lambda t: t[0]):
         ops = []
-        for token_op, token_text_list in groupby(op_list, lambda o: o[1]):
+        for token_op, token_text_list in itertools.groupby(op_list, lambda o: o[1]):
             text_buffer = []
             for t_class, t_op, t_text in token_text_list:
                 text_buffer.append(t_text)
+
             ops.append((token_op, ''.join(text_buffer)))
         result.append((token_class, ops))
     return result
@@ -262,24 +272,31 @@ def tokens_diff(old_tokens, new_tokens,
     old_tokens_result = []
     new_tokens_result = []
 
-    similarity = difflib.SequenceMatcher(None,
+    def int_convert(val):
+        if isinstance(val, int):
+            return str(val)
+        return val
+
+    similarity = difflib.SequenceMatcher(
+        None,
         ''.join(token_text for token_class, token_text in old_tokens),
         ''.join(token_text for token_class, token_text in new_tokens)
     ).ratio()
 
-    if similarity < 0.6: # return, the blocks are too different
+    if similarity < 0.6:  # return, the blocks are too different
         for token_class, token_text in old_tokens:
             old_tokens_result.append((token_class, '', token_text))
         for token_class, token_text in new_tokens:
             new_tokens_result.append((token_class, '', token_text))
         return old_tokens_result, new_tokens_result, similarity
 
-    token_sequence_matcher = difflib.SequenceMatcher(None,
+    token_sequence_matcher = difflib.SequenceMatcher(
+        None,
         [x[1] for x in old_tokens],
         [x[1] for x in new_tokens])
 
     for tag, o1, o2, n1, n2 in token_sequence_matcher.get_opcodes():
-        # check the differences by token block types first to give a more
+        # check the differences by token block types first to give a
         # nicer "block" level replacement vs character diffs
 
         if tag == 'equal':
@@ -289,10 +306,10 @@ def tokens_diff(old_tokens, new_tokens,
                 new_tokens_result.append((token_class, '', token_text))
         elif tag == 'delete':
             for token_class, token_text in old_tokens[o1:o2]:
-                old_tokens_result.append((token_class, 'del', token_text))
+                old_tokens_result.append((token_class, 'del', int_convert(token_text)))
         elif tag == 'insert':
             for token_class, token_text in new_tokens[n1:n2]:
-                new_tokens_result.append((token_class, 'ins', token_text))
+                new_tokens_result.append((token_class, 'ins', int_convert(token_text)))
         elif tag == 'replace':
             # if same type token blocks must be replaced, do a diff on the
             # characters in the token blocks to show individual changes
@@ -300,17 +317,17 @@ def tokens_diff(old_tokens, new_tokens,
 
             old_char_tokens = []
             new_char_tokens = []
             for token_class, token_text in old_tokens[o1:o2]:
-                for char in token_text:
+                for char in map(lambda i: i, token_text):
                     old_char_tokens.append((token_class, char))
 
             for token_class, token_text in new_tokens[n1:n2]:
-                for char in token_text:
+                for char in map(lambda i: i, token_text):
                     new_char_tokens.append((token_class, char))
 
             old_string = ''.join([token_text for
-                                   token_class, token_text in old_char_tokens])
+                                  token_class, token_text in old_char_tokens])
             new_string = ''.join([token_text for
-                                   token_class, token_text in new_char_tokens])
+                                  token_class, token_text in new_char_tokens])
 
             char_sequence = difflib.SequenceMatcher(
                 None, old_string, new_string)
@@ -334,11 +351,11 @@ def tokens_diff(old_tokens, new_tokens,
                         b += l
                     elif op == -1:
                         for i, c in enumerate(rep):
-                            obuffer.append((old_char_tokens[a+i][0], 'del', c))
+                            obuffer.append((old_char_tokens[a+i][0], 'del', int_convert(c)))
                         a += l
                     elif op == 1:
                         for i, c in enumerate(rep):
-                            nbuffer.append((new_char_tokens[b+i][0], 'ins', c))
+                            nbuffer.append((new_char_tokens[b+i][0], 'ins', int_convert(c)))
                         b += l
             else:
                 for ctag, co1, co2, cn1, cn2 in copcodes:
@@ -349,15 +366,15 @@ def tokens_diff(old_tokens, new_tokens,
                         nbuffer.append((token_class, '', token_text))
                     elif ctag == 'delete':
                         for token_class, token_text in old_char_tokens[co1:co2]:
-                            obuffer.append((token_class, 'del', token_text))
+                            obuffer.append((token_class, 'del', int_convert(token_text)))
                     elif ctag == 'insert':
                         for token_class, token_text in new_char_tokens[cn1:cn2]:
-                            nbuffer.append((token_class, 'ins', token_text))
+                            nbuffer.append((token_class, 'ins', int_convert(token_text)))
                     elif ctag == 'replace':
                         for token_class, token_text in old_char_tokens[co1:co2]:
-                            obuffer.append((token_class, 'del', token_text))
+                            obuffer.append((token_class, 'del', int_convert(token_text)))
                         for token_class, token_text in new_char_tokens[cn1:cn2]:
-                            nbuffer.append((token_class, 'ins', token_text))
+                            nbuffer.append((token_class, 'ins', int_convert(token_text)))
 
             old_tokens_result.extend(obuffer)
             new_tokens_result.extend(nbuffer)
 
@@ -366,13 +383,14 @@ def tokens_diff(old_tokens, new_tokens,
 
 def diffset_node_getter(commit):
-    def get_node(fname):
+    def get_diff_node(file_name):
+
         try:
-            return commit.get_node(fname)
+            return commit.get_node(file_name, pre_load=['size', 'flags', 'data'])
         except NodeDoesNotExistError:
             return None
 
-    return get_node
+    return get_diff_node
 
 
 class DiffSet(object):
@@ -553,13 +571,13 @@ class DiffSet(object):
         # this allows commenting on those
         if not file_chunks:
             actions = []
-            for op_id, op_text in filediff.patch['stats']['ops'].items():
+            for op_id, op_text in list(filediff.patch['stats']['ops'].items()):
                 if op_id == DEL_FILENODE:
                     actions.append('file was removed')
                 elif op_id == BIN_FILENODE:
                     actions.append('binary diff hidden')
                 else:
-                    actions.append(safe_unicode(op_text))
+                    actions.append(safe_str(op_text))
             action_line = 'NO CONTENT: ' + \
                           ', '.join(actions) or 'UNDEFINED_ACTION'
@@ -588,10 +606,11 @@ class DiffSet(object):
         before, after = [], []
 
         for line in hunk['lines']:
+
             if line['action'] in ['unmod', 'unmod-no-hl']:
                 no_hl = line['action'] == 'unmod-no-hl'
-                result.lines.extend(
-                    self.parse_lines(before, after, source_file, target_file, no_hl=no_hl))
+                parsed_lines = self.parse_lines(before, after, source_file, target_file, no_hl=no_hl)
+                result.lines.extend(parsed_lines)
                 after.append(line)
                 before.append(line)
             elif line['action'] == 'add':
                 after.append(line)
             elif line['action'] == 'del':
                 before.append(line)
             elif line['action'] == 'old-no-nl':
                 before.append(line)
+                #line['line'] = safe_str(line['line'])
             elif line['action'] == 'new-no-nl':
+                #line['line'] = safe_str(line['line'])
                 after.append(line)
 
         all_actions = [x['action'] for x in after] + [x['action'] for x in before]
         no_hl = {x for x in all_actions} == {'unmod-no-hl'}
-        result.lines.extend(
-            self.parse_lines(before, after, source_file, target_file, no_hl=no_hl))
-        # NOTE(marcink): we must keep list() call here so we can cache the result...
+        parsed_no_hl_lines = self.parse_lines(before, after, source_file, target_file, no_hl=no_hl)
+        result.lines.extend(parsed_no_hl_lines)
+
+        # NOTE(marcink): we must keep list() call here, so we can cache the result...
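+        # (list() on the next line materializes the lazy output of as_unified();
+        # a cached generator would be exhausted after its first consumption)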
         result.unified = list(self.as_unified(result.lines))
         result.sideside = result.lines
 
@@ -631,14 +653,14 @@ class DiffSet(object):
             before_newline_line = before_lines.pop(-1)
             before_newline.content = '\n {}'.format(
                 render_tokenstream(
-                    [(x[0], '', x[1])
+                    [(x[0], '', safe_str(x[1]))
                      for x in [('nonl', before_newline_line['line'])]]))
 
         if after_lines and after_lines[-1]['action'] == 'new-no-nl':
             after_newline_line = after_lines.pop(-1)
             after_newline.content = '\n {}'.format(
                 render_tokenstream(
-                    [(x[0], '', x[1])
+                    [(x[0], '', safe_str(x[1]))
                      for x in [('nonl', after_newline_line['line'])]]))
 
         while before_lines or after_lines:
@@ -655,7 +677,7 @@ class DiffSet(object):
 
             if before:
                 if before['action'] == 'old-no-nl':
-                    before_tokens = [('nonl', before['line'])]
+                    before_tokens = [('nonl', safe_str(before['line']))]
                 else:
                     before_tokens = self.get_line_tokens(
                         line_text=before['line'], line_number=before['old_lineno'],
@@ -669,7 +691,7 @@ class DiffSet(object):
 
             if after:
                 if after['action'] == 'new-no-nl':
-                    after_tokens = [('nonl', after['line'])]
+                    after_tokens = [('nonl', safe_str(after['line']))]
                 else:
                     after_tokens = self.get_line_tokens(
                         line_text=after['line'], line_number=after['new_lineno'],
@@ -715,14 +737,14 @@ class DiffSet(object):
             filename = input_file
         elif isinstance(input_file, FileNode):
             filenode = input_file
-            filename = input_file.unicode_path
+            filename = input_file.str_path
 
         hl_mode = self.HL_NONE if no_hl else self.highlight_mode
         if hl_mode == self.HL_REAL and filenode:
             lexer = self._get_lexer_for_filename(filename)
-            file_size_allowed = input_file.size < self.max_file_size_limit
+            file_size_allowed = filenode.size < self.max_file_size_limit
             if line_number and file_size_allowed:
-                return self.get_tokenized_filenode_line(input_file, line_number, lexer, source)
+                return self.get_tokenized_filenode_line(filenode, line_number, lexer, source)
 
         if hl_mode in (self.HL_REAL, self.HL_FAST) and filename:
             lexer = self._get_lexer_for_filename(filename)
@@ -731,17 +753,17 @@ class DiffSet(object):
         return list(tokenize_string(line_text, plain_text_lexer))
 
     def get_tokenized_filenode_line(self, filenode, line_number, lexer=None, source=''):
+        name_hash = hash(filenode)
 
-        def tokenize(_filenode):
-            self.highlighted_filenodes[source][filenode] = filenode_as_lines_tokens(filenode, lexer)
+        hl_node_code = self.highlighted_filenodes[source]
 
-        if filenode not in self.highlighted_filenodes[source]:
-            tokenize(filenode)
+        if name_hash not in hl_node_code:
+            hl_node_code[name_hash] = filenode_as_lines_tokens(filenode, lexer)
 
         try:
-            return self.highlighted_filenodes[source][filenode][line_number - 1]
+            return hl_node_code[name_hash][line_number - 1]
         except Exception:
-            log.exception('diff rendering error')
+            log.exception('diff rendering error on L:%s and file=%s', line_number - 1, filenode.name)
             return [('', 'L{}: rhodecode diff rendering error'.format(line_number))]
 
     def action_to_op(self, action):
diff --git a/rhodecode/lib/db_manage.py b/rhodecode/lib/db_manage.py
--- a/rhodecode/lib/db_manage.py
+++ b/rhodecode/lib/db_manage.py
@@ -53,20 +53,26 @@ def notify(msg):
     Notification for migrations messages
     """
     ml = len(msg) + (4 * 2)
-    print(('\n%s\n*** %s ***\n%s' % ('*' * ml, msg, '*' * ml)).upper())
+    print((('\n%s\n*** %s ***\n%s' % ('*' * ml, msg, '*' * ml)).upper()))
 
 
 class DbManage(object):
 
     def __init__(self, log_sql, dbconf, root, tests=False,
-                 SESSION=None, cli_args=None):
+                 SESSION=None, cli_args=None, enc_key=b''):
+
         self.dbname = dbconf.split('/')[-1]
         self.tests = tests
         self.root = root
         self.dburi = dbconf
         self.log_sql = log_sql
         self.cli_args = cli_args or {}
+        self.sa = None
+        self.engine = None
+        self.enc_key = enc_key
+        # sets .sa .engine
         self.init_db(SESSION=SESSION)
+
         self.ask_ok = self.get_ask_ok_func(self.cli_args.get('force_ask'))
 
     def db_exists(self):
@@ -91,13 +97,16 @@ class DbManage(object):
         return ask_ok
 
     def init_db(self, SESSION=None):
+
         if SESSION:
             self.sa = SESSION
+            self.engine = SESSION.bind
         else:
             # init new sessions
             engine = create_engine(self.dburi, echo=self.log_sql)
-            init_model(engine)
+            init_model(engine, encryption_key=self.enc_key)
             self.sa = Session()
+            self.engine = engine
 
     def create_tables(self, override=False):
         """
@@ -106,6 +115,8 @@ class DbManage(object):
         log.info("Existing database with the same name is going to be destroyed.")
         log.info("Setup command will run DROP ALL command on that database.")
+        engine = self.engine
+
         if self.tests:
             destroy = True
         else:
@@ -114,10 +125,10 @@ class DbManage(object):
             log.info('db tables bootstrap: Nothing done.')
             sys.exit(0)
         if destroy:
-            Base.metadata.drop_all()
+            Base.metadata.drop_all(bind=engine)
         checkfirst = not override
-        Base.metadata.create_all(checkfirst=checkfirst)
+        Base.metadata.create_all(bind=engine, checkfirst=checkfirst)
         log.info('Created tables for %s', self.dbname)
 
     def set_db_version(self):
@@ -145,8 +156,7 @@ class DbManage(object):
         """
         from rhodecode.lib.dbmigrate.migrate.versioning import api
-        from rhodecode.lib.dbmigrate.migrate.exceptions import \
-            DatabaseNotControlledError
+        from rhodecode.lib.dbmigrate.migrate.exceptions import DatabaseNotControlledError
 
         if 'sqlite' in self.dburi:
             print(
@@ -172,40 +182,39 @@ class DbManage(object):
 
         try:
             curr_version = api.db_version(db_uri, repository_path)
-            msg = ('Found current database db_uri under version '
-                   'control with version {}'.format(curr_version))
+            msg = (f'Found current database db_uri under version '
+                   f'control with version {curr_version}')
         except (RuntimeError, DatabaseNotControlledError):
             curr_version = 1
-            msg = ('Current database is not under version control. Setting '
-                   'as version %s' % curr_version)
+            msg = f'Current database is not under version control. ' \
+                  f'Setting as version {curr_version}'
             api.version_control(db_uri, repository_path, curr_version)
 
         notify(msg)
-
         if curr_version == __dbversion__:
             log.info('This database is already at the newest version')
             sys.exit(0)
 
-        upgrade_steps = range(curr_version + 1, __dbversion__ + 1)
-        notify('attempting to upgrade database from '
-               'version %s to version %s' % (curr_version, __dbversion__))
+        upgrade_steps = list(range(curr_version + 1, __dbversion__ + 1))
+        notify(f'attempting to upgrade database from '
+               f'version {curr_version} to version {__dbversion__}')
 
         # CALL THE PROPER ORDER OF STEPS TO PERFORM FULL UPGRADE
         _step = None
         for step in upgrade_steps:
-            notify('performing upgrade step %s' % step)
+            notify(f'performing upgrade step {step}')
             time.sleep(0.5)
 
             api.upgrade(db_uri, repository_path, step)
             self.sa.rollback()
-            notify('schema upgrade for step %s completed' % (step,))
+            notify(f'schema upgrade for step {step} completed')
 
             _step = step
 
         self.run_post_migration_tasks()
-        notify('upgrade to version %s successful' % _step)
+        notify(f'upgrade to version {_step} successful')
 
     def fix_repo_paths(self):
         """
@@ -231,8 +240,8 @@ class DbManage(object):
         used mostly for anonymous access
         """
         def_user = self.sa.query(User)\
-            .filter(User.username == User.DEFAULT_USER)\
-            .one()
+                .filter(User.username == User.DEFAULT_USER)\
+                .one()
 
         def_user.name = 'Anonymous'
         def_user.lastname = 'User'
@@ -542,8 +551,8 @@ class DbManage(object):
         elif not os.access(path, os.W_OK) and path_ok:
             log.warning('No write permission to given path %s', path)
-            q = ('Given path %s is not writeable, do you want to '
-                 'continue with read only mode ? [y/n]' % (path,))
+            q = (f'Given path {path} is not writeable, do you want to '
+                 f'continue with read only mode ? [y/n]')
             if not self.ask_ok(q):
                 log.error('Canceled by user')
                 sys.exit(-1)
@@ -557,8 +566,8 @@ class DbManage(object):
         real_path = os.path.normpath(os.path.realpath(path))
 
         if real_path != os.path.normpath(path):
-            q = ('Path looks like a symlink, RhodeCode Enterprise will store '
-                 'given path as %s ? [y/n]') % (real_path,)
+            q = (f'Path looks like a symlink, RhodeCode Enterprise will store '
+                 f'given path as {real_path} ? [y/n]')
             if not self.ask_ok(q):
                 log.error('Canceled by user')
                 sys.exit(-1)
diff --git a/rhodecode/lib/exceptions.py b/rhodecode/lib/exceptions.py
--- a/rhodecode/lib/exceptions.py
+++ b/rhodecode/lib/exceptions.py
@@ -109,10 +109,10 @@ class HTTPLockedRC(HTTPClientError):
     reason = None
 
     def __init__(self, message, *args, **kwargs):
-        from rhodecode import CONFIG
-        from rhodecode.lib.utils2 import safe_int
-        _code = CONFIG.get('lock_ret_code')
-        self.code = safe_int(_code, self.code)
+        import rhodecode
+
+        self.code = rhodecode.ConfigGet().get_int('lock_ret_code', missing=self.code)
+
         self.title = self.explanation = message
         super(HTTPLockedRC, self).__init__(*args, **kwargs)
         self.args = (message, )
@@ -180,3 +180,23 @@ class ArtifactMetadataBadValueType(Value
 
 class CommentVersionMismatch(ValueError):
     pass
+
+
+class SignatureVerificationError(ValueError):
+    pass
+
+
+def signature_verification_error(msg):
+    details = """
+Encryption signature verification failed.
+Please check the value of your secret key, and/or the encrypted value stored.
+Secret key stored inside .ini file:
+`rhodecode.encrypted_values.secret` or defaults to
+`beaker.session.secret`
+
+Probably the stored values were encrypted using a different secret than the one currently set in the .ini file
+"""
+
+    final_msg = f'{msg}\n{details}'
+    return SignatureVerificationError(final_msg)
+
diff --git a/rhodecode/lib/feedgenerator/feedgenerator.py b/rhodecode/lib/feedgenerator/feedgenerator.py
--- a/rhodecode/lib/feedgenerator/feedgenerator.py
+++ b/rhodecode/lib/feedgenerator/feedgenerator.py
@@ -35,7 +35,6 @@ http://web.archive.org/web/2011071803522
 
 import datetime
 import io
 
-import pytz
 from six.moves.urllib import parse as urlparse
 
 from rhodecode.lib.feedgenerator import datetime_safe
@@ -227,7 +226,7 @@ class SyndicationFeed(object):
                 latest_date = item_date
 
         # datetime.now(tz=utc) is slower, as documented in django.utils.timezone.now
-        return latest_date or datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
+        return latest_date or datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc)
 
 
 class Enclosure(object):
diff --git a/rhodecode/lib/index/__init__.py b/rhodecode/lib/index/__init__.py
--- a/rhodecode/lib/index/__init__.py
+++ b/rhodecode/lib/index/__init__.py
@@ -147,7 +147,7 @@ def searcher_from_config(config, prefix=
     if 'location' not in _config:
         _config['location'] = default_location
     if 'es_version' not in _config:
-        # use old legacy ES version set to 2
+        # use an old legacy ES version set to 2
         _config['es_version'] = '2'
 
     imported = importlib.import_module(_config.get('module', default_searcher))
diff --git a/rhodecode/lib/jsonalchemy.py b/rhodecode/lib/jsonalchemy.py
--- a/rhodecode/lib/jsonalchemy.py
+++ b/rhodecode/lib/jsonalchemy.py
@@ -17,14 +17,13 @@
 # RhodeCode Enterprise Edition, including its added features, Support services,
 # and proprietary license terms, please see https://rhodecode.com/licenses/
 
-import collections
-
 import sqlalchemy
 from sqlalchemy import UnicodeText
-from sqlalchemy.ext.mutable import Mutable
+from sqlalchemy.ext.mutable import Mutable, \
+    MutableList as MutationList, \
+    MutableDict as MutationDict
 
-from rhodecode.lib.ext_json import json
-from rhodecode.lib.utils2 import safe_unicode
+from rhodecode.lib import ext_json
 
 
 class JsonRaw(str):
@@ -42,10 +41,6 @@ class JsonRaw(str):
     pass
 
 
-# Set this to the standard dict if Order is not required
-DictClass = collections.OrderedDict
-
-
 class JSONEncodedObj(sqlalchemy.types.TypeDecorator):
     """
     Represents an immutable structure as a json-encoded string.
@@ -56,12 +51,12 @@ class JSONEncodedObj(sqlalchemy.types.Ty
 
     impl = UnicodeText
     safe = True
-    enforce_unicode = True
+    enforce_str = True
 
     def __init__(self, *args, **kwargs):
         self.default = kwargs.pop('default', None)
         self.safe = kwargs.pop('safe_json', self.safe)
-        self.enforce_unicode = kwargs.pop('enforce_unicode', self.enforce_unicode)
+        self.enforce_str = kwargs.pop('enforce_str', self.enforce_str)
         self.dialect_map = kwargs.pop('dialect_map', {})
         super(JSONEncodedObj, self).__init__(*args, **kwargs)
 
@@ -74,9 +69,10 @@ class JSONEncodedObj(sqlalchemy.types.Ty
         if isinstance(value, JsonRaw):
             value = value
         elif value is not None:
-            value = json.dumps(value)
-            if self.enforce_unicode:
-                value = safe_unicode(value)
+            if self.enforce_str:
+                value = ext_json.str_json(value)
+            else:
+                value = ext_json.json.dumps(value)
         return value
 
     def process_result_value(self, value, dialect):
@@ -85,8 +81,8 @@ class JSONEncodedObj(sqlalchemy.types.Ty
 
         if value is not None:
             try:
-                value = json.loads(value, object_pairs_hook=DictClass)
-            except Exception as e:
+                value = ext_json.json.loads(value)
+            except Exception:
                 if self.safe and self.default is not None:
                     return self.default()
                 else:
@@ -95,6 +91,7 @@
 
 
 class MutationObj(Mutable):
+
     @classmethod
     def coerce(cls, key, value):
         if isinstance(value, dict) and not isinstance(value, MutationDict):
@@ -156,90 +153,6 @@ class MutationObj(Mutable):
             propagate=True)
 
 
-class MutationDict(MutationObj, DictClass):
-    @classmethod
-    def coerce(cls, key, value):
-        """Convert plain dictionary to MutationDict"""
-        self = MutationDict(
-            (k, MutationObj.coerce(key, v)) for (k, v) in value.items())
-        self._key = key
-        return self
-
-    def de_coerce(self):
-        return dict(self)
-
-    def __setitem__(self, key, value):
-        # Due to the way OrderedDict works, this is called during __init__.
-        # At this time we don't have a key set, but what is more, the value
-        # being set has already been coerced. So special case this and skip.
-        if hasattr(self, '_key'):
-            value = MutationObj.coerce(self._key, value)
-        DictClass.__setitem__(self, key, value)
-        self.changed()
-
-    def __delitem__(self, key):
-        DictClass.__delitem__(self, key)
-        self.changed()
-
-    def __setstate__(self, state):
-        self.__dict__ = state
-
-    def __reduce_ex__(self, proto):
-        # support pickling of MutationDicts
-        d = dict(self)
-        return (self.__class__, (d,))
-
-
-class MutationList(MutationObj, list):
-    @classmethod
-    def coerce(cls, key, value):
-        """Convert plain list to MutationList"""
-        self = MutationList((MutationObj.coerce(key, v) for v in value))
-        self._key = key
-        return self
-
-    def de_coerce(self):
-        return list(self)
-
-    def __setitem__(self, idx, value):
-        list.__setitem__(self, idx, MutationObj.coerce(self._key, value))
-        self.changed()
-
-    def __setslice__(self, start, stop, values):
-        list.__setslice__(self, start, stop,
-                          (MutationObj.coerce(self._key, v) for v in values))
-        self.changed()
-
-    def __delitem__(self, idx):
-        list.__delitem__(self, idx)
-        self.changed()
-
-    def __delslice__(self, start, stop):
-        list.__delslice__(self, start, stop)
-        self.changed()
-
-    def append(self, value):
-        list.append(self, MutationObj.coerce(self._key, value))
-        self.changed()
-
-    def insert(self, idx, value):
-        list.insert(self, idx, MutationObj.coerce(self._key, value))
-        self.changed()
-
-    def extend(self, values):
-        list.extend(self, (MutationObj.coerce(self._key, v) for v in values))
-        self.changed()
-
-    def pop(self, *args, **kw):
-        value = list.pop(self, *args, **kw)
-        self.changed()
-        return value
-
-    def remove(self, value):
-        list.remove(self, value)
-        self.changed()
-
-
 def JsonType(impl=None, **kwargs):
     """
     Helper for using a mutation obj, it allows to use .with_variant easily.
@@ -253,26 +166,8 @@ def JsonType(impl=None, **kwargs):
     if impl == 'list':
         return JSONEncodedObj(default=list, **kwargs)
     elif impl == 'dict':
-        return JSONEncodedObj(default=DictClass, **kwargs)
+        return JSONEncodedObj(default=dict, **kwargs)
     else:
         return JSONEncodedObj(**kwargs)
 
 
-JSON = MutationObj.as_mutable(JsonType())
-"""
-A type to encode/decode JSON on the fly
-
-sqltype is the string type for the underlying DB column::
-
-    Column(JSON) (defaults to UnicodeText)
-"""
-
-JSONDict = MutationObj.as_mutable(JsonType('dict'))
-"""
-A type to encode/decode JSON dictionaries on the fly
-"""
-
-JSONList = MutationObj.as_mutable(JsonType('list'))
-"""
-A type to encode/decode JSON lists` on the fly
-"""
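
For background on the jsonalchemy.py change above: the hand-rolled MutationDict/MutationList implementations are dropped in favour of SQLAlchemy's built-in MutableDict/MutableList, which the new import aliases to the old names so the rest of the module keeps working. Below is a minimal, self-contained sketch of the same change-tracking behaviour using the built-ins; the Example model, its table name, and the generic sa.JSON type (standing in here for this module's JsonType) are illustrative assumptions, not part of the patch:

import sqlalchemy as sa
from sqlalchemy.orm import Session, declarative_base
from sqlalchemy.ext.mutable import MutableDict

Base = declarative_base()


class Example(Base):
    __tablename__ = 'example'
    id = sa.Column(sa.Integer, primary_key=True)
    # as_mutable() wraps the column type so that in-place edits mark the row
    # dirty, the same effect the removed MutationDict achieved via changed()
    meta = sa.Column(MutableDict.as_mutable(sa.JSON), default=dict)


engine = sa.create_engine('sqlite://')
Base.metadata.create_all(engine)

with Session(engine) as session:
    row = Example(meta={})
    session.add(row)
    session.commit()

    row.meta['touched'] = True  # in-place mutation, no attribute re-assignment
    session.commit()            # the change is detected and flushed

    assert session.get(Example, row.id).meta == {'touched': True}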