@@ -25,13 +25,14 b' controllers' | |||
|
25 | 25 | |
|
26 | 26 | import logging |
|
27 | 27 | import socket |
|
28 | import base64 | |
|
28 | 29 | |
|
29 | 30 | import markupsafe |
|
30 | 31 | import ipaddress |
|
31 | 32 | |
|
33 | import paste.httpheaders | |
|
32 | 34 | from paste.auth.basic import AuthBasicAuthenticator |
|
33 | 35 | from paste.httpexceptions import HTTPUnauthorized, HTTPForbidden, get_exception |
|
34 | from paste.httpheaders import WWW_AUTHENTICATE, AUTHORIZATION | |
|
35 | 36 | |
|
36 | 37 | import rhodecode |
|
37 | 38 | from rhodecode.authentication.base import VCS_TYPE |
@@ -40,8 +41,10 b' from rhodecode.lib import helpers as h' | |||
|
40 | 41 | from rhodecode.lib.auth import AuthUser, CookieStoreWrapper |
|
41 | 42 | from rhodecode.lib.exceptions import UserCreationError |
|
42 | 43 | from rhodecode.lib.utils import (password_changed, get_enabled_hook_classes) |
|
43 | from rhodecode.lib.utils2 import ( | |
|
44 | str2bool, safe_unicode, AttributeDict, safe_int, sha1, aslist, safe_str) | |
|
44 | from rhodecode.lib.utils2 import AttributeDict | |
|
45 | from rhodecode.lib.str_utils import ascii_bytes, safe_int, safe_str | |
|
46 | from rhodecode.lib.type_utils import aslist, str2bool | |
|
47 | from rhodecode.lib.hash_utils import sha1 | |
|
45 | 48 | from rhodecode.model.db import Repository, User, ChangesetComment, UserBookmark |
|
46 | 49 | from rhodecode.model.notification import NotificationModel |
|
47 | 50 | from rhodecode.model.settings import VcsSettingsModel, SettingsModel |
@@ -108,18 +111,20 b' def get_ip_addr(environ):' | |||
|
108 | 111 | proxy_key = 'HTTP_X_REAL_IP' |
|
109 | 112 | proxy_key2 = 'HTTP_X_FORWARDED_FOR' |
|
110 | 113 | def_key = 'REMOTE_ADDR' |
|
111 | _filters = lambda x: _filter_port(_filter_proxy(x)) | |
|
114 | ||
|
115 | def ip_filters(ip_): | |
|
116 | return _filter_port(_filter_proxy(ip_)) | |
|
112 | 117 | |
|
113 | 118 | ip = environ.get(proxy_key) |
|
114 | 119 | if ip: |
|
115 | return _filters(ip) | |
|
120 | return ip_filters(ip) | |
|
116 | 121 | |
|
117 | 122 | ip = environ.get(proxy_key2) |
|
118 | 123 | if ip: |
|
119 | return _filters(ip) | |
|
124 | return ip_filters(ip) | |
|
120 | 125 | |
|
121 | 126 | ip = environ.get(def_key, '0.0.0.0') |
|
122 | return _filters(ip) | |
|
127 | return ip_filters(ip) | |
|
123 | 128 | |
|
124 | 129 | |
|
125 | 130 | def get_server_ip_addr(environ, log_errors=True): |
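
Reviewer note on the get_ip_addr hunk above: the change only replaces the `_filters` lambda with a named nested function; the lookup order (X-Real-IP, then X-Forwarded-For, then REMOTE_ADDR) is unchanged. A minimal sketch of that order, where `_clean_ip` is a hypothetical stand-in for the module's `_filter_proxy`/`_filter_port` helpers (not shown in this diff):

    def get_client_ip(environ):
        def _clean_ip(ip):
            # illustrative only: take the first hop of "client, proxy1, proxy2"
            return ip.split(',')[0].strip()

        for key in ('HTTP_X_REAL_IP', 'HTTP_X_FORWARDED_FOR'):
            ip = environ.get(key)
            if ip:
                return _clean_ip(ip)
        return _clean_ip(environ.get('REMOTE_ADDR', '0.0.0.0'))

    assert get_client_ip({'HTTP_X_FORWARDED_FOR': '1.2.3.4, 10.0.0.1'}) == '1.2.3.4'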
@@ -138,13 +143,6 b' def get_server_port(environ):' | |||
|
138 | 143 | return environ.get('SERVER_PORT') |
|
139 | 144 | |
|
140 | 145 | |
|
141 | def get_access_path(environ): | |
|
142 | path = environ.get('PATH_INFO') | |
|
143 | org_req = environ.get('pylons.original_request') | |
|
144 | if org_req: | |
|
145 | path = org_req.environ.get('PATH_INFO') | |
|
146 | return path | |
|
147 | ||
|
148 | 146 | |
|
149 | 147 | def get_user_agent(environ): |
|
150 | 148 | return environ.get('HTTP_USER_AGENT') |
@@ -210,6 +208,7 b' class BasicAuth(AuthBasicAuthenticator):' | |||
|
210 | 208 | |
|
211 | 209 | def __init__(self, realm, authfunc, registry, auth_http_code=None, |
|
212 | 210 | initial_call_detection=False, acl_repo_name=None, rc_realm=''): |
|
211 | super(BasicAuth, self).__init__(realm=realm, authfunc=authfunc) | |
|
213 | 212 | self.realm = realm |
|
214 | 213 | self.rc_realm = rc_realm |
|
215 | 214 | self.initial_call = initial_call_detection |
@@ -218,36 +217,40 b' class BasicAuth(AuthBasicAuthenticator):' | |||
|
218 | 217 | self.acl_repo_name = acl_repo_name |
|
219 | 218 | self._rc_auth_http_code = auth_http_code |
|
220 | 219 | |
|
221 | def _get_response_from_code(self, http_code): | |
|
220 | def _get_response_from_code(self, http_code, fallback): | |
|
222 | 221 | try: |
|
223 | 222 | return get_exception(safe_int(http_code)) |
|
224 | 223 | except Exception: |
|
225 | log.exception('Failed to fetch response for code %s', http_code) | |
|
226 | return HTTPForbidden | |
|
224 | log.exception('Failed to fetch response class for code %s, using fallback: %s', http_code, fallback) | |
|
225 | return fallback | |
|
227 | 226 | |
|
228 | 227 | def get_rc_realm(self): |
|
229 | 228 | return safe_str(self.rc_realm) |
|
230 | 229 | |
|
231 | 230 | def build_authentication(self): |
|
232 | head = WWW_AUTHENTICATE.tuples('Basic realm="%s"' % self.realm) | |
|
231 | header = [('WWW-Authenticate', f'Basic realm="{self.realm}"')] | |
|
232 | ||
|
233 | # NOTE: the initial_Call detection seems to be not working/not needed witg latest Mercurial | |
|
234 | # investigate if we still need it. | |
|
233 | 235 | if self._rc_auth_http_code and not self.initial_call: |
|
234 | 236 | # return alternative HTTP code if alternative http return code |
|
235 | 237 | # is specified in RhodeCode config, but ONLY if it's not the |
|
236 | 238 | # FIRST call |
|
237 | custom_response_klass = self._get_response_from_code( | |
|
238 | self._rc_auth_http_code) | |
|
239 | return custom_response_klass(headers=head) | |
|
240 | return HTTPUnauthorized(headers=head) | |
|
239 | custom_response_klass = self._get_response_from_code(self._rc_auth_http_code, fallback=HTTPUnauthorized) | |
|
240 | log.debug('Using custom response class: %s', custom_response_klass) | |
|
241 | return custom_response_klass(headers=header) | |
|
242 | return HTTPUnauthorized(headers=header) | |
|
241 | 243 | |
|
242 | 244 | def authenticate(self, environ): |
|
243 | authorization = AUTHORIZATION(environ) | |
|
245 | authorization = paste.httpheaders.AUTHORIZATION(environ) | |
|
244 | 246 | if not authorization: |
|
245 | 247 | return self.build_authentication() |
|
246 | (authmeth, auth) = authorization.split(' ', 1) | |
|
247 | if 'basic' != authmeth.lower(): | |
|
248 | (auth_meth, auth_creds_b64) = authorization.split(' ', 1) | |
|
249 | if 'basic' != auth_meth.lower(): | |
|
248 | 250 | return self.build_authentication() |
|
249 | auth = auth.strip().decode('base64') | |
|
250 | _parts = auth.split(':', 1) | |
|
251 | ||
|
252 | credentials = safe_str(base64.b64decode(auth_creds_b64.strip())) | |
|
253 | _parts = credentials.split(':', 1) | |
|
251 | 254 | if len(_parts) == 2: |
|
252 | 255 | username, password = _parts |
|
253 | 256 | auth_data = self.authfunc( |
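
Context for the decoding change above: Python 3 removed the `str.decode('base64')` codec, so the credentials must now go through the `base64` module (hence the new `import base64` at the top of the file). A small self-contained illustration of the new flow; the header value is made up:

    import base64

    header = 'Basic dXNlcjpzM2NyZXQ='  # hypothetical header for "user:s3cret"
    auth_meth, auth_creds_b64 = header.split(' ', 1)
    # b64decode returns bytes on py3; safe_str() in the new code converts back to str
    credentials = base64.b64decode(auth_creds_b64.strip()).decode('utf-8')
    username, password = credentials.split(':', 1)
    assert (username, password) == ('user', 's3cret')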
@@ -267,17 +270,11 b' class BasicAuth(AuthBasicAuthenticator):' | |||
|
267 | 270 | |
|
268 | 271 | def calculate_version_hash(config): |
|
269 | 272 | return sha1( |
|
270 | config.get('beaker.session.secret', '') + | |
|
271 | rhodecode.__version__)[:8] | |
|
273 | config.get(b'beaker.session.secret', b'') + ascii_bytes(rhodecode.__version__) | |
|
274 | )[:8] | |
|
272 | 275 | |
|
273 | 276 | |
|
274 | 277 | def get_current_lang(request): |
|
275 | # NOTE(marcink): remove after pyramid move | |
|
276 | try: | |
|
277 | return translation.get_lang()[0] | |
|
278 | except: | |
|
279 | pass | |
|
280 | ||
|
281 | 278 | return getattr(request, '_LOCALE_', request.locale_name) |
|
282 | 279 | |
|
283 | 280 | |
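
The `calculate_version_hash` change above follows the same py3 pattern: hash digests only accept bytes, so both operands are converted before concatenation. A rough stdlib equivalent, assuming `sha1`/`ascii_bytes` are thin wrappers (this diff does not show their implementation):

    import hashlib

    secret = b'beaker-secret'            # stand-in for the config value
    version = '5.0.0'.encode('ascii')    # stand-in for rhodecode.__version__
    ver_hash = hashlib.sha1(secret + version).hexdigest()[:8]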
@@ -360,9 +357,9 b' def attach_context_attributes(context, r' | |||
|
360 | 357 | context.rhodecode_instanceid = config.get('instance_id') |
|
361 | 358 | |
|
362 | 359 | context.visual.cut_off_limit_diff = safe_int( |
|
363 | config.get('cut_off_limit_diff')) | |
|
360 | config.get('cut_off_limit_diff'), default=0) | |
|
364 | 361 | context.visual.cut_off_limit_file = safe_int( |
|
365 | config.get('cut_off_limit_file')) | |
|
362 | config.get('cut_off_limit_file'), default=0) | |
|
366 | 363 | |
|
367 | 364 | context.license = AttributeDict({}) |
|
368 | 365 | context.license.hide_license_info = str2bool( |
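
The two `safe_int(..., default=0)` changes above make the cut-off limits fall back to 0 instead of None when the config keys are absent. A sketch of the contract such a helper typically has (the real implementation lives in rhodecode.lib.str_utils and is not part of this diff):

    def safe_int(val, default=None):
        # never raise; fall back to default on unparseable input
        try:
            return int(val)
        except (TypeError, ValueError):
            return default

    assert safe_int(None, default=0) == 0
    assert safe_int('1024') == 1024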
@@ -395,7 +392,7 b' def attach_context_attributes(context, r' | |||
|
395 | 392 | |
|
396 | 393 | # session settings per user |
|
397 | 394 | |
|
398 | for k, v in request.session.items(): | |
|
395 | for k, v in list(request.session.items()): | |
|
399 | 396 | pref = 'rc_user_session_attr.' |
|
400 | 397 | if k and k.startswith(pref): |
|
401 | 398 | k = k[len(pref):] |
@@ -437,7 +434,7 b' def attach_context_attributes(context, r' | |||
|
437 | 434 | csrf_token = auth.get_csrf_token(session=request.session) |
|
438 | 435 | |
|
439 | 436 | context.csrf_token = csrf_token |
|
440 | context.backends = rhodecode.BACKENDS.keys() | |
|
437 | context.backends = list(rhodecode.BACKENDS.keys()) | |
|
441 | 438 | |
|
442 | 439 | unread_count = 0 |
|
443 | 440 | user_bookmark_list = [] |
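
Both `list(...)` wraps in this file guard against Python 3 view semantics: `dict.keys()`/`dict.items()` now return lazy views that cannot be indexed and break if the dict mutates during iteration. For example:

    backends = {'hg': 1, 'git': 2, 'svn': 3}
    keys_view = backends.keys()        # a view on py3, not a list
    # keys_view[0] would raise TypeError: 'dict_keys' object is not subscriptable
    keys_list = list(backends.keys())  # materialized copy, safe to index/cache
    assert keys_list[0] == 'hg'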
@@ -580,7 +577,7 b' def bootstrap_config(request, registry_n' | |||
|
580 | 577 | config.include('pyramid_mako') |
|
581 | 578 | config.include('rhodecode.lib.rc_beaker') |
|
582 | 579 | config.include('rhodecode.lib.rc_cache') |
|
583 | ||
|
580 | config.include('rhodecode.lib.rc_cache.archive_cache') | |
|
584 | 581 | add_events_routes(config) |
|
585 | 582 | |
|
586 | 583 | return config |
@@ -607,4 +604,3 b' def bootstrap_request(**kwargs):' | |||
|
607 | 604 | request.session = ThinSession() |
|
608 | 605 | |
|
609 | 606 | return request |
|
610 |
@@ -73,7 +73,6 b' class ORMCache:' | |||
|
73 | 73 | event.listen(session_factory, "do_orm_execute", self._do_orm_execute) |
|
74 | 74 | |
|
75 | 75 | def _do_orm_execute(self, orm_context): |
|
76 | ||
|
77 | 76 | for opt in orm_context.user_defined_options: |
|
78 | 77 | if isinstance(opt, RelationshipCache): |
|
79 | 78 | opt = opt._process_orm_context(orm_context) |
@@ -83,6 +82,10 b' class ORMCache:' | |||
|
83 | 82 | if isinstance(opt, FromCache): |
|
84 | 83 | dogpile_region = self.cache_regions[opt.region] |
|
85 | 84 | |
|
85 | if dogpile_region.expiration_time <= 0: | |
|
86 | # don't cache 0 time expiration cache | |
|
87 | continue | |
|
88 | ||
|
86 | 89 | if opt.cache_key: |
|
87 | 90 | our_cache_key = f'SQL_CACHE_{opt.cache_key}' |
|
88 | 91 | else: |
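
The new guard above treats a non-positive `expiration_time` as "caching disabled" and bypasses the dogpile region entirely. A hedged illustration of how such a region would be configured (backend and values are examples only, not this codebase's config):

    from dogpile.cache import make_region

    # a region configured like this would be skipped by _do_orm_execute above
    no_cache_region = make_region().configure(
        'dogpile.cache.memory', expiration_time=0)
    assert no_cache_region.expiration_time <= 0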
@@ -25,7 +25,6 b' import requests' | |||
|
25 | 25 | import datetime |
|
26 | 26 | |
|
27 | 27 | from dogpile.util.readwrite_lock import ReadWriteMutex |
|
28 | from pyramid.threadlocal import get_current_registry | |
|
29 | 28 | |
|
30 | 29 | import rhodecode.lib.helpers as h |
|
31 | 30 | from rhodecode.lib.auth import HasRepoPermissionAny |
@@ -236,6 +235,7 b' def get_connection_validators(registry):' | |||
|
236 | 235 | |
|
237 | 236 | def get_channelstream_config(registry=None): |
|
238 | 237 | if not registry: |
|
238 | from pyramid.threadlocal import get_current_registry | |
|
239 | 239 | registry = get_current_registry() |
|
240 | 240 | |
|
241 | 241 | rhodecode_plugins = getattr(registry, 'rhodecode_plugins', {}) |
@@ -20,7 +20,7 b'' | |||
|
20 | 20 | |
|
21 | 21 | import logging |
|
22 | 22 | import difflib |
|
23 | from itertools import groupby | |
|
23 | import itertools | |
|
24 | 24 | |
|
25 | 25 | from pygments import lex |
|
26 | 26 | from pygments.formatters.html import _get_ttype_class as pygment_token_class |
@@ -29,9 +29,10 b' from pygments.lexers import get_lexer_by' | |||
|
29 | 29 | |
|
30 | 30 | from rhodecode.lib.helpers import ( |
|
31 | 31 | get_lexer_for_filenode, html_escape, get_custom_lexer) |
|
32 | from rhodecode.lib.utils2 import AttributeDict, StrictAttributeDict, safe_unicode | |
|
32 | from rhodecode.lib.str_utils import safe_str | |
|
33 | from rhodecode.lib.utils2 import AttributeDict, StrictAttributeDict | |
|
33 | 34 | from rhodecode.lib.vcs.nodes import FileNode |
|
34 | from rhodecode.lib.vcs.exceptions import VCSError, NodeDoesNotExistError | |
|
35 | from rhodecode.lib.vcs.exceptions import NodeDoesNotExistError | |
|
35 | 36 | from rhodecode.lib.diff_match_patch import diff_match_patch |
|
36 | 37 | from rhodecode.lib.diffs import LimitedDiffContainer, DEL_FILENODE, BIN_FILENODE |
|
37 | 38 | |
@@ -46,9 +47,9 b' log = logging.getLogger(__name__)' | |||
|
46 | 47 | def filenode_as_lines_tokens(filenode, lexer=None): |
|
47 | 48 | org_lexer = lexer |
|
48 | 49 | lexer = lexer or get_lexer_for_filenode(filenode) |
|
49 | log.debug('Generating file node pygment tokens for %s, %s, org_lexer:%s', | |
|
50 | log.debug('Generating file node pygment tokens for %s, file=`%s`, org_lexer:%s', | |
|
50 | 51 | lexer, filenode, org_lexer) |
|
51 | content = filenode.content | |
|
52 | content = filenode.str_content | |
|
52 | 53 | tokens = tokenize_string(content, lexer) |
|
53 | 54 | lines = split_token_stream(tokens, content) |
|
54 | 55 | rv = list(lines) |
@@ -65,10 +66,15 b' def tokenize_string(content, lexer):' | |||
|
65 | 66 | lexer.stripnl = False |
|
66 | 67 | lexer.ensurenl = False |
|
67 | 68 | |
|
69 | # pygments needs to operate on str | |
|
70 | str_content = safe_str(content) | |
|
71 | ||
|
68 | 72 | if isinstance(lexer, TextLexer): |
|
69 | lexed = [(Token.Text, content)] | |
|
73 | # we convert content here to STR because pygments does that while tokenizing | |
|
74 | # if we DON'T get a lexer for unknown file type | |
|
75 | lexed = [(Token.Text, str_content)] | |
|
70 | 76 | else: |
|
71 | lexed = lex(content, lexer) | |
|
77 | lexed = lex(str_content, lexer) | |
|
72 | 78 | |
|
73 | 79 | for token_type, token_text in lexed: |
|
74 | 80 | yield pygment_token_class(token_type), token_text |
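
The `safe_str(content)` conversion above exists because Pygments tokenizes `str`, not `bytes`. A minimal standalone illustration of the same call pattern (lexer options as used in this function):

    from pygments import lex
    from pygments.lexers import PythonLexer

    content = b'x = 1\n'.decode('utf-8')  # must be str before lexing
    lexer = PythonLexer(stripnl=False, ensurenl=False)
    tokens = [(token_type, token_text) for token_type, token_text in lex(content, lexer)]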
@@ -84,7 +90,10 b' def split_token_stream(tokens, content):' | |||
|
84 | 90 | """ |
|
85 | 91 | |
|
86 | 92 | token_buffer = [] |
|
93 | ||
|
87 | 94 | for token_class, token_text in tokens: |
|
95 | ||
|
96 | # token_text, should be str | |
|
88 | 97 | parts = token_text.split('\n') |
|
89 | 98 | for part in parts[:-1]: |
|
90 | 99 | token_buffer.append((token_class, part)) |
@@ -97,7 +106,7 b' def split_token_stream(tokens, content):' | |||
|
97 | 106 | yield token_buffer |
|
98 | 107 | elif content: |
|
99 | 108 | # this is a special case, we have the content, but tokenization didn't produce |
|
100 | # any results. THis can happen if know file extensions like .css have some bogus | |
|
109 | # any results. This can happen if know file extensions like .css have some bogus | |
|
101 | 110 | # unicode content without any newline characters |
|
102 | 111 | yield [(pygment_token_class(Token.Text), content)] |
|
103 | 112 | |
@@ -144,7 +153,7 b' def filenode_as_annotated_lines_tokens(f' | |||
|
144 | 153 | for line_no, tokens |
|
145 | 154 | in enumerate(filenode_as_lines_tokens(filenode), 1)) |
|
146 | 155 | |
|
147 | grouped_annotations_lines = groupby(annotations_lines, lambda x: x[0]) | |
|
156 | grouped_annotations_lines = itertools.groupby(annotations_lines, lambda x: x[0]) | |
|
148 | 157 | |
|
149 | 158 | for annotation, group in grouped_annotations_lines: |
|
150 | 159 | yield ( |
@@ -158,14 +167,14 b' def render_tokenstream(tokenstream):' | |||
|
158 | 167 | for token_class, token_ops_texts in rollup_tokenstream(tokenstream): |
|
159 | 168 | |
|
160 | 169 | if token_class: |
|
161 | result.append('<span class="%s">' % token_class) | |
|
170 | result.append(f'<span class="{token_class}">') | |
|
162 | 171 | else: |
|
163 | 172 | result.append('<span>') |
|
164 | 173 | |
|
165 | 174 | for op_tag, token_text in token_ops_texts: |
|
166 | 175 | |
|
167 | 176 | if op_tag: |
|
168 | result.append('<%s>' % op_tag) | |
|
177 | result.append(f'<{op_tag}>') | |
|
169 | 178 | |
|
170 | 179 | # NOTE(marcink): in some cases of mixed encodings, we might run into |
|
171 | 180 | # troubles in the html_escape, in this case we say unicode force on token_text |
@@ -173,7 +182,7 b' def render_tokenstream(tokenstream):' | |||
|
173 | 182 | try: |
|
174 | 183 | escaped_text = html_escape(token_text) |
|
175 | 184 | except TypeError: |
|
176 | escaped_text = html_escape(safe_unicode(token_text)) | |
|
185 | escaped_text = html_escape(safe_str(token_text)) | |
|
177 | 186 | |
|
178 | 187 | # TODO: dan: investigate showing hidden characters like space/nl/tab |
|
179 | 188 | # escaped_text = escaped_text.replace(' ', '<sp> </sp>') |
@@ -183,7 +192,7 b' def render_tokenstream(tokenstream):' | |||
|
183 | 192 | result.append(escaped_text) |
|
184 | 193 | |
|
185 | 194 | if op_tag: |
|
186 | result.append('</%s>' % op_tag) | |
|
195 | result.append(f'</{op_tag}>') | |
|
187 | 196 | |
|
188 | 197 | result.append('</span>') |
|
189 | 198 | |
@@ -235,12 +244,13 b' def rollup_tokenstream(tokenstream):' | |||
|
235 | 244 | tokenstream = ((t[0], '', t[1]) for t in tokenstream) |
|
236 | 245 | |
|
237 | 246 | result = [] |
|
238 | for token_class, op_list in groupby(tokenstream, lambda t: t[0]): | |
|
247 | for token_class, op_list in itertools.groupby(tokenstream, lambda t: t[0]): | |
|
239 | 248 | ops = [] |
|
240 | for token_op, token_text_list in groupby(op_list, lambda o: o[1]): | |
|
249 | for token_op, token_text_list in itertools.groupby(op_list, lambda o: o[1]): | |
|
241 | 250 | text_buffer = [] |
|
242 | 251 | for t_class, t_op, t_text in token_text_list: |
|
243 | 252 | text_buffer.append(t_text) |
|
253 | ||
|
244 | 254 | ops.append((token_op, ''.join(text_buffer))) |
|
245 | 255 | result.append((token_class, ops)) |
|
246 | 256 | return result |
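
Switching from `from itertools import groupby` to the qualified `itertools.groupby` is purely cosmetic; behavior is identical. Worth remembering when reading `rollup_tokenstream`: groupby only merges adjacent items with equal keys:

    import itertools

    stream = [('k', 'a'), ('k', 'b'), ('v', 'c'), ('k', 'd')]
    rolled = [(key, [item[1] for item in grp])
              for key, grp in itertools.groupby(stream, lambda t: t[0])]
    # non-adjacent equal keys stay separate:
    assert rolled == [('k', ['a', 'b']), ('v', ['c']), ('k', ['d'])]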
@@ -262,24 +272,31 b' def tokens_diff(old_tokens, new_tokens, ' | |||
|
262 | 272 | old_tokens_result = [] |
|
263 | 273 | new_tokens_result = [] |
|
264 | 274 | |
|
265 | similarity = difflib.SequenceMatcher(None, | |
|
275 | def int_convert(val): | |
|
276 | if isinstance(val, int): | |
|
277 | return str(val) | |
|
278 | return val | |
|
279 | ||
|
280 | similarity = difflib.SequenceMatcher( | |
|
281 | None, | |
|
266 | 282 | ''.join(token_text for token_class, token_text in old_tokens), |
|
267 | 283 | ''.join(token_text for token_class, token_text in new_tokens) |
|
268 | 284 | ).ratio() |
|
269 | 285 | |
|
270 | if similarity < 0.6: # return, the blocks are too different | |
|
286 | if similarity < 0.6: # return, the blocks are too different | |
|
271 | 287 | for token_class, token_text in old_tokens: |
|
272 | 288 | old_tokens_result.append((token_class, '', token_text)) |
|
273 | 289 | for token_class, token_text in new_tokens: |
|
274 | 290 | new_tokens_result.append((token_class, '', token_text)) |
|
275 | 291 | return old_tokens_result, new_tokens_result, similarity |
|
276 | 292 | |
|
277 | token_sequence_matcher = difflib.SequenceMatcher(None, | |
|
293 | token_sequence_matcher = difflib.SequenceMatcher( | |
|
294 | None, | |
|
278 | 295 | [x[1] for x in old_tokens], |
|
279 | 296 | [x[1] for x in new_tokens]) |
|
280 | 297 | |
|
281 | 298 | for tag, o1, o2, n1, n2 in token_sequence_matcher.get_opcodes(): |
|
282 | # check the differences by token block types first to give a | |
|
299 | # check the differences by token block types first to give a | |
|
283 | 300 | # nicer "block" level replacement vs character diffs |
|
284 | 301 | |
|
285 | 302 | if tag == 'equal': |
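
For reference, the 0.6 similarity gate above uses difflib's ratio over the concatenated token text; blocks under the threshold get whole-block del/ins markers instead of per-character opcodes. A self-contained sketch of the two-stage idea:

    import difflib

    old_text, new_text = 'def foo(a):', 'def foo(a, b):'
    similarity = difflib.SequenceMatcher(None, old_text, new_text).ratio()
    if similarity >= 0.6:
        # similar enough: walk fine-grained opcodes, as tokens_diff does
        opcodes = difflib.SequenceMatcher(None, old_text, new_text).get_opcodes()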
@@ -289,10 +306,10 b' def tokens_diff(old_tokens, new_tokens, ' | |||
|
289 | 306 | new_tokens_result.append((token_class, '', token_text)) |
|
290 | 307 | elif tag == 'delete': |
|
291 | 308 | for token_class, token_text in old_tokens[o1:o2]: |
|
292 | old_tokens_result.append((token_class, 'del', token_text)) | |
|
309 | old_tokens_result.append((token_class, 'del', int_convert(token_text))) | |
|
293 | 310 | elif tag == 'insert': |
|
294 | 311 | for token_class, token_text in new_tokens[n1:n2]: |
|
295 | new_tokens_result.append((token_class, 'ins', token_text)) | |
|
312 | new_tokens_result.append((token_class, 'ins', int_convert(token_text))) | |
|
296 | 313 | elif tag == 'replace': |
|
297 | 314 | # if same type token blocks must be replaced, do a diff on the |
|
298 | 315 | # characters in the token blocks to show individual changes |
@@ -300,17 +317,17 b' def tokens_diff(old_tokens, new_tokens, ' | |||
|
300 | 317 | old_char_tokens = [] |
|
301 | 318 | new_char_tokens = [] |
|
302 | 319 | for token_class, token_text in old_tokens[o1:o2]: |
|
303 | for char in token_text: | |
|
320 | for char in map(lambda i: i, token_text): | |
|
304 | 321 | old_char_tokens.append((token_class, char)) |
|
305 | 322 | |
|
306 | 323 | for token_class, token_text in new_tokens[n1:n2]: |
|
307 | for char in token_text: | |
|
324 | for char in map(lambda i: i, token_text): | |
|
308 | 325 | new_char_tokens.append((token_class, char)) |
|
309 | 326 | |
|
310 | 327 | old_string = ''.join([token_text for |
|
311 | token_class, token_text in old_char_tokens]) | |
|
328 | token_class, token_text in old_char_tokens]) | |
|
312 | 329 | new_string = ''.join([token_text for |
|
313 | token_class, token_text in new_char_tokens]) | |
|
330 | token_class, token_text in new_char_tokens]) | |
|
314 | 331 | |
|
315 | 332 | char_sequence = difflib.SequenceMatcher( |
|
316 | 333 | None, old_string, new_string) |
@@ -334,11 +351,11 b' def tokens_diff(old_tokens, new_tokens, ' | |||
|
334 | 351 | b += l |
|
335 | 352 | elif op == -1: |
|
336 | 353 | for i, c in enumerate(rep): |
|
337 | obuffer.append((old_char_tokens[a+i][0], 'del', c)) | |
|
354 | obuffer.append((old_char_tokens[a+i][0], 'del', int_convert(c))) | |
|
338 | 355 | a += l |
|
339 | 356 | elif op == 1: |
|
340 | 357 | for i, c in enumerate(rep): |
|
341 | nbuffer.append((new_char_tokens[b+i][0], 'ins', c)) | |
|
358 | nbuffer.append((new_char_tokens[b+i][0], 'ins', int_convert(c))) | |
|
342 | 359 | b += l |
|
343 | 360 | else: |
|
344 | 361 | for ctag, co1, co2, cn1, cn2 in copcodes: |
@@ -349,15 +366,15 b' def tokens_diff(old_tokens, new_tokens, ' | |||
|
349 | 366 | nbuffer.append((token_class, '', token_text)) |
|
350 | 367 | elif ctag == 'delete': |
|
351 | 368 | for token_class, token_text in old_char_tokens[co1:co2]: |
|
352 | obuffer.append((token_class, 'del', token_text)) | |
|
369 | obuffer.append((token_class, 'del', int_convert(token_text))) | |
|
353 | 370 | elif ctag == 'insert': |
|
354 | 371 | for token_class, token_text in new_char_tokens[cn1:cn2]: |
|
355 | nbuffer.append((token_class, 'ins', token_text)) | |
|
372 | nbuffer.append((token_class, 'ins', int_convert(token_text))) | |
|
356 | 373 | elif ctag == 'replace': |
|
357 | 374 | for token_class, token_text in old_char_tokens[co1:co2]: |
|
358 | obuffer.append((token_class, 'del', token_text)) | |
|
375 | obuffer.append((token_class, 'del', int_convert(token_text))) | |
|
359 | 376 | for token_class, token_text in new_char_tokens[cn1:cn2]: |
|
360 | nbuffer.append((token_class, 'ins', token_text)) | |
|
377 | nbuffer.append((token_class, 'ins', int_convert(token_text))) | |
|
361 | 378 | |
|
362 | 379 | old_tokens_result.extend(obuffer) |
|
363 | 380 | new_tokens_result.extend(nbuffer) |
@@ -366,13 +383,14 b' def tokens_diff(old_tokens, new_tokens, ' | |||
|
366 | 383 | |
|
367 | 384 | |
|
368 | 385 | def diffset_node_getter(commit): |
|
369 | def get_node(fname): | |
|
386 | def get_diff_node(file_name): | |
|
387 | ||
|
370 | 388 | try: |
|
371 | return commit.get_node(fname) | |
|
389 | return commit.get_node(file_name, pre_load=['size', 'flags', 'data']) | |
|
372 | 390 | except NodeDoesNotExistError: |
|
373 | 391 | return None |
|
374 | 392 | |
|
375 | return get_node | |
|
393 | return get_diff_node | |
|
376 | 394 | |
|
377 | 395 | |
|
378 | 396 | class DiffSet(object): |
@@ -553,13 +571,13 b' class DiffSet(object):' | |||
|
553 | 571 | # this allows commenting on those |
|
554 | 572 | if not file_chunks: |
|
555 | 573 | actions = [] |
|
556 | for op_id, op_text in filediff.patch['stats']['ops'].items(): | |
|
574 | for op_id, op_text in list(filediff.patch['stats']['ops'].items()): | |
|
557 | 575 | if op_id == DEL_FILENODE: |
|
558 | 576 | actions.append('file was removed') |
|
559 | 577 | elif op_id == BIN_FILENODE: |
|
560 | 578 | actions.append('binary diff hidden') |
|
561 | 579 | else: |
|
562 | actions.append(safe_unicode(op_text)) | |
|
580 | actions.append(safe_str(op_text)) | |
|
563 | 581 | action_line = 'NO CONTENT: ' + \ |
|
564 | 582 | ', '.join(actions) or 'UNDEFINED_ACTION' |
|
565 | 583 | |
@@ -588,10 +606,11 b' class DiffSet(object):' | |||
|
588 | 606 | before, after = [], [] |
|
589 | 607 | |
|
590 | 608 | for line in hunk['lines']: |
|
609 | ||
|
591 | 610 | if line['action'] in ['unmod', 'unmod-no-hl']: |
|
592 | 611 | no_hl = line['action'] == 'unmod-no-hl' |
|
593 | result.lines.extend( | |
|
594 | self.parse_lines(before, after, source_file, target_file, no_hl=no_hl)) | |
|
612 | parsed_lines = self.parse_lines(before, after, source_file, target_file, no_hl=no_hl) | |
|
613 | result.lines.extend(parsed_lines) | |
|
595 | 614 | after.append(line) |
|
596 | 615 | before.append(line) |
|
597 | 616 | elif line['action'] == 'add': |
@@ -600,14 +619,17 b' class DiffSet(object):' | |||
|
600 | 619 | before.append(line) |
|
601 | 620 | elif line['action'] == 'old-no-nl': |
|
602 | 621 | before.append(line) |
|
622 | #line['line'] = safe_str(line['line']) | |
|
603 | 623 | elif line['action'] == 'new-no-nl': |
|
624 | #line['line'] = safe_str(line['line']) | |
|
604 | 625 | after.append(line) |
|
605 | 626 | |
|
606 | 627 | all_actions = [x['action'] for x in after] + [x['action'] for x in before] |
|
607 | 628 | no_hl = {x for x in all_actions} == {'unmod-no-hl'} |
|
608 | result.lines.extend( | |
|
609 | self.parse_lines(before, after, source_file, target_file, no_hl=no_hl)) | |
|
610 | # NOTE(marcink): we must keep list() call here so we can cache the result... | |
|
629 | parsed_no_hl_lines = self.parse_lines(before, after, source_file, target_file, no_hl=no_hl) | |
|
630 | result.lines.extend(parsed_no_hl_lines) | |
|
631 | ||
|
632 | # NOTE(marcink): we must keep list() call here, so we can cache the result... | |
|
611 | 633 | result.unified = list(self.as_unified(result.lines)) |
|
612 | 634 | result.sideside = result.lines |
|
613 | 635 | |
@@ -631,14 +653,14 b' class DiffSet(object):' | |||
|
631 | 653 | before_newline_line = before_lines.pop(-1) |
|
632 | 654 | before_newline.content = '\n {}'.format( |
|
633 | 655 | render_tokenstream( |
|
634 | [(x[0], '', x[1]) | |
|
656 | [(x[0], '', safe_str(x[1])) | |
|
635 | 657 | for x in [('nonl', before_newline_line['line'])]])) |
|
636 | 658 | |
|
637 | 659 | if after_lines and after_lines[-1]['action'] == 'new-no-nl': |
|
638 | 660 | after_newline_line = after_lines.pop(-1) |
|
639 | 661 | after_newline.content = '\n {}'.format( |
|
640 | 662 | render_tokenstream( |
|
641 | [(x[0], '', x[1]) | |
|
663 | [(x[0], '', safe_str(x[1])) | |
|
642 | 664 | for x in [('nonl', after_newline_line['line'])]])) |
|
643 | 665 | |
|
644 | 666 | while before_lines or after_lines: |
@@ -655,7 +677,7 b' class DiffSet(object):' | |||
|
655 | 677 | |
|
656 | 678 | if before: |
|
657 | 679 | if before['action'] == 'old-no-nl': |
|
658 | before_tokens = [('nonl', before['line'])] | |
|
680 | before_tokens = [('nonl', safe_str(before['line']))] | |
|
659 | 681 | else: |
|
660 | 682 | before_tokens = self.get_line_tokens( |
|
661 | 683 | line_text=before['line'], line_number=before['old_lineno'], |
@@ -669,7 +691,7 b' class DiffSet(object):' | |||
|
669 | 691 | |
|
670 | 692 | if after: |
|
671 | 693 | if after['action'] == 'new-no-nl': |
|
672 | after_tokens = [('nonl', after['line'])] | |
|
694 | after_tokens = [('nonl', safe_str(after['line']))] | |
|
673 | 695 | else: |
|
674 | 696 | after_tokens = self.get_line_tokens( |
|
675 | 697 | line_text=after['line'], line_number=after['new_lineno'], |
@@ -715,14 +737,14 b' class DiffSet(object):' | |||
|
715 | 737 | filename = input_file |
|
716 | 738 | elif isinstance(input_file, FileNode): |
|
717 | 739 | filenode = input_file |
|
718 | filename = input_file.unicode_path | |
|
740 | filename = input_file.str_path | |
|
719 | 741 | |
|
720 | 742 | hl_mode = self.HL_NONE if no_hl else self.highlight_mode |
|
721 | 743 | if hl_mode == self.HL_REAL and filenode: |
|
722 | 744 | lexer = self._get_lexer_for_filename(filename) |
|
723 | file_size_allowed = input_file.size < self.max_file_size_limit | |
|
745 | file_size_allowed = filenode.size < self.max_file_size_limit | |
|
724 | 746 | if line_number and file_size_allowed: |
|
725 | return self.get_tokenized_filenode_line(input_file, line_number, lexer, source) | |
|
747 | return self.get_tokenized_filenode_line(filenode, line_number, lexer, source) | |
|
726 | 748 | |
|
727 | 749 | if hl_mode in (self.HL_REAL, self.HL_FAST) and filename: |
|
728 | 750 | lexer = self._get_lexer_for_filename(filename) |
@@ -731,17 +753,17 b' class DiffSet(object):' | |||
|
731 | 753 | return list(tokenize_string(line_text, plain_text_lexer)) |
|
732 | 754 | |
|
733 | 755 | def get_tokenized_filenode_line(self, filenode, line_number, lexer=None, source=''): |
|
756 | name_hash = hash(filenode) | |
|
734 | 757 | |
|
735 | def tokenize(_filenode): | |
|
736 | self.highlighted_filenodes[source][filenode] = filenode_as_lines_tokens(filenode, lexer) | |
|
758 | hl_node_code = self.highlighted_filenodes[source] | |
|
737 | 759 | |
|
738 | if filenode not in self.highlighted_filenodes[source]: | |
|
739 | tokenize(filenode) | |
|
760 | if name_hash not in hl_node_code: | |
|
761 | hl_node_code[name_hash] = filenode_as_lines_tokens(filenode, lexer) | |
|
740 | 762 | |
|
741 | 763 | try: |
|
742 | return self.highlighted_filenodes[source][filenode][line_number - 1] | |
|
764 | return hl_node_code[name_hash][line_number - 1] | |
|
743 | 765 | except Exception: |
|
744 | log.exception('diff rendering error') | |
|
766 | log.exception('diff rendering error on L:%s and file=%s', line_number - 1, filenode.name) | |
|
745 | 767 | return [('', 'L{}: rhodecode diff rendering error'.format(line_number))] |
|
746 | 768 | |
|
747 | 769 | def action_to_op(self, action): |
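
The rewrite above swaps the node-keyed cache for a `hash(filenode)`-keyed one and drops the inner `tokenize` closure. The resulting pattern, reduced to its core (`expensive_tokenize` is a hypothetical stand-in for `filenode_as_lines_tokens`):

    cache = {}

    def expensive_tokenize(node):
        # stand-in for the real highlighting work
        return node.splitlines()

    def lines_for(node):
        key = hash(node)  # requires the node type to define a stable __hash__
        if key not in cache:
            cache[key] = expensive_tokenize(node)
        return cache[key]

    assert lines_for('a\nb') == ['a', 'b']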
@@ -53,20 +53,26 b' def notify(msg):' | |||
|
53 | 53 | Notification for migrations messages |
|
54 | 54 | """ |
|
55 | 55 | ml = len(msg) + (4 * 2) |
|
56 | print(('\n%s\n*** %s ***\n%s' % ('*' * ml, msg, '*' * ml)).upper()) | |
|
56 | print((('\n%s\n*** %s ***\n%s' % ('*' * ml, msg, '*' * ml)).upper())) | |
|
57 | 57 | |
|
58 | 58 | |
|
59 | 59 | class DbManage(object): |
|
60 | 60 | |
|
61 | 61 | def __init__(self, log_sql, dbconf, root, tests=False, |
|
62 | SESSION=None, cli_args=None): | |
|
62 | SESSION=None, cli_args=None, enc_key=b''): | |
|
63 | ||
|
63 | 64 | self.dbname = dbconf.split('/')[-1] |
|
64 | 65 | self.tests = tests |
|
65 | 66 | self.root = root |
|
66 | 67 | self.dburi = dbconf |
|
67 | 68 | self.log_sql = log_sql |
|
68 | 69 | self.cli_args = cli_args or {} |
|
70 | self.sa = None | |
|
71 | self.engine = None | |
|
72 | self.enc_key = enc_key | |
|
73 | # sets .sa .engine | |
|
69 | 74 | self.init_db(SESSION=SESSION) |
|
75 | ||
|
70 | 76 | self.ask_ok = self.get_ask_ok_func(self.cli_args.get('force_ask')) |
|
71 | 77 | |
|
72 | 78 | def db_exists(self): |
@@ -91,13 +97,16 b' class DbManage(object):' | |||
|
91 | 97 | return ask_ok |
|
92 | 98 | |
|
93 | 99 | def init_db(self, SESSION=None): |
|
100 | ||
|
94 | 101 | if SESSION: |
|
95 | 102 | self.sa = SESSION |
|
103 | self.engine = SESSION.bind | |
|
96 | 104 | else: |
|
97 | 105 | # init new sessions |
|
98 | 106 | engine = create_engine(self.dburi, echo=self.log_sql) |
|
99 | init_model(engine) | |
|
107 | init_model(engine, encryption_key=self.enc_key) | |
|
100 | 108 | self.sa = Session() |
|
109 | self.engine = engine | |
|
101 | 110 | |
|
102 | 111 | def create_tables(self, override=False): |
|
103 | 112 | """ |
@@ -106,6 +115,8 b' class DbManage(object):' | |||
|
106 | 115 | |
|
107 | 116 | log.info("Existing database with the same name is going to be destroyed.") |
|
108 | 117 | log.info("Setup command will run DROP ALL command on that database.") |
|
118 | engine = self.engine | |
|
119 | ||
|
109 | 120 | if self.tests: |
|
110 | 121 | destroy = True |
|
111 | 122 | else: |
@@ -114,10 +125,10 b' class DbManage(object):' | |||
|
114 | 125 | log.info('db tables bootstrap: Nothing done.') |
|
115 | 126 | sys.exit(0) |
|
116 | 127 | if destroy: |
|
117 | Base.metadata.drop_all() | |
|
128 | Base.metadata.drop_all(bind=engine) | |
|
118 | 129 | |
|
119 | 130 | checkfirst = not override |
|
120 | Base.metadata.create_all(checkfirst=checkfirst) | |
|
131 | Base.metadata.create_all(bind=engine, checkfirst=checkfirst) | |
|
121 | 132 | log.info('Created tables for %s', self.dbname) |
|
122 | 133 | |
|
123 | 134 | def set_db_version(self): |
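
The explicit `bind=engine` arguments above track SQLAlchemy's removal of implicitly bound metadata: `drop_all`/`create_all` now need the engine passed in. A minimal sketch (in-memory SQLite, `declarative_base` as available in SQLAlchemy 1.4+):

    from sqlalchemy import create_engine
    from sqlalchemy.orm import declarative_base

    Base = declarative_base()
    engine = create_engine('sqlite://')
    Base.metadata.create_all(bind=engine, checkfirst=True)  # explicit bind
    Base.metadata.drop_all(bind=engine)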
@@ -145,8 +156,7 b' class DbManage(object):' | |||
|
145 | 156 | """ |
|
146 | 157 | |
|
147 | 158 | from rhodecode.lib.dbmigrate.migrate.versioning import api |
|
148 | from rhodecode.lib.dbmigrate.migrate.exceptions import \ | |
|
149 | DatabaseNotControlledError | |
|
159 | from rhodecode.lib.dbmigrate.migrate.exceptions import DatabaseNotControlledError | |
|
150 | 160 | |
|
151 | 161 | if 'sqlite' in self.dburi: |
|
152 | 162 | print( |
@@ -172,40 +182,39 b' class DbManage(object):' | |||
|
172 | 182 | |
|
173 | 183 | try: |
|
174 | 184 | curr_version = api.db_version(db_uri, repository_path) |
|
175 | msg = ('Found current database db_uri under version ' | |
|
176 | 'control with version {}'.format(curr_version)) | |
|
185 | msg = (f'Found current database db_uri under version ' | |
|
186 | f'control with version {curr_version}') | |
|
177 | 187 | |
|
178 | 188 | except (RuntimeError, DatabaseNotControlledError): |
|
179 | 189 | curr_version = 1 |
|
180 | msg = ('Current database is not under version control. ' | |

181 | 'Setting as version %s' % curr_version) | |
|
190 | msg = f'Current database is not under version control. ' \ | |
|
191 | f'Setting as version {curr_version}' | |
|
182 | 192 | api.version_control(db_uri, repository_path, curr_version) |
|
183 | 193 | |
|
184 | 194 | notify(msg) |
|
185 | 195 | |
|
186 | ||
|
187 | 196 | if curr_version == __dbversion__: |
|
188 | 197 | log.info('This database is already at the newest version') |
|
189 | 198 | sys.exit(0) |
|
190 | 199 | |
|
191 | upgrade_steps = range(curr_version + 1, __dbversion__ + 1) | |
|
192 | notify('attempting to upgrade database from ' | |
|
193 | 'version %s to version %s' % (curr_version, __dbversion__)) | |
|
200 | upgrade_steps = list(range(curr_version + 1, __dbversion__ + 1)) | |
|
201 | notify(f'attempting to upgrade database from ' | |
|
202 | f'version {curr_version} to version {__dbversion__}') | |
|
194 | 203 | |
|
195 | 204 | # CALL THE PROPER ORDER OF STEPS TO PERFORM FULL UPGRADE |
|
196 | 205 | _step = None |
|
197 | 206 | for step in upgrade_steps: |
|
198 | notify('performing upgrade step %s' % step) | |
|
207 | notify(f'performing upgrade step {step}') | |
|
199 | 208 | time.sleep(0.5) |
|
200 | 209 | |
|
201 | 210 | api.upgrade(db_uri, repository_path, step) |
|
202 | 211 | self.sa.rollback() |
|
203 | notify('schema upgrade for step %s completed' % step) | |
|
212 | notify(f'schema upgrade for step {step} completed') | |
|
204 | 213 | |
|
205 | 214 | _step = step |
|
206 | 215 | |
|
207 | 216 | self.run_post_migration_tasks() |
|
208 | notify('upgrade to version %s successful' % step) | |
|
217 | notify(f'upgrade to version {step} successful') | |
|
209 | 218 | |
|
210 | 219 | def fix_repo_paths(self): |
|
211 | 220 | """ |
@@ -231,8 +240,8 b' class DbManage(object):' | |||
|
231 | 240 | used mostly for anonymous access |
|
232 | 241 | """ |
|
233 | 242 | def_user = self.sa.query(User)\ |
|
234 | .filter(User.username == User.DEFAULT_USER)\ | |

235 | .one() | |
|
243 | .filter(User.username == User.DEFAULT_USER)\ | |
|
244 | .one() | |
|
236 | 245 | |
|
237 | 246 | def_user.name = 'Anonymous' |
|
238 | 247 | def_user.lastname = 'User' |
@@ -542,8 +551,8 b' class DbManage(object):' | |||
|
542 | 551 | elif not os.access(path, os.W_OK) and path_ok: |
|
543 | 552 | log.warning('No write permission to given path %s', path) |
|
544 | 553 | |
|
545 | q = ('Given path %s is not writeable, do you want to ' | |
|
546 | 'continue with read only mode ? [y/n]' % path) | |
|
554 | q = (f'Given path {path} is not writeable, do you want to ' | |
|
555 | f'continue with read only mode ? [y/n]') | |
|
547 | 556 | if not self.ask_ok(q): |
|
548 | 557 | log.error('Canceled by user') |
|
549 | 558 | sys.exit(-1) |
@@ -557,8 +566,8 b' class DbManage(object):' | |||
|
557 | 566 | real_path = os.path.normpath(os.path.realpath(path)) |
|
558 | 567 | |
|
559 | 568 | if real_path != os.path.normpath(path): |
|
560 | q = ('Path looks like a symlink, RhodeCode Enterprise will store ' | |
|
561 | 'given path as %s ? [y/n]' % real_path) | |
|
569 | q = (f'Path looks like a symlink, RhodeCode Enterprise will store ' | |
|
570 | f'given path as {real_path} ? [y/n]') | |
|
562 | 571 | if not self.ask_ok(q): |
|
563 | 572 | log.error('Canceled by user') |
|
564 | 573 | sys.exit(-1) |
@@ -109,10 +109,10 b' class HTTPLockedRC(HTTPClientError):' | |||
|
109 | 109 | reason = None |
|
110 | 110 | |
|
111 | 111 | def __init__(self, message, *args, **kwargs): |
|
112 | from rhodecode import CONFIG | |
|
113 | from rhodecode.lib.utils2 import safe_int | |
|
114 | _code = CONFIG.get('lock_ret_code') | |
|
115 | self.code = safe_int(_code, self.code) | |
|
112 | import rhodecode | |
|
113 | ||
|
114 | self.code = rhodecode.ConfigGet().get_int('lock_ret_code', missing=self.code) | |
|
115 | ||
|
116 | 116 | self.title = self.explanation = message |
|
117 | 117 | super(HTTPLockedRC, self).__init__(*args, **kwargs) |
|
118 | 118 | self.args = (message, ) |
@@ -180,3 +180,23 b' class ArtifactMetadataBadValueType(Value' | |||
|
180 | 180 | |
|
181 | 181 | class CommentVersionMismatch(ValueError): |
|
182 | 182 | pass |
|
183 | ||
|
184 | ||
|
185 | class SignatureVerificationError(ValueError): | |
|
186 | pass | |
|
187 | ||
|
188 | ||
|
189 | def signature_verification_error(msg): | |
|
190 | details = """ | |
|
191 | Encryption signature verification failed. | |
|
192 | Please check your value of secret key, and/or encrypted value stored. | |
|
193 | Secret key stored inside .ini file: | |
|
194 | `rhodecode.encrypted_values.secret` or defaults to | |
|
195 | `beaker.session.secret` | |
|
196 | ||
|
197 | Probably the stored values were encrypted using a different secret then currently set in .ini file | |
|
198 | """ | |
|
199 | ||
|
200 | final_msg = f'{msg}\n{details}' | |
|
201 | return SignatureVerificationError(final_msg) | |
|
202 |
@@ -35,7 +35,6 b' http://web.archive.org/web/2011071803522' | |||
|
35 | 35 | import datetime |
|
36 | 36 | import io |
|
37 | 37 | |
|
38 | import pytz | |
|
39 | 38 | from six.moves.urllib import parse as urlparse |
|
40 | 39 | |
|
41 | 40 | from rhodecode.lib.feedgenerator import datetime_safe |
@@ -227,7 +226,7 b' class SyndicationFeed(object):' | |||
|
227 | 226 | latest_date = item_date |
|
228 | 227 | |
|
229 | 228 | # datetime.now(tz=utc) is slower, as documented in django.utils.timezone.now |
|
230 | return latest_date or datetime.datetime.utcnow().replace(tzinfo=pytz.utc) | |
|
229 | return latest_date or datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc) | |
|
231 | 230 | |
|
232 | 231 | |
|
233 | 232 | class Enclosure(object): |
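
Dropping `pytz` here works because the stdlib has shipped a fixed UTC tzinfo since Python 3.2, which is all this call needed:

    import datetime

    now_utc = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc)
    assert now_utc.tzinfo is datetime.timezone.utc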
@@ -147,7 +147,7 b' def searcher_from_config(config, prefix=' | |||
|
147 | 147 | if 'location' not in _config: |
|
148 | 148 | _config['location'] = default_location |
|
149 | 149 | if 'es_version' not in _config: |
|
150 | # use old legacy ES version set to 2 | |
|
150 | # use an old legacy ES version set to 2 | |
|
151 | 151 | _config['es_version'] = '2' |
|
152 | 152 | |
|
153 | 153 | imported = importlib.import_module(_config.get('module', default_searcher)) |
@@ -17,14 +17,13 b'' | |||
|
17 | 17 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
18 | 18 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
19 | 19 | |
|
20 | import collections | |
|
21 | ||
|
22 | 20 | import sqlalchemy |
|
23 | 21 | from sqlalchemy import UnicodeText |
|
24 | from sqlalchemy.ext.mutable import Mutable | |
|
22 | from sqlalchemy.ext.mutable import Mutable, \ | |
|
23 | MutableList as MutationList, \ | |
|
24 | MutableDict as MutationDict | |
|
25 | 25 | |
|
26 | from rhodecode.lib.ext_json import json | |
|
27 | from rhodecode.lib.utils2 import safe_unicode | |
|
26 | from rhodecode.lib import ext_json | |
|
28 | 27 | |
|
29 | 28 | |
|
30 | 29 | class JsonRaw(str): |
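
The aliased import above (`MutableList as MutationList`, `MutableDict as MutationDict`) lets SQLAlchemy's stock mutable containers replace the hand-rolled MutationDict/MutationList classes removed later in this file; they provide the same coerce/changed-tracking behavior. Typical usage, sketched with SQLAlchemy's generic JSON type rather than this module's JsonType:

    from sqlalchemy import Column, Integer, JSON
    from sqlalchemy.ext.mutable import MutableDict, MutableList
    from sqlalchemy.orm import declarative_base

    Base = declarative_base()

    class Example(Base):
        __tablename__ = 'example'
        id = Column(Integer, primary_key=True)
        # in-place mutations (meta['k'] = v, tags.append(x)) flag the row dirty
        meta = Column(MutableDict.as_mutable(JSON), default=dict)
        tags = Column(MutableList.as_mutable(JSON), default=list)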
@@ -42,10 +41,6 b' class JsonRaw(str):' | |||
|
42 | 41 | pass |
|
43 | 42 | |
|
44 | 43 | |
|
45 | # Set this to the standard dict if Order is not required | |
|
46 | DictClass = collections.OrderedDict | |
|
47 | ||
|
48 | ||
|
49 | 44 | class JSONEncodedObj(sqlalchemy.types.TypeDecorator): |
|
50 | 45 | """ |
|
51 | 46 | Represents an immutable structure as a json-encoded string. |
@@ -56,12 +51,12 b' class JSONEncodedObj(sqlalchemy.types.Ty' | |||
|
56 | 51 | |
|
57 | 52 | impl = UnicodeText |
|
58 | 53 | safe = True |
|
59 | enforce_unicode = True | |
|
54 | enforce_str = True | |
|
60 | 55 | |
|
61 | 56 | def __init__(self, *args, **kwargs): |
|
62 | 57 | self.default = kwargs.pop('default', None) |
|
63 | 58 | self.safe = kwargs.pop('safe_json', self.safe) |
|
64 | self.enforce_unicode = kwargs.pop('enforce_unicode', self.enforce_unicode) | |
|
59 | self.enforce_str = kwargs.pop('enforce_str', self.enforce_str) | |
|
65 | 60 | self.dialect_map = kwargs.pop('dialect_map', {}) |
|
66 | 61 | super(JSONEncodedObj, self).__init__(*args, **kwargs) |
|
67 | 62 | |
@@ -74,9 +69,10 b' class JSONEncodedObj(sqlalchemy.types.Ty' | |||
|
74 | 69 | if isinstance(value, JsonRaw): |
|
75 | 70 | value = value |
|
76 | 71 | elif value is not None: |
|
77 | value = json.dumps(value) | |
|
78 | if self.enforce_unicode: | |
|
79 | value = safe_unicode(value) | |
|
72 | if self.enforce_str: | |
|
73 | value = ext_json.str_json(value) | |
|
74 | else: | |
|
75 | value = ext_json.json.dumps(value) | |
|
80 | 76 | return value |
|
81 | 77 | |
|
82 | 78 | def process_result_value(self, value, dialect): |
@@ -85,8 +81,8 b' class JSONEncodedObj(sqlalchemy.types.Ty' | |||
|
85 | 81 | |
|
86 | 82 | if value is not None: |
|
87 | 83 | try: |
|
88 | value = json.loads(value) | |
|
89 | except Exception as e: | |
|
84 | value = ext_json.json.loads(value) | |
|
85 | except Exception: | |
|
90 | 86 | if self.safe and self.default is not None: |
|
91 | 87 | return self.default() |
|
92 | 88 | else: |
@@ -95,6 +91,7 b' class JSONEncodedObj(sqlalchemy.types.Ty' | |||
|
95 | 91 | |
|
96 | 92 | |
|
97 | 93 | class MutationObj(Mutable): |
|
94 | ||
|
98 | 95 | @classmethod |
|
99 | 96 | def coerce(cls, key, value): |
|
100 | 97 | if isinstance(value, dict) and not isinstance(value, MutationDict): |
@@ -156,90 +153,6 b' class MutationObj(Mutable):' | |||
|
156 | 153 | propagate=True) |
|
157 | 154 | |
|
158 | 155 | |
|
159 | class MutationDict(MutationObj, DictClass): | |
|
160 | @classmethod | |
|
161 | def coerce(cls, key, value): | |
|
162 | """Convert plain dictionary to MutationDict""" | |
|
163 | self = MutationDict( | |
|
164 | (k, MutationObj.coerce(key, v)) for (k, v) in value.items()) | |
|
165 | self._key = key | |
|
166 | return self | |
|
167 | ||
|
168 | def de_coerce(self): | |
|
169 | return dict(self) | |
|
170 | ||
|
171 | def __setitem__(self, key, value): | |
|
172 | # Due to the way OrderedDict works, this is called during __init__. | |
|
173 | # At this time we don't have a key set, but what is more, the value | |
|
174 | # being set has already been coerced. So special case this and skip. | |
|
175 | if hasattr(self, '_key'): | |
|
176 | value = MutationObj.coerce(self._key, value) | |
|
177 | DictClass.__setitem__(self, key, value) | |
|
178 | self.changed() | |
|
179 | ||
|
180 | def __delitem__(self, key): | |
|
181 | DictClass.__delitem__(self, key) | |
|
182 | self.changed() | |
|
183 | ||
|
184 | def __setstate__(self, state): | |
|
185 | self.__dict__ = state | |
|
186 | ||
|
187 | def __reduce_ex__(self, proto): | |
|
188 | # support pickling of MutationDicts | |
|
189 | d = dict(self) | |
|
190 | return (self.__class__, (d,)) | |
|
191 | ||
|
192 | ||
|
193 | class MutationList(MutationObj, list): | |
|
194 | @classmethod | |
|
195 | def coerce(cls, key, value): | |
|
196 | """Convert plain list to MutationList""" | |
|
197 | self = MutationList((MutationObj.coerce(key, v) for v in value)) | |
|
198 | self._key = key | |
|
199 | return self | |
|
200 | ||
|
201 | def de_coerce(self): | |
|
202 | return list(self) | |
|
203 | ||
|
204 | def __setitem__(self, idx, value): | |
|
205 | list.__setitem__(self, idx, MutationObj.coerce(self._key, value)) | |
|
206 | self.changed() | |
|
207 | ||
|
208 | def __setslice__(self, start, stop, values): | |
|
209 | list.__setslice__(self, start, stop, | |
|
210 | (MutationObj.coerce(self._key, v) for v in values)) | |
|
211 | self.changed() | |
|
212 | ||
|
213 | def __delitem__(self, idx): | |
|
214 | list.__delitem__(self, idx) | |
|
215 | self.changed() | |
|
216 | ||
|
217 | def __delslice__(self, start, stop): | |
|
218 | list.__delslice__(self, start, stop) | |
|
219 | self.changed() | |
|
220 | ||
|
221 | def append(self, value): | |
|
222 | list.append(self, MutationObj.coerce(self._key, value)) | |
|
223 | self.changed() | |
|
224 | ||
|
225 | def insert(self, idx, value): | |
|
226 | list.insert(self, idx, MutationObj.coerce(self._key, value)) | |
|
227 | self.changed() | |
|
228 | ||
|
229 | def extend(self, values): | |
|
230 | list.extend(self, (MutationObj.coerce(self._key, v) for v in values)) | |
|
231 | self.changed() | |
|
232 | ||
|
233 | def pop(self, *args, **kw): | |
|
234 | value = list.pop(self, *args, **kw) | |
|
235 | self.changed() | |
|
236 | return value | |
|
237 | ||
|
238 | def remove(self, value): | |
|
239 | list.remove(self, value) | |
|
240 | self.changed() | |
|
241 | ||
|
242 | ||
|
243 | 156 | def JsonType(impl=None, **kwargs): |
|
244 | 157 | """ |
|
245 | 158 | Helper for using a mutation obj, it allows to use .with_variant easily. |
@@ -253,26 +166,8 b' def JsonType(impl=None, **kwargs):' | |||
|
253 | 166 | if impl == 'list': |
|
254 | 167 | return JSONEncodedObj(default=list, **kwargs) |
|
255 | 168 | elif impl == 'dict': |
|
256 | return JSONEncodedObj(default=DictClass, **kwargs) | |
|
169 | return JSONEncodedObj(default=dict, **kwargs) | |
|
257 | 170 | else: |
|
258 | 171 | return JSONEncodedObj(**kwargs) |
|
259 | 172 | |
|
260 | 173 | |
|
261 | JSON = MutationObj.as_mutable(JsonType()) | |
|
262 | """ | |
|
263 | A type to encode/decode JSON on the fly | |
|
264 | ||
|
265 | sqltype is the string type for the underlying DB column:: | |
|
266 | ||
|
267 | Column(JSON) (defaults to UnicodeText) | |
|
268 | """ | |
|
269 | ||
|
270 | JSONDict = MutationObj.as_mutable(JsonType('dict')) | |
|
271 | """ | |
|
272 | A type to encode/decode JSON dictionaries on the fly | |
|
273 | """ | |
|
274 | ||
|
275 | JSONList = MutationObj.as_mutable(JsonType('list')) | |
|
276 | """ | |
|
277 | A type to encode/decode JSON lists` on the fly | |
|
278 | """ |