libs: major refactor for python3
super-admin
r5085:4eab4aa8 default
@@ -25,13 +25,14 @@ controllers

 import logging
 import socket
+import base64

 import markupsafe
 import ipaddress

+import paste.httpheaders
 from paste.auth.basic import AuthBasicAuthenticator
 from paste.httpexceptions import HTTPUnauthorized, HTTPForbidden, get_exception
-from paste.httpheaders import WWW_AUTHENTICATE, AUTHORIZATION

 import rhodecode
 from rhodecode.authentication.base import VCS_TYPE
@@ -40,8 +41,10 @@ from rhodecode.lib import helpers as h
 from rhodecode.lib.auth import AuthUser, CookieStoreWrapper
 from rhodecode.lib.exceptions import UserCreationError
 from rhodecode.lib.utils import (password_changed, get_enabled_hook_classes)
-from rhodecode.lib.utils2 import (
-    str2bool, safe_unicode, AttributeDict, safe_int, sha1, aslist, safe_str)
+from rhodecode.lib.utils2 import AttributeDict
+from rhodecode.lib.str_utils import ascii_bytes, safe_int, safe_str
+from rhodecode.lib.type_utils import aslist, str2bool
+from rhodecode.lib.hash_utils import sha1
 from rhodecode.model.db import Repository, User, ChangesetComment, UserBookmark
 from rhodecode.model.notification import NotificationModel
 from rhodecode.model.settings import VcsSettingsModel, SettingsModel
@@ -108,18 +111,20 @@ def get_ip_addr(environ):
     proxy_key = 'HTTP_X_REAL_IP'
     proxy_key2 = 'HTTP_X_FORWARDED_FOR'
     def_key = 'REMOTE_ADDR'
-    _filters = lambda x: _filter_port(_filter_proxy(x))
+
+    def ip_filters(ip_):
+        return _filter_port(_filter_proxy(ip_))

     ip = environ.get(proxy_key)
     if ip:
-        return _filters(ip)
+        return ip_filters(ip)

     ip = environ.get(proxy_key2)
     if ip:
-        return _filters(ip)
+        return ip_filters(ip)

     ip = environ.get(def_key, '0.0.0.0')
-    return _filters(ip)
+    return ip_filters(ip)


 def get_server_ip_addr(environ, log_errors=True):
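Note: replacing the `_filters` lambda with a named `ip_filters` function follows PEP 8 (E731), which prefers `def` over assigning a lambda to a name; named functions also show a real name in tracebacks. A minimal sketch of the pattern, with simplified stand-ins for `_filter_proxy`/`_filter_port`:

```python
# simplified stand-ins for RhodeCode's _filter_proxy/_filter_port helpers
def _filter_proxy(ip):
    # keep only the first hop of a comma-separated proxy chain
    return ip.split(',')[0].strip()

def _filter_port(ip):
    # drop a trailing :port from an IPv4-style address
    return ip.rsplit(':', 1)[0] if ip.count(':') == 1 else ip

# discouraged (PEP 8 E731): a lambda bound to a name
_filters = lambda x: _filter_port(_filter_proxy(x))

# preferred: a def with a real name, as in the diff above
def ip_filters(ip_):
    return _filter_port(_filter_proxy(ip_))

assert ip_filters('10.0.0.1:8080, 10.0.0.2') == '10.0.0.1'
```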
@@ -138,13 +143,6 @@ def get_server_port(environ):
     return environ.get('SERVER_PORT')


-def get_access_path(environ):
-    path = environ.get('PATH_INFO')
-    org_req = environ.get('pylons.original_request')
-    if org_req:
-        path = org_req.environ.get('PATH_INFO')
-    return path
-

 def get_user_agent(environ):
     return environ.get('HTTP_USER_AGENT')
@@ -210,6 +208,7 @@ class BasicAuth(AuthBasicAuthenticator):

     def __init__(self, realm, authfunc, registry, auth_http_code=None,
                  initial_call_detection=False, acl_repo_name=None, rc_realm=''):
+        super(BasicAuth, self).__init__(realm=realm, authfunc=authfunc)
         self.realm = realm
         self.rc_realm = rc_realm
         self.initial_call = initial_call_detection
@@ -218,36 +217,40 @@ class BasicAuth(AuthBasicAuthenticator):
         self.acl_repo_name = acl_repo_name
         self._rc_auth_http_code = auth_http_code

-    def _get_response_from_code(self, http_code):
+    def _get_response_from_code(self, http_code, fallback):
         try:
             return get_exception(safe_int(http_code))
         except Exception:
-            log.exception('Failed to fetch response for code %s', http_code)
-            return HTTPForbidden
+            log.exception('Failed to fetch response class for code %s, using fallback: %s', http_code, fallback)
+            return fallback

     def get_rc_realm(self):
         return safe_str(self.rc_realm)

     def build_authentication(self):
-        head = WWW_AUTHENTICATE.tuples('Basic realm="%s"' % self.realm)
+        header = [('WWW-Authenticate', f'Basic realm="{self.realm}"')]
+
+        # NOTE: the initial_call detection seems to be not working/not needed with latest Mercurial
+        # investigate if we still need it.
         if self._rc_auth_http_code and not self.initial_call:
             # return alternative HTTP code if alternative http return code
             # is specified in RhodeCode config, but ONLY if it's not the
             # FIRST call
-            custom_response_klass = self._get_response_from_code(
-                self._rc_auth_http_code)
-            return custom_response_klass(headers=head)
-        return HTTPUnauthorized(headers=head)
+            custom_response_klass = self._get_response_from_code(self._rc_auth_http_code, fallback=HTTPUnauthorized)
+            log.debug('Using custom response class: %s', custom_response_klass)
+            return custom_response_klass(headers=header)
+        return HTTPUnauthorized(headers=header)

     def authenticate(self, environ):
-        authorization = AUTHORIZATION(environ)
+        authorization = paste.httpheaders.AUTHORIZATION(environ)
         if not authorization:
             return self.build_authentication()
-        (authmeth, auth) = authorization.split(' ', 1)
-        if 'basic' != authmeth.lower():
+        (auth_meth, auth_creds_b64) = authorization.split(' ', 1)
+        if 'basic' != auth_meth.lower():
             return self.build_authentication()
-        auth = auth.strip().decode('base64')
-        _parts = auth.split(':', 1)
+
+        credentials = safe_str(base64.b64decode(auth_creds_b64.strip()))
+        _parts = credentials.split(':', 1)
         if len(_parts) == 2:
             username, password = _parts
             auth_data = self.authfunc(
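Note: the `authenticate` change is the textbook Python 3 fix in this refactor: `str.decode('base64')` relied on codec support that was removed from `str`, so the Basic auth payload now goes through the `base64` module and is decoded back to text afterwards (`safe_str` in the diff). A self-contained sketch of the round trip, with made-up credentials:

```python
import base64

# what a client would send: base64 of "username:password" (values made up)
header_value = base64.b64encode(b'alice:s3cret').decode('ascii')

# Python 2 idiom, gone in Python 3:
#   credentials = header_value.strip().decode('base64')
# Python 3 replacement: b64decode returns bytes, so decode to str after
credentials = base64.b64decode(header_value.strip()).decode('utf-8')

username, password = credentials.split(':', 1)
assert (username, password) == ('alice', 's3cret')
```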
@@ -267,17 +270,11 @@ class BasicAuth(AuthBasicAuthenticator):

 def calculate_version_hash(config):
     return sha1(
-        config.get('beaker.session.secret', '') +
-        rhodecode.__version__)[:8]
+        config.get(b'beaker.session.secret', b'') + ascii_bytes(rhodecode.__version__)
+    )[:8]


 def get_current_lang(request):
-    # NOTE(marcink): remove after pyramid move
-    try:
-        return translation.get_lang()[0]
-    except:
-        pass
-
     return getattr(request, '_LOCALE_', request.locale_name)


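Note: `calculate_version_hash` moves to bytes because `hashlib` digests only accept bytes on Python 3; the secret and the version string have to be concatenated in bytes space before hashing, which is what `ascii_bytes` provides here. A quick illustration with a dummy secret:

```python
import hashlib

secret = b'dummy-secret'   # stands in for beaker.session.secret
version = '4.28.0'         # stands in for rhodecode.__version__

# hashlib.sha1() raises TypeError on str input in Python 3, so encode first
version_hash = hashlib.sha1(secret + version.encode('ascii')).hexdigest()[:8]
print(version_hash)  # short 8-char hash, as in the [:8] slice above
```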
@@ -360,9 +357,9 @@ def attach_context_attributes(context, r
     context.rhodecode_instanceid = config.get('instance_id')

     context.visual.cut_off_limit_diff = safe_int(
-        config.get('cut_off_limit_diff'))
+        config.get('cut_off_limit_diff'), default=0)
     context.visual.cut_off_limit_file = safe_int(
-        config.get('cut_off_limit_file'))
+        config.get('cut_off_limit_file'), default=0)

     context.license = AttributeDict({})
     context.license.hide_license_info = str2bool(
@@ -395,7 +392,7 @@ def attach_context_attributes(context, r

     # session settings per user

-    for k, v in request.session.items():
+    for k, v in list(request.session.items()):
         pref = 'rc_user_session_attr.'
         if k and k.startswith(pref):
             k = k[len(pref):]
@@ -437,7 +434,7 @@ def attach_context_attributes(context, r
     csrf_token = auth.get_csrf_token(session=request.session)

     context.csrf_token = csrf_token
-    context.backends = rhodecode.BACKENDS.keys()
+    context.backends = list(rhodecode.BACKENDS.keys())

     unread_count = 0
     user_bookmark_list = []
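Note: `rhodecode.BACKENDS.keys()` is wrapped in `list()` because Python 3 dict views are lazy and not indexable; template code that subscripts or repeatedly iterates the value needs a materialized list. For example:

```python
backends = {'hg': 'Mercurial', 'git': 'Git', 'svn': 'Subversion'}

keys_view = backends.keys()        # a live view on Python 3, not a list
# keys_view[0]  -> TypeError: 'dict_keys' object is not subscriptable

keys_list = list(backends.keys())  # materialized, safe to index or reuse
assert keys_list[0] == 'hg'        # dicts preserve insertion order (3.7+)
```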
@@ -580,7 +577,7 @@ def bootstrap_config(request, registry_n
     config.include('pyramid_mako')
     config.include('rhodecode.lib.rc_beaker')
     config.include('rhodecode.lib.rc_cache')
-
+    config.include('rhodecode.lib.rc_cache.archive_cache')
     add_events_routes(config)

     return config
@@ -607,4 +604,3 @@ def bootstrap_request(**kwargs):
     request.session = ThinSession()

     return request
-
@@ -73,7 +73,6 @@ class ORMCache:
         event.listen(session_factory, "do_orm_execute", self._do_orm_execute)

     def _do_orm_execute(self, orm_context):
-
         for opt in orm_context.user_defined_options:
             if isinstance(opt, RelationshipCache):
                 opt = opt._process_orm_context(orm_context)
@@ -83,6 +82,10 @@ class ORMCache:
             if isinstance(opt, FromCache):
                 dogpile_region = self.cache_regions[opt.region]

+                if dogpile_region.expiration_time <= 0:
+                    # don't cache 0 time expiration cache
+                    continue
+
                 if opt.cache_key:
                     our_cache_key = f'SQL_CACHE_{opt.cache_key}'
                 else:
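Note: the new guard skips SQL statement caching for dogpile regions configured with a non-positive `expiration_time`, where entries would expire immediately anyway. On a configured region that attribute is directly readable; a minimal sketch with an in-memory region:

```python
from dogpile.cache import make_region

# a region whose entries expire immediately; caching into it is wasted work
region = make_region().configure('dogpile.cache.memory', expiration_time=0)

if region.expiration_time <= 0:
    print('skipping cache for this region')  # mirrors the `continue` above
```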
@@ -25,7 +25,6 @@ import requests
 import datetime

 from dogpile.util.readwrite_lock import ReadWriteMutex
-from pyramid.threadlocal import get_current_registry

 import rhodecode.lib.helpers as h
 from rhodecode.lib.auth import HasRepoPermissionAny
@@ -236,6 +235,7 @@ def get_connection_validators(registry):

 def get_channelstream_config(registry=None):
     if not registry:
+        from pyramid.threadlocal import get_current_registry
         registry = get_current_registry()

     rhodecode_plugins = getattr(registry, 'rhodecode_plugins', {})
@@ -20,7 +20,7 @@

 import logging
 import difflib
-from itertools import groupby
+import itertools

 from pygments import lex
 from pygments.formatters.html import _get_ttype_class as pygment_token_class
@@ -29,9 +29,10 @@ from pygments.lexers import get_lexer_by

 from rhodecode.lib.helpers import (
     get_lexer_for_filenode, html_escape, get_custom_lexer)
-from rhodecode.lib.utils2 import AttributeDict, StrictAttributeDict, safe_unicode
+from rhodecode.lib.str_utils import safe_str
+from rhodecode.lib.utils2 import AttributeDict, StrictAttributeDict
 from rhodecode.lib.vcs.nodes import FileNode
-from rhodecode.lib.vcs.exceptions import VCSError, NodeDoesNotExistError
+from rhodecode.lib.vcs.exceptions import NodeDoesNotExistError
 from rhodecode.lib.diff_match_patch import diff_match_patch
 from rhodecode.lib.diffs import LimitedDiffContainer, DEL_FILENODE, BIN_FILENODE

@@ -46,9 +47,9 @@ log = logging.getLogger(__name__)
 def filenode_as_lines_tokens(filenode, lexer=None):
     org_lexer = lexer
     lexer = lexer or get_lexer_for_filenode(filenode)
-    log.debug('Generating file node pygment tokens for %s, %s, org_lexer:%s',
+    log.debug('Generating file node pygment tokens for %s, file=`%s`, org_lexer:%s',
               lexer, filenode, org_lexer)
-    content = filenode.content
+    content = filenode.str_content
     tokens = tokenize_string(content, lexer)
     lines = split_token_stream(tokens, content)
     rv = list(lines)
@@ -65,10 +66,15 @@ def tokenize_string(content, lexer):
     lexer.stripnl = False
     lexer.ensurenl = False

+    # pygments needs to operate on str
+    str_content = safe_str(content)
+
     if isinstance(lexer, TextLexer):
-        lexed = [(Token.Text, content)]
+        # we convert content here to STR because pygments does that while tokenizing
+        # if we DON'T get a lexer for unknown file type
+        lexed = [(Token.Text, str_content)]
     else:
-        lexed = lex(content, lexer)
+        lexed = lex(str_content, lexer)

     for token_type, token_text in lexed:
         yield pygment_token_class(token_type), token_text
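Note: `tokenize_string` now normalizes input with `safe_str` up front because Pygments' `lex()` operates on text; on Python 3 the file content may arrive as bytes and must be decoded before tokenizing. A small self-contained check:

```python
from pygments import lex
from pygments.lexers import TextLexer

content = b'caf\xc3\xa9 = 1\n'         # raw bytes, as read from a file
str_content = content.decode('utf-8')  # what safe_str() achieves in the diff

# lex() yields (token_type, token_text) pairs over str input
tokens = list(lex(str_content, TextLexer()))
print(tokens)  # [(Token.Text, 'café = 1\n')]
```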
@@ -84,7 +90,10 @@ def split_token_stream(tokens, content):
     """

     token_buffer = []
+
     for token_class, token_text in tokens:
+
+        # token_text, should be str
         parts = token_text.split('\n')
         for part in parts[:-1]:
             token_buffer.append((token_class, part))
@@ -97,7 +106,7 @@ def split_token_stream(tokens, content):
         yield token_buffer
     elif content:
         # this is a special case, we have the content, but tokenization didn't produce
-        # any results. THis can happen if know file extensions like .css have some bogus
+        # any results. This can happen if known file extensions like .css have some bogus
         # unicode content without any newline characters
         yield [(pygment_token_class(Token.Text), content)]

@@ -144,7 +153,7 @@ def filenode_as_annotated_lines_tokens(f
         for line_no, tokens
         in enumerate(filenode_as_lines_tokens(filenode), 1))

-    grouped_annotations_lines = groupby(annotations_lines, lambda x: x[0])
+    grouped_annotations_lines = itertools.groupby(annotations_lines, lambda x: x[0])

     for annotation, group in grouped_annotations_lines:
         yield (
@@ -158,14 +167,14 @@ def render_tokenstream(tokenstream):
     for token_class, token_ops_texts in rollup_tokenstream(tokenstream):

         if token_class:
-            result.append('<span class="%s">' % token_class)
+            result.append(f'<span class="{token_class}">')
         else:
             result.append('<span>')

         for op_tag, token_text in token_ops_texts:

             if op_tag:
-                result.append('<%s>' % op_tag)
+                result.append(f'<{op_tag}>')

             # NOTE(marcink): in some cases of mixed encodings, we might run into
             # troubles in the html_escape, in this case we say unicode force on token_text
@@ -173,7 +182,7 @@ def render_tokenstream(tokenstream):
             try:
                 escaped_text = html_escape(token_text)
             except TypeError:
-                escaped_text = html_escape(safe_unicode(token_text))
+                escaped_text = html_escape(safe_str(token_text))

             # TODO: dan: investigate showing hidden characters like space/nl/tab
             # escaped_text = escaped_text.replace(' ', '<sp> </sp>')
@@ -183,7 +192,7 @@ def render_tokenstream(tokenstream):
             result.append(escaped_text)

             if op_tag:
-                result.append('</%s>' % op_tag)
+                result.append(f'</{op_tag}>')

         result.append('</span>')

@@ -235,12 +244,13 @@ def rollup_tokenstream(tokenstream):
         tokenstream = ((t[0], '', t[1]) for t in tokenstream)

     result = []
-    for token_class, op_list in groupby(tokenstream, lambda t: t[0]):
+    for token_class, op_list in itertools.groupby(tokenstream, lambda t: t[0]):
         ops = []
-        for token_op, token_text_list in groupby(op_list, lambda o: o[1]):
+        for token_op, token_text_list in itertools.groupby(op_list, lambda o: o[1]):
             text_buffer = []
             for t_class, t_op, t_text in token_text_list:
                 text_buffer.append(t_text)
+
             ops.append((token_op, ''.join(text_buffer)))
         result.append((token_class, ops))
     return result
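Note: the import-style change from `from itertools import groupby` to `import itertools` is cosmetic, but `groupby` is what makes `rollup_tokenstream` work: it batches *consecutive* items sharing a key, so adjacent tokens of the same class merge without any pre-sorting. For illustration:

```python
import itertools

tokens = [('kw', 'def'), ('kw', ' '), ('name', 'foo'), ('kw', ':')]

# groupby merges consecutive runs with equal keys; adjacency is the point here
rolled = [(cls, ''.join(text for _, text in grp))
          for cls, grp in itertools.groupby(tokens, lambda t: t[0])]

assert rolled == [('kw', 'def '), ('name', 'foo'), ('kw', ':')]
```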
@@ -262,7 +272,13 @@ def tokens_diff(old_tokens, new_tokens,
     old_tokens_result = []
     new_tokens_result = []

-    similarity = difflib.SequenceMatcher(None,
+    def int_convert(val):
+        if isinstance(val, int):
+            return str(val)
+        return val
+
+    similarity = difflib.SequenceMatcher(
+        None,
         ''.join(token_text for token_class, token_text in old_tokens),
         ''.join(token_text for token_class, token_text in new_tokens)
     ).ratio()
@@ -274,12 +290,13 @@ def tokens_diff(old_tokens, new_tokens,
             new_tokens_result.append((token_class, '', token_text))
         return old_tokens_result, new_tokens_result, similarity

-    token_sequence_matcher = difflib.SequenceMatcher(None,
+    token_sequence_matcher = difflib.SequenceMatcher(
+        None,
         [x[1] for x in old_tokens],
         [x[1] for x in new_tokens])

     for tag, o1, o2, n1, n2 in token_sequence_matcher.get_opcodes():
-        # check the differences by token block types first to give a more
+        # check the differences by token block types first to give a
         # nicer "block" level replacement vs character diffs

         if tag == 'equal':
@@ -289,10 +306,10 @@ def tokens_diff(old_tokens, new_tokens,
                 new_tokens_result.append((token_class, '', token_text))
         elif tag == 'delete':
             for token_class, token_text in old_tokens[o1:o2]:
-                old_tokens_result.append((token_class, 'del', token_text))
+                old_tokens_result.append((token_class, 'del', int_convert(token_text)))
         elif tag == 'insert':
             for token_class, token_text in new_tokens[n1:n2]:
-                new_tokens_result.append((token_class, 'ins', token_text))
+                new_tokens_result.append((token_class, 'ins', int_convert(token_text)))
         elif tag == 'replace':
             # if same type token blocks must be replaced, do a diff on the
             # characters in the token blocks to show individual changes
@@ -300,11 +317,11 @@ def tokens_diff(old_tokens, new_tokens,
             old_char_tokens = []
             new_char_tokens = []
             for token_class, token_text in old_tokens[o1:o2]:
-                for char in token_text:
+                for char in map(lambda i: i, token_text):
                     old_char_tokens.append((token_class, char))

             for token_class, token_text in new_tokens[n1:n2]:
-                for char in token_text:
+                for char in map(lambda i: i, token_text):
                     new_char_tokens.append((token_class, char))

             old_string = ''.join([token_text for
@@ -334,11 +351,11 @@ def tokens_diff(old_tokens, new_tokens,
                     b += l
                 elif op == -1:
                     for i, c in enumerate(rep):
-                        obuffer.append((old_char_tokens[a+i][0], 'del', c))
+                        obuffer.append((old_char_tokens[a+i][0], 'del', int_convert(c)))
                     a += l
                 elif op == 1:
                     for i, c in enumerate(rep):
-                        nbuffer.append((new_char_tokens[b+i][0], 'ins', c))
+                        nbuffer.append((new_char_tokens[b+i][0], 'ins', int_convert(c)))
                     b += l
                 else:
                     for ctag, co1, co2, cn1, cn2 in copcodes:
@@ -349,15 +366,15 @@ def tokens_diff(old_tokens, new_tokens,
                                 nbuffer.append((token_class, '', token_text))
                         elif ctag == 'delete':
                             for token_class, token_text in old_char_tokens[co1:co2]:
-                                obuffer.append((token_class, 'del', token_text))
+                                obuffer.append((token_class, 'del', int_convert(token_text)))
                         elif ctag == 'insert':
                             for token_class, token_text in new_char_tokens[cn1:cn2]:
-                                nbuffer.append((token_class, 'ins', token_text))
+                                nbuffer.append((token_class, 'ins', int_convert(token_text)))
                         elif ctag == 'replace':
                             for token_class, token_text in old_char_tokens[co1:co2]:
-                                obuffer.append((token_class, 'del', token_text))
+                                obuffer.append((token_class, 'del', int_convert(token_text)))
                             for token_class, token_text in new_char_tokens[cn1:cn2]:
-                                nbuffer.append((token_class, 'ins', token_text))
+                                nbuffer.append((token_class, 'ins', int_convert(token_text)))

             old_tokens_result.extend(obuffer)
             new_tokens_result.extend(nbuffer)
@@ -366,13 +383,14 @@ def tokens_diff(old_tokens, new_tokens,


 def diffset_node_getter(commit):
-    def get_node(fname):
+    def get_diff_node(file_name):
+
         try:
-            return commit.get_node(fname)
+            return commit.get_node(file_name, pre_load=['size', 'flags', 'data'])
         except NodeDoesNotExistError:
             return None

-    return get_node
+    return get_diff_node


 class DiffSet(object):
@@ -553,13 +571,13 @@ class DiffSet(object):
         # this allows commenting on those
         if not file_chunks:
             actions = []
-            for op_id, op_text in filediff.patch['stats']['ops'].items():
+            for op_id, op_text in list(filediff.patch['stats']['ops'].items()):
                 if op_id == DEL_FILENODE:
                     actions.append('file was removed')
                 elif op_id == BIN_FILENODE:
                     actions.append('binary diff hidden')
                 else:
-                    actions.append(safe_unicode(op_text))
+                    actions.append(safe_str(op_text))
             action_line = 'NO CONTENT: ' + \
                 ', '.join(actions) or 'UNDEFINED_ACTION'

@@ -588,10 +606,11 @@ class DiffSet(object):
         before, after = [], []

         for line in hunk['lines']:
+
             if line['action'] in ['unmod', 'unmod-no-hl']:
                 no_hl = line['action'] == 'unmod-no-hl'
-                result.lines.extend(
-                    self.parse_lines(before, after, source_file, target_file, no_hl=no_hl))
+                parsed_lines = self.parse_lines(before, after, source_file, target_file, no_hl=no_hl)
+                result.lines.extend(parsed_lines)
                 after.append(line)
                 before.append(line)
             elif line['action'] == 'add':
@@ -600,14 +619,17 @@ class DiffSet(object):
                 before.append(line)
             elif line['action'] == 'old-no-nl':
                 before.append(line)
+                #line['line'] = safe_str(line['line'])
             elif line['action'] == 'new-no-nl':
+                #line['line'] = safe_str(line['line'])
                 after.append(line)

         all_actions = [x['action'] for x in after] + [x['action'] for x in before]
         no_hl = {x for x in all_actions} == {'unmod-no-hl'}
-        result.lines.extend(
-            self.parse_lines(before, after, source_file, target_file, no_hl=no_hl))
-        # NOTE(marcink): we must keep list() call here so we can cache the result...
+        parsed_no_hl_lines = self.parse_lines(before, after, source_file, target_file, no_hl=no_hl)
+        result.lines.extend(parsed_no_hl_lines)
+
+        # NOTE(marcink): we must keep list() call here, so we can cache the result...
         result.unified = list(self.as_unified(result.lines))
         result.sideside = result.lines

@@ -631,14 +653,14 @@ class DiffSet(object):
             before_newline_line = before_lines.pop(-1)
             before_newline.content = '\n {}'.format(
                 render_tokenstream(
-                    [(x[0], '', x[1])
+                    [(x[0], '', safe_str(x[1]))
                      for x in [('nonl', before_newline_line['line'])]]))

         if after_lines and after_lines[-1]['action'] == 'new-no-nl':
             after_newline_line = after_lines.pop(-1)
             after_newline.content = '\n {}'.format(
                 render_tokenstream(
-                    [(x[0], '', x[1])
+                    [(x[0], '', safe_str(x[1]))
                      for x in [('nonl', after_newline_line['line'])]]))

         while before_lines or after_lines:
@@ -655,7 +677,7 @@ class DiffSet(object):

             if before:
                 if before['action'] == 'old-no-nl':
-                    before_tokens = [('nonl', before['line'])]
+                    before_tokens = [('nonl', safe_str(before['line']))]
                 else:
                     before_tokens = self.get_line_tokens(
                         line_text=before['line'], line_number=before['old_lineno'],
@@ -669,7 +691,7 @@ class DiffSet(object):

             if after:
                 if after['action'] == 'new-no-nl':
-                    after_tokens = [('nonl', after['line'])]
+                    after_tokens = [('nonl', safe_str(after['line']))]
                 else:
                     after_tokens = self.get_line_tokens(
                         line_text=after['line'], line_number=after['new_lineno'],
@@ -715,14 +737,14 @@ class DiffSet(object):
             filename = input_file
         elif isinstance(input_file, FileNode):
             filenode = input_file
-            filename = input_file.unicode_path
+            filename = input_file.str_path

         hl_mode = self.HL_NONE if no_hl else self.highlight_mode
         if hl_mode == self.HL_REAL and filenode:
             lexer = self._get_lexer_for_filename(filename)
-            file_size_allowed = input_file.size < self.max_file_size_limit
+            file_size_allowed = filenode.size < self.max_file_size_limit
             if line_number and file_size_allowed:
-                return self.get_tokenized_filenode_line(input_file, line_number, lexer, source)
+                return self.get_tokenized_filenode_line(filenode, line_number, lexer, source)

         if hl_mode in (self.HL_REAL, self.HL_FAST) and filename:
             lexer = self._get_lexer_for_filename(filename)
@@ -731,17 +753,17 @@ class DiffSet(object):
             return list(tokenize_string(line_text, plain_text_lexer))

     def get_tokenized_filenode_line(self, filenode, line_number, lexer=None, source=''):
+        name_hash = hash(filenode)

-        def tokenize(_filenode):
-            self.highlighted_filenodes[source][filenode] = filenode_as_lines_tokens(filenode, lexer)
-
-        if filenode not in self.highlighted_filenodes[source]:
-            tokenize(filenode)
+        hl_node_code = self.highlighted_filenodes[source]
+
+        if name_hash not in hl_node_code:
+            hl_node_code[name_hash] = filenode_as_lines_tokens(filenode, lexer)

         try:
-            return self.highlighted_filenodes[source][filenode][line_number - 1]
+            return hl_node_code[name_hash][line_number - 1]
         except Exception:
-            log.exception('diff rendering error')
+            log.exception('diff rendering error on L:%s and file=%s', line_number - 1, filenode.name)
             return [('', 'L{}: rhodecode diff rendering error'.format(line_number))]

     def action_to_op(self, action):
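Note: `get_tokenized_filenode_line` now memoizes highlighted lines under `hash(filenode)` instead of the node object itself, keeping the cache key small and avoiding repeated equality checks on nodes. A simplified sketch of the same memoization shape, with a stand-in `tokenize` callable:

```python
# stand-in cache keyed by an object's hash, mirroring hl_node_code in the diff
cache = {}

def tokenized_lines(node, tokenize):
    name_hash = hash(node)           # cheap, stable key for the cache
    if name_hash not in cache:
        cache[name_hash] = tokenize(node)
    return cache[name_hash]

lines = tokenized_lines('path/file.py', lambda n: [f'line of {n}'])
assert lines == ['line of path/file.py']
```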
@@ -53,20 +53,26 @@ def notify(msg):
     Notification for migrations messages
     """
     ml = len(msg) + (4 * 2)
-    print(('\n%s\n*** %s ***\n%s' % ('*' * ml, msg, '*' * ml)).upper())
+    print((('\n%s\n*** %s ***\n%s' % ('*' * ml, msg, '*' * ml)).upper()))


 class DbManage(object):

     def __init__(self, log_sql, dbconf, root, tests=False,
-                 SESSION=None, cli_args=None):
+                 SESSION=None, cli_args=None, enc_key=b''):
+
         self.dbname = dbconf.split('/')[-1]
         self.tests = tests
         self.root = root
         self.dburi = dbconf
         self.log_sql = log_sql
         self.cli_args = cli_args or {}
+        self.sa = None
+        self.engine = None
+        self.enc_key = enc_key
+        # sets .sa .engine
         self.init_db(SESSION=SESSION)
+
         self.ask_ok = self.get_ask_ok_func(self.cli_args.get('force_ask'))

     def db_exists(self):
@@ -91,13 +97,16 @@ class DbManage(object):
         return ask_ok

     def init_db(self, SESSION=None):
+
         if SESSION:
             self.sa = SESSION
+            self.engine = SESSION.bind
         else:
             # init new sessions
             engine = create_engine(self.dburi, echo=self.log_sql)
-            init_model(engine)
+            init_model(engine, encryption_key=self.enc_key)
             self.sa = Session()
+            self.engine = engine

     def create_tables(self, override=False):
         """
@@ -106,6 +115,8 @@ class DbManage(object):

         log.info("Existing database with the same name is going to be destroyed.")
         log.info("Setup command will run DROP ALL command on that database.")
+        engine = self.engine
+
         if self.tests:
             destroy = True
         else:
@@ -114,10 +125,10 @@ class DbManage(object):
                 log.info('db tables bootstrap: Nothing done.')
                 sys.exit(0)
             if destroy:
-                Base.metadata.drop_all()
+                Base.metadata.drop_all(bind=engine)

         checkfirst = not override
-        Base.metadata.create_all(checkfirst=checkfirst)
+        Base.metadata.create_all(bind=engine, checkfirst=checkfirst)
         log.info('Created tables for %s', self.dbname)

     def set_db_version(self):
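Note: `drop_all`/`create_all` now receive `bind=engine` explicitly; relying on implicit metadata binding is a legacy pattern that SQLAlchemy 2.0 removes, which is why `DbManage` keeps its own `self.engine`. A minimal sketch against an in-memory SQLite engine:

```python
from sqlalchemy import Column, Integer, create_engine
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class Example(Base):
    __tablename__ = 'example'
    id = Column(Integer, primary_key=True)

engine = create_engine('sqlite://')

# SQLAlchemy 2.x style: pass the engine explicitly instead of metadata.bind
Base.metadata.drop_all(bind=engine, checkfirst=True)
Base.metadata.create_all(bind=engine, checkfirst=True)
```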
@@ -145,8 +156,7 @@ class DbManage(object):
         """

         from rhodecode.lib.dbmigrate.migrate.versioning import api
-        from rhodecode.lib.dbmigrate.migrate.exceptions import \
-            DatabaseNotControlledError
+        from rhodecode.lib.dbmigrate.migrate.exceptions import DatabaseNotControlledError

         if 'sqlite' in self.dburi:
             print(
@@ -172,40 +182,39 @@ class DbManage(object):

         try:
             curr_version = api.db_version(db_uri, repository_path)
-            msg = ('Found current database db_uri under version '
-                   'control with version {}'.format(curr_version))
+            msg = (f'Found current database db_uri under version '
+                   f'control with version {curr_version}')

         except (RuntimeError, DatabaseNotControlledError):
             curr_version = 1
-            msg = ('Current database is not under version control. Setting '
-                   'as version %s' % curr_version)
+            msg = f'Current database is not under version control. ' \
+                  f'Setting as version {curr_version}'
             api.version_control(db_uri, repository_path, curr_version)

         notify(msg)

-
         if curr_version == __dbversion__:
             log.info('This database is already at the newest version')
             sys.exit(0)

-        upgrade_steps = range(curr_version + 1, __dbversion__ + 1)
-        notify('attempting to upgrade database from '
-               'version %s to version %s' % (curr_version, __dbversion__))
+        upgrade_steps = list(range(curr_version + 1, __dbversion__ + 1))
+        notify(f'attempting to upgrade database from '
+               f'version {curr_version} to version {__dbversion__}')

         # CALL THE PROPER ORDER OF STEPS TO PERFORM FULL UPGRADE
         _step = None
         for step in upgrade_steps:
-            notify('performing upgrade step %s' % step)
+            notify(f'performing upgrade step {step}')
             time.sleep(0.5)

             api.upgrade(db_uri, repository_path, step)
             self.sa.rollback()
-            notify('schema upgrade for step %s completed' % (step,))
+            notify(f'schema upgrade for step {step} completed')

             _step = step

         self.run_post_migration_tasks()
-        notify('upgrade to version %s successful' % _step)
+        notify(f'upgrade to version {step} successful')

     def fix_repo_paths(self):
         """
@@ -542,8 +551,8 @@ class DbManage(object):
         elif not os.access(path, os.W_OK) and path_ok:
             log.warning('No write permission to given path %s', path)

-            q = ('Given path %s is not writeable, do you want to '
-                 'continue with read only mode ? [y/n]' % (path,))
+            q = (f'Given path {path} is not writeable, do you want to '
+                 f'continue with read only mode ? [y/n]')
             if not self.ask_ok(q):
                 log.error('Canceled by user')
                 sys.exit(-1)
@@ -557,8 +566,8 @@ class DbManage(object):
         real_path = os.path.normpath(os.path.realpath(path))

         if real_path != os.path.normpath(path):
-            q = ('Path looks like a symlink, RhodeCode Enterprise will store '
-                 'given path as %s ? [y/n]') % (real_path,)
+            q = (f'Path looks like a symlink, RhodeCode Enterprise will store '
+                 f'given path as {real_path} ? [y/n]')
             if not self.ask_ok(q):
                 log.error('Canceled by user')
                 sys.exit(-1)
@@ -109,10 +109,10 @@ class HTTPLockedRC(HTTPClientError):
     reason = None

     def __init__(self, message, *args, **kwargs):
-        from rhodecode import CONFIG
-        from rhodecode.lib.utils2 import safe_int
-        _code = CONFIG.get('lock_ret_code')
-        self.code = safe_int(_code, self.code)
+        import rhodecode
+
+        self.code = rhodecode.ConfigGet().get_int('lock_ret_code', missing=self.code)
+
         self.title = self.explanation = message
         super(HTTPLockedRC, self).__init__(*args, **kwargs)
         self.args = (message, )
@@ -180,3 +180,23 @@ class ArtifactMetadataBadValueType(Value

 class CommentVersionMismatch(ValueError):
     pass
+
+
+class SignatureVerificationError(ValueError):
+    pass
+
+
+def signature_verification_error(msg):
+    details = """
+    Encryption signature verification failed.
+    Please check your value of secret key, and/or encrypted value stored.
+    Secret key stored inside .ini file:
+    `rhodecode.encrypted_values.secret` or defaults to
+    `beaker.session.secret`
+
+    Probably the stored values were encrypted using a different secret than the one currently set in the .ini file
+    """
+
+    final_msg = f'{msg}\n{details}'
+    return SignatureVerificationError(final_msg)
+
@@ -35,7 +35,6 @@ http://web.archive.org/web/2011071803522
 import datetime
 import io

-import pytz
 from six.moves.urllib import parse as urlparse

 from rhodecode.lib.feedgenerator import datetime_safe
@@ -227,7 +226,7 @@ class SyndicationFeed(object):
             latest_date = item_date

         # datetime.now(tz=utc) is slower, as documented in django.utils.timezone.now
-        return latest_date or datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
+        return latest_date or datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc)


 class Enclosure(object):
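Note: dropping the `pytz` import works because `datetime.timezone.utc` (stdlib since Python 3.2) is a drop-in replacement for `pytz.utc` when all that is needed is a fixed UTC tzinfo:

```python
import datetime

# pytz-free UTC timestamp; equivalent to .replace(tzinfo=pytz.utc)
now_utc = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc)
assert now_utc.tzinfo is datetime.timezone.utc
print(now_utc.isoformat())
```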
@@ -147,7 +147,7 @@ def searcher_from_config(config, prefix=
     if 'location' not in _config:
         _config['location'] = default_location
     if 'es_version' not in _config:
-        # use old legacy ES version set to 2
+        # use an old legacy ES version set to 2
        _config['es_version'] = '2'

    imported = importlib.import_module(_config.get('module', default_searcher))
@@ -17,14 +17,13 @@
 # RhodeCode Enterprise Edition, including its added features, Support services,
 # and proprietary license terms, please see https://rhodecode.com/licenses/

-import collections
-
 import sqlalchemy
 from sqlalchemy import UnicodeText
-from sqlalchemy.ext.mutable import Mutable
+from sqlalchemy.ext.mutable import Mutable, \
+    MutableList as MutationList, \
+    MutableDict as MutationDict

-from rhodecode.lib.ext_json import json
-from rhodecode.lib.utils2 import safe_unicode
+from rhodecode.lib import ext_json


 class JsonRaw(str):
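Note: the aliased imports prepare for the removal below of the hand-rolled `MutationDict`/`MutationList`: SQLAlchemy has shipped equivalent change-tracking wrappers in `sqlalchemy.ext.mutable` (as `MutableDict`/`MutableList`) since 0.7. A minimal sketch of the replacement wiring, using the stock `JSON` type where RhodeCode uses its own `JSONEncodedObj`:

```python
from sqlalchemy import JSON, Column, Integer, create_engine
from sqlalchemy.ext.mutable import MutableDict
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class Settings(Base):
    __tablename__ = 'settings'
    id = Column(Integer, primary_key=True)
    # MutableDict makes in-place edits (row.data['k'] = v) mark the row dirty
    data = Column(MutableDict.as_mutable(JSON), default=dict)

engine = create_engine('sqlite://')
Base.metadata.create_all(bind=engine)
```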
@@ -42,10 +41,6 @@ class JsonRaw(str):
     pass


-# Set this to the standard dict if Order is not required
-DictClass = collections.OrderedDict
-
-
 class JSONEncodedObj(sqlalchemy.types.TypeDecorator):
     """
     Represents an immutable structure as a json-encoded string.
@@ -56,12 +51,12 @@ class JSONEncodedObj(sqlalchemy.types.Ty

     impl = UnicodeText
     safe = True
-    enforce_unicode = True
+    enforce_str = True

     def __init__(self, *args, **kwargs):
         self.default = kwargs.pop('default', None)
         self.safe = kwargs.pop('safe_json', self.safe)
-        self.enforce_unicode = kwargs.pop('enforce_unicode', self.enforce_unicode)
+        self.enforce_str = kwargs.pop('enforce_str', self.enforce_str)
         self.dialect_map = kwargs.pop('dialect_map', {})
         super(JSONEncodedObj, self).__init__(*args, **kwargs)

@@ -74,9 +69,10 @@ class JSONEncodedObj(sqlalchemy.types.Ty
         if isinstance(value, JsonRaw):
             value = value
         elif value is not None:
-            value = json.dumps(value)
-            if self.enforce_unicode:
-                value = safe_unicode(value)
+            if self.enforce_str:
+                value = ext_json.str_json(value)
+            else:
+                value = ext_json.json.dumps(value)
         return value

     def process_result_value(self, value, dialect):
@@ -85,8 +81,8 @@ class JSONEncodedObj(sqlalchemy.types.Ty

         if value is not None:
             try:
-                value = json.loads(value, object_pairs_hook=DictClass)
-            except Exception as e:
+                value = ext_json.json.loads(value)
+            except Exception:
                 if self.safe and self.default is not None:
                     return self.default()
                 else:
@@ -95,6 +91,7 @@ class JSONEncodedObj(sqlalchemy.types.Ty


 class MutationObj(Mutable):
+
     @classmethod
     def coerce(cls, key, value):
         if isinstance(value, dict) and not isinstance(value, MutationDict):
@@ -156,90 +153,6 @@ class MutationObj(Mutable):
                      propagate=True)


-class MutationDict(MutationObj, DictClass):
-    @classmethod
-    def coerce(cls, key, value):
-        """Convert plain dictionary to MutationDict"""
-        self = MutationDict(
-            (k, MutationObj.coerce(key, v)) for (k, v) in value.items())
-        self._key = key
-        return self
-
-    def de_coerce(self):
-        return dict(self)
-
-    def __setitem__(self, key, value):
-        # Due to the way OrderedDict works, this is called during __init__.
-        # At this time we don't have a key set, but what is more, the value
-        # being set has already been coerced. So special case this and skip.
-        if hasattr(self, '_key'):
-            value = MutationObj.coerce(self._key, value)
-        DictClass.__setitem__(self, key, value)
-        self.changed()
-
-    def __delitem__(self, key):
-        DictClass.__delitem__(self, key)
-        self.changed()
-
-    def __setstate__(self, state):
-        self.__dict__ = state
-
-    def __reduce_ex__(self, proto):
-        # support pickling of MutationDicts
-        d = dict(self)
-        return (self.__class__, (d,))
-
-
-class MutationList(MutationObj, list):
-    @classmethod
-    def coerce(cls, key, value):
-        """Convert plain list to MutationList"""
-        self = MutationList((MutationObj.coerce(key, v) for v in value))
-        self._key = key
-        return self
-
-    def de_coerce(self):
-        return list(self)
-
-    def __setitem__(self, idx, value):
-        list.__setitem__(self, idx, MutationObj.coerce(self._key, value))
-        self.changed()
-
-    def __setslice__(self, start, stop, values):
-        list.__setslice__(self, start, stop,
-                          (MutationObj.coerce(self._key, v) for v in values))
-        self.changed()
-
-    def __delitem__(self, idx):
-        list.__delitem__(self, idx)
-        self.changed()
-
-    def __delslice__(self, start, stop):
-        list.__delslice__(self, start, stop)
-        self.changed()
-
-    def append(self, value):
-        list.append(self, MutationObj.coerce(self._key, value))
-        self.changed()
-
-    def insert(self, idx, value):
-        list.insert(self, idx, MutationObj.coerce(self._key, value))
-        self.changed()
-
-    def extend(self, values):
-        list.extend(self, (MutationObj.coerce(self._key, v) for v in values))
-        self.changed()
-
-    def pop(self, *args, **kw):
-        value = list.pop(self, *args, **kw)
-        self.changed()
-        return value
-
-    def remove(self, value):
-        list.remove(self, value)
-        self.changed()
-
-
 def JsonType(impl=None, **kwargs):
     """
     Helper for using a mutation obj, it allows to use .with_variant easily.
@@ -253,26 +166,8 @@ def JsonType(impl=None, **kwargs):
     if impl == 'list':
         return JSONEncodedObj(default=list, **kwargs)
     elif impl == 'dict':
-        return JSONEncodedObj(default=DictClass, **kwargs)
+        return JSONEncodedObj(default=dict, **kwargs)
     else:
         return JSONEncodedObj(**kwargs)


-JSON = MutationObj.as_mutable(JsonType())
-"""
-A type to encode/decode JSON on the fly
-
-sqltype is the string type for the underlying DB column::
-
-    Column(JSON) (defaults to UnicodeText)
-"""
-
-JSONDict = MutationObj.as_mutable(JsonType('dict'))
-"""
-A type to encode/decode JSON dictionaries on the fly
-"""
-
-JSONList = MutationObj.as_mutable(JsonType('list'))
-"""
-A type to encode/decode JSON lists on the fly
-"""