audit-logs: added *basic* support for NOT query term in audit logs.
marcink - r1824:fdf0761c default
@@ -1,2035 +1,2036 @@
# -*- coding: utf-8 -*-

# Copyright (C) 2010-2017 RhodeCode GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License, version 3
# (only), as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This program is dual-licensed. If you wish to learn more about the
# RhodeCode Enterprise Edition, including its added features, Support services,
# and proprietary license terms, please see https://rhodecode.com/licenses/

"""
Helper functions

Consists of functions to typically be used within templates, but also
available to Controllers. This module is available to both as 'h'.
"""

import random
import hashlib
import StringIO
import urllib
import math
import logging
import re
import urlparse
import time
import string
import hashlib
from collections import OrderedDict

import pygments
import itertools
import fnmatch

from datetime import datetime
from functools import partial
from pygments.formatters.html import HtmlFormatter
from pygments import highlight as code_highlight
from pygments.lexers import (
    get_lexer_by_name, get_lexer_for_filename, get_lexer_for_mimetype)
from pylons import url as pylons_url
from pylons.i18n.translation import _, ungettext
from pyramid.threadlocal import get_current_request

from webhelpers.html import literal, HTML, escape
from webhelpers.html.tools import *
from webhelpers.html.builder import make_tag
from webhelpers.html.tags import auto_discovery_link, checkbox, css_classes, \
    end_form, file, form as wh_form, hidden, image, javascript_link, link_to, \
    link_to_if, link_to_unless, ol, required_legend, select, stylesheet_link, \
    submit, text, password, textarea, title, ul, xml_declaration, radio
from webhelpers.html.tools import auto_link, button_to, highlight, \
    js_obfuscate, mail_to, strip_links, strip_tags, tag_re
from webhelpers.pylonslib import Flash as _Flash
from webhelpers.text import chop_at, collapse, convert_accented_entities, \
    convert_misc_entities, lchop, plural, rchop, remove_formatting, \
    replace_whitespace, urlify, truncate, wrap_paragraphs
from webhelpers.date import time_ago_in_words
from webhelpers.paginate import Page as _Page
from webhelpers.html.tags import _set_input_attrs, _set_id_attr, \
    convert_boolean_attrs, NotGiven, _make_safe_id_component
from webhelpers2.number import format_byte_size

from rhodecode.lib.action_parser import action_parser
from rhodecode.lib.ext_json import json
from rhodecode.lib.utils import repo_name_slug, get_custom_lexer
from rhodecode.lib.utils2 import str2bool, safe_unicode, safe_str, \
    get_commit_safe, datetime_to_time, time_to_datetime, time_to_utcdatetime, \
    AttributeDict, safe_int, md5, md5_safe
from rhodecode.lib.markup_renderer import MarkupRenderer, relative_links
from rhodecode.lib.vcs.exceptions import CommitDoesNotExistError
from rhodecode.lib.vcs.backends.base import BaseChangeset, EmptyCommit
from rhodecode.config.conf import DATE_FORMAT, DATETIME_FORMAT
from rhodecode.model.changeset_status import ChangesetStatusModel
from rhodecode.model.db import Permission, User, Repository
from rhodecode.model.repo_group import RepoGroupModel
from rhodecode.model.settings import IssueTrackerSettingsModel

log = logging.getLogger(__name__)


DEFAULT_USER = User.DEFAULT_USER
DEFAULT_USER_EMAIL = User.DEFAULT_USER_EMAIL


def url(*args, **kw):
    return pylons_url(*args, **kw)


def pylons_url_current(*args, **kw):
    """
    This function overrides pylons.url.current() which returns the current
    path so that it will also work from a pyramid only context. This
    should be removed once port to pyramid is complete.
    """
    if not args and not kw:
        request = get_current_request()
        return request.path
    return pylons_url.current(*args, **kw)

url.current = pylons_url_current


def url_replace(**qargs):
    """ Returns the current request url while replacing query string args """

    request = get_current_request()
    new_args = request.GET.mixed()
    new_args.update(qargs)
    return url('', **new_args)


def asset(path, ver=None, **kwargs):
    """
    Helper to generate a static asset file path for rhodecode assets

    eg. h.asset('images/image.png', ver='3923')

    :param path: path of asset
    :param ver: optional version query param to append as ?ver=
    """
    request = get_current_request()
    query = {}
    query.update(kwargs)
    if ver:
        query = {'ver': ver}
    return request.static_path(
        'rhodecode:public/{}'.format(path), _query=query)


default_html_escape_table = {
    ord('&'): u'&amp;',
    ord('<'): u'&lt;',
    ord('>'): u'&gt;',
    ord('"'): u'&quot;',
    ord("'"): u'&#39;',
}


def html_escape(text, html_escape_table=default_html_escape_table):
    """Produce entities within text."""
    return text.translate(html_escape_table)
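# Illustrative sketch (not part of the original module): html_escape maps each
# special character through the table above, so on a unicode string it behaves
# roughly like this:
#
#   >>> html_escape(u'<a href="x">')
#   u'&lt;a href=&quot;x&quot;&gt;'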


def chop_at_smart(s, sub, inclusive=False, suffix_if_chopped=None):
    """
    Truncate string ``s`` at the first occurrence of ``sub``.

    If ``inclusive`` is true, truncate just after ``sub`` rather than at it.
    """
    suffix_if_chopped = suffix_if_chopped or ''
    pos = s.find(sub)
    if pos == -1:
        return s

    if inclusive:
        pos += len(sub)

    chopped = s[:pos]
    left = s[pos:].strip()

    if left and suffix_if_chopped:
        chopped += suffix_if_chopped

    return chopped
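# Illustrative sketch (not part of the original module): with a suffix given,
# everything from the first occurrence of ``sub`` onwards is replaced by it:
#
#   >>> chop_at_smart('some long text [stop] more text', '[stop]',
#   ...               suffix_if_chopped='...')
#   'some long text ...'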


def shorter(text, size=20):
    postfix = '...'
    if len(text) > size:
        return text[:size - len(postfix)] + postfix
    return text
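# Illustrative sketch (not part of the original module): the ellipsis counts
# towards the size budget, so the result is never longer than ``size``:
#
#   >>> shorter('abcdefghijklmnop', size=10)
#   'abcdefg...'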


def _reset(name, value=None, id=NotGiven, type="reset", **attrs):
    """
    Reset button
    """
    _set_input_attrs(attrs, type, name, value)
    _set_id_attr(attrs, id, name)
    convert_boolean_attrs(attrs, ["disabled"])
    return HTML.input(**attrs)

reset = _reset
safeid = _make_safe_id_component


def branding(name, length=40):
    return truncate(name, length, indicator="")


def FID(raw_id, path):
    """
    Creates a unique ID for filenode based on it's hash of path and commit
    it's safe to use in urls

    :param raw_id:
    :param path:
    """

    return 'c-%s-%s' % (short_id(raw_id), md5_safe(path)[:12])


class _GetError(object):
    """Get error from form_errors, and represent it as span wrapped error
    message

    :param field_name: field to fetch errors for
    :param form_errors: form errors dict
    """

    def __call__(self, field_name, form_errors):
        tmpl = """<span class="error_msg">%s</span>"""
        if form_errors and field_name in form_errors:
            return literal(tmpl % form_errors.get(field_name))

get_error = _GetError()


class _ToolTip(object):

    def __call__(self, tooltip_title, trim_at=50):
        """
        Special function just to wrap our text into nice formatted
        autowrapped text

        :param tooltip_title:
        """
        tooltip_title = escape(tooltip_title)
        tooltip_title = tooltip_title.replace('<', '&lt;').replace('>', '&gt;')
        return tooltip_title
tooltip = _ToolTip()


def files_breadcrumbs(repo_name, commit_id, file_path):
    if isinstance(file_path, str):
        file_path = safe_unicode(file_path)

    # TODO: johbo: Is this always a url like path, or is this operating
    # system dependent?
    path_segments = file_path.split('/')

    repo_name_html = escape(repo_name)
    if len(path_segments) == 1 and path_segments[0] == '':
        url_segments = [repo_name_html]
    else:
        url_segments = [
            link_to(
                repo_name_html,
                url('files_home',
                    repo_name=repo_name,
                    revision=commit_id,
                    f_path=''),
                class_='pjax-link')]

    last_cnt = len(path_segments) - 1
    for cnt, segment in enumerate(path_segments):
        if not segment:
            continue
        segment_html = escape(segment)

        if cnt != last_cnt:
            url_segments.append(
                link_to(
                    segment_html,
                    url('files_home',
                        repo_name=repo_name,
                        revision=commit_id,
                        f_path='/'.join(path_segments[:cnt + 1])),
                    class_='pjax-link'))
        else:
            url_segments.append(segment_html)

    return literal('/'.join(url_segments))


class CodeHtmlFormatter(HtmlFormatter):
    """
    My code Html Formatter for source codes
    """

    def wrap(self, source, outfile):
        return self._wrap_div(self._wrap_pre(self._wrap_code(source)))

    def _wrap_code(self, source):
        for cnt, it in enumerate(source):
            i, t = it
            t = '<div id="L%s">%s</div>' % (cnt + 1, t)
            yield i, t

    def _wrap_tablelinenos(self, inner):
        dummyoutfile = StringIO.StringIO()
        lncount = 0
        for t, line in inner:
            if t:
                lncount += 1
            dummyoutfile.write(line)

        fl = self.linenostart
        mw = len(str(lncount + fl - 1))
        sp = self.linenospecial
        st = self.linenostep
        la = self.lineanchors
        aln = self.anchorlinenos
        nocls = self.noclasses
        if sp:
            lines = []

            for i in range(fl, fl + lncount):
                if i % st == 0:
                    if i % sp == 0:
                        if aln:
                            lines.append('<a href="#%s%d" class="special">%*d</a>' %
                                         (la, i, mw, i))
                        else:
                            lines.append('<span class="special">%*d</span>' % (mw, i))
                    else:
                        if aln:
                            lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
                        else:
                            lines.append('%*d' % (mw, i))
                else:
                    lines.append('')
            ls = '\n'.join(lines)
        else:
            lines = []
            for i in range(fl, fl + lncount):
                if i % st == 0:
                    if aln:
                        lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
                    else:
                        lines.append('%*d' % (mw, i))
                else:
                    lines.append('')
            ls = '\n'.join(lines)

        # in case you wonder about the seemingly redundant <div> here: since the
        # content in the other cell also is wrapped in a div, some browsers in
        # some configurations seem to mess up the formatting...
        if nocls:
            yield 0, ('<table class="%stable">' % self.cssclass +
                      '<tr><td><div class="linenodiv" '
                      'style="background-color: #f0f0f0; padding-right: 10px">'
                      '<pre style="line-height: 125%">' +
                      ls + '</pre></div></td><td id="hlcode" class="code">')
        else:
            yield 0, ('<table class="%stable">' % self.cssclass +
                      '<tr><td class="linenos"><div class="linenodiv"><pre>' +
                      ls + '</pre></div></td><td id="hlcode" class="code">')
        yield 0, dummyoutfile.getvalue()
        yield 0, '</td></tr></table>'


class SearchContentCodeHtmlFormatter(CodeHtmlFormatter):
    def __init__(self, **kw):
        # only show these line numbers if set
        self.only_lines = kw.pop('only_line_numbers', [])
        self.query_terms = kw.pop('query_terms', [])
        self.max_lines = kw.pop('max_lines', 5)
        self.line_context = kw.pop('line_context', 3)
        self.url = kw.pop('url', None)

        super(CodeHtmlFormatter, self).__init__(**kw)

    def _wrap_code(self, source):
        for cnt, it in enumerate(source):
            i, t = it
            t = '<pre>%s</pre>' % t
            yield i, t

    def _wrap_tablelinenos(self, inner):
        yield 0, '<table class="code-highlight %stable">' % self.cssclass

        last_shown_line_number = 0
        current_line_number = 1

        for t, line in inner:
            if not t:
                yield t, line
                continue

            if current_line_number in self.only_lines:
                if last_shown_line_number + 1 != current_line_number:
                    yield 0, '<tr>'
                    yield 0, '<td class="line">...</td>'
                    yield 0, '<td id="hlcode" class="code"></td>'
                    yield 0, '</tr>'

                yield 0, '<tr>'
                if self.url:
                    yield 0, '<td class="line"><a href="%s#L%i">%i</a></td>' % (
                        self.url, current_line_number, current_line_number)
                else:
                    yield 0, '<td class="line"><a href="">%i</a></td>' % (
                        current_line_number)
                yield 0, '<td id="hlcode" class="code">' + line + '</td>'
                yield 0, '</tr>'

                last_shown_line_number = current_line_number

            current_line_number += 1


        yield 0, '</table>'


def extract_phrases(text_query):
    """
    Extracts phrases from search term string making sure phrases
    contained in double quotes are kept together - and discarding empty values
    or fully whitespace values eg.

    'some text "a phrase" more' => ['some', 'text', 'a phrase', 'more']

    """

    in_phrase = False
    buf = ''
    phrases = []
    for char in text_query:
        if in_phrase:
            if char == '"': # end phrase
                phrases.append(buf)
                buf = ''
                in_phrase = False
                continue
            else:
                buf += char
                continue
        else:
            if char == '"': # start phrase
                in_phrase = True
                phrases.append(buf)
                buf = ''
                continue
            elif char == ' ':
                phrases.append(buf)
                buf = ''
                continue
            else:
                buf += char

    phrases.append(buf)
    phrases = [phrase.strip() for phrase in phrases if phrase.strip()]
    return phrases


def get_matching_offsets(text, phrases):
    """
    Returns a list of string offsets in `text` that the list of `terms` match

    >>> get_matching_offsets('some text here', ['some', 'here'])
    [(0, 4), (10, 14)]

    """
    offsets = []
    for phrase in phrases:
        for match in re.finditer(phrase, text):
            offsets.append((match.start(), match.end()))

    return offsets


def normalize_text_for_matching(x):
    """
    Replaces all non alnum characters to spaces and lower cases the string,
    useful for comparing two text strings without punctuation
    """
    return re.sub(r'[^\w]', ' ', x.lower())
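# Illustrative sketch (not part of the original module): punctuation is turned
# into spaces rather than stripped, so character offsets keep their alignment:
#
#   >>> normalize_text_for_matching('Some-Text!')
#   'some text '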


def get_matching_line_offsets(lines, terms):
    """ Return a set of `lines` indices (starting from 1) matching a
    text search query, along with `context` lines above/below matching lines

    :param lines: list of strings representing lines
    :param terms: search term string to match in lines eg. 'some text'
    :param context: number of lines above/below a matching line to add to result
    :param max_lines: cut off for lines of interest
    eg.

    text = '''
    words words words
    words words words
    some text some
    words words words
    words words words
    text here what
    '''
    get_matching_line_offsets(text, 'text', context=1)
    {3: [(5, 9)], 6: [(0, 4)]}

    """
    matching_lines = {}
    phrases = [normalize_text_for_matching(phrase)
               for phrase in extract_phrases(terms)]

    for line_index, line in enumerate(lines, start=1):
        match_offsets = get_matching_offsets(
            normalize_text_for_matching(line), phrases)
        if match_offsets:
            matching_lines[line_index] = match_offsets

    return matching_lines
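# Illustrative sketch (not part of the original module): both the lines and the
# search terms go through normalize_text_for_matching() before matching, and
# line indices start at 1:
#
#   >>> get_matching_line_offsets(['words words', 'some text here'], 'text')
#   {2: [(5, 9)]}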


def hsv_to_rgb(h, s, v):
    """ Convert hsv color values to rgb """

    if s == 0.0:
        return v, v, v
    i = int(h * 6.0) # XXX assume int() truncates!
    f = (h * 6.0) - i
    p = v * (1.0 - s)
    q = v * (1.0 - s * f)
    t = v * (1.0 - s * (1.0 - f))
    i = i % 6
    if i == 0:
        return v, t, p
    if i == 1:
        return q, v, p
    if i == 2:
        return p, v, t
    if i == 3:
        return p, q, v
    if i == 4:
        return t, p, v
    if i == 5:
        return v, p, q
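# Illustrative sketch (not part of the original module): hue, saturation and
# value are all on a 0..1 scale; with zero saturation the value channel is
# returned for all three components:
#
#   >>> hsv_to_rgb(0.0, 0.0, 0.95)
#   (0.95, 0.95, 0.95)
#   >>> hsv_to_rgb(0.0, 1.0, 1.0)   # pure red
#   (1.0, 0.0, 0.0)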


def unique_color_generator(n=10000, saturation=0.10, lightness=0.95):
    """
    Generator for getting n of evenly distributed colors using
    hsv color and golden ratio. It always return same order of colors

    :param n: number of colors to generate
    :param saturation: saturation of returned colors
    :param lightness: lightness of returned colors
    :returns: RGB tuple
    """

    golden_ratio = 0.618033988749895
    h = 0.22717784590367374

    for _ in xrange(n):
        h += golden_ratio
        h %= 1
        HSV_tuple = [h, saturation, lightness]
        RGB_tuple = hsv_to_rgb(*HSV_tuple)
        yield map(lambda x: str(int(x * 256)), RGB_tuple)


def color_hasher(n=10000, saturation=0.10, lightness=0.95):
    """
    Returns a function which when called with an argument returns a unique
    color for that argument, eg.

    :param n: number of colors to generate
    :param saturation: saturation of returned colors
    :param lightness: lightness of returned colors
    :returns: css RGB string

    >>> color_hash = color_hasher()
    >>> color_hash('hello')
    'rgb(34, 12, 59)'
    >>> color_hash('hello')
    'rgb(34, 12, 59)'
    >>> color_hash('other')
    'rgb(90, 224, 159)'
    """

    color_dict = {}
    cgenerator = unique_color_generator(
        saturation=saturation, lightness=lightness)

    def get_color_string(thing):
        if thing in color_dict:
            col = color_dict[thing]
        else:
            col = color_dict[thing] = cgenerator.next()
        return "rgb(%s)" % (', '.join(col))

    return get_color_string


def get_lexer_safe(mimetype=None, filepath=None):
    """
    Tries to return a relevant pygments lexer using mimetype/filepath name,
    defaulting to plain text if none could be found
    """
    lexer = None
    try:
        if mimetype:
            lexer = get_lexer_for_mimetype(mimetype)
        if not lexer:
            lexer = get_lexer_for_filename(filepath)
    except pygments.util.ClassNotFound:
        pass

    if not lexer:
        lexer = get_lexer_by_name('text')

    return lexer
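# Illustrative sketch (not part of the original module): both pygments lookups
# may raise pygments.util.ClassNotFound, which is what the fallback to the
# plain 'text' lexer covers, e.g.:
#
#   get_lexer_safe(mimetype='text/x-python')      # -> PythonLexer
#   get_lexer_safe(filepath='setup.cfg')          # -> IniLexer
#   get_lexer_safe(filepath='no-extension-here')  # -> TextLexer fallback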


def get_lexer_for_filenode(filenode):
    lexer = get_custom_lexer(filenode.extension) or filenode.lexer
    return lexer


def pygmentize(filenode, **kwargs):
    """
    pygmentize function using pygments

    :param filenode:
    """
    lexer = get_lexer_for_filenode(filenode)
    return literal(code_highlight(filenode.content, lexer,
                                  CodeHtmlFormatter(**kwargs)))


def is_following_repo(repo_name, user_id):
    from rhodecode.model.scm import ScmModel
    return ScmModel().is_following_repo(repo_name, user_id)


class _Message(object):
    """A message returned by ``Flash.pop_messages()``.

    Converting the message to a string returns the message text. Instances
    also have the following attributes:

    * ``message``: the message text.
    * ``category``: the category specified when the message was created.
    """

    def __init__(self, category, message):
        self.category = category
        self.message = message

    def __str__(self):
        return self.message

    __unicode__ = __str__

    def __html__(self):
        return escape(safe_unicode(self.message))


class Flash(_Flash):

    def pop_messages(self):
        """Return all accumulated messages and delete them from the session.

        The return value is a list of ``Message`` objects.
        """
        from pylons import session

        messages = []

        # Pop the 'old' pylons flash messages. They are tuples of the form
        # (category, message)
        for cat, msg in session.pop(self.session_key, []):
            messages.append(_Message(cat, msg))

        # Pop the 'new' pyramid flash messages for each category as list
        # of strings.
        for cat in self.categories:
            for msg in session.pop_flash(queue=cat):
                messages.append(_Message(cat, msg))
        # Map messages from the default queue to the 'notice' category.
        for msg in session.pop_flash():
            messages.append(_Message('notice', msg))

        session.save()
        return messages

    def json_alerts(self):
        payloads = []
        messages = flash.pop_messages()
        if messages:
            for message in messages:
                subdata = {}
                if hasattr(message.message, 'rsplit'):
                    flash_data = message.message.rsplit('|DELIM|', 1)
                    org_message = flash_data[0]
                    if len(flash_data) > 1:
                        subdata = json.loads(flash_data[1])
                else:
                    org_message = message.message
                payloads.append({
                    'message': {
                        'message': u'{}'.format(org_message),
                        'level': message.category,
                        'force': True,
                        'subdata': subdata
                    }
                })
        return json.dumps(payloads)

flash = Flash()

#==============================================================================
# SCM FILTERS available via h.
#==============================================================================
from rhodecode.lib.vcs.utils import author_name, author_email
from rhodecode.lib.utils2 import credentials_filter, age as _age
from rhodecode.model.db import User, ChangesetStatus

age = _age
capitalize = lambda x: x.capitalize()
email = author_email
short_id = lambda x: x[:12]
hide_credentials = lambda x: ''.join(credentials_filter(x))


def age_component(datetime_iso, value=None, time_is_local=False):
    title = value or format_date(datetime_iso)
    tzinfo = '+00:00'

    # detect if we have a timezone info, otherwise, add it
    if isinstance(datetime_iso, datetime) and not datetime_iso.tzinfo:
        if time_is_local:
            tzinfo = time.strftime("+%H:%M",
                time.gmtime(
                    (datetime.now() - datetime.utcnow()).seconds + 1
                )
            )

    return literal(
        '<time class="timeago tooltip" '
        'title="{1}{2}" datetime="{0}{2}">{1}</time>'.format(
            datetime_iso, title, tzinfo))
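# Illustrative sketch (not part of the original module): for a plain ISO string
# and an explicit value the helper renders a <time> tag that the client-side
# "timeago" code picks up, roughly:
#
#   age_component('2017-01-01T10:00:00', value='3 days ago')
#   # '<time class="timeago tooltip" title="3 days ago+00:00"
#   #  datetime="2017-01-01T10:00:00+00:00">3 days ago</time>'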


def _shorten_commit_id(commit_id):
    from rhodecode import CONFIG
    def_len = safe_int(CONFIG.get('rhodecode_show_sha_length', 12))
    return commit_id[:def_len]


def show_id(commit):
    """
    Configurable function that shows ID
    by default it's r123:fffeeefffeee

    :param commit: commit instance
    """
    from rhodecode import CONFIG
    show_idx = str2bool(CONFIG.get('rhodecode_show_revision_number', True))

    raw_id = _shorten_commit_id(commit.raw_id)
    if show_idx:
        return 'r%s:%s' % (commit.idx, raw_id)
    else:
        return '%s' % (raw_id, )


def format_date(date):
    """
    use a standardized formatting for dates used in RhodeCode

    :param date: date/datetime object
    :return: formatted date
    """

    if date:
        _fmt = "%a, %d %b %Y %H:%M:%S"
        return safe_unicode(date.strftime(_fmt))

    return u""


class _RepoChecker(object):

    def __init__(self, backend_alias):
        self._backend_alias = backend_alias

    def __call__(self, repository):
        if hasattr(repository, 'alias'):
            _type = repository.alias
        elif hasattr(repository, 'repo_type'):
            _type = repository.repo_type
        else:
            _type = repository
        return _type == self._backend_alias

is_git = _RepoChecker('git')
is_hg = _RepoChecker('hg')
is_svn = _RepoChecker('svn')


def get_repo_type_by_name(repo_name):
    repo = Repository.get_by_repo_name(repo_name)
    return repo.repo_type


def is_svn_without_proxy(repository):
    if is_svn(repository):
        from rhodecode.model.settings import VcsSettingsModel
        conf = VcsSettingsModel().get_ui_settings_as_config_obj()
        return not str2bool(conf.get('vcs_svn_proxy', 'http_requests_enabled'))
    return False


def discover_user(author):
    """
    Tries to discover RhodeCode User based on the author string. Author string
    is typically `FirstName LastName <email@address.com>`
    """

    # if author is already an instance use it for extraction
    if isinstance(author, User):
        return author

    # Valid email in the attribute passed, see if they're in the system
    _email = author_email(author)
    if _email != '':
        user = User.get_by_email(_email, case_insensitive=True, cache=True)
        if user is not None:
            return user

    # Maybe it's a username, we try to extract it and fetch by username ?
    _author = author_name(author)
    user = User.get_by_username(_author, case_insensitive=True, cache=True)
    if user is not None:
        return user

    return None


def email_or_none(author):
    # extract email from the commit string
    _email = author_email(author)

    # If we have an email, use it, otherwise
    # see if it contains a username we can get an email from
    if _email != '':
        return _email
    else:
        user = User.get_by_username(
            author_name(author), case_insensitive=True, cache=True)

        if user is not None:
            return user.email

    # No valid email, not a valid user in the system, none!
    return None


def link_to_user(author, length=0, **kwargs):
    user = discover_user(author)
    # user can be None, but if we have it already it means we can re-use it
    # in the person() function, so we save 1 intensive-query
    if user:
        author = user

    display_person = person(author, 'username_or_name_or_email')
    if length:
        display_person = shorter(display_person, length)

    if user:
        return link_to(
            escape(display_person),
            route_path('user_profile', username=user.username),
            **kwargs)
    else:
        return escape(display_person)


def person(author, show_attr="username_and_name"):
    user = discover_user(author)
    if user:
        return getattr(user, show_attr)
    else:
        _author = author_name(author)
        _email = email(author)
        return _author or _email


def author_string(email):
    if email:
        user = User.get_by_email(email, case_insensitive=True, cache=True)
        if user:
            if user.first_name or user.last_name:
                return '%s %s &lt;%s&gt;' % (
                    user.first_name, user.last_name, email)
            else:
                return email
        else:
            return email
    else:
        return None


def person_by_id(id_, show_attr="username_and_name"):
    # attr to return from fetched user
    person_getter = lambda usr: getattr(usr, show_attr)

    #maybe it's an ID ?
    if str(id_).isdigit() or isinstance(id_, int):
        id_ = int(id_)
        user = User.get(id_)
        if user is not None:
            return person_getter(user)
    return id_


def gravatar_with_user(author, show_disabled=False):
    from rhodecode.lib.utils import PartialRenderer
    _render = PartialRenderer('base/base.mako')
    return _render('gravatar_with_user', author, show_disabled=show_disabled)


def desc_stylize(value):
    """
    converts tags from value into html equivalent

    :param value:
    """
    if not value:
        return ''

    value = re.sub(r'\[see\ \=\>\ *([a-zA-Z0-9\/\=\?\&\ \:\/\.\-]*)\]',
                   '<div class="metatag" tag="see">see =&gt; \\1 </div>', value)
    value = re.sub(r'\[license\ \=\>\ *([a-zA-Z0-9\/\=\?\&\ \:\/\.\-]*)\]',
                   '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/\\1">\\1</a></div>', value)
    value = re.sub(r'\[(requires|recommends|conflicts|base)\ \=\>\ *([a-zA-Z0-9\-\/]*)\]',
                   '<div class="metatag" tag="\\1">\\1 =&gt; <a href="/\\2">\\2</a></div>', value)
    value = re.sub(r'\[(lang|language)\ \=\>\ *([a-zA-Z\-\/\#\+]*)\]',
                   '<div class="metatag" tag="lang">\\2</div>', value)
    value = re.sub(r'\[([a-z]+)\]',
                   '<div class="metatag" tag="\\1">\\1</div>', value)

    return value
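# Illustrative sketch (not part of the original module): a recognized
# ``[key => value]`` tag in a repository description becomes a metatag div,
# e.g.:
#
#   desc_stylize('[requires => rhodecode-tools]')
#   # '<div class="metatag" tag="requires">requires =&gt; '
#   # '<a href="/rhodecode-tools">rhodecode-tools</a></div>'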
947
947
948
948
def escaped_stylize(value):
    """
    Converts tags from value into their html equivalents, but escapes the
    value first
    """
    if not value:
        return ''

    # Using the default webhelpers escape method, but we have to force it into
    # a plain unicode string instead of a markup tag so it can be used in
    # regex expressions
    value = unicode(escape(safe_unicode(value)))

    value = re.sub(r'\[see\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]',
                   '<div class="metatag" tag="see">see =&gt; \\1 </div>', value)
    value = re.sub(r'\[license\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]',
                   '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/\\1">\\1</a></div>', value)
    value = re.sub(r'\[(requires|recommends|conflicts|base)\ \=\&gt;\ *([a-zA-Z0-9\-\/]*)\]',
                   '<div class="metatag" tag="\\1">\\1 =&gt; <a href="/\\2">\\2</a></div>', value)
    value = re.sub(r'\[(lang|language)\ \=\&gt;\ *([a-zA-Z\-\/\#\+]*)\]',
                   '<div class="metatag" tag="lang">\\2</div>', value)
    value = re.sub(r'\[([a-z]+)\]',
                   '<div class="metatag" tag="\\1">\\1</div>', value)

    return value


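# Illustrative sketch (not part of the original module): how the metatag
# helpers above rewrite bracketed tags in a repository description. The sample
# description and repo name are made up for demonstration purposes.
def _example_metatag_stylize():
    description = u'[stable] [lang => python] [requires => some-other-repo]'
    html = escaped_stylize(description)
    # roughly produces:
    #   <div class="metatag" tag="stable">stable</div>
    #   <div class="metatag" tag="lang">python</div>
    #   <div class="metatag" tag="requires">requires =&gt;
    #       <a href="/some-other-repo">some-other-repo</a></div>
    return html
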
def bool2icon(value):
    """
    Returns the given value as an html element whose classes render it as a
    true/false icon

    :param value: value to convert to an html node
    """

    if value:  # does bool conversion
        return HTML.tag('i', class_="icon-true")
    else:  # not true as bool
        return HTML.tag('i', class_="icon-false")


#==============================================================================
# PERMS
#==============================================================================
from rhodecode.lib.auth import HasPermissionAny, HasPermissionAll, \
    HasRepoPermissionAny, HasRepoPermissionAll, HasRepoGroupPermissionAll, \
    HasRepoGroupPermissionAny, HasRepoPermissionAnyApi, get_csrf_token, \
    csrf_token_key


#==============================================================================
# GRAVATAR URL
#==============================================================================
class InitialsGravatar(object):
    def __init__(self, email_address, first_name, last_name, size=30,
                 background=None, text_color='#fff'):
        self.size = size
        self.first_name = first_name
        self.last_name = last_name
        self.email_address = email_address
        self.background = background or self.str2color(email_address)
        self.text_color = text_color

    def get_color_bank(self):
        """
        returns a predefined list of colors that gravatars can use.
        Those are randomized distinct colors that guarantee readability and
        uniqueness.

        generated with: http://phrogz.net/css/distinct-colors.html
        """
        return [
            '#bf3030', '#a67f53', '#00ff00', '#5989b3', '#392040', '#d90000',
            '#402910', '#204020', '#79baf2', '#a700b3', '#bf6060', '#7f5320',
            '#008000', '#003059', '#ee00ff', '#ff0000', '#8c4b00', '#007300',
            '#005fb3', '#de73e6', '#ff4040', '#ffaa00', '#3df255', '#203140',
            '#47004d', '#591616', '#664400', '#59b365', '#0d2133', '#83008c',
            '#592d2d', '#bf9f60', '#73e682', '#1d3f73', '#73006b', '#402020',
            '#b2862d', '#397341', '#597db3', '#e600d6', '#a60000', '#736039',
            '#00b318', '#79aaf2', '#330d30', '#ff8080', '#403010', '#16591f',
            '#002459', '#8c4688', '#e50000', '#ffbf40', '#00732e', '#102340',
            '#bf60ac', '#8c4646', '#cc8800', '#00a642', '#1d3473', '#b32d98',
            '#660e00', '#ffd580', '#80ffb2', '#7391e6', '#733967', '#d97b6c',
            '#8c5e00', '#59b389', '#3967e6', '#590047', '#73281d', '#665200',
            '#00e67a', '#2d50b3', '#8c2377', '#734139', '#b2982d', '#16593a',
            '#001859', '#ff00aa', '#a65e53', '#ffcc00', '#0d3321', '#2d3959',
            '#731d56', '#401610', '#4c3d00', '#468c6c', '#002ca6', '#d936a3',
            '#d94c36', '#403920', '#36d9a3', '#0d1733', '#592d4a', '#993626',
            '#cca300', '#00734d', '#46598c', '#8c005e', '#7f1100', '#8c7000',
            '#00a66f', '#7382e6', '#b32d74', '#d9896c', '#ffe680', '#1d7362',
            '#364cd9', '#73003d', '#d93a00', '#998a4d', '#59b3a1', '#5965b3',
            '#e5007a', '#73341d', '#665f00', '#00b38f', '#0018b3', '#59163a',
            '#b2502d', '#bfb960', '#00ffcc', '#23318c', '#a6537f', '#734939',
            '#b2a700', '#104036', '#3d3df2', '#402031', '#e56739', '#736f39',
            '#79f2ea', '#000059', '#401029', '#4c1400', '#ffee00', '#005953',
            '#101040', '#990052', '#402820', '#403d10', '#00ffee', '#0000d9',
            '#ff80c4', '#a66953', '#eeff00', '#00ccbe', '#8080ff', '#e673a1',
            '#a62c00', '#474d00', '#1a3331', '#46468c', '#733950', '#662900',
            '#858c23', '#238c85', '#0f0073', '#b20047', '#d9986c', '#becc00',
            '#396f73', '#281d73', '#ff0066', '#ff6600', '#dee673', '#59adb3',
            '#6559b3', '#590024', '#b2622d', '#98b32d', '#36ced9', '#332d59',
            '#40001a', '#733f1d', '#526600', '#005359', '#242040', '#bf6079',
            '#735039', '#cef23d', '#007780', '#5630bf', '#66001b', '#b24700',
            '#acbf60', '#1d6273', '#25008c', '#731d34', '#a67453', '#50592d',
            '#00ccff', '#6600ff', '#ff0044', '#4c1f00', '#8a994d', '#79daf2',
            '#a173e6', '#d93662', '#402310', '#aaff00', '#2d98b3', '#8c40ff',
            '#592d39', '#ff8c40', '#354020', '#103640', '#1a0040', '#331a20',
            '#331400', '#334d00', '#1d5673', '#583973', '#7f0022', '#4c3626',
            '#88cc00', '#36a3d9', '#3d0073', '#d9364c', '#33241a', '#698c23',
            '#5995b3', '#300059', '#e57382', '#7f3300', '#366600', '#00aaff',
            '#3a1659', '#733941', '#663600', '#74b32d', '#003c59', '#7f53a6',
            '#73000f', '#ff8800', '#baf279', '#79caf2', '#291040', '#a6293a',
            '#b2742d', '#587339', '#0077b3', '#632699', '#400009', '#d9a66c',
            '#294010', '#2d4a59', '#aa00ff', '#4c131b', '#b25f00', '#5ce600',
            '#267399', '#a336d9', '#990014', '#664e33', '#86bf60', '#0088ff',
            '#7700b3', '#593a16', '#073300', '#1d4b73', '#ac60bf', '#e59539',
            '#4f8c46', '#368dd9', '#5c0073'
        ]

    def rgb_to_hex_color(self, rgb_tuple):
        """
        Converts the passed rgb_tuple to a hex color string.

        :param rgb_tuple: tuple with 3 ints representing an rgb color space
        """
        return '#' + ("".join(map(chr, rgb_tuple)).encode('hex'))

    def email_to_int_list(self, email_str):
        """
        Get every byte of the hex digest value of email and turn it into an
        integer. Each value is always between 0-255.
        """
        digest = md5_safe(email_str.lower())
        return [int(digest[i * 2:i * 2 + 2], 16) for i in range(16)]

    def pick_color_bank_index(self, email_str, color_bank):
        return self.email_to_int_list(email_str)[0] % len(color_bank)

    def str2color(self, email_str):
        """
        Tries to map an email to a color using a stable algorithm.

        :param email_str:
        """
        color_bank = self.get_color_bank()
        # pick position (modulo its length so we always find it in the
        # bank, even if it's smaller than 256 values)
        pos = self.pick_color_bank_index(email_str, color_bank)
        return color_bank[pos]

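    # Illustrative sketch (not part of the original module): the avatar color
    # is a pure function of the email address - the first byte of its md5
    # digest indexes into the color bank, so the same address always maps to
    # the same color. The address below is a made-up example.
    def _example_str2color(self):
        color = self.str2color('jane.doe@example.com')
        # `color` is one of the entries returned by get_color_bank(), and
        # repeated calls with the same address return the same value
        return color
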
    def normalize_email(self, email_address):
        import unicodedata
        # default host used to fill in the fake/missing email
        default_host = u'localhost'

        if not email_address:
            email_address = u'%s@%s' % (User.DEFAULT_USER, default_host)

        email_address = safe_unicode(email_address)

        if u'@' not in email_address:
            email_address = u'%s@%s' % (email_address, default_host)

        if email_address.endswith(u'@'):
            email_address = u'%s%s' % (email_address, default_host)

        email_address = unicodedata.normalize('NFKD', email_address)\
            .encode('ascii', 'ignore')
        return email_address

    def get_initials(self):
        """
        Returns 2 letter initials calculated based on the input.
        The algorithm picks the first given email address, and takes the
        first letter of the part before @, and then the first letter of the
        server name. In case the part before @ is in the format
        `somestring.somestring2`, it replaces the server letter with the
        first letter of somestring2.

        In case the function was initialized with both first and last name,
        this overrides the extraction from email with the first letters of
        the first and last name. We add special logic to that functionality:
        in case the full name is compound, like Guido Von Rossum, we use the
        last part of the last name (Von Rossum), picking `R`.

        The function also normalizes non-ascii characters to their ascii
        representation, eg Ą => A
        """
        import unicodedata
        # replace non-ascii to ascii
        first_name = unicodedata.normalize(
            'NFKD', safe_unicode(self.first_name)).encode('ascii', 'ignore')
        last_name = unicodedata.normalize(
            'NFKD', safe_unicode(self.last_name)).encode('ascii', 'ignore')

        # do NFKD encoding, and also make sure email has proper format
        email_address = self.normalize_email(self.email_address)

        # first push the email initials
        prefix, server = email_address.split('@', 1)

        # check if prefix is maybe a 'first_name.last_name' syntax
        _dot_split = prefix.rsplit('.', 1)
        if len(_dot_split) == 2:
            initials = [_dot_split[0][0], _dot_split[1][0]]
        else:
            initials = [prefix[0], server[0]]

        # then try to replace either first_name or last_name
        fn_letter = (first_name or " ")[0].strip()
        ln_letter = (last_name.split(' ', 1)[-1] or " ")[0].strip()

        if fn_letter:
            initials[0] = fn_letter

        if ln_letter:
            initials[1] = ln_letter

        return ''.join(initials).upper()

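    # Illustrative sketch (not part of the original module): how get_initials()
    # falls back from names to the email address. All values are made-up
    # samples.
    def _example_get_initials(self):
        # first/last name win over the email; for a compound last name the
        # last word is used: "Guido Von Rossum" -> 'GR'
        assert InitialsGravatar(
            'x@example.com', 'Guido', 'Von Rossum').get_initials() == 'GR'
        # no names given, 'first.last@host' style email -> 'JD'
        assert InitialsGravatar(
            'john.doe@example.com', '', '').get_initials() == 'JD'
        # no names, plain prefix -> first letter of prefix + first letter of
        # the server name
        assert InitialsGravatar(
            'admin@rhodecode.org', '', '').get_initials() == 'AR'
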
    def get_img_data_by_type(self, font_family, img_type):
        default_user = """
        <svg xmlns="http://www.w3.org/2000/svg"
        version="1.1" x="0px" y="0px" width="{size}" height="{size}"
        viewBox="-15 -10 439.165 429.164"

        xml:space="preserve"
        style="background:{background};" >

        <path d="M204.583,216.671c50.664,0,91.74-48.075,
        91.74-107.378c0-82.237-41.074-107.377-91.74-107.377
        c-50.668,0-91.74,25.14-91.74,107.377C112.844,
        168.596,153.916,216.671,
        204.583,216.671z" fill="{text_color}"/>
        <path d="M407.164,374.717L360.88,
        270.454c-2.117-4.771-5.836-8.728-10.465-11.138l-71.83-37.392
        c-1.584-0.823-3.502-0.663-4.926,0.415c-20.316,
        15.366-44.203,23.488-69.076,23.488c-24.877,
        0-48.762-8.122-69.078-23.488
        c-1.428-1.078-3.346-1.238-4.93-0.415L58.75,
        259.316c-4.631,2.41-8.346,6.365-10.465,11.138L2.001,374.717
        c-3.191,7.188-2.537,15.412,1.75,22.005c4.285,
        6.592,11.537,10.526,19.4,10.526h362.861c7.863,0,15.117-3.936,
        19.402-10.527 C409.699,390.129,
        410.355,381.902,407.164,374.717z" fill="{text_color}"/>
        </svg>""".format(
            size=self.size,
            background='#979797',  # @grey4
            text_color=self.text_color,
            font_family=font_family)

        return {
            "default_user": default_user
        }[img_type]

    def get_img_data(self, svg_type=None):
        """
        generates the svg metadata for image
        """

        font_family = ','.join([
            'proximanovaregular',
            'Proxima Nova Regular',
            'Proxima Nova',
            'Arial',
            'Lucida Grande',
            'sans-serif'
        ])
        if svg_type:
            return self.get_img_data_by_type(font_family, svg_type)

        initials = self.get_initials()
        img_data = """
        <svg xmlns="http://www.w3.org/2000/svg" pointer-events="none"
             width="{size}" height="{size}"
             style="width: 100%; height: 100%; background-color: {background}"
             viewBox="0 0 {size} {size}">
            <text text-anchor="middle" y="50%" x="50%" dy="0.35em"
                  pointer-events="auto" fill="{text_color}"
                  font-family="{font_family}"
                  style="font-weight: 400; font-size: {f_size}px;">{text}
            </text>
        </svg>""".format(
            size=self.size,
            f_size=self.size/1.85,  # scale the text inside the box nicely
            background=self.background,
            text_color=self.text_color,
            text=initials.upper(),
            font_family=font_family)

        return img_data

    def generate_svg(self, svg_type=None):
        img_data = self.get_img_data(svg_type)
        return "data:image/svg+xml;base64,%s" % img_data.encode('base64')


def initials_gravatar(email_address, first_name, last_name, size=30):
    svg_type = None
    if email_address == User.DEFAULT_USER_EMAIL:
        svg_type = 'default_user'
    klass = InitialsGravatar(email_address, first_name, last_name, size)
    return klass.generate_svg(svg_type=svg_type)


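# Illustrative sketch (not part of the original module): initials_gravatar()
# returns a ready-to-embed data URI, so templates can use it directly as an
# <img> src. The email below is a made-up sample.
def _example_initials_gravatar_src():
    src = initials_gravatar('jane.doe@example.com', 'Jane', 'Doe', size=30)
    # src starts with 'data:image/svg+xml;base64,' followed by the encoded SVG
    return '<img src="%s" width="30" height="30" alt="JD"/>' % src
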
def gravatar_url(email_address, size=30, request=None):
    request = get_current_request()
    if request and hasattr(request, 'call_context'):
        _use_gravatar = request.call_context.visual.use_gravatar
        _gravatar_url = request.call_context.visual.gravatar_url
    else:
        # doh, we need to re-import those to mock it later
        from pylons import tmpl_context as c

        _use_gravatar = c.visual.use_gravatar
        _gravatar_url = c.visual.gravatar_url

    _gravatar_url = _gravatar_url or User.DEFAULT_GRAVATAR_URL

    email_address = email_address or User.DEFAULT_USER_EMAIL
    if isinstance(email_address, unicode):
        # hashlib crashes on unicode items
        email_address = safe_str(email_address)

    # empty email or default user
    if not email_address or email_address == User.DEFAULT_USER_EMAIL:
        return initials_gravatar(User.DEFAULT_USER_EMAIL, '', '', size=size)

    if _use_gravatar:
        # TODO: Disuse pyramid thread locals. Think about another solution to
        # get the host and schema here.
        request = get_current_request()
        tmpl = safe_str(_gravatar_url)
        tmpl = tmpl.replace('{email}', email_address)\
            .replace('{md5email}', md5_safe(email_address.lower())) \
            .replace('{netloc}', request.host)\
            .replace('{scheme}', request.scheme)\
            .replace('{size}', safe_str(size))
        return tmpl
    else:
        return initials_gravatar(email_address, '', '', size=size)


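# Illustrative sketch (not part of the original module): gravatar_url() does a
# plain placeholder substitution on the configured template. The template
# below is a hypothetical example value, shown only to document the
# {email}/{md5email}/{size} placeholders; {netloc} and {scheme} come from the
# current request.
def _example_gravatar_template():
    tmpl = 'https://secure.gravatar.com/avatar/{md5email}?d=identicon&s={size}'
    email = 'jane.doe@example.com'
    expanded = tmpl.replace(
        '{md5email}', md5_safe(email.lower())).replace('{size}', '30')
    return expanded
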
class Page(_Page):
    """
    Custom pager to match rendering style with paginator
    """

    def _get_pos(self, cur_page, max_page, items):
        edge = (items / 2) + 1
        if (cur_page <= edge):
            radius = max(items / 2, items - cur_page)
        elif (max_page - cur_page) < edge:
            radius = (items - 1) - (max_page - cur_page)
        else:
            radius = items / 2

        left = max(1, (cur_page - (radius)))
        right = min(max_page, cur_page + (radius))
        return left, cur_page, right

    def _range(self, regexp_match):
        """
        Return range of linked pages (e.g. '1 2 [3] 4 5 6 7 8').

        Arguments:

        regexp_match
            A "re" (regular expressions) match object containing the
            radius of linked pages around the current page in
            regexp_match.group(1) as a string

        This function is supposed to be called as a callable in
        re.sub.

        """
        radius = int(regexp_match.group(1))

        # Compute the first and last page number within the radius
        # e.g. '1 .. 5 6 [7] 8 9 .. 12'
        # -> leftmost_page = 5
        # -> rightmost_page = 9
        leftmost_page, _cur, rightmost_page = self._get_pos(self.page,
                                                            self.last_page,
                                                            (radius * 2) + 1)
        nav_items = []

        # Create a link to the first page (unless we are on the first page
        # or there would be no need to insert '..' spacers)
        if self.page != self.first_page and self.first_page < leftmost_page:
            nav_items.append(self._pagerlink(self.first_page, self.first_page))

        # Insert dots if there are pages between the first page
        # and the currently displayed page range
        if leftmost_page - self.first_page > 1:
            # Wrap in a SPAN tag if nolink_attr is set
            text = '..'
            if self.dotdot_attr:
                text = HTML.span(c=text, **self.dotdot_attr)
            nav_items.append(text)

        for thispage in xrange(leftmost_page, rightmost_page + 1):
            # Highlight the current page number and do not use a link
            if thispage == self.page:
                text = '%s' % (thispage,)
                # Wrap in a SPAN tag if nolink_attr is set
                if self.curpage_attr:
                    text = HTML.span(c=text, **self.curpage_attr)
                nav_items.append(text)
            # Otherwise create just a link to that page
            else:
                text = '%s' % (thispage,)
                nav_items.append(self._pagerlink(thispage, text))

        # Insert dots if there are pages between the displayed
        # page numbers and the end of the page range
        if self.last_page - rightmost_page > 1:
            text = '..'
            # Wrap in a SPAN tag if nolink_attr is set
            if self.dotdot_attr:
                text = HTML.span(c=text, **self.dotdot_attr)
            nav_items.append(text)

        # Create a link to the very last page (unless we are on the last
        # page or there would be no need to insert '..' spacers)
        if self.page != self.last_page and rightmost_page < self.last_page:
            nav_items.append(self._pagerlink(self.last_page, self.last_page))

        ## prerender links
        #_page_link = url.current()
        #nav_items.append(literal('<link rel="prerender" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
        #nav_items.append(literal('<link rel="prefetch" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
        return self.separator.join(nav_items)

    def pager(self, format='~2~', page_param='page', partial_param='partial',
              show_if_single_page=False, separator=' ', onclick=None,
              symbol_first='<<', symbol_last='>>',
              symbol_previous='<', symbol_next='>',
              link_attr={'class': 'pager_link', 'rel': 'prerender'},
              curpage_attr={'class': 'pager_curpage'},
              dotdot_attr={'class': 'pager_dotdot'}, **kwargs):

        self.curpage_attr = curpage_attr
        self.separator = separator
        self.pager_kwargs = kwargs
        self.page_param = page_param
        self.partial_param = partial_param
        self.onclick = onclick
        self.link_attr = link_attr
        self.dotdot_attr = dotdot_attr

        # Don't show navigator if there is no more than one page
        if self.page_count == 0 or (self.page_count == 1 and not show_if_single_page):
            return ''

        from string import Template
        # Replace ~...~ in token format by range of pages
        result = re.sub(r'~(\d+)~', self._range, format)

        # Interpolate '%' variables
        result = Template(result).safe_substitute({
            'first_page': self.first_page,
            'last_page': self.last_page,
            'page': self.page,
            'page_count': self.page_count,
            'items_per_page': self.items_per_page,
            'first_item': self.first_item,
            'last_item': self.last_item,
            'item_count': self.item_count,
            'link_first': self.page > self.first_page and \
                self._pagerlink(self.first_page, symbol_first) or '',
            'link_last': self.page < self.last_page and \
                self._pagerlink(self.last_page, symbol_last) or '',
            'link_previous': self.previous_page and \
                self._pagerlink(self.previous_page, symbol_previous) \
                or HTML.span(symbol_previous, class_="pg-previous disabled"),
            'link_next': self.next_page and \
                self._pagerlink(self.next_page, symbol_next) \
                or HTML.span(symbol_next, class_="pg-next disabled")
        })

        return literal(result)


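# Illustrative sketch (not part of the original module): how the `format`
# string of Page.pager() is interpreted. `~2~` is replaced by _range() with up
# to two page links on each side of the current page (plus '..' spacers), and
# the $-variables are filled in via string.Template. `page_obj` stands for any
# Page/RepoPage instance built elsewhere.
def _example_pager_format(page_obj):
    return page_obj.pager('$link_previous ~2~ $link_next')
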
#==============================================================================
# REPO PAGER, PAGER FOR REPOSITORY
#==============================================================================
class RepoPage(Page):

    def __init__(self, collection, page=1, items_per_page=20,
                 item_count=None, url=None, **kwargs):

        """Create a "RepoPage" instance, a special pager for paging a
        repository.
        """
        self._url_generator = url

        # Save the kwargs class-wide so they can be used in the pager() method
        self.kwargs = kwargs

        # Save a reference to the collection
        self.original_collection = collection

        self.collection = collection

        # The self.page is the number of the current page.
        # The first page has the number 1!
        try:
            self.page = int(page)  # make it int() if we get it as a string
        except (ValueError, TypeError):
            self.page = 1

        self.items_per_page = items_per_page

        # Unless the user tells us how many items the collection has
        # we calculate that ourselves.
        if item_count is not None:
            self.item_count = item_count
        else:
            self.item_count = len(self.collection)

        # Compute the number of the first and last available page
        if self.item_count > 0:
            self.first_page = 1
            self.page_count = int(math.ceil(float(self.item_count) /
                                            self.items_per_page))
            self.last_page = self.first_page + self.page_count - 1

            # Make sure that the requested page number is in the range of
            # valid pages
            if self.page > self.last_page:
                self.page = self.last_page
            elif self.page < self.first_page:
                self.page = self.first_page

            # Note: the number of items on this page can be less than
            # items_per_page if the last page is not full
            self.first_item = max(0, (self.item_count) - (self.page *
                                                          items_per_page))
            self.last_item = ((self.item_count - 1) - items_per_page *
                              (self.page - 1))

            self.items = list(self.collection[self.first_item:self.last_item + 1])

            # Links to previous and next page
            if self.page > self.first_page:
                self.previous_page = self.page - 1
            else:
                self.previous_page = None

            if self.page < self.last_page:
                self.next_page = self.page + 1
            else:
                self.next_page = None

        # No items available
        else:
            self.first_page = None
            self.page_count = 0
            self.last_page = None
            self.first_item = None
            self.last_item = None
            self.previous_page = None
            self.next_page = None
            self.items = []

        # This is a subclass of the 'list' type. Initialise the list now.
        list.__init__(self, reversed(self.items))


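# Illustrative sketch (not part of the original module): RepoPage slices the
# collection from the *end*, so page 1 holds the newest items and the slice is
# reversed for display. The numbers below are made up: 95 items, 20 per page.
def _example_repo_page():
    fake_commits = list(range(95))  # stand-in for a repository commit list
    page = RepoPage(fake_commits, page=1, items_per_page=20)
    assert page.page_count == 5
    assert page.first_item == 75 and page.last_item == 94
    return list(page)  # [94, 93, ..., 75]
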
def changed_tooltip(nodes):
    """
    Generates an html string for changed nodes in a commit page.
    It limits the output to 30 entries.

    :param nodes: LazyNodesGenerator
    """
    if nodes:
        pref = ': <br/> '
        suf = ''
        if len(nodes) > 30:
            suf = '<br/>' + _(' and %s more') % (len(nodes) - 30)
        return literal(pref + '<br/> '.join([safe_unicode(x.path)
                                             for x in nodes[:30]]) + suf)
    else:
        return ': ' + _('No Files')


def breadcrumb_repo_link(repo):
    """
    Makes a breadcrumbs path link to repo

    ex::
        group >> subgroup >> repo

    :param repo: a Repository instance
    """

    path = [
        link_to(group.name, route_path('repo_group_home', repo_group_name=group.group_name))
        for group in repo.groups_with_parents
    ] + [
        link_to(repo.just_name, route_path('repo_summary', repo_name=repo.repo_name))
    ]

    return literal(' &raquo; '.join(path))


def format_byte_size_binary(file_size):
    """
    Formats file/folder sizes to a standard representation using binary units.
    """
    formatted_size = format_byte_size(file_size, binary=True)
    return formatted_size


def urlify_text(text_, safe=True):
    """
    Extract urls from text and make html links out of them

    :param text_:
    """

    url_pat = re.compile(r'''(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@#.&+]'''
                         '''|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)''')

    def url_func(match_obj):
        url_full = match_obj.groups()[0]
        return '<a href="%(url)s">%(url)s</a>' % ({'url': url_full})
    _newtext = url_pat.sub(url_func, text_)
    if safe:
        return literal(_newtext)
    return _newtext


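# Illustrative sketch (not part of the original module): urlify_text() only
# touches http/https URLs and leaves the surrounding text alone. The input is
# a made-up sample string.
def _example_urlify_text():
    text = u'docs live at https://docs.rhodecode.com/ now'
    return urlify_text(text)
    # roughly -> 'docs live at <a href="https://docs.rhodecode.com/">'
    #            'https://docs.rhodecode.com/</a> now'
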
def urlify_commits(text_, repository):
    """
    Extract commit ids from text and make links from them

    :param text_:
    :param repository: repo name to build the URL with
    """
    from pylons import url  # doh, we need to re-import url to mock it later
    URL_PAT = re.compile(r'(^|\s)([0-9a-fA-F]{12,40})($|\s)')

    def url_func(match_obj):
        commit_id = match_obj.groups()[1]
        pref = match_obj.groups()[0]
        suf = match_obj.groups()[2]

        tmpl = (
            '%(pref)s<a class="%(cls)s" href="%(url)s">'
            '%(commit_id)s</a>%(suf)s'
        )
        return tmpl % {
            'pref': pref,
            'cls': 'revision-link',
            'url': url('changeset_home', repo_name=repository,
                       revision=commit_id, qualified=True),
            'commit_id': commit_id,
            'suf': suf
        }

    newtext = URL_PAT.sub(url_func, text_)

    return newtext


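# Illustrative sketch (not part of the original module): any whitespace-
# delimited 12-40 character hex string is treated as a commit id and linked
# into the given repository. The hash and repo name below are made up.
def _example_urlify_commits():
    text = u'fixed in deadbeefcafe0123 for sure'
    return urlify_commits(text, 'my-repo')
    # roughly -> 'fixed in <a class="revision-link" href="...">'
    #            'deadbeefcafe0123</a> for sure'
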
def _process_url_func(match_obj, repo_name, uid, entry,
                      return_raw_data=False, link_format='html'):
    pref = ''
    if match_obj.group().startswith(' '):
        pref = ' '

    issue_id = ''.join(match_obj.groups())

    if link_format == 'html':
        tmpl = (
            '%(pref)s<a class="%(cls)s" href="%(url)s">'
            '%(issue-prefix)s%(id-repr)s'
            '</a>')
    elif link_format == 'rst':
        tmpl = '`%(issue-prefix)s%(id-repr)s <%(url)s>`_'
    elif link_format == 'markdown':
        tmpl = '[%(issue-prefix)s%(id-repr)s](%(url)s)'
    else:
        raise ValueError('Bad link_format:{}'.format(link_format))

    (repo_name_cleaned,
     parent_group_name) = RepoGroupModel().\
        _get_group_name_and_parent(repo_name)

    # variables replacement
    named_vars = {
        'id': issue_id,
        'repo': repo_name,
        'repo_name': repo_name_cleaned,
        'group_name': parent_group_name
    }
    # named regex variables
    named_vars.update(match_obj.groupdict())
    _url = string.Template(entry['url']).safe_substitute(**named_vars)

    data = {
        'pref': pref,
        'cls': 'issue-tracker-link',
        'url': _url,
        'id-repr': issue_id,
        'issue-prefix': entry['pref'],
        'serv': entry['url'],
    }
    if return_raw_data:
        return {
            'id': issue_id,
            'url': _url
        }
    return tmpl % data


def process_patterns(text_string, repo_name, link_format='html'):
    allowed_formats = ['html', 'rst', 'markdown']
    if link_format not in allowed_formats:
        raise ValueError('Link format can be only one of:{} got {}'.format(
            allowed_formats, link_format))

    repo = None
    if repo_name:
        # Retrieve the repo instance up-front so an invalid repo_name doesn't
        # explode inside IssueTrackerSettingsModel, while still passing the
        # (possibly invalid) name further down
        repo = Repository.get_by_repo_name(repo_name, cache=True)

    settings_model = IssueTrackerSettingsModel(repo=repo)
    active_entries = settings_model.get_settings(cache=True)

    issues_data = []
    newtext = text_string

    for uid, entry in active_entries.items():
        log.debug('found issue tracker entry with uid %s' % (uid,))

        if not (entry['pat'] and entry['url']):
            log.debug('skipping due to missing data')
            continue

        log.debug('issue tracker entry: uid: `%s` PAT:%s URL:%s PREFIX:%s'
                  % (uid, entry['pat'], entry['url'], entry['pref']))

        try:
            pattern = re.compile(r'%s' % entry['pat'])
        except re.error:
            log.exception(
                'issue tracker pattern: `%s` failed to compile',
                entry['pat'])
            continue

        data_func = partial(
            _process_url_func, repo_name=repo_name, entry=entry, uid=uid,
            return_raw_data=True)

        for match_obj in pattern.finditer(text_string):
            issues_data.append(data_func(match_obj))

        url_func = partial(
            _process_url_func, repo_name=repo_name, entry=entry, uid=uid,
            link_format=link_format)

        newtext = pattern.sub(url_func, newtext)
        log.debug('processed prefix:uid `%s`' % (uid,))

    return newtext, issues_data


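# Illustrative sketch (not part of the original module): the shape of an
# issue-tracker entry consumed by process_patterns()/_process_url_func(). The
# pattern, URL and prefix are made-up example settings; real entries come from
# IssueTrackerSettingsModel.get_settings().
def _example_issue_tracker_entry():
    entry = {
        'pat': r'#(?P<issue_id>\d+)',  # matches e.g. '#42'
        'url': 'https://issues.example.com/${repo}/issue/${id}',
        'pref': '#',
    }
    # with such an entry, process_patterns(u'fixes #42', 'my-repo') would turn
    # '#42' into a link to https://issues.example.com/my-repo/issue/42
    return entry
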
def urlify_commit_message(commit_text, repository=None):
    """
    Parses the given text message and makes proper links.
    Issues are linked to the given issue-server; the rest become commit links.

    :param commit_text:
    :param repository:
    """
    from pylons import url  # doh, we need to re-import url to mock it later

    def escaper(string):
        return string.replace('<', '&lt;').replace('>', '&gt;')

    newtext = escaper(commit_text)

    # extract http/https links and make them real urls
    newtext = urlify_text(newtext, safe=False)

    # urlify commits - extract commit ids and make links out of them, if we
    # have the scope of repository present.
    if repository:
        newtext = urlify_commits(newtext, repository)

    # process issue tracker patterns
    newtext, issues = process_patterns(newtext, repository or '')

    return literal(newtext)


def render_binary(repo_name, file_obj):
    """
    Choose how to render a binary file
    """
    filename = file_obj.name

    # images
    for ext in ['*.png', '*.jpg', '*.ico', '*.gif']:
        if fnmatch.fnmatch(filename, pat=ext):
            alt = filename
            src = url('files_raw_home', repo_name=repo_name,
                      revision=file_obj.commit.raw_id, f_path=file_obj.path)
            return literal('<img class="rendered-binary" alt="{}" src="{}">'.format(alt, src))


def renderer_from_filename(filename, exclude=None):
    """
    Choose a renderer based on filename; this works only for text based files
    """

    # ipython
    for ext in ['*.ipynb']:
        if fnmatch.fnmatch(filename, pat=ext):
            return 'jupyter'

    is_markup = MarkupRenderer.renderer_from_filename(filename, exclude=exclude)
    if is_markup:
        return is_markup
    return None


def render(source, renderer='rst', mentions=False, relative_url=None,
           repo_name=None):

    def maybe_convert_relative_links(html_source):
        if relative_url:
            return relative_links(html_source, relative_url)
        return html_source

    if renderer == 'rst':
        if repo_name:
            # process patterns on comments if we pass in repo name
            source, issues = process_patterns(
                source, repo_name, link_format='rst')

        return literal(
            '<div class="rst-block">%s</div>' %
            maybe_convert_relative_links(
                MarkupRenderer.rst(source, mentions=mentions)))
    elif renderer == 'markdown':
        if repo_name:
            # process patterns on comments if we pass in repo name
            source, issues = process_patterns(
                source, repo_name, link_format='markdown')

        return literal(
            '<div class="markdown-block">%s</div>' %
            maybe_convert_relative_links(
                MarkupRenderer.markdown(source, flavored=True,
                                        mentions=mentions)))
    elif renderer == 'jupyter':
        return literal(
            '<div class="ipynb">%s</div>' %
            maybe_convert_relative_links(
                MarkupRenderer.jupyter(source)))

    # None means just show the file-source
    return None


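# Illustrative only (not part of helpers.py): with a normal request context
# active, rendering a markdown comment for a repository would look roughly like
#   html = render('fixes #123', renderer='markdown', mentions=True,
#                 repo_name='some-repo')   # 'some-repo' is a hypothetical name
# which returns literal('<div class="markdown-block">...</div>') with issue
# references already linkified via process_patterns().

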
def commit_status(repo, commit_id):
    return ChangesetStatusModel().get_status(repo, commit_id)


def commit_status_lbl(commit_status):
    return dict(ChangesetStatus.STATUSES).get(commit_status)


def commit_time(repo_name, commit_id):
    repo = Repository.get_by_repo_name(repo_name)
    commit = repo.get_commit(commit_id=commit_id)
    return commit.date


def get_permission_name(key):
    return dict(Permission.PERMS).get(key)


def journal_filter_help():
    return _(
        'Example filter terms:\n' +
        ' repository:vcs\n' +
        ' username:marcin\n' +
+       ' username:(NOT marcin)\n' +
        ' action:*push*\n' +
        ' ip:127.0.0.1\n' +
        ' date:20120101\n' +
        ' date:[20120101100000 TO 20120102]\n' +
        '\n' +
        'Generate wildcards using \'*\' character:\n' +
        ' "repository:vcs*" - search everything starting with \'vcs\'\n' +
        ' "repository:*vcs*" - search for repository containing \'vcs\'\n' +
        '\n' +
        'Optional AND / OR operators in queries\n' +
        ' "repository:vcs OR repository:test"\n' +
        ' "username:test AND repository:test*"\n'
    )


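# Illustrative only (not part of helpers.py): the terms documented above,
# including the new NOT form added in this commit, are parsed with the whoosh
# query language and translated into SQL filters by user_log_filter() (second
# file in this commit). For example, a hypothetical admin-journal search such as
#   username:(NOT marcin) AND action:*push*
# keeps only entries whose username is not 'marcin' and whose action
# contains 'push'.

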
def search_filter_help(searcher):

    terms = ''
    return _(
        'Example filter terms for `{searcher}` search:\n' +
        '{terms}\n' +
        'Generate wildcards using \'*\' character:\n' +
        ' "repo_name:vcs*" - search everything starting with \'vcs\'\n' +
        ' "repo_name:*vcs*" - search for repository containing \'vcs\'\n' +
        '\n' +
        'Optional AND / OR operators in queries\n' +
        ' "repo_name:vcs OR repo_name:test"\n' +
        ' "owner:test AND repo_name:test*"\n' +
        'More: {search_doc}'
    ).format(searcher=searcher.name,
             terms=terms, search_doc=searcher.query_lang_doc)


def not_mapped_error(repo_name):
    flash(_('%s repository is not mapped to db perhaps'
            ' it was created or renamed from the filesystem'
            ' please run the application again'
            ' in order to rescan repositories') % repo_name, category='error')


def ip_range(ip_addr):
    from rhodecode.model.db import UserIpMap
    s, e = UserIpMap._get_ip_range(ip_addr)
    return '%s - %s' % (s, e)


def form(url, method='post', needs_csrf_token=True, **attrs):
    """Wrapper around webhelpers.tags.form to prevent CSRF attacks."""
    if method.lower() != 'get' and needs_csrf_token:
        raise Exception(
            'Forms to POST/PUT/DELETE endpoints should have (in general) a ' +
            'CSRF token. If the endpoint does not require such token you can ' +
            'explicitly set the parameter needs_csrf_token to false.')

    return wh_form(url, method=method, **attrs)


def secure_form(url, method="POST", multipart=False, **attrs):
    """Start a form tag that points the action to a url. This
    form tag will also include the hidden field containing
    the auth token.

    The url options should be given either as a string, or as a
    ``url()`` function. The method for the form defaults to POST.

    Options:

    ``multipart``
        If set to True, the enctype is set to "multipart/form-data".
    ``method``
        The method to use when submitting the form, usually either
        "GET" or "POST". If "PUT", "DELETE", or another verb is used, a
        hidden input with name _method is added to simulate the verb
        over POST.

    """
    from webhelpers.pylonslib.secure_form import insecure_form
    form = insecure_form(url, method, multipart, **attrs)
    token = csrf_input()
    return literal("%s\n%s" % (form, token))

def csrf_input():
    return literal(
        '<input type="hidden" id="{}" name="{}" value="{}">'.format(
            csrf_token_key, csrf_token_key, get_csrf_token()))

def dropdownmenu(name, selected, options, enable_filter=False, **attrs):
    select_html = select(name, selected, options, **attrs)
    select2 = """
    <script>
        $(document).ready(function() {
            $('#%s').select2({
                containerCssClass: 'drop-menu',
                dropdownCssClass: 'drop-menu-dropdown',
                dropdownAutoWidth: true%s
            });
        });
    </script>
    """
    filter_option = """,
        minimumResultsForSearch: -1
    """
    input_id = attrs.get('id') or name
    filter_enabled = "" if enable_filter else filter_option
    select_script = literal(select2 % (input_id, filter_enabled))

    return literal(select_html+select_script)


def get_visual_attr(tmpl_context_var, attr_name):
    """
    A safe way to get an attribute from the `visual` variable of the template context

    :param tmpl_context_var: instance of tmpl_context, usually present as `c`
    :param attr_name: name of the attribute we fetch from the c.visual
    """
    visual = getattr(tmpl_context_var, 'visual', None)
    if not visual:
        return
    else:
        return getattr(visual, attr_name, None)


def get_last_path_part(file_node):
    if not file_node.path:
        return u''

    path = safe_unicode(file_node.path.split('/')[-1])
    return u'../' + path


def route_url(*args, **kwargs):
    """
    Wrapper around pyramids `route_url` (fully qualified url) function.
    It is used to generate URLs from within pylons views or templates.
    This will be removed when the pyramid migration is finished.
    """
    req = get_current_request()
    return req.route_url(*args, **kwargs)


def route_path(*args, **kwargs):
    """
    Wrapper around pyramids `route_path` function. It is used to generate
    URLs from within pylons views or templates. This will be removed when
    the pyramid migration is finished.
    """
    req = get_current_request()
    return req.route_path(*args, **kwargs)


def route_path_or_none(*args, **kwargs):
    try:
        return route_path(*args, **kwargs)
    except KeyError:
        return None


def static_url(*args, **kwds):
    """
    Wrapper around pyramids `static_url` function. It is used to generate
    URLs from within pylons views or templates. This will be removed when
    the pyramid migration is finished.
    """
    req = get_current_request()
    return req.static_url(*args, **kwds)


def resource_path(*args, **kwds):
    """
    Wrapper around pyramids `resource_path` function. It is used to generate
    URLs from within pylons views or templates. This will be removed when
    the pyramid migration is finished.
    """
    req = get_current_request()
    return req.resource_path(*args, **kwds)


def api_call_example(method, args):
    """
    Generates an API call example via CURL
    """
    args_json = json.dumps(OrderedDict([
        ('id', 1),
        ('auth_token', 'SECRET'),
        ('method', method),
        ('args', args)
    ]))
    return literal(
        "curl {api_url} -X POST -H 'content-type:text/plain' --data-binary '{data}'"
        "<br/><br/>SECRET can be found in <a href=\"{token_url}\">auth-tokens</a> page, "
        "and needs to be of `api calls` role."
        .format(
            api_url=route_url('apiv2'),
            token_url=route_url('my_account_auth_tokens'),
            data=args_json))
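
For reference, the `username:(NOT marcin)` syntax advertised in journal_filter_help() above is handled by the whoosh query parser used in the journal filter below. A minimal sketch, assuming only that whoosh is installed; the schema here is a cut-down stand-in for the JOURNAL_SCHEMA defined in the next file:

from whoosh.fields import Schema, TEXT
from whoosh.qparser.default import QueryParser, query

# cut-down stand-in for JOURNAL_SCHEMA from the journal filter module below
schema = Schema(username=TEXT(), repository=TEXT())
qp = QueryParser('repository', schema=schema)

parsed = qp.parse(u'username:(NOT marcin)')
# expected: a query.Not node wrapping Term('username', u'marcin'), which is
# why the new branch in user_log_filter() below unwraps term.leaves()
print(isinstance(parsed, query.Not), list(parsed.leaves()))
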
@@ -1,112 +1,123 b''
# -*- coding: utf-8 -*-

# Copyright (C) 2010-2017 RhodeCode GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License, version 3
# (only), as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This program is dual-licensed. If you wish to learn more about the
# RhodeCode Enterprise Edition, including its added features, Support services,
# and proprietary license terms, please see https://rhodecode.com/licenses/

import logging

from whoosh.qparser.default import QueryParser, query
from whoosh.qparser.dateparse import DateParserPlugin
from whoosh.fields import (TEXT, Schema, DATETIME)
-from sqlalchemy.sql.expression import or_, and_, func
+from sqlalchemy.sql.expression import or_, and_, not_, func

from rhodecode.model.db import UserLog
-from rhodecode.lib.utils2 import remove_prefix, remove_suffix
+from rhodecode.lib.utils2 import remove_prefix, remove_suffix, safe_unicode

# JOURNAL SCHEMA used only to generate queries in journal. We use whoosh
# querylang to build sql queries and filter journals
JOURNAL_SCHEMA = Schema(
    username=TEXT(),
    date=DATETIME(),
    action=TEXT(),
    repository=TEXT(),
    ip=TEXT(),
)

log = logging.getLogger(__name__)


def user_log_filter(user_log, search_term):
    """
    Filters sqlalchemy user_log based on search_term with whoosh Query language
    http://packages.python.org/Whoosh/querylang.html

    :param user_log:
    :param search_term:
    """
    log.debug('Initial search term: %r' % search_term)
    qry = None
    if search_term:
        qp = QueryParser('repository', schema=JOURNAL_SCHEMA)
        qp.add_plugin(DateParserPlugin())
-        qry = qp.parse(unicode(search_term))
+        qry = qp.parse(safe_unicode(search_term))
        log.debug('Filtering using parsed query %r' % qry)

    def wildcard_handler(col, wc_term):
        if wc_term.startswith('*') and not wc_term.endswith('*'):
            # postfix == endswith
            wc_term = remove_prefix(wc_term, prefix='*')
            return func.lower(col).endswith(wc_term)
        elif wc_term.startswith('*') and wc_term.endswith('*'):
            # wildcard == ilike
            wc_term = remove_prefix(wc_term, prefix='*')
            wc_term = remove_suffix(wc_term, suffix='*')
            return func.lower(col).contains(wc_term)

    def get_filterion(field, val, term):

        if field == 'repository':
            field = getattr(UserLog, 'repository_name')
        elif field == 'ip':
            field = getattr(UserLog, 'user_ip')
        elif field == 'date':
            field = getattr(UserLog, 'action_date')
        elif field == 'username':
            field = getattr(UserLog, 'username')
        else:
            field = getattr(UserLog, field)
        log.debug('filter field: %s val=>%s' % (field, val))

        # sql filtering
        if isinstance(term, query.Wildcard):
            return wildcard_handler(field, val)
        elif isinstance(term, query.Prefix):
            return func.lower(field).startswith(func.lower(val))
        elif isinstance(term, query.DateRange):
            return and_(field >= val[0], field <= val[1])
+        elif isinstance(term, query.Not):
+            return not_(field == val)
        return func.lower(field) == func.lower(val)

-    if isinstance(qry, (query.And, query.Term, query.Prefix, query.Wildcard,
-                        query.DateRange)):
+    if isinstance(qry, (query.And, query.Not, query.Term, query.Prefix,
+                        query.Wildcard, query.DateRange)):
        if not isinstance(qry, query.And):
            qry = [qry]
+
        for term in qry:
-            field = term.fieldname
-            val = (term.text if not isinstance(term, query.DateRange)
-                   else [term.startdate, term.enddate])
+            if isinstance(term, query.Not):
+                not_term = [z for z in term.leaves()][0]
+                field = not_term.fieldname
+                val = not_term.text
+            elif isinstance(term, query.DateRange):
+                field = term.fieldname
+                val = [term.startdate, term.enddate]
+            else:
+                field = term.fieldname
+                val = term.text
+
            user_log = user_log.filter(get_filterion(field, val, term))
    elif isinstance(qry, query.Or):
        filters = []
        for term in qry:
            field = term.fieldname
            val = (term.text if not isinstance(term, query.DateRange)
                   else [term.startdate, term.enddate])
            filters.append(get_filterion(field, val, term))
        user_log = user_log.filter(or_(*filters))

    return user_log
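
Taken together with the help-text change above, the new query.Not branch means an audit-log search such as `username:(NOT marcin)` is now translated into a `NOT username = :username` SQL clause. A minimal usage sketch, not taken from the commit: it assumes a configured RhodeCode database session, that this file is importable as rhodecode.lib.user_log_filter, and that UserLog.query() returns a SQLAlchemy query:

from rhodecode.model.db import UserLog
# assumed module path for the file shown above
from rhodecode.lib.user_log_filter import user_log_filter

# every push action performed by anyone except 'marcin'
entries = user_log_filter(
    UserLog.query(), 'username:(NOT marcin) AND action:*push*')
for entry in entries.limit(10):
    print(entry.username, entry.action)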