##// END OF EJS Templates
pyramid: make flash messages not rely strictly on pylons session....
marcink -
r1905:741dca9b default
parent child Browse files
Show More
@@ -1,2036 +1,2039 b''
1 # -*- coding: utf-8 -*-
1 # -*- coding: utf-8 -*-
2
2
3 # Copyright (C) 2010-2017 RhodeCode GmbH
3 # Copyright (C) 2010-2017 RhodeCode GmbH
4 #
4 #
5 # This program is free software: you can redistribute it and/or modify
5 # This program is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU Affero General Public License, version 3
6 # it under the terms of the GNU Affero General Public License, version 3
7 # (only), as published by the Free Software Foundation.
7 # (only), as published by the Free Software Foundation.
8 #
8 #
9 # This program is distributed in the hope that it will be useful,
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU General Public License for more details.
12 # GNU General Public License for more details.
13 #
13 #
14 # You should have received a copy of the GNU Affero General Public License
14 # You should have received a copy of the GNU Affero General Public License
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 #
16 #
17 # This program is dual-licensed. If you wish to learn more about the
17 # This program is dual-licensed. If you wish to learn more about the
18 # RhodeCode Enterprise Edition, including its added features, Support services,
18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 # and proprietary license terms, please see https://rhodecode.com/licenses/
19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20
20
21 """
21 """
22 Helper functions
22 Helper functions
23
23
24 Consists of functions to typically be used within templates, but also
24 Consists of functions to typically be used within templates, but also
25 available to Controllers. This module is available to both as 'h'.
25 available to Controllers. This module is available to both as 'h'.
26 """
26 """
27
27
28 import random
28 import random
29 import hashlib
29 import hashlib
30 import StringIO
30 import StringIO
31 import urllib
31 import urllib
32 import math
32 import math
33 import logging
33 import logging
34 import re
34 import re
35 import urlparse
35 import urlparse
36 import time
36 import time
37 import string
37 import string
38 import hashlib
38 import hashlib
39 from collections import OrderedDict
39 from collections import OrderedDict
40
40
41 import pygments
41 import pygments
42 import itertools
42 import itertools
43 import fnmatch
43 import fnmatch
44
44
45 from datetime import datetime
45 from datetime import datetime
46 from functools import partial
46 from functools import partial
47 from pygments.formatters.html import HtmlFormatter
47 from pygments.formatters.html import HtmlFormatter
48 from pygments import highlight as code_highlight
48 from pygments import highlight as code_highlight
49 from pygments.lexers import (
49 from pygments.lexers import (
50 get_lexer_by_name, get_lexer_for_filename, get_lexer_for_mimetype)
50 get_lexer_by_name, get_lexer_for_filename, get_lexer_for_mimetype)
51 from pylons import url as pylons_url
51 from pylons import url as pylons_url
52 from pylons.i18n.translation import _, ungettext
52 from pylons.i18n.translation import _, ungettext
53 from pyramid.threadlocal import get_current_request
53 from pyramid.threadlocal import get_current_request
54
54
55 from webhelpers.html import literal, HTML, escape
55 from webhelpers.html import literal, HTML, escape
56 from webhelpers.html.tools import *
56 from webhelpers.html.tools import *
57 from webhelpers.html.builder import make_tag
57 from webhelpers.html.builder import make_tag
58 from webhelpers.html.tags import auto_discovery_link, checkbox, css_classes, \
58 from webhelpers.html.tags import auto_discovery_link, checkbox, css_classes, \
59 end_form, file, form as wh_form, hidden, image, javascript_link, link_to, \
59 end_form, file, form as wh_form, hidden, image, javascript_link, link_to, \
60 link_to_if, link_to_unless, ol, required_legend, select, stylesheet_link, \
60 link_to_if, link_to_unless, ol, required_legend, select, stylesheet_link, \
61 submit, text, password, textarea, title, ul, xml_declaration, radio
61 submit, text, password, textarea, title, ul, xml_declaration, radio
62 from webhelpers.html.tools import auto_link, button_to, highlight, \
62 from webhelpers.html.tools import auto_link, button_to, highlight, \
63 js_obfuscate, mail_to, strip_links, strip_tags, tag_re
63 js_obfuscate, mail_to, strip_links, strip_tags, tag_re
64 from webhelpers.pylonslib import Flash as _Flash
64 from webhelpers.pylonslib import Flash as _Flash
65 from webhelpers.text import chop_at, collapse, convert_accented_entities, \
65 from webhelpers.text import chop_at, collapse, convert_accented_entities, \
66 convert_misc_entities, lchop, plural, rchop, remove_formatting, \
66 convert_misc_entities, lchop, plural, rchop, remove_formatting, \
67 replace_whitespace, urlify, truncate, wrap_paragraphs
67 replace_whitespace, urlify, truncate, wrap_paragraphs
68 from webhelpers.date import time_ago_in_words
68 from webhelpers.date import time_ago_in_words
69 from webhelpers.paginate import Page as _Page
69 from webhelpers.paginate import Page as _Page
70 from webhelpers.html.tags import _set_input_attrs, _set_id_attr, \
70 from webhelpers.html.tags import _set_input_attrs, _set_id_attr, \
71 convert_boolean_attrs, NotGiven, _make_safe_id_component
71 convert_boolean_attrs, NotGiven, _make_safe_id_component
72 from webhelpers2.number import format_byte_size
72 from webhelpers2.number import format_byte_size
73
73
74 from rhodecode.lib.action_parser import action_parser
74 from rhodecode.lib.action_parser import action_parser
75 from rhodecode.lib.ext_json import json
75 from rhodecode.lib.ext_json import json
76 from rhodecode.lib.utils import repo_name_slug, get_custom_lexer
76 from rhodecode.lib.utils import repo_name_slug, get_custom_lexer
77 from rhodecode.lib.utils2 import str2bool, safe_unicode, safe_str, \
77 from rhodecode.lib.utils2 import str2bool, safe_unicode, safe_str, \
78 get_commit_safe, datetime_to_time, time_to_datetime, time_to_utcdatetime, \
78 get_commit_safe, datetime_to_time, time_to_datetime, time_to_utcdatetime, \
79 AttributeDict, safe_int, md5, md5_safe
79 AttributeDict, safe_int, md5, md5_safe
80 from rhodecode.lib.markup_renderer import MarkupRenderer, relative_links
80 from rhodecode.lib.markup_renderer import MarkupRenderer, relative_links
81 from rhodecode.lib.vcs.exceptions import CommitDoesNotExistError
81 from rhodecode.lib.vcs.exceptions import CommitDoesNotExistError
82 from rhodecode.lib.vcs.backends.base import BaseChangeset, EmptyCommit
82 from rhodecode.lib.vcs.backends.base import BaseChangeset, EmptyCommit
83 from rhodecode.config.conf import DATE_FORMAT, DATETIME_FORMAT
83 from rhodecode.config.conf import DATE_FORMAT, DATETIME_FORMAT
84 from rhodecode.model.changeset_status import ChangesetStatusModel
84 from rhodecode.model.changeset_status import ChangesetStatusModel
85 from rhodecode.model.db import Permission, User, Repository
85 from rhodecode.model.db import Permission, User, Repository
86 from rhodecode.model.repo_group import RepoGroupModel
86 from rhodecode.model.repo_group import RepoGroupModel
87 from rhodecode.model.settings import IssueTrackerSettingsModel
87 from rhodecode.model.settings import IssueTrackerSettingsModel
88
88
# module-level logger for this helpers module
log = logging.getLogger(__name__)


# convenience aliases for the built-in default (anonymous) user identity
DEFAULT_USER = User.DEFAULT_USER
DEFAULT_USER_EMAIL = User.DEFAULT_USER_EMAIL
94
94
95
95
def url(*args, **kw):
    """Generate an url; thin proxy to the legacy pylons ``url`` helper."""
    return pylons_url(*args, **kw)
98
98
99
99
def pylons_url_current(*args, **kw):
    """
    Override of ``pylons.url.current()`` that also works from a
    pyramid-only context by falling back to the current request path.
    This should be removed once the port to pyramid is complete.
    """
    if args or kw:
        return pylons_url.current(*args, **kw)
    # no arguments: answer with the current request path directly
    return get_current_request().path

url.current = pylons_url_current
112
112
113
113
def url_replace(**qargs):
    """Return the current request url while replacing query string args."""
    current_request = get_current_request()
    query_args = current_request.GET.mixed()
    query_args.update(qargs)
    return url('', **query_args)
121
121
122
122
def asset(path, ver=None, **kwargs):
    """
    Helper to generate a static asset file path for rhodecode assets

    eg. h.asset('images/image.png', ver='3923')

    :param path: path of asset
    :param ver: optional version query param to append as ?ver=
    :param kwargs: additional query params appended to the asset url
    """
    request = get_current_request()
    query = {}
    query.update(kwargs)
    if ver:
        # bugfix: previously ``query = {'ver': ver}`` rebound the dict and
        # silently discarded all **kwargs query params; set the key instead
        query['ver'] = ver
    return request.static_path(
        'rhodecode:public/{}'.format(path), _query=query)
139
139
140
140
# translate-table (ordinal -> entity) consumed by ``html_escape`` below
default_html_escape_table = {
    ord('&'): u'&amp;',
    ord('<'): u'&lt;',
    ord('>'): u'&gt;',
    ord('"'): u'&quot;',
    ord("'"): u'&#39;',
}


def html_escape(text, html_escape_table=default_html_escape_table):
    """Produce entities within text in a single ``translate`` pass."""
    escaped = text.translate(html_escape_table)
    return escaped
153
153
154
154
def chop_at_smart(s, sub, inclusive=False, suffix_if_chopped=None):
    """
    Truncate string ``s`` at the first occurrence of ``sub``.

    If ``inclusive`` is true, truncate just after ``sub`` rather than at it.
    When ``suffix_if_chopped`` is given, it is appended whenever any
    non-whitespace text was actually cut off.
    """
    cut_at = s.find(sub)
    if cut_at == -1:
        # nothing to chop, hand back the input untouched
        return s

    if inclusive:
        cut_at += len(sub)

    result = s[:cut_at]
    remainder = s[cut_at:].strip()
    if remainder and suffix_if_chopped:
        result += suffix_if_chopped
    return result
176
176
177
177
def shorter(text, size=20):
    """Trim ``text`` to at most ``size`` chars, ellipsis included if trimmed."""
    ellipsis = '...'
    if len(text) <= size:
        return text
    return text[:size - len(ellipsis)] + ellipsis
183
183
184
184
def _reset(name, value=None, id=NotGiven, type="reset", **attrs):
    """
    Reset button

    Builds an ``<input type="reset">`` tag. The webhelpers calls below
    mutate ``attrs`` in place, in this order, before rendering the tag.
    """
    _set_input_attrs(attrs, type, name, value)
    _set_id_attr(attrs, id, name)
    convert_boolean_attrs(attrs, ["disabled"])
    return HTML.input(**attrs)

reset = _reset
safeid = _make_safe_id_component
196
196
197
197
def branding(name, length=40):
    # Trim branding text to ``length`` characters with no "..." indicator.
    return truncate(name, length, indicator="")
200
200
201
201
def FID(raw_id, path):
    """
    Creates a unique ID for filenode based on it's hash of path and commit
    it's safe to use in urls

    :param raw_id: commit raw id
    :param path: file path inside that commit
    """
    return 'c-{}-{}'.format(short_id(raw_id), md5_safe(path)[:12])
212
212
213
213
class _GetError(object):
    """Get error from form_errors, and represent it as span wrapped error
    message

    :param field_name: field to fetch errors for
    :param form_errors: form errors dict
    """

    def __call__(self, field_name, form_errors):
        # no errors at all, or none for this field -> implicit None
        if not form_errors or field_name not in form_errors:
            return
        tmpl = """<span class="error_msg">%s</span>"""
        return literal(tmpl % form_errors.get(field_name))

get_error = _GetError()
228
228
229
229
class _ToolTip(object):

    def __call__(self, tooltip_title, trim_at=50):
        """
        Special function just to wrap our text into nice formatted
        autowrapped text

        :param tooltip_title: raw tooltip text
        """
        escaped = escape(tooltip_title)
        # double-escape angle brackets on top of ``escape`` output
        return escaped.replace('<', '&lt;').replace('>', '&gt;')

tooltip = _ToolTip()
243
243
244
244
def files_breadcrumbs(repo_name, commit_id, file_path):
    """
    Build a '/'-joined breadcrumb trail of links for ``file_path`` at
    ``commit_id`` in ``repo_name``; the final segment is plain text.
    """
    if isinstance(file_path, str):
        file_path = safe_unicode(file_path)

    # TODO: johbo: Is this always a url like path, or is this operating
    # system dependent?
    segments = file_path.split('/')

    repo_name_html = escape(repo_name)
    if segments == ['']:
        # empty path: repo name alone, unlinked
        crumbs = [repo_name_html]
    else:
        crumbs = [
            link_to(
                repo_name_html,
                url('files_home',
                    repo_name=repo_name,
                    revision=commit_id,
                    f_path=''),
                class_='pjax-link')]

    last_index = len(segments) - 1
    for index, segment in enumerate(segments):
        if not segment:
            continue
        segment_html = escape(segment)

        if index == last_index:
            # last path element is not a link
            crumbs.append(segment_html)
        else:
            crumbs.append(
                link_to(
                    segment_html,
                    url('files_home',
                        repo_name=repo_name,
                        revision=commit_id,
                        f_path='/'.join(segments[:index + 1])),
                    class_='pjax-link'))

    return literal('/'.join(crumbs))
285
285
286
286
class CodeHtmlFormatter(HtmlFormatter):
    """
    My code Html Formatter for source codes

    Customizes pygments' HtmlFormatter: each code line gets a stable
    ``L<n>`` anchor div, and the line-number gutter is rendered as the
    left cell of a two-column table.
    """

    def wrap(self, source, outfile):
        return self._wrap_div(self._wrap_pre(self._wrap_code(source)))

    def _wrap_code(self, source):
        # wrap every emitted source line in a div carrying an L<n> anchor id
        for cnt, it in enumerate(source):
            i, t = it
            t = '<div id="L%s">%s</div>' % (cnt + 1, t)
            yield i, t

    def _wrap_tablelinenos(self, inner):
        # buffer the highlighted code while counting real source lines
        dummyoutfile = StringIO.StringIO()
        lncount = 0
        for t, line in inner:
            if t:
                lncount += 1
            dummyoutfile.write(line)

        fl = self.linenostart
        # width of the widest line number, for %*d padding below
        mw = len(str(lncount + fl - 1))
        sp = self.linenospecial
        st = self.linenostep
        la = self.lineanchors
        aln = self.anchorlinenos
        nocls = self.noclasses
        if sp:
            # "special" line numbers: every sp-th number gets extra styling
            lines = []

            for i in range(fl, fl + lncount):
                if i % st == 0:
                    if i % sp == 0:
                        if aln:
                            lines.append('<a href="#%s%d" class="special">%*d</a>' %
                                         (la, i, mw, i))
                        else:
                            lines.append('<span class="special">%*d</span>' % (mw, i))
                    else:
                        if aln:
                            lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
                        else:
                            lines.append('%*d' % (mw, i))
                else:
                    lines.append('')
            ls = '\n'.join(lines)
        else:
            # plain numbering, only every st-th line is labelled
            lines = []
            for i in range(fl, fl + lncount):
                if i % st == 0:
                    if aln:
                        lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
                    else:
                        lines.append('%*d' % (mw, i))
                else:
                    lines.append('')
            ls = '\n'.join(lines)

        # in case you wonder about the seemingly redundant <div> here: since the
        # content in the other cell also is wrapped in a div, some browsers in
        # some configurations seem to mess up the formatting...
        if nocls:
            yield 0, ('<table class="%stable">' % self.cssclass +
                      '<tr><td><div class="linenodiv" '
                      'style="background-color: #f0f0f0; padding-right: 10px">'
                      '<pre style="line-height: 125%">' +
                      ls + '</pre></div></td><td id="hlcode" class="code">')
        else:
            yield 0, ('<table class="%stable">' % self.cssclass +
                      '<tr><td class="linenos"><div class="linenodiv"><pre>' +
                      ls + '</pre></div></td><td id="hlcode" class="code">')
        yield 0, dummyoutfile.getvalue()
        yield 0, '</td></tr></table>'
362
362
363
363
class SearchContentCodeHtmlFormatter(CodeHtmlFormatter):
    """
    Formatter for search-result snippets: renders only the line numbers
    listed in ``only_line_numbers`` (with "..." rows marking gaps) and
    links each shown line back to the file via ``url`` when given.
    """

    def __init__(self, **kw):
        # only show these line numbers if set
        self.only_lines = kw.pop('only_line_numbers', [])
        self.query_terms = kw.pop('query_terms', [])
        self.max_lines = kw.pop('max_lines', 5)
        self.line_context = kw.pop('line_context', 3)
        self.url = kw.pop('url', None)

        # bugfix: was ``super(CodeHtmlFormatter, self)`` which anchored the
        # MRO lookup at the parent and skipped CodeHtmlFormatter itself;
        # harmless today (CodeHtmlFormatter defines no __init__) but wrong.
        super(SearchContentCodeHtmlFormatter, self).__init__(**kw)

    def _wrap_code(self, source):
        # search snippets use plain <pre> per line, no anchor divs
        for cnt, it in enumerate(source):
            i, t = it
            t = '<pre>%s</pre>' % t
            yield i, t

    def _wrap_tablelinenos(self, inner):
        yield 0, '<table class="code-highlight %stable">' % self.cssclass

        last_shown_line_number = 0
        current_line_number = 1

        for t, line in inner:
            if not t:
                # pass through non-source chunks untouched
                yield t, line
                continue

            if current_line_number in self.only_lines:
                if last_shown_line_number + 1 != current_line_number:
                    # gap since the previously shown line -> ellipsis row
                    yield 0, '<tr>'
                    yield 0, '<td class="line">...</td>'
                    yield 0, '<td id="hlcode" class="code"></td>'
                    yield 0, '</tr>'

                yield 0, '<tr>'
                if self.url:
                    yield 0, '<td class="line"><a href="%s#L%i">%i</a></td>' % (
                        self.url, current_line_number, current_line_number)
                else:
                    yield 0, '<td class="line"><a href="">%i</a></td>' % (
                        current_line_number)
                yield 0, '<td id="hlcode" class="code">' + line + '</td>'
                yield 0, '</tr>'

                last_shown_line_number = current_line_number

            current_line_number += 1

        yield 0, '</table>'
415
415
416
416
def extract_phrases(text_query):
    """
    Extracts phrases from search term string making sure phrases
    contained in double quotes are kept together - and discarding empty values
    or fully whitespace values eg.

    'some text "a phrase" more' => ['some', 'text', 'a phrase', 'more']

    """
    phrases = []
    buf = ''
    in_phrase = False

    for char in text_query:
        is_quote = (char == '"')

        if in_phrase and is_quote:
            # closing quote: finish the current phrase
            phrases.append(buf)
            buf = ''
            in_phrase = False
        elif in_phrase:
            buf += char
        elif is_quote or char == ' ':
            # a space, or an opening quote, flushes the pending token
            phrases.append(buf)
            buf = ''
            in_phrase = is_quote
        else:
            buf += char

    phrases.append(buf)
    return [phrase.strip() for phrase in phrases if phrase.strip()]
456
456
457
457
def get_matching_offsets(text, phrases):
    """
    Returns a list of string offsets in `text` that the list of `terms` match

    >>> get_matching_offsets('some text here', ['some', 'here'])
    [(0, 4), (10, 14)]

    """
    # NOTE(review): each phrase is used as a regex pattern; callers pass
    # normalized (alnum + space) phrases, so no escaping is done here
    return [
        (match.start(), match.end())
        for phrase in phrases
        for match in re.finditer(phrase, text)]
472
472
473
473
def normalize_text_for_matching(x):
    """
    Replaces all non alnum characters to spaces and lower cases the string,
    useful for comparing two text strings without punctuation
    """
    lowered = x.lower()
    return re.sub(r'[^\w]', ' ', lowered)
480
480
481
481
def get_matching_line_offsets(lines, terms):
    """ Return a dict of `lines` indices (starting from 1) matching a
    text search query, mapped to the per-line match offsets.

    :param lines: list of strings representing lines
    :param terms: search term string to match in lines eg. 'some text'

    eg. for lines of 'words / some text some / text here' and terms 'text'
    the result looks like {2: [(5, 9)], 3: [(0, 4)]}
    """
    phrases = [normalize_text_for_matching(phrase)
               for phrase in extract_phrases(terms)]

    matching_lines = {}
    for line_index, line in enumerate(lines, start=1):
        offsets = get_matching_offsets(
            normalize_text_for_matching(line), phrases)
        if offsets:
            matching_lines[line_index] = offsets

    return matching_lines
515
515
516
516
def hsv_to_rgb(h, s, v):
    """ Convert hsv color values to rgb """

    if s == 0.0:
        # zero saturation: pure grey
        return v, v, v

    i = int(h * 6.0)  # XXX assume int() truncates!
    f = (h * 6.0) - i
    p = v * (1.0 - s)
    q = v * (1.0 - s * f)
    t = v * (1.0 - s * (1.0 - f))

    # one entry per 60-degree sector of the hue circle
    sector = i % 6
    return [
        (v, t, p),
        (q, v, p),
        (p, v, t),
        (p, q, v),
        (t, p, v),
        (v, p, q),
    ][sector]
540
540
541
541
def unique_color_generator(n=10000, saturation=0.10, lightness=0.95):
    """
    Generator for getting n of evenly distributed colors using
    hsv color and golden ratio. It always return same order of colors

    :param n: number of colors to generate
    :param saturation: saturation of returned colors
    :param lightness: lightness of returned colors
    :returns: RGB tuple
    """
    golden_ratio = 0.618033988749895
    hue = 0.22717784590367374

    for _ in xrange(n):
        hue += golden_ratio
        hue %= 1
        rgb = hsv_to_rgb(hue, saturation, lightness)
        yield map(lambda channel: str(int(channel * 256)), rgb)
562
562
563
563
564 def color_hasher(n=10000, saturation=0.10, lightness=0.95):
564 def color_hasher(n=10000, saturation=0.10, lightness=0.95):
565 """
565 """
566 Returns a function which when called with an argument returns a unique
566 Returns a function which when called with an argument returns a unique
567 color for that argument, eg.
567 color for that argument, eg.
568
568
569 :param n: number of colors to generate
569 :param n: number of colors to generate
570 :param saturation: saturation of returned colors
570 :param saturation: saturation of returned colors
571 :param lightness: lightness of returned colors
571 :param lightness: lightness of returned colors
572 :returns: css RGB string
572 :returns: css RGB string
573
573
574 >>> color_hash = color_hasher()
574 >>> color_hash = color_hasher()
575 >>> color_hash('hello')
575 >>> color_hash('hello')
576 'rgb(34, 12, 59)'
576 'rgb(34, 12, 59)'
577 >>> color_hash('hello')
577 >>> color_hash('hello')
578 'rgb(34, 12, 59)'
578 'rgb(34, 12, 59)'
579 >>> color_hash('other')
579 >>> color_hash('other')
580 'rgb(90, 224, 159)'
580 'rgb(90, 224, 159)'
581 """
581 """
582
582
583 color_dict = {}
583 color_dict = {}
584 cgenerator = unique_color_generator(
584 cgenerator = unique_color_generator(
585 saturation=saturation, lightness=lightness)
585 saturation=saturation, lightness=lightness)
586
586
587 def get_color_string(thing):
587 def get_color_string(thing):
588 if thing in color_dict:
588 if thing in color_dict:
589 col = color_dict[thing]
589 col = color_dict[thing]
590 else:
590 else:
591 col = color_dict[thing] = cgenerator.next()
591 col = color_dict[thing] = cgenerator.next()
592 return "rgb(%s)" % (', '.join(col))
592 return "rgb(%s)" % (', '.join(col))
593
593
594 return get_color_string
594 return get_color_string
595
595
596
596
597 def get_lexer_safe(mimetype=None, filepath=None):
597 def get_lexer_safe(mimetype=None, filepath=None):
598 """
598 """
599 Tries to return a relevant pygments lexer using mimetype/filepath name,
599 Tries to return a relevant pygments lexer using mimetype/filepath name,
600 defaulting to plain text if none could be found
600 defaulting to plain text if none could be found
601 """
601 """
602 lexer = None
602 lexer = None
603 try:
603 try:
604 if mimetype:
604 if mimetype:
605 lexer = get_lexer_for_mimetype(mimetype)
605 lexer = get_lexer_for_mimetype(mimetype)
606 if not lexer:
606 if not lexer:
607 lexer = get_lexer_for_filename(filepath)
607 lexer = get_lexer_for_filename(filepath)
608 except pygments.util.ClassNotFound:
608 except pygments.util.ClassNotFound:
609 pass
609 pass
610
610
611 if not lexer:
611 if not lexer:
612 lexer = get_lexer_by_name('text')
612 lexer = get_lexer_by_name('text')
613
613
614 return lexer
614 return lexer
615
615
616
616
617 def get_lexer_for_filenode(filenode):
617 def get_lexer_for_filenode(filenode):
618 lexer = get_custom_lexer(filenode.extension) or filenode.lexer
618 lexer = get_custom_lexer(filenode.extension) or filenode.lexer
619 return lexer
619 return lexer
620
620
621
621
622 def pygmentize(filenode, **kwargs):
622 def pygmentize(filenode, **kwargs):
623 """
623 """
624 pygmentize function using pygments
624 pygmentize function using pygments
625
625
626 :param filenode:
626 :param filenode:
627 """
627 """
628 lexer = get_lexer_for_filenode(filenode)
628 lexer = get_lexer_for_filenode(filenode)
629 return literal(code_highlight(filenode.content, lexer,
629 return literal(code_highlight(filenode.content, lexer,
630 CodeHtmlFormatter(**kwargs)))
630 CodeHtmlFormatter(**kwargs)))
631
631
632
632
633 def is_following_repo(repo_name, user_id):
633 def is_following_repo(repo_name, user_id):
634 from rhodecode.model.scm import ScmModel
634 from rhodecode.model.scm import ScmModel
635 return ScmModel().is_following_repo(repo_name, user_id)
635 return ScmModel().is_following_repo(repo_name, user_id)
636
636
637
637
638 class _Message(object):
638 class _Message(object):
639 """A message returned by ``Flash.pop_messages()``.
639 """A message returned by ``Flash.pop_messages()``.
640
640
641 Converting the message to a string returns the message text. Instances
641 Converting the message to a string returns the message text. Instances
642 also have the following attributes:
642 also have the following attributes:
643
643
644 * ``message``: the message text.
644 * ``message``: the message text.
645 * ``category``: the category specified when the message was created.
645 * ``category``: the category specified when the message was created.
646 """
646 """
647
647
648 def __init__(self, category, message):
648 def __init__(self, category, message):
649 self.category = category
649 self.category = category
650 self.message = message
650 self.message = message
651
651
652 def __str__(self):
652 def __str__(self):
653 return self.message
653 return self.message
654
654
655 __unicode__ = __str__
655 __unicode__ = __str__
656
656
657 def __html__(self):
657 def __html__(self):
658 return escape(safe_unicode(self.message))
658 return escape(safe_unicode(self.message))
659
659
660
660
661 class Flash(_Flash):
661 class Flash(_Flash):
662
662
663 def pop_messages(self):
663 def pop_messages(self, request=None):
664 """Return all accumulated messages and delete them from the session.
664 """Return all accumulated messages and delete them from the session.
665
665
666 The return value is a list of ``Message`` objects.
666 The return value is a list of ``Message`` objects.
667 """
667 """
668 from pylons import session
668 messages = []
669
669
670 messages = []
670 if request:
671 session = request.session
672 else:
673 from pylons import session
671
674
672 # Pop the 'old' pylons flash messages. They are tuples of the form
675 # Pop the 'old' pylons flash messages. They are tuples of the form
673 # (category, message)
676 # (category, message)
674 for cat, msg in session.pop(self.session_key, []):
677 for cat, msg in session.pop(self.session_key, []):
675 messages.append(_Message(cat, msg))
678 messages.append(_Message(cat, msg))
676
679
677 # Pop the 'new' pyramid flash messages for each category as list
680 # Pop the 'new' pyramid flash messages for each category as list
678 # of strings.
681 # of strings.
679 for cat in self.categories:
682 for cat in self.categories:
680 for msg in session.pop_flash(queue=cat):
683 for msg in session.pop_flash(queue=cat):
681 messages.append(_Message(cat, msg))
684 messages.append(_Message(cat, msg))
682 # Map messages from the default queue to the 'notice' category.
685 # Map messages from the default queue to the 'notice' category.
683 for msg in session.pop_flash():
686 for msg in session.pop_flash():
684 messages.append(_Message('notice', msg))
687 messages.append(_Message('notice', msg))
685
688
686 session.save()
689 session.save()
687 return messages
690 return messages
688
691
689 def json_alerts(self):
692 def json_alerts(self):
690 payloads = []
693 payloads = []
691 messages = flash.pop_messages()
694 messages = flash.pop_messages()
692 if messages:
695 if messages:
693 for message in messages:
696 for message in messages:
694 subdata = {}
697 subdata = {}
695 if hasattr(message.message, 'rsplit'):
698 if hasattr(message.message, 'rsplit'):
696 flash_data = message.message.rsplit('|DELIM|', 1)
699 flash_data = message.message.rsplit('|DELIM|', 1)
697 org_message = flash_data[0]
700 org_message = flash_data[0]
698 if len(flash_data) > 1:
701 if len(flash_data) > 1:
699 subdata = json.loads(flash_data[1])
702 subdata = json.loads(flash_data[1])
700 else:
703 else:
701 org_message = message.message
704 org_message = message.message
702 payloads.append({
705 payloads.append({
703 'message': {
706 'message': {
704 'message': u'{}'.format(org_message),
707 'message': u'{}'.format(org_message),
705 'level': message.category,
708 'level': message.category,
706 'force': True,
709 'force': True,
707 'subdata': subdata
710 'subdata': subdata
708 }
711 }
709 })
712 })
710 return json.dumps(payloads)
713 return json.dumps(payloads)
711
714
712 flash = Flash()
715 flash = Flash()
713
716
714 #==============================================================================
717 #==============================================================================
715 # SCM FILTERS available via h.
718 # SCM FILTERS available via h.
716 #==============================================================================
719 #==============================================================================
717 from rhodecode.lib.vcs.utils import author_name, author_email
720 from rhodecode.lib.vcs.utils import author_name, author_email
718 from rhodecode.lib.utils2 import credentials_filter, age as _age
721 from rhodecode.lib.utils2 import credentials_filter, age as _age
719 from rhodecode.model.db import User, ChangesetStatus
722 from rhodecode.model.db import User, ChangesetStatus
720
723
721 age = _age
724 age = _age
722 capitalize = lambda x: x.capitalize()
725 capitalize = lambda x: x.capitalize()
723 email = author_email
726 email = author_email
724 short_id = lambda x: x[:12]
727 short_id = lambda x: x[:12]
725 hide_credentials = lambda x: ''.join(credentials_filter(x))
728 hide_credentials = lambda x: ''.join(credentials_filter(x))
726
729
727
730
728 def age_component(datetime_iso, value=None, time_is_local=False):
731 def age_component(datetime_iso, value=None, time_is_local=False):
729 title = value or format_date(datetime_iso)
732 title = value or format_date(datetime_iso)
730 tzinfo = '+00:00'
733 tzinfo = '+00:00'
731
734
732 # detect if we have a timezone info, otherwise, add it
735 # detect if we have a timezone info, otherwise, add it
733 if isinstance(datetime_iso, datetime) and not datetime_iso.tzinfo:
736 if isinstance(datetime_iso, datetime) and not datetime_iso.tzinfo:
734 if time_is_local:
737 if time_is_local:
735 tzinfo = time.strftime("+%H:%M",
738 tzinfo = time.strftime("+%H:%M",
736 time.gmtime(
739 time.gmtime(
737 (datetime.now() - datetime.utcnow()).seconds + 1
740 (datetime.now() - datetime.utcnow()).seconds + 1
738 )
741 )
739 )
742 )
740
743
741 return literal(
744 return literal(
742 '<time class="timeago tooltip" '
745 '<time class="timeago tooltip" '
743 'title="{1}{2}" datetime="{0}{2}">{1}</time>'.format(
746 'title="{1}{2}" datetime="{0}{2}">{1}</time>'.format(
744 datetime_iso, title, tzinfo))
747 datetime_iso, title, tzinfo))
745
748
746
749
747 def _shorten_commit_id(commit_id):
750 def _shorten_commit_id(commit_id):
748 from rhodecode import CONFIG
751 from rhodecode import CONFIG
749 def_len = safe_int(CONFIG.get('rhodecode_show_sha_length', 12))
752 def_len = safe_int(CONFIG.get('rhodecode_show_sha_length', 12))
750 return commit_id[:def_len]
753 return commit_id[:def_len]
751
754
752
755
753 def show_id(commit):
756 def show_id(commit):
754 """
757 """
755 Configurable function that shows ID
758 Configurable function that shows ID
756 by default it's r123:fffeeefffeee
759 by default it's r123:fffeeefffeee
757
760
758 :param commit: commit instance
761 :param commit: commit instance
759 """
762 """
760 from rhodecode import CONFIG
763 from rhodecode import CONFIG
761 show_idx = str2bool(CONFIG.get('rhodecode_show_revision_number', True))
764 show_idx = str2bool(CONFIG.get('rhodecode_show_revision_number', True))
762
765
763 raw_id = _shorten_commit_id(commit.raw_id)
766 raw_id = _shorten_commit_id(commit.raw_id)
764 if show_idx:
767 if show_idx:
765 return 'r%s:%s' % (commit.idx, raw_id)
768 return 'r%s:%s' % (commit.idx, raw_id)
766 else:
769 else:
767 return '%s' % (raw_id, )
770 return '%s' % (raw_id, )
768
771
769
772
770 def format_date(date):
773 def format_date(date):
771 """
774 """
772 use a standardized formatting for dates used in RhodeCode
775 use a standardized formatting for dates used in RhodeCode
773
776
774 :param date: date/datetime object
777 :param date: date/datetime object
775 :return: formatted date
778 :return: formatted date
776 """
779 """
777
780
778 if date:
781 if date:
779 _fmt = "%a, %d %b %Y %H:%M:%S"
782 _fmt = "%a, %d %b %Y %H:%M:%S"
780 return safe_unicode(date.strftime(_fmt))
783 return safe_unicode(date.strftime(_fmt))
781
784
782 return u""
785 return u""
783
786
784
787
785 class _RepoChecker(object):
788 class _RepoChecker(object):
786
789
787 def __init__(self, backend_alias):
790 def __init__(self, backend_alias):
788 self._backend_alias = backend_alias
791 self._backend_alias = backend_alias
789
792
790 def __call__(self, repository):
793 def __call__(self, repository):
791 if hasattr(repository, 'alias'):
794 if hasattr(repository, 'alias'):
792 _type = repository.alias
795 _type = repository.alias
793 elif hasattr(repository, 'repo_type'):
796 elif hasattr(repository, 'repo_type'):
794 _type = repository.repo_type
797 _type = repository.repo_type
795 else:
798 else:
796 _type = repository
799 _type = repository
797 return _type == self._backend_alias
800 return _type == self._backend_alias
798
801
799 is_git = _RepoChecker('git')
802 is_git = _RepoChecker('git')
800 is_hg = _RepoChecker('hg')
803 is_hg = _RepoChecker('hg')
801 is_svn = _RepoChecker('svn')
804 is_svn = _RepoChecker('svn')
802
805
803
806
804 def get_repo_type_by_name(repo_name):
807 def get_repo_type_by_name(repo_name):
805 repo = Repository.get_by_repo_name(repo_name)
808 repo = Repository.get_by_repo_name(repo_name)
806 return repo.repo_type
809 return repo.repo_type
807
810
808
811
809 def is_svn_without_proxy(repository):
812 def is_svn_without_proxy(repository):
810 if is_svn(repository):
813 if is_svn(repository):
811 from rhodecode.model.settings import VcsSettingsModel
814 from rhodecode.model.settings import VcsSettingsModel
812 conf = VcsSettingsModel().get_ui_settings_as_config_obj()
815 conf = VcsSettingsModel().get_ui_settings_as_config_obj()
813 return not str2bool(conf.get('vcs_svn_proxy', 'http_requests_enabled'))
816 return not str2bool(conf.get('vcs_svn_proxy', 'http_requests_enabled'))
814 return False
817 return False
815
818
816
819
817 def discover_user(author):
820 def discover_user(author):
818 """
821 """
819 Tries to discover RhodeCode User based on the autho string. Author string
822 Tries to discover RhodeCode User based on the autho string. Author string
820 is typically `FirstName LastName <email@address.com>`
823 is typically `FirstName LastName <email@address.com>`
821 """
824 """
822
825
823 # if author is already an instance use it for extraction
826 # if author is already an instance use it for extraction
824 if isinstance(author, User):
827 if isinstance(author, User):
825 return author
828 return author
826
829
827 # Valid email in the attribute passed, see if they're in the system
830 # Valid email in the attribute passed, see if they're in the system
828 _email = author_email(author)
831 _email = author_email(author)
829 if _email != '':
832 if _email != '':
830 user = User.get_by_email(_email, case_insensitive=True, cache=True)
833 user = User.get_by_email(_email, case_insensitive=True, cache=True)
831 if user is not None:
834 if user is not None:
832 return user
835 return user
833
836
834 # Maybe it's a username, we try to extract it and fetch by username ?
837 # Maybe it's a username, we try to extract it and fetch by username ?
835 _author = author_name(author)
838 _author = author_name(author)
836 user = User.get_by_username(_author, case_insensitive=True, cache=True)
839 user = User.get_by_username(_author, case_insensitive=True, cache=True)
837 if user is not None:
840 if user is not None:
838 return user
841 return user
839
842
840 return None
843 return None
841
844
842
845
843 def email_or_none(author):
846 def email_or_none(author):
844 # extract email from the commit string
847 # extract email from the commit string
845 _email = author_email(author)
848 _email = author_email(author)
846
849
847 # If we have an email, use it, otherwise
850 # If we have an email, use it, otherwise
848 # see if it contains a username we can get an email from
851 # see if it contains a username we can get an email from
849 if _email != '':
852 if _email != '':
850 return _email
853 return _email
851 else:
854 else:
852 user = User.get_by_username(
855 user = User.get_by_username(
853 author_name(author), case_insensitive=True, cache=True)
856 author_name(author), case_insensitive=True, cache=True)
854
857
855 if user is not None:
858 if user is not None:
856 return user.email
859 return user.email
857
860
858 # No valid email, not a valid user in the system, none!
861 # No valid email, not a valid user in the system, none!
859 return None
862 return None
860
863
861
864
862 def link_to_user(author, length=0, **kwargs):
865 def link_to_user(author, length=0, **kwargs):
863 user = discover_user(author)
866 user = discover_user(author)
864 # user can be None, but if we have it already it means we can re-use it
867 # user can be None, but if we have it already it means we can re-use it
865 # in the person() function, so we save 1 intensive-query
868 # in the person() function, so we save 1 intensive-query
866 if user:
869 if user:
867 author = user
870 author = user
868
871
869 display_person = person(author, 'username_or_name_or_email')
872 display_person = person(author, 'username_or_name_or_email')
870 if length:
873 if length:
871 display_person = shorter(display_person, length)
874 display_person = shorter(display_person, length)
872
875
873 if user:
876 if user:
874 return link_to(
877 return link_to(
875 escape(display_person),
878 escape(display_person),
876 route_path('user_profile', username=user.username),
879 route_path('user_profile', username=user.username),
877 **kwargs)
880 **kwargs)
878 else:
881 else:
879 return escape(display_person)
882 return escape(display_person)
880
883
881
884
882 def person(author, show_attr="username_and_name"):
885 def person(author, show_attr="username_and_name"):
883 user = discover_user(author)
886 user = discover_user(author)
884 if user:
887 if user:
885 return getattr(user, show_attr)
888 return getattr(user, show_attr)
886 else:
889 else:
887 _author = author_name(author)
890 _author = author_name(author)
888 _email = email(author)
891 _email = email(author)
889 return _author or _email
892 return _author or _email
890
893
891
894
892 def author_string(email):
895 def author_string(email):
893 if email:
896 if email:
894 user = User.get_by_email(email, case_insensitive=True, cache=True)
897 user = User.get_by_email(email, case_insensitive=True, cache=True)
895 if user:
898 if user:
896 if user.first_name or user.last_name:
899 if user.first_name or user.last_name:
897 return '%s %s &lt;%s&gt;' % (
900 return '%s %s &lt;%s&gt;' % (
898 user.first_name, user.last_name, email)
901 user.first_name, user.last_name, email)
899 else:
902 else:
900 return email
903 return email
901 else:
904 else:
902 return email
905 return email
903 else:
906 else:
904 return None
907 return None
905
908
906
909
907 def person_by_id(id_, show_attr="username_and_name"):
910 def person_by_id(id_, show_attr="username_and_name"):
908 # attr to return from fetched user
911 # attr to return from fetched user
909 person_getter = lambda usr: getattr(usr, show_attr)
912 person_getter = lambda usr: getattr(usr, show_attr)
910
913
911 #maybe it's an ID ?
914 #maybe it's an ID ?
912 if str(id_).isdigit() or isinstance(id_, int):
915 if str(id_).isdigit() or isinstance(id_, int):
913 id_ = int(id_)
916 id_ = int(id_)
914 user = User.get(id_)
917 user = User.get(id_)
915 if user is not None:
918 if user is not None:
916 return person_getter(user)
919 return person_getter(user)
917 return id_
920 return id_
918
921
919
922
920 def gravatar_with_user(author, show_disabled=False):
923 def gravatar_with_user(author, show_disabled=False):
921 from rhodecode.lib.utils import PartialRenderer
924 from rhodecode.lib.utils import PartialRenderer
922 _render = PartialRenderer('base/base.mako')
925 _render = PartialRenderer('base/base.mako')
923 return _render('gravatar_with_user', author, show_disabled=show_disabled)
926 return _render('gravatar_with_user', author, show_disabled=show_disabled)
924
927
925
928
926 def desc_stylize(value):
929 def desc_stylize(value):
927 """
930 """
928 converts tags from value into html equivalent
931 converts tags from value into html equivalent
929
932
930 :param value:
933 :param value:
931 """
934 """
932 if not value:
935 if not value:
933 return ''
936 return ''
934
937
935 value = re.sub(r'\[see\ \=\>\ *([a-zA-Z0-9\/\=\?\&\ \:\/\.\-]*)\]',
938 value = re.sub(r'\[see\ \=\>\ *([a-zA-Z0-9\/\=\?\&\ \:\/\.\-]*)\]',
936 '<div class="metatag" tag="see">see =&gt; \\1 </div>', value)
939 '<div class="metatag" tag="see">see =&gt; \\1 </div>', value)
937 value = re.sub(r'\[license\ \=\>\ *([a-zA-Z0-9\/\=\?\&\ \:\/\.\-]*)\]',
940 value = re.sub(r'\[license\ \=\>\ *([a-zA-Z0-9\/\=\?\&\ \:\/\.\-]*)\]',
938 '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/\\1">\\1</a></div>', value)
941 '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/\\1">\\1</a></div>', value)
939 value = re.sub(r'\[(requires|recommends|conflicts|base)\ \=\>\ *([a-zA-Z0-9\-\/]*)\]',
942 value = re.sub(r'\[(requires|recommends|conflicts|base)\ \=\>\ *([a-zA-Z0-9\-\/]*)\]',
940 '<div class="metatag" tag="\\1">\\1 =&gt; <a href="/\\2">\\2</a></div>', value)
943 '<div class="metatag" tag="\\1">\\1 =&gt; <a href="/\\2">\\2</a></div>', value)
941 value = re.sub(r'\[(lang|language)\ \=\>\ *([a-zA-Z\-\/\#\+]*)\]',
944 value = re.sub(r'\[(lang|language)\ \=\>\ *([a-zA-Z\-\/\#\+]*)\]',
942 '<div class="metatag" tag="lang">\\2</div>', value)
945 '<div class="metatag" tag="lang">\\2</div>', value)
943 value = re.sub(r'\[([a-z]+)\]',
946 value = re.sub(r'\[([a-z]+)\]',
944 '<div class="metatag" tag="\\1">\\1</div>', value)
947 '<div class="metatag" tag="\\1">\\1</div>', value)
945
948
946 return value
949 return value
947
950
948
951
949 def escaped_stylize(value):
952 def escaped_stylize(value):
950 """
953 """
951 converts tags from value into html equivalent, but escaping its value first
954 converts tags from value into html equivalent, but escaping its value first
952 """
955 """
953 if not value:
956 if not value:
954 return ''
957 return ''
955
958
956 # Using default webhelper escape method, but has to force it as a
959 # Using default webhelper escape method, but has to force it as a
957 # plain unicode instead of a markup tag to be used in regex expressions
960 # plain unicode instead of a markup tag to be used in regex expressions
958 value = unicode(escape(safe_unicode(value)))
961 value = unicode(escape(safe_unicode(value)))
959
962
960 value = re.sub(r'\[see\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]',
963 value = re.sub(r'\[see\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]',
961 '<div class="metatag" tag="see">see =&gt; \\1 </div>', value)
964 '<div class="metatag" tag="see">see =&gt; \\1 </div>', value)
962 value = re.sub(r'\[license\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]',
965 value = re.sub(r'\[license\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]',
963 '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/\\1">\\1</a></div>', value)
966 '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/\\1">\\1</a></div>', value)
964 value = re.sub(r'\[(requires|recommends|conflicts|base)\ \=\&gt;\ *([a-zA-Z0-9\-\/]*)\]',
967 value = re.sub(r'\[(requires|recommends|conflicts|base)\ \=\&gt;\ *([a-zA-Z0-9\-\/]*)\]',
965 '<div class="metatag" tag="\\1">\\1 =&gt; <a href="/\\2">\\2</a></div>', value)
968 '<div class="metatag" tag="\\1">\\1 =&gt; <a href="/\\2">\\2</a></div>', value)
966 value = re.sub(r'\[(lang|language)\ \=\&gt;\ *([a-zA-Z\-\/\#\+]*)\]',
969 value = re.sub(r'\[(lang|language)\ \=\&gt;\ *([a-zA-Z\-\/\#\+]*)\]',
967 '<div class="metatag" tag="lang">\\2</div>', value)
970 '<div class="metatag" tag="lang">\\2</div>', value)
968 value = re.sub(r'\[([a-z]+)\]',
971 value = re.sub(r'\[([a-z]+)\]',
969 '<div class="metatag" tag="\\1">\\1</div>', value)
972 '<div class="metatag" tag="\\1">\\1</div>', value)
970
973
971 return value
974 return value
972
975
973
976
974 def bool2icon(value):
977 def bool2icon(value):
975 """
978 """
976 Returns boolean value of a given value, represented as html element with
979 Returns boolean value of a given value, represented as html element with
977 classes that will represent icons
980 classes that will represent icons
978
981
979 :param value: given value to convert to html node
982 :param value: given value to convert to html node
980 """
983 """
981
984
982 if value: # does bool conversion
985 if value: # does bool conversion
983 return HTML.tag('i', class_="icon-true")
986 return HTML.tag('i', class_="icon-true")
984 else: # not true as bool
987 else: # not true as bool
985 return HTML.tag('i', class_="icon-false")
988 return HTML.tag('i', class_="icon-false")
986
989
987
990
988 #==============================================================================
991 #==============================================================================
989 # PERMS
992 # PERMS
990 #==============================================================================
993 #==============================================================================
991 from rhodecode.lib.auth import HasPermissionAny, HasPermissionAll, \
994 from rhodecode.lib.auth import HasPermissionAny, HasPermissionAll, \
992 HasRepoPermissionAny, HasRepoPermissionAll, HasRepoGroupPermissionAll, \
995 HasRepoPermissionAny, HasRepoPermissionAll, HasRepoGroupPermissionAll, \
993 HasRepoGroupPermissionAny, HasRepoPermissionAnyApi, get_csrf_token, \
996 HasRepoGroupPermissionAny, HasRepoPermissionAnyApi, get_csrf_token, \
994 csrf_token_key
997 csrf_token_key
995
998
996
999
997 #==============================================================================
1000 #==============================================================================
998 # GRAVATAR URL
1001 # GRAVATAR URL
999 #==============================================================================
1002 #==============================================================================
1000 class InitialsGravatar(object):
1003 class InitialsGravatar(object):
1001 def __init__(self, email_address, first_name, last_name, size=30,
1004 def __init__(self, email_address, first_name, last_name, size=30,
1002 background=None, text_color='#fff'):
1005 background=None, text_color='#fff'):
1003 self.size = size
1006 self.size = size
1004 self.first_name = first_name
1007 self.first_name = first_name
1005 self.last_name = last_name
1008 self.last_name = last_name
1006 self.email_address = email_address
1009 self.email_address = email_address
1007 self.background = background or self.str2color(email_address)
1010 self.background = background or self.str2color(email_address)
1008 self.text_color = text_color
1011 self.text_color = text_color
1009
1012
1010 def get_color_bank(self):
1013 def get_color_bank(self):
1011 """
1014 """
1012 returns a predefined list of colors that gravatars can use.
1015 returns a predefined list of colors that gravatars can use.
1013 Those are randomized distinct colors that guarantee readability and
1016 Those are randomized distinct colors that guarantee readability and
1014 uniqueness.
1017 uniqueness.
1015
1018
1016 generated with: http://phrogz.net/css/distinct-colors.html
1019 generated with: http://phrogz.net/css/distinct-colors.html
1017 """
1020 """
1018 return [
1021 return [
1019 '#bf3030', '#a67f53', '#00ff00', '#5989b3', '#392040', '#d90000',
1022 '#bf3030', '#a67f53', '#00ff00', '#5989b3', '#392040', '#d90000',
1020 '#402910', '#204020', '#79baf2', '#a700b3', '#bf6060', '#7f5320',
1023 '#402910', '#204020', '#79baf2', '#a700b3', '#bf6060', '#7f5320',
1021 '#008000', '#003059', '#ee00ff', '#ff0000', '#8c4b00', '#007300',
1024 '#008000', '#003059', '#ee00ff', '#ff0000', '#8c4b00', '#007300',
1022 '#005fb3', '#de73e6', '#ff4040', '#ffaa00', '#3df255', '#203140',
1025 '#005fb3', '#de73e6', '#ff4040', '#ffaa00', '#3df255', '#203140',
1023 '#47004d', '#591616', '#664400', '#59b365', '#0d2133', '#83008c',
1026 '#47004d', '#591616', '#664400', '#59b365', '#0d2133', '#83008c',
1024 '#592d2d', '#bf9f60', '#73e682', '#1d3f73', '#73006b', '#402020',
1027 '#592d2d', '#bf9f60', '#73e682', '#1d3f73', '#73006b', '#402020',
1025 '#b2862d', '#397341', '#597db3', '#e600d6', '#a60000', '#736039',
1028 '#b2862d', '#397341', '#597db3', '#e600d6', '#a60000', '#736039',
1026 '#00b318', '#79aaf2', '#330d30', '#ff8080', '#403010', '#16591f',
1029 '#00b318', '#79aaf2', '#330d30', '#ff8080', '#403010', '#16591f',
1027 '#002459', '#8c4688', '#e50000', '#ffbf40', '#00732e', '#102340',
1030 '#002459', '#8c4688', '#e50000', '#ffbf40', '#00732e', '#102340',
1028 '#bf60ac', '#8c4646', '#cc8800', '#00a642', '#1d3473', '#b32d98',
1031 '#bf60ac', '#8c4646', '#cc8800', '#00a642', '#1d3473', '#b32d98',
1029 '#660e00', '#ffd580', '#80ffb2', '#7391e6', '#733967', '#d97b6c',
1032 '#660e00', '#ffd580', '#80ffb2', '#7391e6', '#733967', '#d97b6c',
1030 '#8c5e00', '#59b389', '#3967e6', '#590047', '#73281d', '#665200',
1033 '#8c5e00', '#59b389', '#3967e6', '#590047', '#73281d', '#665200',
1031 '#00e67a', '#2d50b3', '#8c2377', '#734139', '#b2982d', '#16593a',
1034 '#00e67a', '#2d50b3', '#8c2377', '#734139', '#b2982d', '#16593a',
1032 '#001859', '#ff00aa', '#a65e53', '#ffcc00', '#0d3321', '#2d3959',
1035 '#001859', '#ff00aa', '#a65e53', '#ffcc00', '#0d3321', '#2d3959',
1033 '#731d56', '#401610', '#4c3d00', '#468c6c', '#002ca6', '#d936a3',
1036 '#731d56', '#401610', '#4c3d00', '#468c6c', '#002ca6', '#d936a3',
1034 '#d94c36', '#403920', '#36d9a3', '#0d1733', '#592d4a', '#993626',
1037 '#d94c36', '#403920', '#36d9a3', '#0d1733', '#592d4a', '#993626',
1035 '#cca300', '#00734d', '#46598c', '#8c005e', '#7f1100', '#8c7000',
1038 '#cca300', '#00734d', '#46598c', '#8c005e', '#7f1100', '#8c7000',
1036 '#00a66f', '#7382e6', '#b32d74', '#d9896c', '#ffe680', '#1d7362',
1039 '#00a66f', '#7382e6', '#b32d74', '#d9896c', '#ffe680', '#1d7362',
1037 '#364cd9', '#73003d', '#d93a00', '#998a4d', '#59b3a1', '#5965b3',
1040 '#364cd9', '#73003d', '#d93a00', '#998a4d', '#59b3a1', '#5965b3',
1038 '#e5007a', '#73341d', '#665f00', '#00b38f', '#0018b3', '#59163a',
1041 '#e5007a', '#73341d', '#665f00', '#00b38f', '#0018b3', '#59163a',
1039 '#b2502d', '#bfb960', '#00ffcc', '#23318c', '#a6537f', '#734939',
1042 '#b2502d', '#bfb960', '#00ffcc', '#23318c', '#a6537f', '#734939',
1040 '#b2a700', '#104036', '#3d3df2', '#402031', '#e56739', '#736f39',
1043 '#b2a700', '#104036', '#3d3df2', '#402031', '#e56739', '#736f39',
1041 '#79f2ea', '#000059', '#401029', '#4c1400', '#ffee00', '#005953',
1044 '#79f2ea', '#000059', '#401029', '#4c1400', '#ffee00', '#005953',
1042 '#101040', '#990052', '#402820', '#403d10', '#00ffee', '#0000d9',
1045 '#101040', '#990052', '#402820', '#403d10', '#00ffee', '#0000d9',
1043 '#ff80c4', '#a66953', '#eeff00', '#00ccbe', '#8080ff', '#e673a1',
1046 '#ff80c4', '#a66953', '#eeff00', '#00ccbe', '#8080ff', '#e673a1',
1044 '#a62c00', '#474d00', '#1a3331', '#46468c', '#733950', '#662900',
1047 '#a62c00', '#474d00', '#1a3331', '#46468c', '#733950', '#662900',
1045 '#858c23', '#238c85', '#0f0073', '#b20047', '#d9986c', '#becc00',
1048 '#858c23', '#238c85', '#0f0073', '#b20047', '#d9986c', '#becc00',
1046 '#396f73', '#281d73', '#ff0066', '#ff6600', '#dee673', '#59adb3',
1049 '#396f73', '#281d73', '#ff0066', '#ff6600', '#dee673', '#59adb3',
1047 '#6559b3', '#590024', '#b2622d', '#98b32d', '#36ced9', '#332d59',
1050 '#6559b3', '#590024', '#b2622d', '#98b32d', '#36ced9', '#332d59',
1048 '#40001a', '#733f1d', '#526600', '#005359', '#242040', '#bf6079',
1051 '#40001a', '#733f1d', '#526600', '#005359', '#242040', '#bf6079',
1049 '#735039', '#cef23d', '#007780', '#5630bf', '#66001b', '#b24700',
1052 '#735039', '#cef23d', '#007780', '#5630bf', '#66001b', '#b24700',
1050 '#acbf60', '#1d6273', '#25008c', '#731d34', '#a67453', '#50592d',
1053 '#acbf60', '#1d6273', '#25008c', '#731d34', '#a67453', '#50592d',
1051 '#00ccff', '#6600ff', '#ff0044', '#4c1f00', '#8a994d', '#79daf2',
1054 '#00ccff', '#6600ff', '#ff0044', '#4c1f00', '#8a994d', '#79daf2',
1052 '#a173e6', '#d93662', '#402310', '#aaff00', '#2d98b3', '#8c40ff',
1055 '#a173e6', '#d93662', '#402310', '#aaff00', '#2d98b3', '#8c40ff',
1053 '#592d39', '#ff8c40', '#354020', '#103640', '#1a0040', '#331a20',
1056 '#592d39', '#ff8c40', '#354020', '#103640', '#1a0040', '#331a20',
1054 '#331400', '#334d00', '#1d5673', '#583973', '#7f0022', '#4c3626',
1057 '#331400', '#334d00', '#1d5673', '#583973', '#7f0022', '#4c3626',
1055 '#88cc00', '#36a3d9', '#3d0073', '#d9364c', '#33241a', '#698c23',
1058 '#88cc00', '#36a3d9', '#3d0073', '#d9364c', '#33241a', '#698c23',
1056 '#5995b3', '#300059', '#e57382', '#7f3300', '#366600', '#00aaff',
1059 '#5995b3', '#300059', '#e57382', '#7f3300', '#366600', '#00aaff',
1057 '#3a1659', '#733941', '#663600', '#74b32d', '#003c59', '#7f53a6',
1060 '#3a1659', '#733941', '#663600', '#74b32d', '#003c59', '#7f53a6',
1058 '#73000f', '#ff8800', '#baf279', '#79caf2', '#291040', '#a6293a',
1061 '#73000f', '#ff8800', '#baf279', '#79caf2', '#291040', '#a6293a',
1059 '#b2742d', '#587339', '#0077b3', '#632699', '#400009', '#d9a66c',
1062 '#b2742d', '#587339', '#0077b3', '#632699', '#400009', '#d9a66c',
1060 '#294010', '#2d4a59', '#aa00ff', '#4c131b', '#b25f00', '#5ce600',
1063 '#294010', '#2d4a59', '#aa00ff', '#4c131b', '#b25f00', '#5ce600',
1061 '#267399', '#a336d9', '#990014', '#664e33', '#86bf60', '#0088ff',
1064 '#267399', '#a336d9', '#990014', '#664e33', '#86bf60', '#0088ff',
1062 '#7700b3', '#593a16', '#073300', '#1d4b73', '#ac60bf', '#e59539',
1065 '#7700b3', '#593a16', '#073300', '#1d4b73', '#ac60bf', '#e59539',
1063 '#4f8c46', '#368dd9', '#5c0073'
1066 '#4f8c46', '#368dd9', '#5c0073'
1064 ]
1067 ]
1065
1068
1066 def rgb_to_hex_color(self, rgb_tuple):
1069 def rgb_to_hex_color(self, rgb_tuple):
1067 """
1070 """
1068 Converts an rgb_tuple passed to an hex color.
1071 Converts an rgb_tuple passed to an hex color.
1069
1072
1070 :param rgb_tuple: tuple with 3 ints represents rgb color space
1073 :param rgb_tuple: tuple with 3 ints represents rgb color space
1071 """
1074 """
1072 return '#' + ("".join(map(chr, rgb_tuple)).encode('hex'))
1075 return '#' + ("".join(map(chr, rgb_tuple)).encode('hex'))
1073
1076
1074 def email_to_int_list(self, email_str):
1077 def email_to_int_list(self, email_str):
1075 """
1078 """
1076 Get every byte of the hex digest value of email and turn it to integer.
1079 Get every byte of the hex digest value of email and turn it to integer.
1077 It's going to be always between 0-255
1080 It's going to be always between 0-255
1078 """
1081 """
1079 digest = md5_safe(email_str.lower())
1082 digest = md5_safe(email_str.lower())
1080 return [int(digest[i * 2:i * 2 + 2], 16) for i in range(16)]
1083 return [int(digest[i * 2:i * 2 + 2], 16) for i in range(16)]
1081
1084
1082 def pick_color_bank_index(self, email_str, color_bank):
1085 def pick_color_bank_index(self, email_str, color_bank):
1083 return self.email_to_int_list(email_str)[0] % len(color_bank)
1086 return self.email_to_int_list(email_str)[0] % len(color_bank)
1084
1087
1085 def str2color(self, email_str):
1088 def str2color(self, email_str):
1086 """
1089 """
1087 Tries to map in a stable algorithm an email to color
1090 Tries to map in a stable algorithm an email to color
1088
1091
1089 :param email_str:
1092 :param email_str:
1090 """
1093 """
1091 color_bank = self.get_color_bank()
1094 color_bank = self.get_color_bank()
1092 # pick position (module it's length so we always find it in the
1095 # pick position (module it's length so we always find it in the
1093 # bank even if it's smaller than 256 values
1096 # bank even if it's smaller than 256 values
1094 pos = self.pick_color_bank_index(email_str, color_bank)
1097 pos = self.pick_color_bank_index(email_str, color_bank)
1095 return color_bank[pos]
1098 return color_bank[pos]
1096
1099
1097 def normalize_email(self, email_address):
1100 def normalize_email(self, email_address):
1098 import unicodedata
1101 import unicodedata
1099 # default host used to fill in the fake/missing email
1102 # default host used to fill in the fake/missing email
1100 default_host = u'localhost'
1103 default_host = u'localhost'
1101
1104
1102 if not email_address:
1105 if not email_address:
1103 email_address = u'%s@%s' % (User.DEFAULT_USER, default_host)
1106 email_address = u'%s@%s' % (User.DEFAULT_USER, default_host)
1104
1107
1105 email_address = safe_unicode(email_address)
1108 email_address = safe_unicode(email_address)
1106
1109
1107 if u'@' not in email_address:
1110 if u'@' not in email_address:
1108 email_address = u'%s@%s' % (email_address, default_host)
1111 email_address = u'%s@%s' % (email_address, default_host)
1109
1112
1110 if email_address.endswith(u'@'):
1113 if email_address.endswith(u'@'):
1111 email_address = u'%s%s' % (email_address, default_host)
1114 email_address = u'%s%s' % (email_address, default_host)
1112
1115
1113 email_address = unicodedata.normalize('NFKD', email_address)\
1116 email_address = unicodedata.normalize('NFKD', email_address)\
1114 .encode('ascii', 'ignore')
1117 .encode('ascii', 'ignore')
1115 return email_address
1118 return email_address
1116
1119
    def get_initials(self):
        """
        Returns 2 letter initials calculated based on the input.
        The algorithm picks first given email address, and takes first letter
        of part before @, and then the first letter of server name. In case
        the part before @ is in a format of `somestring.somestring2` it replaces
        the server letter with first letter of somestring2

        In case function was initialized with both first and lastname, this
        overrides the extraction from email by first letter of the first and
        last name. We add special logic to that functionality, In case Full name
        is compound, like Guido Von Rossum, we use last part of the last name
        (Von Rossum) picking `R`.

        Function also normalizes the non-ascii characters to they ascii
        representation, eg Δ„ => A
        """
        import unicodedata
        # replace non-ascii to ascii
        first_name = unicodedata.normalize(
            'NFKD', safe_unicode(self.first_name)).encode('ascii', 'ignore')
        last_name = unicodedata.normalize(
            'NFKD', safe_unicode(self.last_name)).encode('ascii', 'ignore')

        # do NFKD encoding, and also make sure email has proper format
        # (normalize_email guarantees an '@' with a non-empty host part)
        email_address = self.normalize_email(self.email_address)

        # first push the email initials
        prefix, server = email_address.split('@', 1)

        # check if prefix is maybe a 'first_name.last_name' syntax
        _dot_split = prefix.rsplit('.', 1)
        if len(_dot_split) == 2:
            # e.g. 'john.doe@x' -> ['J', 'D']
            initials = [_dot_split[0][0], _dot_split[1][0]]
        else:
            # fall back to first letter of the local part + of the host
            # NOTE(review): an empty local part (e.g. '@host') would raise
            # IndexError here -- presumably never produced upstream; verify
            initials = [prefix[0], server[0]]

        # then try to replace either first_name or last_name
        # a blank name yields '' after strip(), which keeps the email initials
        fn_letter = (first_name or " ")[0].strip()
        ln_letter = (last_name.split(' ', 1)[-1] or " ")[0].strip()

        if fn_letter:
            initials[0] = fn_letter

        if ln_letter:
            initials[1] = ln_letter

        return ''.join(initials).upper()
1165
1168
    def get_img_data_by_type(self, font_family, img_type):
        """
        Return raw SVG markup for a special (non-initials) avatar type.

        :param font_family: css font-family string; accepted for interface
            symmetry with get_img_data, the silhouette template below does
            not actually interpolate it
        :param img_type: key of the wanted image; only 'default_user'
            (anonymous-user silhouette) exists, any other key raises KeyError
        """
        default_user = """
        <svg xmlns="http://www.w3.org/2000/svg"
        version="1.1" x="0px" y="0px" width="{size}" height="{size}"
        viewBox="-15 -10 439.165 429.164"

        xml:space="preserve"
        style="background:{background};" >

        <path d="M204.583,216.671c50.664,0,91.74-48.075,
        91.74-107.378c0-82.237-41.074-107.377-91.74-107.377
        c-50.668,0-91.74,25.14-91.74,107.377C112.844,
        168.596,153.916,216.671,
        204.583,216.671z" fill="{text_color}"/>
        <path d="M407.164,374.717L360.88,
        270.454c-2.117-4.771-5.836-8.728-10.465-11.138l-71.83-37.392
        c-1.584-0.823-3.502-0.663-4.926,0.415c-20.316,
        15.366-44.203,23.488-69.076,23.488c-24.877,
        0-48.762-8.122-69.078-23.488
        c-1.428-1.078-3.346-1.238-4.93-0.415L58.75,
        259.316c-4.631,2.41-8.346,6.365-10.465,11.138L2.001,374.717
        c-3.191,7.188-2.537,15.412,1.75,22.005c4.285,
        6.592,11.537,10.526,19.4,10.526h362.861c7.863,0,15.117-3.936,
        19.402-10.527 C409.699,390.129,
        410.355,381.902,407.164,374.717z" fill="{text_color}"/>
        </svg>""".format(
            size=self.size,
            background='#979797',  # @grey4
            text_color=self.text_color,
            font_family=font_family)

        # dict lookup doubles as validation: unknown types raise KeyError
        return {
            "default_user": default_user
        }[img_type]
1200
1203
    def get_img_data(self, svg_type=None):
        """
        generates the svg metadata for image

        :param svg_type: when given, delegate to get_img_data_by_type and
            return that special template instead of the initials avatar
        """

        # font stack shared by all generated avatars
        font_family = ','.join([
            'proximanovaregular',
            'Proxima Nova Regular',
            'Proxima Nova',
            'Arial',
            'Lucida Grande',
            'sans-serif'
        ])
        if svg_type:
            return self.get_img_data_by_type(font_family, svg_type)

        initials = self.get_initials()
        img_data = """
        <svg xmlns="http://www.w3.org/2000/svg" pointer-events="none"
        width="{size}" height="{size}"
        style="width: 100%; height: 100%; background-color: {background}"
        viewBox="0 0 {size} {size}">
        <text text-anchor="middle" y="50%" x="50%" dy="0.35em"
        pointer-events="auto" fill="{text_color}"
        font-family="{font_family}"
        style="font-weight: 400; font-size: {f_size}px;">{text}
        </text>
        </svg>""".format(
            size=self.size,
            f_size=self.size/1.85,  # scale the text inside the box nicely
            background=self.background,
            text_color=self.text_color,
            text=initials.upper(),
            font_family=font_family)

        return img_data
1237
1240
1238 def generate_svg(self, svg_type=None):
1241 def generate_svg(self, svg_type=None):
1239 img_data = self.get_img_data(svg_type)
1242 img_data = self.get_img_data(svg_type)
1240 return "data:image/svg+xml;base64,%s" % img_data.encode('base64')
1243 return "data:image/svg+xml;base64,%s" % img_data.encode('base64')
1241
1244
1242
1245
def initials_gravatar(email_address, first_name, last_name, size=30):
    """Return a data-URI svg avatar built from the user's initials."""
    # the anonymous/default user gets the generic silhouette instead
    svg_type = (
        'default_user' if email_address == User.DEFAULT_USER_EMAIL else None)
    generator = InitialsGravatar(email_address, first_name, last_name, size)
    return generator.generate_svg(svg_type=svg_type)
1249
1252
1250
1253
def gravatar_url(email_address, size=30, request=None):
    """
    Return an avatar URL for the given email: either the configured external
    gravatar service or a generated initials SVG data-URI.

    :param email_address: email to resolve; empty/default email falls back to
        the anonymous-user avatar
    :param size: requested avatar size in pixels
    :param request: optional (pyramid) request; when not given the current
        thread-local request is used
    """
    # honor an explicitly passed request; the previous code unconditionally
    # overwrote the parameter with the thread-local request, making it dead
    request = request or get_current_request()
    if request and hasattr(request, 'call_context'):
        _use_gravatar = request.call_context.visual.use_gravatar
        _gravatar_url = request.call_context.visual.gravatar_url
    else:
        # doh, we need to re-import those to mock it later
        from pylons import tmpl_context as c

        _use_gravatar = c.visual.use_gravatar
        _gravatar_url = c.visual.gravatar_url

    _gravatar_url = _gravatar_url or User.DEFAULT_GRAVATAR_URL

    email_address = email_address or User.DEFAULT_USER_EMAIL
    if isinstance(email_address, unicode):
        # hashlib crashes on unicode items
        email_address = safe_str(email_address)

    # empty email or default user
    if not email_address or email_address == User.DEFAULT_USER_EMAIL:
        return initials_gravatar(User.DEFAULT_USER_EMAIL, '', '', size=size)

    if _use_gravatar:
        # TODO: Disuse pyramid thread locals. Think about another solution to
        # get the host and schema here.
        tmpl = safe_str(_gravatar_url)
        tmpl = tmpl.replace('{email}', email_address)\
            .replace('{md5email}', md5_safe(email_address.lower())) \
            .replace('{netloc}', request.host)\
            .replace('{scheme}', request.scheme)\
            .replace('{size}', safe_str(size))
        return tmpl
    else:
        return initials_gravatar(email_address, '', '', size=size)
1287
1290
1288
1291
class Page(_Page):
    """
    Custom pager to match rendering style with paginator
    """

    def _get_pos(self, cur_page, max_page, items):
        """
        Compute the (leftmost, current, rightmost) page numbers for a
        navigation window of ``items`` slots around ``cur_page``.
        """
        # ``//`` keeps the intended integer semantics on both py2 and py3
        # (the previous bare ``/`` relied on py2 integer division)
        edge = (items // 2) + 1
        if cur_page <= edge:
            # near the start: extend the window to the right
            radius = max(items // 2, items - cur_page)
        elif (max_page - cur_page) < edge:
            # near the end: extend the window to the left
            radius = (items - 1) - (max_page - cur_page)
        else:
            radius = items // 2

        left = max(1, (cur_page - radius))
        right = min(max_page, cur_page + radius)
        return left, cur_page, right

    def _range(self, regexp_match):
        """
        Return range of linked pages (e.g. '1 2 [3] 4 5 6 7 8').

        Arguments:

        regexp_match
            A "re" (regular expressions) match object containing the
            radius of linked pages around the current page in
            regexp_match.group(1) as a string

        This function is supposed to be called as a callable in
        re.sub.
        """
        radius = int(regexp_match.group(1))

        # Compute the first and last page number within the radius
        # e.g. '1 .. 5 6 [7] 8 9 .. 12'
        # -> leftmost_page = 5
        # -> rightmost_page = 9
        leftmost_page, _cur, rightmost_page = self._get_pos(self.page,
                                                            self.last_page,
                                                            (radius * 2) + 1)
        nav_items = []

        # Create a link to the first page (unless we are on the first page
        # or there would be no need to insert '..' spacers)
        if self.page != self.first_page and self.first_page < leftmost_page:
            nav_items.append(self._pagerlink(self.first_page, self.first_page))

        # Insert dots if there are pages between the first page
        # and the currently displayed page range
        if leftmost_page - self.first_page > 1:
            # Wrap in a SPAN tag if nolink_attr is set
            text = '..'
            if self.dotdot_attr:
                text = HTML.span(c=text, **self.dotdot_attr)
            nav_items.append(text)

        for thispage in xrange(leftmost_page, rightmost_page + 1):
            # Hilight the current page number and do not use a link
            if thispage == self.page:
                text = '%s' % (thispage,)
                # Wrap in a SPAN tag if nolink_attr is set
                if self.curpage_attr:
                    text = HTML.span(c=text, **self.curpage_attr)
                nav_items.append(text)
            # Otherwise create just a link to that page
            else:
                text = '%s' % (thispage,)
                nav_items.append(self._pagerlink(thispage, text))

        # Insert dots if there are pages between the displayed
        # page numbers and the end of the page range
        if self.last_page - rightmost_page > 1:
            text = '..'
            # Wrap in a SPAN tag if nolink_attr is set
            if self.dotdot_attr:
                text = HTML.span(c=text, **self.dotdot_attr)
            nav_items.append(text)

        # Create a link to the very last page (unless we are on the last
        # page or there would be no need to insert '..' spacers)
        if self.page != self.last_page and rightmost_page < self.last_page:
            nav_items.append(self._pagerlink(self.last_page, self.last_page))

        ## prerender links
        #_page_link = url.current()
        #nav_items.append(literal('<link rel="prerender" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
        #nav_items.append(literal('<link rel="prefetch" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
        return self.separator.join(nav_items)

    def pager(self, format='~2~', page_param='page', partial_param='partial',
              show_if_single_page=False, separator=' ', onclick=None,
              symbol_first='<<', symbol_last='>>',
              symbol_previous='<', symbol_next='>',
              link_attr=None, curpage_attr=None, dotdot_attr=None, **kwargs):
        """
        Render the pager HTML. ``format`` may contain a '~radius~' token
        (expanded to the page-number range) and $-style variables such as
        $link_previous/$link_next, interpolated below.
        """
        # The attr dicts used to be mutable literals in the signature; use
        # None sentinels so no dict instance is shared across calls.
        if link_attr is None:
            link_attr = {'class': 'pager_link', 'rel': 'prerender'}
        if curpage_attr is None:
            curpage_attr = {'class': 'pager_curpage'}
        if dotdot_attr is None:
            dotdot_attr = {'class': 'pager_dotdot'}

        self.curpage_attr = curpage_attr
        self.separator = separator
        self.pager_kwargs = kwargs
        self.page_param = page_param
        self.partial_param = partial_param
        self.onclick = onclick
        self.link_attr = link_attr
        self.dotdot_attr = dotdot_attr

        # Don't show navigator if there is no more than one page
        if self.page_count == 0 or (self.page_count == 1 and not show_if_single_page):
            return ''

        from string import Template
        # Replace ~...~ in token format by range of pages
        result = re.sub(r'~(\d+)~', self._range, format)

        # Interpolate '%' variables
        result = Template(result).safe_substitute({
            'first_page': self.first_page,
            'last_page': self.last_page,
            'page': self.page,
            'page_count': self.page_count,
            'items_per_page': self.items_per_page,
            'first_item': self.first_item,
            'last_item': self.last_item,
            'item_count': self.item_count,
            'link_first': self.page > self.first_page and \
                    self._pagerlink(self.first_page, symbol_first) or '',
            'link_last': self.page < self.last_page and \
                    self._pagerlink(self.last_page, symbol_last) or '',
            'link_previous': self.previous_page and \
                    self._pagerlink(self.previous_page, symbol_previous) \
                    or HTML.span(symbol_previous, class_="pg-previous disabled"),
            'link_next': self.next_page and \
                    self._pagerlink(self.next_page, symbol_next) \
                    or HTML.span(symbol_next, class_="pg-next disabled")
        })

        return literal(result)
1428
1431
1429
1432
1430 #==============================================================================
1433 #==============================================================================
1431 # REPO PAGER, PAGER FOR REPOSITORY
1434 # REPO PAGER, PAGER FOR REPOSITORY
1432 #==============================================================================
1435 #==============================================================================
1433 class RepoPage(Page):
1436 class RepoPage(Page):
1434
1437
1435 def __init__(self, collection, page=1, items_per_page=20,
1438 def __init__(self, collection, page=1, items_per_page=20,
1436 item_count=None, url=None, **kwargs):
1439 item_count=None, url=None, **kwargs):
1437
1440
1438 """Create a "RepoPage" instance. special pager for paging
1441 """Create a "RepoPage" instance. special pager for paging
1439 repository
1442 repository
1440 """
1443 """
1441 self._url_generator = url
1444 self._url_generator = url
1442
1445
1443 # Safe the kwargs class-wide so they can be used in the pager() method
1446 # Safe the kwargs class-wide so they can be used in the pager() method
1444 self.kwargs = kwargs
1447 self.kwargs = kwargs
1445
1448
1446 # Save a reference to the collection
1449 # Save a reference to the collection
1447 self.original_collection = collection
1450 self.original_collection = collection
1448
1451
1449 self.collection = collection
1452 self.collection = collection
1450
1453
1451 # The self.page is the number of the current page.
1454 # The self.page is the number of the current page.
1452 # The first page has the number 1!
1455 # The first page has the number 1!
1453 try:
1456 try:
1454 self.page = int(page) # make it int() if we get it as a string
1457 self.page = int(page) # make it int() if we get it as a string
1455 except (ValueError, TypeError):
1458 except (ValueError, TypeError):
1456 self.page = 1
1459 self.page = 1
1457
1460
1458 self.items_per_page = items_per_page
1461 self.items_per_page = items_per_page
1459
1462
1460 # Unless the user tells us how many items the collections has
1463 # Unless the user tells us how many items the collections has
1461 # we calculate that ourselves.
1464 # we calculate that ourselves.
1462 if item_count is not None:
1465 if item_count is not None:
1463 self.item_count = item_count
1466 self.item_count = item_count
1464 else:
1467 else:
1465 self.item_count = len(self.collection)
1468 self.item_count = len(self.collection)
1466
1469
1467 # Compute the number of the first and last available page
1470 # Compute the number of the first and last available page
1468 if self.item_count > 0:
1471 if self.item_count > 0:
1469 self.first_page = 1
1472 self.first_page = 1
1470 self.page_count = int(math.ceil(float(self.item_count) /
1473 self.page_count = int(math.ceil(float(self.item_count) /
1471 self.items_per_page))
1474 self.items_per_page))
1472 self.last_page = self.first_page + self.page_count - 1
1475 self.last_page = self.first_page + self.page_count - 1
1473
1476
1474 # Make sure that the requested page number is the range of
1477 # Make sure that the requested page number is the range of
1475 # valid pages
1478 # valid pages
1476 if self.page > self.last_page:
1479 if self.page > self.last_page:
1477 self.page = self.last_page
1480 self.page = self.last_page
1478 elif self.page < self.first_page:
1481 elif self.page < self.first_page:
1479 self.page = self.first_page
1482 self.page = self.first_page
1480
1483
1481 # Note: the number of items on this page can be less than
1484 # Note: the number of items on this page can be less than
1482 # items_per_page if the last page is not full
1485 # items_per_page if the last page is not full
1483 self.first_item = max(0, (self.item_count) - (self.page *
1486 self.first_item = max(0, (self.item_count) - (self.page *
1484 items_per_page))
1487 items_per_page))
1485 self.last_item = ((self.item_count - 1) - items_per_page *
1488 self.last_item = ((self.item_count - 1) - items_per_page *
1486 (self.page - 1))
1489 (self.page - 1))
1487
1490
1488 self.items = list(self.collection[self.first_item:self.last_item + 1])
1491 self.items = list(self.collection[self.first_item:self.last_item + 1])
1489
1492
1490 # Links to previous and next page
1493 # Links to previous and next page
1491 if self.page > self.first_page:
1494 if self.page > self.first_page:
1492 self.previous_page = self.page - 1
1495 self.previous_page = self.page - 1
1493 else:
1496 else:
1494 self.previous_page = None
1497 self.previous_page = None
1495
1498
1496 if self.page < self.last_page:
1499 if self.page < self.last_page:
1497 self.next_page = self.page + 1
1500 self.next_page = self.page + 1
1498 else:
1501 else:
1499 self.next_page = None
1502 self.next_page = None
1500
1503
1501 # No items available
1504 # No items available
1502 else:
1505 else:
1503 self.first_page = None
1506 self.first_page = None
1504 self.page_count = 0
1507 self.page_count = 0
1505 self.last_page = None
1508 self.last_page = None
1506 self.first_item = None
1509 self.first_item = None
1507 self.last_item = None
1510 self.last_item = None
1508 self.previous_page = None
1511 self.previous_page = None
1509 self.next_page = None
1512 self.next_page = None
1510 self.items = []
1513 self.items = []
1511
1514
1512 # This is a subclass of the 'list' type. Initialise the list now.
1515 # This is a subclass of the 'list' type. Initialise the list now.
1513 list.__init__(self, reversed(self.items))
1516 list.__init__(self, reversed(self.items))
1514
1517
1515
1518
def changed_tooltip(nodes):
    """
    Generates a html string for changed nodes in commit page.
    It limits the output to 30 entries

    :param nodes: LazyNodesGenerator
    """
    if not nodes:
        return ': ' + _('No Files')

    suffix = ''
    if len(nodes) > 30:
        # cap the tooltip at 30 files; summarize the rest
        suffix = '<br/>' + _(' and %s more') % (len(nodes) - 30)
    shown = '<br/> '.join(safe_unicode(node.path) for node in nodes[:30])
    return literal(': <br/> ' + shown + suffix)
1532
1535
1533
1536
def breadcrumb_repo_link(repo):
    """
    Makes a breadcrumbs path link to repo

    ex::
        group >> subgroup >> repo

    :param repo: a Repository instance
    """
    crumbs = [
        link_to(group.name,
                route_path('repo_group_home',
                           repo_group_name=group.group_name))
        for group in repo.groups_with_parents]
    crumbs.append(
        link_to(repo.just_name,
                route_path('repo_summary', repo_name=repo.repo_name)))
    return literal(' &raquo; '.join(crumbs))
1552
1555
1553
1556
def format_byte_size_binary(file_size):
    """
    Formats file/folder sizes to standard.
    """
    # delegate to the shared formatter, always using binary (1024) units
    return format_byte_size(file_size, binary=True)
1560
1563
1561
1564
def urlify_text(text_, safe=True):
    """
    Extract urls from text and make html links out of them.

    :param text_: text to process
    :param safe: when True, wrap the result in a markup-safe ``literal``
    """
    url_pat = re.compile(r'''(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@#.&+]'''
                         '''|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)''')

    def _make_link(match_obj):
        matched_url = match_obj.groups()[0]
        return '<a href="%(url)s">%(url)s</a>' % {'url': matched_url}

    linkified = url_pat.sub(_make_link, text_)
    return literal(linkified) if safe else linkified
1579
1582
1580
1583
def urlify_commits(text_, repository):
    """
    Extract commit ids from text and make link from them.

    :param text_: text to process
    :param repository: repo name to build the URL with
    """
    from pylons import url  # doh, we need to re-import url to mock it later
    commit_pat = re.compile(r'(^|\s)([0-9a-fA-F]{12,40})($|\s)')

    def _link_commit(match_obj):
        pref, commit_id, suf = match_obj.groups()
        commit_url = url('changeset_home', repo_name=repository,
                         revision=commit_id, qualified=True)
        return ('%(pref)s<a class="%(cls)s" href="%(url)s">'
                '%(commit_id)s</a>%(suf)s') % {
            'pref': pref,
            'cls': 'revision-link',
            'url': commit_url,
            'commit_id': commit_id,
            'suf': suf,
        }

    return commit_pat.sub(_link_commit, text_)
1612
1615
1613
1616
def _process_url_func(match_obj, repo_name, uid, entry,
                      return_raw_data=False, link_format='html'):
    """
    Render one issue-tracker pattern match, either as a link in the
    requested format (html/rst/markdown) or as raw ``{id, url}`` data.
    """
    pref = ''
    if match_obj.group().startswith(' '):
        # preserve the leading whitespace consumed by the pattern
        pref = ' '

    issue_id = ''.join(match_obj.groups())

    if link_format == 'html':
        tmpl = (
            '%(pref)s<a class="%(cls)s" href="%(url)s">'
            '%(issue-prefix)s%(id-repr)s'
            '</a>')
    elif link_format == 'rst':
        tmpl = '`%(issue-prefix)s%(id-repr)s <%(url)s>`_'
    elif link_format == 'markdown':
        tmpl = '[%(issue-prefix)s%(id-repr)s](%(url)s)'
    else:
        raise ValueError('Bad link_format:{}'.format(link_format))

    (repo_name_cleaned,
     parent_group_name) = RepoGroupModel().\
        _get_group_name_and_parent(repo_name)

    # variables available for interpolation in the tracker URL template
    named_vars = {
        'id': issue_id,
        'repo': repo_name,
        'repo_name': repo_name_cleaned,
        'group_name': parent_group_name
    }
    # named regex groups extend/override the defaults
    named_vars.update(match_obj.groupdict())
    _url = string.Template(entry['url']).safe_substitute(**named_vars)

    if return_raw_data:
        return {
            'id': issue_id,
            'url': _url
        }

    return tmpl % {
        'pref': pref,
        'cls': 'issue-tracker-link',
        'url': _url,
        'id-repr': issue_id,
        'issue-prefix': entry['pref'],
        'serv': entry['url'],
    }
1663
1666
1664
1667
def process_patterns(text_string, repo_name, link_format='html'):
    """
    Run all active issue-tracker patterns over `text_string`, replacing
    matches with links in the requested `link_format`.

    :param text_string: text to process
    :param repo_name: repo name used to resolve per-repo tracker settings
        (an invalid name is tolerated and passed down unchanged)
    :param link_format: one of ``html``, ``rst``, ``markdown``
    :return: tuple of (processed text, list of ``{id, url}`` dicts)
    """
    allowed_formats = ['html', 'rst', 'markdown']
    if link_format not in allowed_formats:
        raise ValueError('Link format can be only one of:{} got {}'.format(
            allowed_formats, link_format))

    repo = None
    if repo_name:
        # Retrieving repo_name to avoid invalid repo_name to explode on
        # IssueTrackerSettingsModel but still passing invalid name further down
        repo = Repository.get_by_repo_name(repo_name, cache=True)

    settings_model = IssueTrackerSettingsModel(repo=repo)
    active_entries = settings_model.get_settings(cache=True)

    issues_data = []
    newtext = text_string

    for uid, entry in active_entries.items():
        # NOTE: pass lazy %-args to the logger instead of pre-formatting,
        # so the work only happens when DEBUG is actually enabled
        log.debug('found issue tracker entry with uid %s', uid)

        if not (entry['pat'] and entry['url']):
            log.debug('skipping due to missing data')
            continue

        log.debug('issue tracker entry: uid: `%s` PAT:%s URL:%s PREFIX:%s',
                  uid, entry['pat'], entry['url'], entry['pref'])

        try:
            pattern = re.compile(r'%s' % entry['pat'])
        except re.error:
            # a broken user-supplied pattern must not kill rendering
            log.exception(
                'issue tracker pattern: `%s` failed to compile',
                entry['pat'])
            continue

        data_func = partial(
            _process_url_func, repo_name=repo_name, entry=entry, uid=uid,
            return_raw_data=True)

        for match_obj in pattern.finditer(text_string):
            issues_data.append(data_func(match_obj))

        url_func = partial(
            _process_url_func, repo_name=repo_name, entry=entry, uid=uid,
            link_format=link_format)

        newtext = pattern.sub(url_func, newtext)
        log.debug('processed prefix:uid `%s`', uid)

    return newtext, issues_data
1716
1719
1717
1720
def urlify_commit_message(commit_text, repository=None):
    """
    Parses given text message and makes proper links.
    issues are linked to given issue-server, and rest is a commit link.

    :param commit_text: commit message to process
    :param repository: optional repo name giving the commit-link scope
    :return: markup-safe ``literal`` with the processed message
    """
    # NOTE: the original re-imported pylons ``url`` here but never used it
    # in this function (urlify_commits does its own import for mocking),
    # so the import was dropped.

    def escaper(text):
        # keep raw angle brackets from being interpreted as html tags;
        # renamed the parameter so it no longer shadows the `string` module
        return text.replace('<', '&lt;').replace('>', '&gt;')

    newtext = escaper(commit_text)

    # extract http/https links and make them real urls
    newtext = urlify_text(newtext, safe=False)

    # urlify commits - extract commit ids and make link out of them, if we
    # have the scope of repository present.
    if repository:
        newtext = urlify_commits(newtext, repository)

    # process issue tracker patterns
    newtext, issues = process_patterns(newtext, repository or '')

    return literal(newtext)
1745
1748
1746
1749
def render_binary(repo_name, file_obj):
    """
    Choose how to render a binary file.

    Images are rendered inline as an ``<img>`` tag; any other binary
    returns None (no rendering).
    """
    filename = file_obj.name

    image_patterns = ('*.png', '*.jpg', '*.ico', '*.gif')
    if any(fnmatch.fnmatch(filename, pat=p) for p in image_patterns):
        src = url('files_raw_home', repo_name=repo_name,
                  revision=file_obj.commit.raw_id, f_path=file_obj.path)
        return literal(
            '<img class="rendered-binary" alt="{}" src="{}">'.format(
                filename, src))
1760
1763
1761
1764
def renderer_from_filename(filename, exclude=None):
    """
    choose a renderer based on filename, this works only for text based files
    """
    # jupyter notebooks get their dedicated renderer
    if fnmatch.fnmatch(filename, pat='*.ipynb'):
        return 'jupyter'

    # fall back to generic markup detection (rst/markdown/...);
    # normalize any falsy result to None
    return MarkupRenderer.renderer_from_filename(
        filename, exclude=exclude) or None
1776
1779
1777
1780
def render(source, renderer='rst', mentions=False, relative_url=None,
           repo_name=None):
    """
    Render `source` with the given markup renderer.

    :param renderer: one of ``rst``, ``markdown``, ``jupyter``; anything
        else returns None, meaning "just show the file-source"
    :param mentions: enable @mention processing in rst/markdown
    :param relative_url: if set, rewrite relative links against this url
    :param repo_name: when given, issue-tracker patterns are processed too
    """

    def maybe_convert_relative_links(html_source):
        if relative_url:
            return relative_links(html_source, relative_url)
        return html_source

    if renderer == 'rst':
        if repo_name:
            # process patterns on comments if we pass in repo name
            source, issues = process_patterns(
                source, repo_name, link_format='rst')
        rendered = MarkupRenderer.rst(source, mentions=mentions)
        return literal(
            '<div class="rst-block">%s</div>' %
            maybe_convert_relative_links(rendered))

    if renderer == 'markdown':
        if repo_name:
            # process patterns on comments if we pass in repo name
            source, issues = process_patterns(
                source, repo_name, link_format='markdown')
        rendered = MarkupRenderer.markdown(
            source, flavored=True, mentions=mentions)
        return literal(
            '<div class="markdown-block">%s</div>' %
            maybe_convert_relative_links(rendered))

    if renderer == 'jupyter':
        return literal(
            '<div class="ipynb">%s</div>' %
            maybe_convert_relative_links(
                MarkupRenderer.jupyter(source)))

    # None means just show the file-source
    return None
1815
1818
1816
1819
def commit_status(repo, commit_id):
    """Return the changeset status recorded for `commit_id` in `repo`."""
    model = ChangesetStatusModel()
    return model.get_status(repo, commit_id)
1819
1822
1820
1823
def commit_status_lbl(commit_status):
    """Translate a changeset status code into its human readable label."""
    status_map = dict(ChangesetStatus.STATUSES)
    return status_map.get(commit_status)
1823
1826
1824
1827
def commit_time(repo_name, commit_id):
    """Return the date of commit `commit_id` in repository `repo_name`."""
    repo = Repository.get_by_repo_name(repo_name)
    return repo.get_commit(commit_id=commit_id).date
1829
1832
1830
1833
def get_permission_name(key):
    """Resolve a permission key into its human readable description."""
    perms_map = dict(Permission.PERMS)
    return perms_map.get(key)
1833
1836
1834
1837
def journal_filter_help():
    """Return the translated help text for journal filter queries."""
    # adjacent string literals concatenate implicitly; the resulting
    # text is identical to the original
    return _(
        'Example filter terms:\n'
        ' repository:vcs\n'
        ' username:marcin\n'
        ' username:(NOT marcin)\n'
        ' action:*push*\n'
        ' ip:127.0.0.1\n'
        ' date:20120101\n'
        ' date:[20120101100000 TO 20120102]\n'
        '\n'
        'Generate wildcards using \'*\' character:\n'
        ' "repository:vcs*" - search everything starting with \'vcs\'\n'
        ' "repository:*vcs*" - search for repository containing \'vcs\'\n'
        '\n'
        'Optional AND / OR operators in queries\n'
        ' "repository:vcs OR repository:test"\n'
        ' "username:test AND repository:test*"\n'
    )
1854
1857
1855
1858
def search_filter_help(searcher):
    """Return the translated help text for the given full-text searcher."""
    terms = ''
    help_template = _(
        'Example filter terms for `{searcher}` search:\n'
        '{terms}\n'
        'Generate wildcards using \'*\' character:\n'
        ' "repo_name:vcs*" - search everything starting with \'vcs\'\n'
        ' "repo_name:*vcs*" - search for repository containing \'vcs\'\n'
        '\n'
        'Optional AND / OR operators in queries\n'
        ' "repo_name:vcs OR repo_name:test"\n'
        ' "owner:test AND repo_name:test*"\n'
        'More: {search_doc}'
    )
    return help_template.format(
        searcher=searcher.name, terms=terms,
        search_doc=searcher.query_lang_doc)
1872
1875
1873
1876
def not_mapped_error(repo_name):
    """Flash an error explaining that `repo_name` is missing from the db."""
    msg = _('%s repository is not mapped to db perhaps'
            ' it was created or renamed from the filesystem'
            ' please run the application again'
            ' in order to rescan repositories') % repo_name
    flash(msg, category='error')
1879
1882
1880
1883
def ip_range(ip_addr):
    """Return a human readable `start - end` range string for `ip_addr`."""
    from rhodecode.model.db import UserIpMap
    start, end = UserIpMap._get_ip_range(ip_addr)
    return '%s - %s' % (start, end)
1885
1888
1886
1889
def form(url, method='post', needs_csrf_token=True, **attrs):
    """Wrapper around webhelpers.tags.form to prevent CSRF attacks."""
    # any state-changing method must carry a CSRF token unless the caller
    # explicitly opts out
    if needs_csrf_token and method.lower() != 'get':
        raise Exception(
            'Forms to POST/PUT/DELETE endpoints should have (in general) a ' +
            'CSRF token. If the endpoint does not require such token you can ' +
            'explicitly set the parameter needs_csrf_token to false.')

    return wh_form(url, method=method, **attrs)
1896
1899
1897
1900
def secure_form(url, method="POST", multipart=False, **attrs):
    """Start a form tag that points the action to an url. This
    form tag will also include the hidden field containing
    the auth token.

    The url options should be given either as a string, or as a
    ``url()`` function. The method for the form defaults to POST.

    Options:

    ``multipart``
        If set to True, the enctype is set to "multipart/form-data".
    ``method``
        The method to use when submitting the form, usually either
        "GET" or "POST". If "PUT", "DELETE", or another verb is used, a
        hidden input with name _method is added to simulate the verb
        over POST.

    """
    from webhelpers.pylonslib.secure_form import insecure_form
    form_tag = insecure_form(url, method, multipart, **attrs)
    # append the hidden CSRF token field right after the opening form tag
    return literal("%s\n%s" % (form_tag, csrf_input()))
1921
1924
def csrf_input():
    """Return a hidden input element carrying the current CSRF token."""
    tmpl = '<input type="hidden" id="{}" name="{}" value="{}">'
    return literal(
        tmpl.format(csrf_token_key, csrf_token_key, get_csrf_token()))
1926
1929
def dropdownmenu(name, selected, options, enable_filter=False, **attrs):
    """
    Render a plain select element upgraded to a select2 drop-menu.

    :param enable_filter: when False, the select2 search box is hidden
        via ``minimumResultsForSearch: -1``
    """
    select_html = select(name, selected, options, **attrs)
    select2 = """
    <script>
            $(document).ready(function() {
                $('#%s').select2({
                    containerCssClass: 'drop-menu',
                    dropdownCssClass: 'drop-menu-dropdown',
                    dropdownAutoWidth: true%s
                });
            });
    </script>
    """
    filter_option = """,
                    minimumResultsForSearch: -1
    """
    input_id = attrs.get('id') or name
    filter_enabled = "" if enable_filter else filter_option
    select_script = literal(select2 % (input_id, filter_enabled))

    return literal(select_html + select_script)
1948
1951
1949
1952
def get_visual_attr(tmpl_context_var, attr_name):
    """
    A safe way to get a variable from visual variable of template context

    :param tmpl_context_var: instance of tmpl_context, usually present as `c`
    :param attr_name: name of the attribute we fetch from the c.visual
    """
    visual = getattr(tmpl_context_var, 'visual', None)
    # a missing or falsy `visual` yields None, same as a missing attribute
    return getattr(visual, attr_name, None) if visual else None
1962
1965
1963
1966
def get_last_path_part(file_node):
    """Return the last path component of `file_node`, prefixed with `../`."""
    if not file_node.path:
        return u''

    last_part = safe_unicode(file_node.path.split('/')[-1])
    return u'../' + last_part
1970
1973
1971
1974
def route_url(*args, **kwargs):
    """
    Wrapper around pyramids `route_url` (fully qualified url) function.
    It is used to generate URLs from within pylons views or templates.
    This will be removed when pyramid migration if finished.
    """
    return get_current_request().route_url(*args, **kwargs)
1980
1983
1981
1984
def route_path(*args, **kwargs):
    """
    Wrapper around pyramids `route_path` function. It is used to generate
    URLs from within pylons views or templates. This will be removed when
    pyramid migration if finished.
    """
    return get_current_request().route_path(*args, **kwargs)
1990
1993
1991
1994
def route_path_or_none(*args, **kwargs):
    """
    Like :func:`route_path`, but swallows ``KeyError`` for unknown route
    names and returns ``None`` instead of raising.
    """
    try:
        generated = route_path(*args, **kwargs)
    except KeyError:
        # route name is not registered with the current configurator
        generated = None
    return generated
1997
2000
1998
2001
def static_url(*args, **kwds):
    """
    Wrapper around pyramids `static_url` function. It is used to generate
    URLs from within pylons views or templates. This will be removed when
    pyramid migration is finished.
    """
    # NOTE: docstring previously claimed this wrapped `route_path`;
    # the code has always delegated to `static_url` on the request.
    req = get_current_request()
    return req.static_url(*args, **kwds)
2007
2010
2008
2011
def resource_path(*args, **kwds):
    """
    Wrapper around pyramids `resource_path` function. It is used to generate
    URLs from within pylons views or templates. This will be removed when
    pyramid migration is finished.
    """
    # NOTE: docstring previously claimed this wrapped `route_path`;
    # the code has always delegated to `resource_path` on the request.
    req = get_current_request()
    return req.resource_path(*args, **kwds)
2017
2020
2018
2021
def api_call_example(method, args):
    """
    Generates an API call example via CURL
    """
    # keep key order stable so the rendered JSON example is deterministic
    payload = OrderedDict([
        ('id', 1),
        ('auth_token', 'SECRET'),
        ('method', method),
        ('args', args),
    ])
    args_json = json.dumps(payload)

    example_template = (
        "curl {api_url} -X POST -H 'content-type:text/plain' --data-binary '{data}'"
        "<br/><br/>SECRET can be found in <a href=\"{token_url}\">auth-tokens</a> page, "
        "and needs to be of `api calls` role."
    )
    rendered = example_template.format(
        api_url=route_url('apiv2'),
        token_url=route_url('my_account_auth_tokens'),
        data=args_json)
    # mark as pre-escaped HTML so the template engine does not escape it
    return literal(rendered)
General Comments 0
You need to be logged in to leave comments. Login now