comments: allow parsing the issue tracker patterns inside the markup...
marcink
r1672:a43e4e95 default
@@ -1,1961 +1,1987 b''
# -*- coding: utf-8 -*-

# Copyright (C) 2010-2017 RhodeCode GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License, version 3
# (only), as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This program is dual-licensed. If you wish to learn more about the
# RhodeCode Enterprise Edition, including its added features, Support services,
# and proprietary license terms, please see https://rhodecode.com/licenses/

"""
Helper functions

Consists of functions to typically be used within templates, but also
available to Controllers. This module is available to both as 'h'.
"""

import random
import hashlib
import StringIO
import urllib
import math
import logging
import re
import urlparse
import time
import string
import hashlib
import pygments
import itertools
import fnmatch

from datetime import datetime
from functools import partial
from pygments.formatters.html import HtmlFormatter
from pygments import highlight as code_highlight
from pygments.lexers import (
    get_lexer_by_name, get_lexer_for_filename, get_lexer_for_mimetype)
from pylons import url as pylons_url
from pylons.i18n.translation import _, ungettext
from pyramid.threadlocal import get_current_request

from webhelpers.html import literal, HTML, escape
from webhelpers.html.tools import *
from webhelpers.html.builder import make_tag
from webhelpers.html.tags import auto_discovery_link, checkbox, css_classes, \
    end_form, file, form as wh_form, hidden, image, javascript_link, link_to, \
    link_to_if, link_to_unless, ol, required_legend, select, stylesheet_link, \
    submit, text, password, textarea, title, ul, xml_declaration, radio
from webhelpers.html.tools import auto_link, button_to, highlight, \
    js_obfuscate, mail_to, strip_links, strip_tags, tag_re
from webhelpers.pylonslib import Flash as _Flash
from webhelpers.text import chop_at, collapse, convert_accented_entities, \
    convert_misc_entities, lchop, plural, rchop, remove_formatting, \
    replace_whitespace, urlify, truncate, wrap_paragraphs
from webhelpers.date import time_ago_in_words
from webhelpers.paginate import Page as _Page
from webhelpers.html.tags import _set_input_attrs, _set_id_attr, \
    convert_boolean_attrs, NotGiven, _make_safe_id_component
from webhelpers2.number import format_byte_size

from rhodecode.lib.action_parser import action_parser
from rhodecode.lib.ext_json import json
from rhodecode.lib.utils import repo_name_slug, get_custom_lexer
from rhodecode.lib.utils2 import str2bool, safe_unicode, safe_str, \
    get_commit_safe, datetime_to_time, time_to_datetime, time_to_utcdatetime, \
    AttributeDict, safe_int, md5, md5_safe
from rhodecode.lib.markup_renderer import MarkupRenderer, relative_links
from rhodecode.lib.vcs.exceptions import CommitDoesNotExistError
from rhodecode.lib.vcs.backends.base import BaseChangeset, EmptyCommit
from rhodecode.config.conf import DATE_FORMAT, DATETIME_FORMAT
from rhodecode.model.changeset_status import ChangesetStatusModel
from rhodecode.model.db import Permission, User, Repository
from rhodecode.model.repo_group import RepoGroupModel
from rhodecode.model.settings import IssueTrackerSettingsModel

log = logging.getLogger(__name__)


DEFAULT_USER = User.DEFAULT_USER
DEFAULT_USER_EMAIL = User.DEFAULT_USER_EMAIL


def url(*args, **kw):
    return pylons_url(*args, **kw)


def pylons_url_current(*args, **kw):
    """
    This function overrides pylons.url.current() which returns the current
    path so that it will also work from a pyramid only context. This
    should be removed once port to pyramid is complete.
    """
    if not args and not kw:
        request = get_current_request()
        return request.path
    return pylons_url.current(*args, **kw)

url.current = pylons_url_current


def url_replace(**qargs):
    """ Returns the current request url while replacing query string args """

    request = get_current_request()
    new_args = request.GET.mixed()
    new_args.update(qargs)
    return url('', **new_args)


def asset(path, ver=None, **kwargs):
    """
    Helper to generate a static asset file path for rhodecode assets

    eg. h.asset('images/image.png', ver='3923')

    :param path: path of asset
    :param ver: optional version query param to append as ?ver=
    """
    request = get_current_request()
    query = {}
    query.update(kwargs)
    if ver:
        query = {'ver': ver}
    return request.static_path(
        'rhodecode:public/{}'.format(path), _query=query)
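
# Note (illustrative, not part of the original module): when ``ver`` is given,
# the query dict is replaced with just ``{'ver': ver}``, so any extra
# ``kwargs`` are dropped. The resulting URL shape depends on the static view
# configuration, roughly:
#   h.asset('images/image.png', ver='3923')  # -> .../images/image.png?ver=3923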


default_html_escape_table = {
    ord('&'): u'&amp;',
    ord('<'): u'&lt;',
    ord('>'): u'&gt;',
    ord('"'): u'&quot;',
    ord("'"): u'&#39;',
}


def html_escape(text, html_escape_table=default_html_escape_table):
    """Produce entities within text."""
    return text.translate(html_escape_table)
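
# Illustrative usage (sketch): the ordinal-keyed table above is applied via
# translate(), which works on unicode input in this Python 2 codebase:
#   html_escape(u'<a href="x">') == u'&lt;a href=&quot;x&quot;&gt;'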


def chop_at_smart(s, sub, inclusive=False, suffix_if_chopped=None):
    """
    Truncate string ``s`` at the first occurrence of ``sub``.

    If ``inclusive`` is true, truncate just after ``sub`` rather than at it.
    """
    suffix_if_chopped = suffix_if_chopped or ''
    pos = s.find(sub)
    if pos == -1:
        return s

    if inclusive:
        pos += len(sub)

    chopped = s[:pos]
    left = s[pos:].strip()

    if left and suffix_if_chopped:
        chopped += suffix_if_chopped

    return chopped
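
# Illustrative examples (not from the original module):
#   chop_at_smart('plugins.template_plugin', '.', suffix_if_chopped='...')
#   -> 'plugins...'
#   chop_at_smart('no separator here', '.') -> 'no separator here'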


def shorter(text, size=20):
    postfix = '...'
    if len(text) > size:
        return text[:size - len(postfix)] + postfix
    return text
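
# Illustrative example: with the default size of 20, the postfix replaces the
# tail of longer strings, e.g.
#   shorter('a very long commit message') == 'a very long commi...'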


def _reset(name, value=None, id=NotGiven, type="reset", **attrs):
    """
    Reset button
    """
    _set_input_attrs(attrs, type, name, value)
    _set_id_attr(attrs, id, name)
    convert_boolean_attrs(attrs, ["disabled"])
    return HTML.input(**attrs)

reset = _reset
safeid = _make_safe_id_component


def branding(name, length=40):
    return truncate(name, length, indicator="")


def FID(raw_id, path):
    """
    Creates a unique ID for a filenode based on a hash of its path and commit;
    it's safe to use in urls

    :param raw_id:
    :param path:
    """

    return 'c-%s-%s' % (short_id(raw_id), md5_safe(path)[:12])
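
# Illustrative output shape (values are made up): for raw_id
# 'deadbeefcafe0123...' and path 'docs/index.rst' this yields something like
#   'c-deadbeefcafe-<first 12 hex chars of the path md5>'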


class _GetError(object):
    """Get error from form_errors, and represent it as span wrapped error
    message

    :param field_name: field to fetch errors for
    :param form_errors: form errors dict
    """

    def __call__(self, field_name, form_errors):
        tmpl = """<span class="error_msg">%s</span>"""
        if form_errors and field_name in form_errors:
            return literal(tmpl % form_errors.get(field_name))

get_error = _GetError()


class _ToolTip(object):

    def __call__(self, tooltip_title, trim_at=50):
        """
        Special function just to wrap our text into nice formatted
        autowrapped text

        :param tooltip_title:
        """
        tooltip_title = escape(tooltip_title)
        tooltip_title = tooltip_title.replace('<', '&lt;').replace('>', '&gt;')
        return tooltip_title
tooltip = _ToolTip()


def files_breadcrumbs(repo_name, commit_id, file_path):
    if isinstance(file_path, str):
        file_path = safe_unicode(file_path)

    # TODO: johbo: Is this always a url like path, or is this operating
    # system dependent?
    path_segments = file_path.split('/')

    repo_name_html = escape(repo_name)
    if len(path_segments) == 1 and path_segments[0] == '':
        url_segments = [repo_name_html]
    else:
        url_segments = [
            link_to(
                repo_name_html,
                url('files_home',
                    repo_name=repo_name,
                    revision=commit_id,
                    f_path=''),
                class_='pjax-link')]

    last_cnt = len(path_segments) - 1
    for cnt, segment in enumerate(path_segments):
        if not segment:
            continue
        segment_html = escape(segment)

        if cnt != last_cnt:
            url_segments.append(
                link_to(
                    segment_html,
                    url('files_home',
                        repo_name=repo_name,
                        revision=commit_id,
                        f_path='/'.join(path_segments[:cnt + 1])),
                    class_='pjax-link'))
        else:
            url_segments.append(segment_html)

    return literal('/'.join(url_segments))


class CodeHtmlFormatter(HtmlFormatter):
    """
    My code Html Formatter for source codes
    """

    def wrap(self, source, outfile):
        return self._wrap_div(self._wrap_pre(self._wrap_code(source)))

    def _wrap_code(self, source):
        for cnt, it in enumerate(source):
            i, t = it
            t = '<div id="L%s">%s</div>' % (cnt + 1, t)
            yield i, t

    def _wrap_tablelinenos(self, inner):
        dummyoutfile = StringIO.StringIO()
        lncount = 0
        for t, line in inner:
            if t:
                lncount += 1
            dummyoutfile.write(line)

        fl = self.linenostart
        mw = len(str(lncount + fl - 1))
        sp = self.linenospecial
        st = self.linenostep
        la = self.lineanchors
        aln = self.anchorlinenos
        nocls = self.noclasses
        if sp:
            lines = []

            for i in range(fl, fl + lncount):
                if i % st == 0:
                    if i % sp == 0:
                        if aln:
                            lines.append('<a href="#%s%d" class="special">%*d</a>' %
                                         (la, i, mw, i))
                        else:
                            lines.append('<span class="special">%*d</span>' % (mw, i))
                    else:
                        if aln:
                            lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
                        else:
                            lines.append('%*d' % (mw, i))
                else:
                    lines.append('')
            ls = '\n'.join(lines)
        else:
            lines = []
            for i in range(fl, fl + lncount):
                if i % st == 0:
                    if aln:
                        lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
                    else:
                        lines.append('%*d' % (mw, i))
                else:
                    lines.append('')
            ls = '\n'.join(lines)

        # in case you wonder about the seemingly redundant <div> here: since the
        # content in the other cell also is wrapped in a div, some browsers in
        # some configurations seem to mess up the formatting...
        if nocls:
            yield 0, ('<table class="%stable">' % self.cssclass +
                      '<tr><td><div class="linenodiv" '
                      'style="background-color: #f0f0f0; padding-right: 10px">'
                      '<pre style="line-height: 125%">' +
                      ls + '</pre></div></td><td id="hlcode" class="code">')
        else:
            yield 0, ('<table class="%stable">' % self.cssclass +
                      '<tr><td class="linenos"><div class="linenodiv"><pre>' +
                      ls + '</pre></div></td><td id="hlcode" class="code">')
        yield 0, dummyoutfile.getvalue()
        yield 0, '</td></tr></table>'


class SearchContentCodeHtmlFormatter(CodeHtmlFormatter):
    def __init__(self, **kw):
        # only show these line numbers if set
        self.only_lines = kw.pop('only_line_numbers', [])
        self.query_terms = kw.pop('query_terms', [])
        self.max_lines = kw.pop('max_lines', 5)
        self.line_context = kw.pop('line_context', 3)
        self.url = kw.pop('url', None)

        super(CodeHtmlFormatter, self).__init__(**kw)

    def _wrap_code(self, source):
        for cnt, it in enumerate(source):
            i, t = it
            t = '<pre>%s</pre>' % t
            yield i, t

    def _wrap_tablelinenos(self, inner):
        yield 0, '<table class="code-highlight %stable">' % self.cssclass

        last_shown_line_number = 0
        current_line_number = 1

        for t, line in inner:
            if not t:
                yield t, line
                continue

            if current_line_number in self.only_lines:
                if last_shown_line_number + 1 != current_line_number:
                    yield 0, '<tr>'
                    yield 0, '<td class="line">...</td>'
                    yield 0, '<td id="hlcode" class="code"></td>'
                    yield 0, '</tr>'

                yield 0, '<tr>'
                if self.url:
                    yield 0, '<td class="line"><a href="%s#L%i">%i</a></td>' % (
                        self.url, current_line_number, current_line_number)
                else:
                    yield 0, '<td class="line"><a href="">%i</a></td>' % (
                        current_line_number)
                yield 0, '<td id="hlcode" class="code">' + line + '</td>'
                yield 0, '</tr>'

                last_shown_line_number = current_line_number

            current_line_number += 1


        yield 0, '</table>'


def extract_phrases(text_query):
    """
    Extracts phrases from search term string making sure phrases
    contained in double quotes are kept together - and discarding empty values
    or fully whitespace values eg.

    'some text "a phrase" more' => ['some', 'text', 'a phrase', 'more']

    """

    in_phrase = False
    buf = ''
    phrases = []
    for char in text_query:
        if in_phrase:
            if char == '"':  # end phrase
                phrases.append(buf)
                buf = ''
                in_phrase = False
                continue
            else:
                buf += char
                continue
        else:
            if char == '"':  # start phrase
                in_phrase = True
                phrases.append(buf)
                buf = ''
                continue
            elif char == ' ':
                phrases.append(buf)
                buf = ''
                continue
            else:
                buf += char

    phrases.append(buf)
    phrases = [phrase.strip() for phrase in phrases if phrase.strip()]
    return phrases
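
# Illustrative example with a quoted phrase (sketch, consistent with the
# docstring above):
#   extract_phrases('fix "user login" bug') == ['fix', 'user login', 'bug']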


def get_matching_offsets(text, phrases):
    """
    Returns a list of string offsets in `text` that the list of `phrases` match

    >>> get_matching_offsets('some text here', ['some', 'here'])
    [(0, 4), (10, 14)]

    """
    offsets = []
    for phrase in phrases:
        for match in re.finditer(phrase, text):
            offsets.append((match.start(), match.end()))

    return offsets


def normalize_text_for_matching(x):
    """
    Replaces all non-alphanumeric characters with spaces and lowercases the
    string, useful for comparing two text strings without punctuation
    """
    return re.sub(r'[^\w]', ' ', x.lower())
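
# Illustrative example: punctuation is flattened to spaces before matching,
# e.g. normalize_text_for_matching('Some-Text') == 'some text'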


def get_matching_line_offsets(lines, terms):
    """ Return a set of `lines` indices (starting from 1) matching a
    text search query, along with `context` lines above/below matching lines

    :param lines: list of strings representing lines
    :param terms: search term string to match in lines eg. 'some text'
    :param context: number of lines above/below a matching line to add to result
    :param max_lines: cut off for lines of interest
    eg.

    text = '''
    words words words
    words words words
    some text some
    words words words
    words words words
    text here what
    '''
    get_matching_line_offsets(text, 'text', context=1)
    {3: [(5, 9)], 6: [(0, 4)]}

    """
    matching_lines = {}
    phrases = [normalize_text_for_matching(phrase)
               for phrase in extract_phrases(terms)]

    for line_index, line in enumerate(lines, start=1):
        match_offsets = get_matching_offsets(
            normalize_text_for_matching(line), phrases)
        if match_offsets:
            matching_lines[line_index] = match_offsets

    return matching_lines


def hsv_to_rgb(h, s, v):
    """ Convert hsv color values to rgb """

    if s == 0.0:
        return v, v, v
    i = int(h * 6.0)  # XXX assume int() truncates!
    f = (h * 6.0) - i
    p = v * (1.0 - s)
    q = v * (1.0 - s * f)
    t = v * (1.0 - s * (1.0 - f))
    i = i % 6
    if i == 0:
        return v, t, p
    if i == 1:
        return q, v, p
    if i == 2:
        return p, v, t
    if i == 3:
        return p, q, v
    if i == 4:
        return t, p, v
    if i == 5:
        return v, p, q
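
# Illustrative check: full saturation and value at hue 0.0 is pure red,
#   hsv_to_rgb(0.0, 1.0, 1.0) == (1.0, 0.0, 0.0)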


def unique_color_generator(n=10000, saturation=0.10, lightness=0.95):
    """
    Generator for getting n evenly distributed colors using
    hsv color and golden ratio. It always returns the same order of colors

    :param n: number of colors to generate
    :param saturation: saturation of returned colors
    :param lightness: lightness of returned colors
    :returns: RGB tuple
    """

    golden_ratio = 0.618033988749895
    h = 0.22717784590367374

    for _ in xrange(n):
        h += golden_ratio
        h %= 1
        HSV_tuple = [h, saturation, lightness]
        RGB_tuple = hsv_to_rgb(*HSV_tuple)
        yield map(lambda x: str(int(x * 256)), RGB_tuple)


def color_hasher(n=10000, saturation=0.10, lightness=0.95):
    """
    Returns a function which when called with an argument returns a unique
    color for that argument, eg.

    :param n: number of colors to generate
    :param saturation: saturation of returned colors
    :param lightness: lightness of returned colors
    :returns: css RGB string

    >>> color_hash = color_hasher()
    >>> color_hash('hello')
    'rgb(34, 12, 59)'
    >>> color_hash('hello')
    'rgb(34, 12, 59)'
    >>> color_hash('other')
    'rgb(90, 224, 159)'
    """

    color_dict = {}
    cgenerator = unique_color_generator(
        saturation=saturation, lightness=lightness)

    def get_color_string(thing):
        if thing in color_dict:
            col = color_dict[thing]
        else:
            col = color_dict[thing] = cgenerator.next()
        return "rgb(%s)" % (', '.join(col))

    return get_color_string


def get_lexer_safe(mimetype=None, filepath=None):
    """
    Tries to return a relevant pygments lexer using mimetype/filepath name,
    defaulting to plain text if none could be found
    """
    lexer = None
    try:
        if mimetype:
            lexer = get_lexer_for_mimetype(mimetype)
        if not lexer:
            lexer = get_lexer_for_filename(filepath)
    except pygments.util.ClassNotFound:
        pass

    if not lexer:
        lexer = get_lexer_by_name('text')

    return lexer
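
# Illustrative behaviour (sketch): a known mimetype resolves to its pygments
# lexer, anything unknown falls back to the plain 'text' lexer, e.g.
#   get_lexer_safe(mimetype='text/x-python')   # a Python lexer
#   get_lexer_safe(filepath='no_extension')    # falls back to TextLexer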


def get_lexer_for_filenode(filenode):
    lexer = get_custom_lexer(filenode.extension) or filenode.lexer
    return lexer


def pygmentize(filenode, **kwargs):
    """
    pygmentize function using pygments

    :param filenode:
    """
    lexer = get_lexer_for_filenode(filenode)
    return literal(code_highlight(filenode.content, lexer,
                                  CodeHtmlFormatter(**kwargs)))


def is_following_repo(repo_name, user_id):
    from rhodecode.model.scm import ScmModel
    return ScmModel().is_following_repo(repo_name, user_id)


class _Message(object):
    """A message returned by ``Flash.pop_messages()``.

    Converting the message to a string returns the message text. Instances
    also have the following attributes:

    * ``message``: the message text.
    * ``category``: the category specified when the message was created.
    """

    def __init__(self, category, message):
        self.category = category
        self.message = message

    def __str__(self):
        return self.message

    __unicode__ = __str__

    def __html__(self):
        return escape(safe_unicode(self.message))


class Flash(_Flash):

    def pop_messages(self):
        """Return all accumulated messages and delete them from the session.

        The return value is a list of ``Message`` objects.
        """
        from pylons import session

        messages = []

        # Pop the 'old' pylons flash messages. They are tuples of the form
        # (category, message)
        for cat, msg in session.pop(self.session_key, []):
            messages.append(_Message(cat, msg))

        # Pop the 'new' pyramid flash messages for each category as list
        # of strings.
        for cat in self.categories:
            for msg in session.pop_flash(queue=cat):
                messages.append(_Message(cat, msg))
        # Map messages from the default queue to the 'notice' category.
        for msg in session.pop_flash():
            messages.append(_Message('notice', msg))

        session.save()
        return messages

    def json_alerts(self):
        payloads = []
        messages = flash.pop_messages()
        if messages:
            for message in messages:
                subdata = {}
                if hasattr(message.message, 'rsplit'):
                    flash_data = message.message.rsplit('|DELIM|', 1)
                    org_message = flash_data[0]
                    if len(flash_data) > 1:
                        subdata = json.loads(flash_data[1])
                else:
                    org_message = message.message
                payloads.append({
                    'message': {
                        'message': u'{}'.format(org_message),
                        'level': message.category,
                        'force': True,
                        'subdata': subdata
                    }
                })
        return json.dumps(payloads)

flash = Flash()

#==============================================================================
# SCM FILTERS available via h.
#==============================================================================
from rhodecode.lib.vcs.utils import author_name, author_email
from rhodecode.lib.utils2 import credentials_filter, age as _age
from rhodecode.model.db import User, ChangesetStatus

age = _age
capitalize = lambda x: x.capitalize()
email = author_email
short_id = lambda x: x[:12]
hide_credentials = lambda x: ''.join(credentials_filter(x))


def age_component(datetime_iso, value=None, time_is_local=False):
    title = value or format_date(datetime_iso)
    tzinfo = '+00:00'

    # detect if we have a timezone info, otherwise, add it
    if isinstance(datetime_iso, datetime) and not datetime_iso.tzinfo:
        if time_is_local:
            tzinfo = time.strftime("+%H:%M",
                time.gmtime(
                    (datetime.now() - datetime.utcnow()).seconds + 1
                )
            )

    return literal(
        '<time class="timeago tooltip" '
        'title="{1}{2}" datetime="{0}{2}">{1}</time>'.format(
            datetime_iso, title, tzinfo))


def _shorten_commit_id(commit_id):
    from rhodecode import CONFIG
    def_len = safe_int(CONFIG.get('rhodecode_show_sha_length', 12))
    return commit_id[:def_len]


def show_id(commit):
    """
    Configurable function that shows ID
    by default it's r123:fffeeefffeee

    :param commit: commit instance
    """
    from rhodecode import CONFIG
    show_idx = str2bool(CONFIG.get('rhodecode_show_revision_number', True))

    raw_id = _shorten_commit_id(commit.raw_id)
    if show_idx:
        return 'r%s:%s' % (commit.idx, raw_id)
    else:
        return '%s' % (raw_id, )


def format_date(date):
    """
    use a standardized formatting for dates used in RhodeCode

    :param date: date/datetime object
    :return: formatted date
    """

    if date:
        _fmt = "%a, %d %b %Y %H:%M:%S"
        return safe_unicode(date.strftime(_fmt))

    return u""


class _RepoChecker(object):

    def __init__(self, backend_alias):
        self._backend_alias = backend_alias

    def __call__(self, repository):
        if hasattr(repository, 'alias'):
            _type = repository.alias
        elif hasattr(repository, 'repo_type'):
            _type = repository.repo_type
        else:
            _type = repository
        return _type == self._backend_alias

is_git = _RepoChecker('git')
is_hg = _RepoChecker('hg')
is_svn = _RepoChecker('svn')


def get_repo_type_by_name(repo_name):
    repo = Repository.get_by_repo_name(repo_name)
    return repo.repo_type


def is_svn_without_proxy(repository):
    if is_svn(repository):
        from rhodecode.model.settings import VcsSettingsModel
        conf = VcsSettingsModel().get_ui_settings_as_config_obj()
        return not str2bool(conf.get('vcs_svn_proxy', 'http_requests_enabled'))
    return False


def discover_user(author):
    """
    Tries to discover RhodeCode User based on the author string. Author string
    is typically `FirstName LastName <email@address.com>`
    """

    # if author is already an instance use it for extraction
    if isinstance(author, User):
        return author

    # Valid email in the attribute passed, see if they're in the system
    _email = author_email(author)
    if _email != '':
        user = User.get_by_email(_email, case_insensitive=True, cache=True)
        if user is not None:
            return user

    # Maybe it's a username, we try to extract it and fetch by username ?
    _author = author_name(author)
    user = User.get_by_username(_author, case_insensitive=True, cache=True)
    if user is not None:
        return user

    return None


def email_or_none(author):
    # extract email from the commit string
    _email = author_email(author)

    # If we have an email, use it, otherwise
    # see if it contains a username we can get an email from
    if _email != '':
        return _email
    else:
        user = User.get_by_username(
            author_name(author), case_insensitive=True, cache=True)

        if user is not None:
            return user.email

    # No valid email, not a valid user in the system, none!
    return None


def link_to_user(author, length=0, **kwargs):
    user = discover_user(author)
    # user can be None, but if we have it already it means we can re-use it
    # in the person() function, so we save 1 intensive-query
    if user:
        author = user

    display_person = person(author, 'username_or_name_or_email')
    if length:
        display_person = shorter(display_person, length)

    if user:
        return link_to(
            escape(display_person),
            route_path('user_profile', username=user.username),
            **kwargs)
    else:
        return escape(display_person)


def person(author, show_attr="username_and_name"):
    user = discover_user(author)
    if user:
        return getattr(user, show_attr)
    else:
        _author = author_name(author)
        _email = email(author)
        return _author or _email


def author_string(email):
    if email:
        user = User.get_by_email(email, case_insensitive=True, cache=True)
        if user:
            if user.firstname or user.lastname:
                return '%s %s &lt;%s&gt;' % (user.firstname, user.lastname, email)
            else:
                return email
        else:
            return email
    else:
        return None


def person_by_id(id_, show_attr="username_and_name"):
    # attr to return from fetched user
    person_getter = lambda usr: getattr(usr, show_attr)

    # maybe it's an ID ?
    if str(id_).isdigit() or isinstance(id_, int):
        id_ = int(id_)
        user = User.get(id_)
        if user is not None:
            return person_getter(user)
    return id_


def gravatar_with_user(author, show_disabled=False):
    from rhodecode.lib.utils import PartialRenderer
    _render = PartialRenderer('base/base.mako')
    return _render('gravatar_with_user', author, show_disabled=show_disabled)


def desc_stylize(value):
    """
    converts tags from value into html equivalent

    :param value:
    """
    if not value:
        return ''

    value = re.sub(r'\[see\ \=\>\ *([a-zA-Z0-9\/\=\?\&\ \:\/\.\-]*)\]',
                   '<div class="metatag" tag="see">see =&gt; \\1 </div>', value)
    value = re.sub(r'\[license\ \=\>\ *([a-zA-Z0-9\/\=\?\&\ \:\/\.\-]*)\]',
                   '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/\\1">\\1</a></div>', value)
    value = re.sub(r'\[(requires|recommends|conflicts|base)\ \=\>\ *([a-zA-Z0-9\-\/]*)\]',
                   '<div class="metatag" tag="\\1">\\1 =&gt; <a href="/\\2">\\2</a></div>', value)
    value = re.sub(r'\[(lang|language)\ \=\>\ *([a-zA-Z\-\/\#\+]*)\]',
                   '<div class="metatag" tag="lang">\\2</div>', value)
    value = re.sub(r'\[([a-z]+)\]',
                   '<div class="metatag" tag="\\1">\\1</div>', value)

    return value
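
# Illustrative example (sketch): a simple [tag] in a repository description
# becomes a metatag div, e.g.
#   desc_stylize('[stable] my repo') ==
#       '<div class="metatag" tag="stable">stable</div> my repo'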
944
944
945
945
946 def escaped_stylize(value):
946 def escaped_stylize(value):
947 """
947 """
948 converts tags from value into html equivalent, but escaping its value first
948 converts tags from value into html equivalent, but escaping its value first
949 """
949 """
950 if not value:
950 if not value:
951 return ''
951 return ''
952
952
953 # Using default webhelper escape method, but has to force it as a
953 # Using default webhelper escape method, but has to force it as a
954 # plain unicode instead of a markup tag to be used in regex expressions
954 # plain unicode instead of a markup tag to be used in regex expressions
955 value = unicode(escape(safe_unicode(value)))
955 value = unicode(escape(safe_unicode(value)))
956
956
957 value = re.sub(r'\[see\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]',
957 value = re.sub(r'\[see\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]',
958 '<div class="metatag" tag="see">see =&gt; \\1 </div>', value)
958 '<div class="metatag" tag="see">see =&gt; \\1 </div>', value)
959 value = re.sub(r'\[license\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]',
959 value = re.sub(r'\[license\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]',
960 '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/\\1">\\1</a></div>', value)
960 '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/\\1">\\1</a></div>', value)
961 value = re.sub(r'\[(requires|recommends|conflicts|base)\ \=\&gt;\ *([a-zA-Z0-9\-\/]*)\]',
961 value = re.sub(r'\[(requires|recommends|conflicts|base)\ \=\&gt;\ *([a-zA-Z0-9\-\/]*)\]',
962 '<div class="metatag" tag="\\1">\\1 =&gt; <a href="/\\2">\\2</a></div>', value)
962 '<div class="metatag" tag="\\1">\\1 =&gt; <a href="/\\2">\\2</a></div>', value)
963 value = re.sub(r'\[(lang|language)\ \=\&gt;\ *([a-zA-Z\-\/\#\+]*)\]',
963 value = re.sub(r'\[(lang|language)\ \=\&gt;\ *([a-zA-Z\-\/\#\+]*)\]',
964 '<div class="metatag" tag="lang">\\2</div>', value)
964 '<div class="metatag" tag="lang">\\2</div>', value)
965 value = re.sub(r'\[([a-z]+)\]',
965 value = re.sub(r'\[([a-z]+)\]',
966 '<div class="metatag" tag="\\1">\\1</div>', value)
966 '<div class="metatag" tag="\\1">\\1</div>', value)
967
967
968 return value
968 return value
969
969
970
970
971 def bool2icon(value):
971 def bool2icon(value):
972 """
972 """
973 Returns an html icon element representing the boolean value of the given
973 Returns an html icon element representing the boolean value of the given
974 input
974 input
975
975
976 :param value: given value to convert to html node
976 :param value: given value to convert to html node
977 """
977 """
978
978
979 if value: # does bool conversion
979 if value: # does bool conversion
980 return HTML.tag('i', class_="icon-true")
980 return HTML.tag('i', class_="icon-true")
981 else: # not true as bool
981 else: # not true as bool
982 return HTML.tag('i', class_="icon-false")
982 return HTML.tag('i', class_="icon-false")
983
983
984
984
985 #==============================================================================
985 #==============================================================================
986 # PERMS
986 # PERMS
987 #==============================================================================
987 #==============================================================================
988 from rhodecode.lib.auth import HasPermissionAny, HasPermissionAll, \
988 from rhodecode.lib.auth import HasPermissionAny, HasPermissionAll, \
989 HasRepoPermissionAny, HasRepoPermissionAll, HasRepoGroupPermissionAll, \
989 HasRepoPermissionAny, HasRepoPermissionAll, HasRepoGroupPermissionAll, \
990 HasRepoGroupPermissionAny, HasRepoPermissionAnyApi, get_csrf_token, \
990 HasRepoGroupPermissionAny, HasRepoPermissionAnyApi, get_csrf_token, \
991 csrf_token_key
991 csrf_token_key
992
992
993
993
994 #==============================================================================
994 #==============================================================================
995 # GRAVATAR URL
995 # GRAVATAR URL
996 #==============================================================================
996 #==============================================================================
997 class InitialsGravatar(object):
997 class InitialsGravatar(object):
998 def __init__(self, email_address, first_name, last_name, size=30,
998 def __init__(self, email_address, first_name, last_name, size=30,
999 background=None, text_color='#fff'):
999 background=None, text_color='#fff'):
1000 self.size = size
1000 self.size = size
1001 self.first_name = first_name
1001 self.first_name = first_name
1002 self.last_name = last_name
1002 self.last_name = last_name
1003 self.email_address = email_address
1003 self.email_address = email_address
1004 self.background = background or self.str2color(email_address)
1004 self.background = background or self.str2color(email_address)
1005 self.text_color = text_color
1005 self.text_color = text_color
1006
1006
1007 def get_color_bank(self):
1007 def get_color_bank(self):
1008 """
1008 """
1009 returns a predefined list of colors that gravatars can use.
1009 returns a predefined list of colors that gravatars can use.
1010 Those are randomized distinct colors that guarantee readability and
1010 Those are randomized distinct colors that guarantee readability and
1011 uniqueness.
1011 uniqueness.
1012
1012
1013 generated with: http://phrogz.net/css/distinct-colors.html
1013 generated with: http://phrogz.net/css/distinct-colors.html
1014 """
1014 """
1015 return [
1015 return [
1016 '#bf3030', '#a67f53', '#00ff00', '#5989b3', '#392040', '#d90000',
1016 '#bf3030', '#a67f53', '#00ff00', '#5989b3', '#392040', '#d90000',
1017 '#402910', '#204020', '#79baf2', '#a700b3', '#bf6060', '#7f5320',
1017 '#402910', '#204020', '#79baf2', '#a700b3', '#bf6060', '#7f5320',
1018 '#008000', '#003059', '#ee00ff', '#ff0000', '#8c4b00', '#007300',
1018 '#008000', '#003059', '#ee00ff', '#ff0000', '#8c4b00', '#007300',
1019 '#005fb3', '#de73e6', '#ff4040', '#ffaa00', '#3df255', '#203140',
1019 '#005fb3', '#de73e6', '#ff4040', '#ffaa00', '#3df255', '#203140',
1020 '#47004d', '#591616', '#664400', '#59b365', '#0d2133', '#83008c',
1020 '#47004d', '#591616', '#664400', '#59b365', '#0d2133', '#83008c',
1021 '#592d2d', '#bf9f60', '#73e682', '#1d3f73', '#73006b', '#402020',
1021 '#592d2d', '#bf9f60', '#73e682', '#1d3f73', '#73006b', '#402020',
1022 '#b2862d', '#397341', '#597db3', '#e600d6', '#a60000', '#736039',
1022 '#b2862d', '#397341', '#597db3', '#e600d6', '#a60000', '#736039',
1023 '#00b318', '#79aaf2', '#330d30', '#ff8080', '#403010', '#16591f',
1023 '#00b318', '#79aaf2', '#330d30', '#ff8080', '#403010', '#16591f',
1024 '#002459', '#8c4688', '#e50000', '#ffbf40', '#00732e', '#102340',
1024 '#002459', '#8c4688', '#e50000', '#ffbf40', '#00732e', '#102340',
1025 '#bf60ac', '#8c4646', '#cc8800', '#00a642', '#1d3473', '#b32d98',
1025 '#bf60ac', '#8c4646', '#cc8800', '#00a642', '#1d3473', '#b32d98',
1026 '#660e00', '#ffd580', '#80ffb2', '#7391e6', '#733967', '#d97b6c',
1026 '#660e00', '#ffd580', '#80ffb2', '#7391e6', '#733967', '#d97b6c',
1027 '#8c5e00', '#59b389', '#3967e6', '#590047', '#73281d', '#665200',
1027 '#8c5e00', '#59b389', '#3967e6', '#590047', '#73281d', '#665200',
1028 '#00e67a', '#2d50b3', '#8c2377', '#734139', '#b2982d', '#16593a',
1028 '#00e67a', '#2d50b3', '#8c2377', '#734139', '#b2982d', '#16593a',
1029 '#001859', '#ff00aa', '#a65e53', '#ffcc00', '#0d3321', '#2d3959',
1029 '#001859', '#ff00aa', '#a65e53', '#ffcc00', '#0d3321', '#2d3959',
1030 '#731d56', '#401610', '#4c3d00', '#468c6c', '#002ca6', '#d936a3',
1030 '#731d56', '#401610', '#4c3d00', '#468c6c', '#002ca6', '#d936a3',
1031 '#d94c36', '#403920', '#36d9a3', '#0d1733', '#592d4a', '#993626',
1031 '#d94c36', '#403920', '#36d9a3', '#0d1733', '#592d4a', '#993626',
1032 '#cca300', '#00734d', '#46598c', '#8c005e', '#7f1100', '#8c7000',
1032 '#cca300', '#00734d', '#46598c', '#8c005e', '#7f1100', '#8c7000',
1033 '#00a66f', '#7382e6', '#b32d74', '#d9896c', '#ffe680', '#1d7362',
1033 '#00a66f', '#7382e6', '#b32d74', '#d9896c', '#ffe680', '#1d7362',
1034 '#364cd9', '#73003d', '#d93a00', '#998a4d', '#59b3a1', '#5965b3',
1034 '#364cd9', '#73003d', '#d93a00', '#998a4d', '#59b3a1', '#5965b3',
1035 '#e5007a', '#73341d', '#665f00', '#00b38f', '#0018b3', '#59163a',
1035 '#e5007a', '#73341d', '#665f00', '#00b38f', '#0018b3', '#59163a',
1036 '#b2502d', '#bfb960', '#00ffcc', '#23318c', '#a6537f', '#734939',
1036 '#b2502d', '#bfb960', '#00ffcc', '#23318c', '#a6537f', '#734939',
1037 '#b2a700', '#104036', '#3d3df2', '#402031', '#e56739', '#736f39',
1037 '#b2a700', '#104036', '#3d3df2', '#402031', '#e56739', '#736f39',
1038 '#79f2ea', '#000059', '#401029', '#4c1400', '#ffee00', '#005953',
1038 '#79f2ea', '#000059', '#401029', '#4c1400', '#ffee00', '#005953',
1039 '#101040', '#990052', '#402820', '#403d10', '#00ffee', '#0000d9',
1039 '#101040', '#990052', '#402820', '#403d10', '#00ffee', '#0000d9',
1040 '#ff80c4', '#a66953', '#eeff00', '#00ccbe', '#8080ff', '#e673a1',
1040 '#ff80c4', '#a66953', '#eeff00', '#00ccbe', '#8080ff', '#e673a1',
1041 '#a62c00', '#474d00', '#1a3331', '#46468c', '#733950', '#662900',
1041 '#a62c00', '#474d00', '#1a3331', '#46468c', '#733950', '#662900',
1042 '#858c23', '#238c85', '#0f0073', '#b20047', '#d9986c', '#becc00',
1042 '#858c23', '#238c85', '#0f0073', '#b20047', '#d9986c', '#becc00',
1043 '#396f73', '#281d73', '#ff0066', '#ff6600', '#dee673', '#59adb3',
1043 '#396f73', '#281d73', '#ff0066', '#ff6600', '#dee673', '#59adb3',
1044 '#6559b3', '#590024', '#b2622d', '#98b32d', '#36ced9', '#332d59',
1044 '#6559b3', '#590024', '#b2622d', '#98b32d', '#36ced9', '#332d59',
1045 '#40001a', '#733f1d', '#526600', '#005359', '#242040', '#bf6079',
1045 '#40001a', '#733f1d', '#526600', '#005359', '#242040', '#bf6079',
1046 '#735039', '#cef23d', '#007780', '#5630bf', '#66001b', '#b24700',
1046 '#735039', '#cef23d', '#007780', '#5630bf', '#66001b', '#b24700',
1047 '#acbf60', '#1d6273', '#25008c', '#731d34', '#a67453', '#50592d',
1047 '#acbf60', '#1d6273', '#25008c', '#731d34', '#a67453', '#50592d',
1048 '#00ccff', '#6600ff', '#ff0044', '#4c1f00', '#8a994d', '#79daf2',
1048 '#00ccff', '#6600ff', '#ff0044', '#4c1f00', '#8a994d', '#79daf2',
1049 '#a173e6', '#d93662', '#402310', '#aaff00', '#2d98b3', '#8c40ff',
1049 '#a173e6', '#d93662', '#402310', '#aaff00', '#2d98b3', '#8c40ff',
1050 '#592d39', '#ff8c40', '#354020', '#103640', '#1a0040', '#331a20',
1050 '#592d39', '#ff8c40', '#354020', '#103640', '#1a0040', '#331a20',
1051 '#331400', '#334d00', '#1d5673', '#583973', '#7f0022', '#4c3626',
1051 '#331400', '#334d00', '#1d5673', '#583973', '#7f0022', '#4c3626',
1052 '#88cc00', '#36a3d9', '#3d0073', '#d9364c', '#33241a', '#698c23',
1052 '#88cc00', '#36a3d9', '#3d0073', '#d9364c', '#33241a', '#698c23',
1053 '#5995b3', '#300059', '#e57382', '#7f3300', '#366600', '#00aaff',
1053 '#5995b3', '#300059', '#e57382', '#7f3300', '#366600', '#00aaff',
1054 '#3a1659', '#733941', '#663600', '#74b32d', '#003c59', '#7f53a6',
1054 '#3a1659', '#733941', '#663600', '#74b32d', '#003c59', '#7f53a6',
1055 '#73000f', '#ff8800', '#baf279', '#79caf2', '#291040', '#a6293a',
1055 '#73000f', '#ff8800', '#baf279', '#79caf2', '#291040', '#a6293a',
1056 '#b2742d', '#587339', '#0077b3', '#632699', '#400009', '#d9a66c',
1056 '#b2742d', '#587339', '#0077b3', '#632699', '#400009', '#d9a66c',
1057 '#294010', '#2d4a59', '#aa00ff', '#4c131b', '#b25f00', '#5ce600',
1057 '#294010', '#2d4a59', '#aa00ff', '#4c131b', '#b25f00', '#5ce600',
1058 '#267399', '#a336d9', '#990014', '#664e33', '#86bf60', '#0088ff',
1058 '#267399', '#a336d9', '#990014', '#664e33', '#86bf60', '#0088ff',
1059 '#7700b3', '#593a16', '#073300', '#1d4b73', '#ac60bf', '#e59539',
1059 '#7700b3', '#593a16', '#073300', '#1d4b73', '#ac60bf', '#e59539',
1060 '#4f8c46', '#368dd9', '#5c0073'
1060 '#4f8c46', '#368dd9', '#5c0073'
1061 ]
1061 ]
1062
1062
1063 def rgb_to_hex_color(self, rgb_tuple):
1063 def rgb_to_hex_color(self, rgb_tuple):
1064 """
1064 """
1065 Converts the passed rgb_tuple to a hex color string.
1065 Converts the passed rgb_tuple to a hex color string.
1066
1066
1067 :param rgb_tuple: tuple of 3 ints representing an rgb color
1067 :param rgb_tuple: tuple of 3 ints representing an rgb color
1068 """
1068 """
1069 return '#' + ("".join(map(chr, rgb_tuple)).encode('hex'))
1069 return '#' + ("".join(map(chr, rgb_tuple)).encode('hex'))
1070
1070
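# Editor's illustrative sketch (hypothetical values, not part of the original
# changeset): each byte of the tuple is rendered as two lowercase hex digits
# via the Python 2 'hex' codec, so for example:
#
#   >>> InitialsGravatar('jane@example.com', 'Jane', 'Doe').rgb_to_hex_color((255, 0, 0))
#   '#ff0000'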
1071 def email_to_int_list(self, email_str):
1071 def email_to_int_list(self, email_str):
1072 """
1072 """
1073 Get every byte of the hex digest of the email and turn it into an integer.
1073 Get every byte of the hex digest of the email and turn it into an integer.
1074 Each value is always between 0-255
1074 Each value is always between 0-255
1075 """
1075 """
1076 digest = md5_safe(email_str.lower())
1076 digest = md5_safe(email_str.lower())
1077 return [int(digest[i * 2:i * 2 + 2], 16) for i in range(16)]
1077 return [int(digest[i * 2:i * 2 + 2], 16) for i in range(16)]
1078
1078
1079 def pick_color_bank_index(self, email_str, color_bank):
1079 def pick_color_bank_index(self, email_str, color_bank):
1080 return self.email_to_int_list(email_str)[0] % len(color_bank)
1080 return self.email_to_int_list(email_str)[0] % len(color_bank)
1081
1081
1082 def str2color(self, email_str):
1082 def str2color(self, email_str):
1083 """
1083 """
1084 Tries to map an email to a color using a stable algorithm
1084 Tries to map an email to a color using a stable algorithm
1085
1085
1086 :param email_str:
1086 :param email_str:
1087 """
1087 """
1088 color_bank = self.get_color_bank()
1088 color_bank = self.get_color_bank()
1089 # pick position (modulo its length so we always find it in the
1089 # pick position (modulo its length so we always find it in the
1090 # bank even if it's smaller than 256 values)
1090 # bank even if it's smaller than 256 values)
1091 pos = self.pick_color_bank_index(email_str, color_bank)
1091 pos = self.pick_color_bank_index(email_str, color_bank)
1092 return color_bank[pos]
1092 return color_bank[pos]
1093
1093
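# Editor's note, an illustrative sketch (hypothetical address, not part of the
# original changeset): the mapping is stable because it only depends on the md5
# of the lowercased email; the first digest byte, taken modulo the bank size,
# always selects the same entry:
#
#   bank = self.get_color_bank()
#   first_byte = self.email_to_int_list('jane.doe@example.com')[0]
#   color = bank[first_byte % len(bank)]   # same color on every call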
1094 def normalize_email(self, email_address):
1094 def normalize_email(self, email_address):
1095 import unicodedata
1095 import unicodedata
1096 # default host used to fill in the fake/missing email
1096 # default host used to fill in the fake/missing email
1097 default_host = u'localhost'
1097 default_host = u'localhost'
1098
1098
1099 if not email_address:
1099 if not email_address:
1100 email_address = u'%s@%s' % (User.DEFAULT_USER, default_host)
1100 email_address = u'%s@%s' % (User.DEFAULT_USER, default_host)
1101
1101
1102 email_address = safe_unicode(email_address)
1102 email_address = safe_unicode(email_address)
1103
1103
1104 if u'@' not in email_address:
1104 if u'@' not in email_address:
1105 email_address = u'%s@%s' % (email_address, default_host)
1105 email_address = u'%s@%s' % (email_address, default_host)
1106
1106
1107 if email_address.endswith(u'@'):
1107 if email_address.endswith(u'@'):
1108 email_address = u'%s%s' % (email_address, default_host)
1108 email_address = u'%s%s' % (email_address, default_host)
1109
1109
1110 email_address = unicodedata.normalize('NFKD', email_address)\
1110 email_address = unicodedata.normalize('NFKD', email_address)\
1111 .encode('ascii', 'ignore')
1111 .encode('ascii', 'ignore')
1112 return email_address
1112 return email_address
1113
1113
1114 def get_initials(self):
1114 def get_initials(self):
1115 """
1115 """
1116 Returns 2 letter initials calculated based on the input.
1116 Returns 2 letter initials calculated based on the input.
1117 The algorithm first uses the given email address: it takes the first letter
1117 The algorithm first uses the given email address: it takes the first letter
1118 of the part before the @, and then the first letter of the server name. In case
1118 of the part before the @, and then the first letter of the server name. In case
1119 the part before the @ is in the format `somestring.somestring2`, it replaces
1119 the part before the @ is in the format `somestring.somestring2`, it replaces
1120 the server letter with the first letter of somestring2
1120 the server letter with the first letter of somestring2
1121
1121
1122 In case the function was initialized with both first and last name, this
1122 In case the function was initialized with both first and last name, this
1123 overrides the extraction from email with the first letter of the first and
1123 overrides the extraction from email with the first letter of the first and
1124 last name. We add special logic to that functionality: in case the full name
1124 last name. We add special logic to that functionality: in case the full name
1125 is compound, like Guido Von Rossum, we use the last part of the last name
1125 is compound, like Guido Von Rossum, we use the last part of the last name
1126 (Von Rossum), picking `R`.
1126 (Von Rossum), picking `R`.
1127
1127
1128 The function also normalizes non-ascii characters to their ascii
1128 The function also normalizes non-ascii characters to their ascii
1129 representation, e.g. Ą => A
1129 representation, e.g. Ą => A
1130 """
1130 """
1131 import unicodedata
1131 import unicodedata
1132 # replace non-ascii to ascii
1132 # replace non-ascii to ascii
1133 first_name = unicodedata.normalize(
1133 first_name = unicodedata.normalize(
1134 'NFKD', safe_unicode(self.first_name)).encode('ascii', 'ignore')
1134 'NFKD', safe_unicode(self.first_name)).encode('ascii', 'ignore')
1135 last_name = unicodedata.normalize(
1135 last_name = unicodedata.normalize(
1136 'NFKD', safe_unicode(self.last_name)).encode('ascii', 'ignore')
1136 'NFKD', safe_unicode(self.last_name)).encode('ascii', 'ignore')
1137
1137
1138 # do NFKD encoding, and also make sure email has proper format
1138 # do NFKD encoding, and also make sure email has proper format
1139 email_address = self.normalize_email(self.email_address)
1139 email_address = self.normalize_email(self.email_address)
1140
1140
1141 # first push the email initials
1141 # first push the email initials
1142 prefix, server = email_address.split('@', 1)
1142 prefix, server = email_address.split('@', 1)
1143
1143
1144 # check if prefix is maybe a 'firstname.lastname' syntax
1144 # check if prefix is maybe a 'firstname.lastname' syntax
1145 _dot_split = prefix.rsplit('.', 1)
1145 _dot_split = prefix.rsplit('.', 1)
1146 if len(_dot_split) == 2:
1146 if len(_dot_split) == 2:
1147 initials = [_dot_split[0][0], _dot_split[1][0]]
1147 initials = [_dot_split[0][0], _dot_split[1][0]]
1148 else:
1148 else:
1149 initials = [prefix[0], server[0]]
1149 initials = [prefix[0], server[0]]
1150
1150
1151 # then try to replace either firstname or lastname
1151 # then try to replace either firstname or lastname
1152 fn_letter = (first_name or " ")[0].strip()
1152 fn_letter = (first_name or " ")[0].strip()
1153 ln_letter = (last_name.split(' ', 1)[-1] or " ")[0].strip()
1153 ln_letter = (last_name.split(' ', 1)[-1] or " ")[0].strip()
1154
1154
1155 if fn_letter:
1155 if fn_letter:
1156 initials[0] = fn_letter
1156 initials[0] = fn_letter
1157
1157
1158 if ln_letter:
1158 if ln_letter:
1159 initials[1] = ln_letter
1159 initials[1] = ln_letter
1160
1160
1161 return ''.join(initials).upper()
1161 return ''.join(initials).upper()
1162
1162
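# Illustrative examples (editor's sketch, hypothetical inputs, not part of the
# original changeset), matching the rules described in the docstring above:
#
#   InitialsGravatar('john.doe@example.com', '', '').get_initials()     # 'JD'
#   InitialsGravatar('john@example.com', '', '').get_initials()         # 'JE'
#   InitialsGravatar('x@y.com', 'Guido', 'Von Rossum').get_initials()   # 'GR'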
1163 def get_img_data_by_type(self, font_family, img_type):
1163 def get_img_data_by_type(self, font_family, img_type):
1164 default_user = """
1164 default_user = """
1165 <svg xmlns="http://www.w3.org/2000/svg"
1165 <svg xmlns="http://www.w3.org/2000/svg"
1166 version="1.1" x="0px" y="0px" width="{size}" height="{size}"
1166 version="1.1" x="0px" y="0px" width="{size}" height="{size}"
1167 viewBox="-15 -10 439.165 429.164"
1167 viewBox="-15 -10 439.165 429.164"
1168
1168
1169 xml:space="preserve"
1169 xml:space="preserve"
1170 style="background:{background};" >
1170 style="background:{background};" >
1171
1171
1172 <path d="M204.583,216.671c50.664,0,91.74-48.075,
1172 <path d="M204.583,216.671c50.664,0,91.74-48.075,
1173 91.74-107.378c0-82.237-41.074-107.377-91.74-107.377
1173 91.74-107.378c0-82.237-41.074-107.377-91.74-107.377
1174 c-50.668,0-91.74,25.14-91.74,107.377C112.844,
1174 c-50.668,0-91.74,25.14-91.74,107.377C112.844,
1175 168.596,153.916,216.671,
1175 168.596,153.916,216.671,
1176 204.583,216.671z" fill="{text_color}"/>
1176 204.583,216.671z" fill="{text_color}"/>
1177 <path d="M407.164,374.717L360.88,
1177 <path d="M407.164,374.717L360.88,
1178 270.454c-2.117-4.771-5.836-8.728-10.465-11.138l-71.83-37.392
1178 270.454c-2.117-4.771-5.836-8.728-10.465-11.138l-71.83-37.392
1179 c-1.584-0.823-3.502-0.663-4.926,0.415c-20.316,
1179 c-1.584-0.823-3.502-0.663-4.926,0.415c-20.316,
1180 15.366-44.203,23.488-69.076,23.488c-24.877,
1180 15.366-44.203,23.488-69.076,23.488c-24.877,
1181 0-48.762-8.122-69.078-23.488
1181 0-48.762-8.122-69.078-23.488
1182 c-1.428-1.078-3.346-1.238-4.93-0.415L58.75,
1182 c-1.428-1.078-3.346-1.238-4.93-0.415L58.75,
1183 259.316c-4.631,2.41-8.346,6.365-10.465,11.138L2.001,374.717
1183 259.316c-4.631,2.41-8.346,6.365-10.465,11.138L2.001,374.717
1184 c-3.191,7.188-2.537,15.412,1.75,22.005c4.285,
1184 c-3.191,7.188-2.537,15.412,1.75,22.005c4.285,
1185 6.592,11.537,10.526,19.4,10.526h362.861c7.863,0,15.117-3.936,
1185 6.592,11.537,10.526,19.4,10.526h362.861c7.863,0,15.117-3.936,
1186 19.402-10.527 C409.699,390.129,
1186 19.402-10.527 C409.699,390.129,
1187 410.355,381.902,407.164,374.717z" fill="{text_color}"/>
1187 410.355,381.902,407.164,374.717z" fill="{text_color}"/>
1188 </svg>""".format(
1188 </svg>""".format(
1189 size=self.size,
1189 size=self.size,
1190 background='#979797', # @grey4
1190 background='#979797', # @grey4
1191 text_color=self.text_color,
1191 text_color=self.text_color,
1192 font_family=font_family)
1192 font_family=font_family)
1193
1193
1194 return {
1194 return {
1195 "default_user": default_user
1195 "default_user": default_user
1196 }[img_type]
1196 }[img_type]
1197
1197
1198 def get_img_data(self, svg_type=None):
1198 def get_img_data(self, svg_type=None):
1199 """
1199 """
1200 generates the svg markup for the image
1200 generates the svg markup for the image
1201 """
1201 """
1202
1202
1203 font_family = ','.join([
1203 font_family = ','.join([
1204 'proximanovaregular',
1204 'proximanovaregular',
1205 'Proxima Nova Regular',
1205 'Proxima Nova Regular',
1206 'Proxima Nova',
1206 'Proxima Nova',
1207 'Arial',
1207 'Arial',
1208 'Lucida Grande',
1208 'Lucida Grande',
1209 'sans-serif'
1209 'sans-serif'
1210 ])
1210 ])
1211 if svg_type:
1211 if svg_type:
1212 return self.get_img_data_by_type(font_family, svg_type)
1212 return self.get_img_data_by_type(font_family, svg_type)
1213
1213
1214 initials = self.get_initials()
1214 initials = self.get_initials()
1215 img_data = """
1215 img_data = """
1216 <svg xmlns="http://www.w3.org/2000/svg" pointer-events="none"
1216 <svg xmlns="http://www.w3.org/2000/svg" pointer-events="none"
1217 width="{size}" height="{size}"
1217 width="{size}" height="{size}"
1218 style="width: 100%; height: 100%; background-color: {background}"
1218 style="width: 100%; height: 100%; background-color: {background}"
1219 viewBox="0 0 {size} {size}">
1219 viewBox="0 0 {size} {size}">
1220 <text text-anchor="middle" y="50%" x="50%" dy="0.35em"
1220 <text text-anchor="middle" y="50%" x="50%" dy="0.35em"
1221 pointer-events="auto" fill="{text_color}"
1221 pointer-events="auto" fill="{text_color}"
1222 font-family="{font_family}"
1222 font-family="{font_family}"
1223 style="font-weight: 400; font-size: {f_size}px;">{text}
1223 style="font-weight: 400; font-size: {f_size}px;">{text}
1224 </text>
1224 </text>
1225 </svg>""".format(
1225 </svg>""".format(
1226 size=self.size,
1226 size=self.size,
1227 f_size=self.size/1.85, # scale the text inside the box nicely
1227 f_size=self.size/1.85, # scale the text inside the box nicely
1228 background=self.background,
1228 background=self.background,
1229 text_color=self.text_color,
1229 text_color=self.text_color,
1230 text=initials.upper(),
1230 text=initials.upper(),
1231 font_family=font_family)
1231 font_family=font_family)
1232
1232
1233 return img_data
1233 return img_data
1234
1234
1235 def generate_svg(self, svg_type=None):
1235 def generate_svg(self, svg_type=None):
1236 img_data = self.get_img_data(svg_type)
1236 img_data = self.get_img_data(svg_type)
1237 return "data:image/svg+xml;base64,%s" % img_data.encode('base64')
1237 return "data:image/svg+xml;base64,%s" % img_data.encode('base64')
1238
1238
1239
1239
1240 def initials_gravatar(email_address, first_name, last_name, size=30):
1240 def initials_gravatar(email_address, first_name, last_name, size=30):
1241 svg_type = None
1241 svg_type = None
1242 if email_address == User.DEFAULT_USER_EMAIL:
1242 if email_address == User.DEFAULT_USER_EMAIL:
1243 svg_type = 'default_user'
1243 svg_type = 'default_user'
1244 klass = InitialsGravatar(email_address, first_name, last_name, size)
1244 klass = InitialsGravatar(email_address, first_name, last_name, size)
1245 return klass.generate_svg(svg_type=svg_type)
1245 return klass.generate_svg(svg_type=svg_type)
1246
1246
1247
1247
1248 def gravatar_url(email_address, size=30):
1248 def gravatar_url(email_address, size=30):
1249 # doh, we need to re-import those to mock it later
1249 # doh, we need to re-import those to mock it later
1250 from pylons import tmpl_context as c
1250 from pylons import tmpl_context as c
1251
1251
1252 _use_gravatar = c.visual.use_gravatar
1252 _use_gravatar = c.visual.use_gravatar
1253 _gravatar_url = c.visual.gravatar_url or User.DEFAULT_GRAVATAR_URL
1253 _gravatar_url = c.visual.gravatar_url or User.DEFAULT_GRAVATAR_URL
1254
1254
1255 email_address = email_address or User.DEFAULT_USER_EMAIL
1255 email_address = email_address or User.DEFAULT_USER_EMAIL
1256 if isinstance(email_address, unicode):
1256 if isinstance(email_address, unicode):
1257 # hashlib crashes on unicode items
1257 # hashlib crashes on unicode items
1258 email_address = safe_str(email_address)
1258 email_address = safe_str(email_address)
1259
1259
1260 # empty email or default user
1260 # empty email or default user
1261 if not email_address or email_address == User.DEFAULT_USER_EMAIL:
1261 if not email_address or email_address == User.DEFAULT_USER_EMAIL:
1262 return initials_gravatar(User.DEFAULT_USER_EMAIL, '', '', size=size)
1262 return initials_gravatar(User.DEFAULT_USER_EMAIL, '', '', size=size)
1263
1263
1264 if _use_gravatar:
1264 if _use_gravatar:
1265 # TODO: Disuse pyramid thread locals. Think about another solution to
1265 # TODO: Disuse pyramid thread locals. Think about another solution to
1266 # get the host and schema here.
1266 # get the host and schema here.
1267 request = get_current_request()
1267 request = get_current_request()
1268 tmpl = safe_str(_gravatar_url)
1268 tmpl = safe_str(_gravatar_url)
1269 tmpl = tmpl.replace('{email}', email_address)\
1269 tmpl = tmpl.replace('{email}', email_address)\
1270 .replace('{md5email}', md5_safe(email_address.lower())) \
1270 .replace('{md5email}', md5_safe(email_address.lower())) \
1271 .replace('{netloc}', request.host)\
1271 .replace('{netloc}', request.host)\
1272 .replace('{scheme}', request.scheme)\
1272 .replace('{scheme}', request.scheme)\
1273 .replace('{size}', safe_str(size))
1273 .replace('{size}', safe_str(size))
1274 return tmpl
1274 return tmpl
1275 else:
1275 else:
1276 return initials_gravatar(email_address, '', '', size=size)
1276 return initials_gravatar(email_address, '', '', size=size)
1277
1277
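# Editor's note, an illustrative sketch (hypothetical template, not part of the
# original changeset): with use_gravatar enabled and a gravatar_url setting such
# as
#
#   'https://secure.gravatar.com/avatar/{md5email}?s={size}'
#
# gravatar_url('jane@example.com', size=30) fills in {email}, {md5email},
# {netloc}, {scheme} and {size} from the email and current request; with
# use_gravatar disabled it falls back to the initials SVG from initials_gravatar().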
1278
1278
1279 class Page(_Page):
1279 class Page(_Page):
1280 """
1280 """
1281 Custom pager to match rendering style with paginator
1281 Custom pager to match rendering style with paginator
1282 """
1282 """
1283
1283
1284 def _get_pos(self, cur_page, max_page, items):
1284 def _get_pos(self, cur_page, max_page, items):
1285 edge = (items / 2) + 1
1285 edge = (items / 2) + 1
1286 if (cur_page <= edge):
1286 if (cur_page <= edge):
1287 radius = max(items / 2, items - cur_page)
1287 radius = max(items / 2, items - cur_page)
1288 elif (max_page - cur_page) < edge:
1288 elif (max_page - cur_page) < edge:
1289 radius = (items - 1) - (max_page - cur_page)
1289 radius = (items - 1) - (max_page - cur_page)
1290 else:
1290 else:
1291 radius = items / 2
1291 radius = items / 2
1292
1292
1293 left = max(1, (cur_page - (radius)))
1293 left = max(1, (cur_page - (radius)))
1294 right = min(max_page, cur_page + (radius))
1294 right = min(max_page, cur_page + (radius))
1295 return left, cur_page, right
1295 return left, cur_page, right
1296
1296
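# Worked example (editor's sketch, not part of the original changeset): for the
# '1 .. 5 6 [7] 8 9 .. 12' case used below in _range(), radius=2 gives
# items=(2*2)+1=5, so _get_pos(7, 12, 5) computes edge=3, radius=5/2=2
# (integer division) and returns (left, cur, right) == (5, 7, 9).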
1297 def _range(self, regexp_match):
1297 def _range(self, regexp_match):
1298 """
1298 """
1299 Return range of linked pages (e.g. '1 2 [3] 4 5 6 7 8').
1299 Return range of linked pages (e.g. '1 2 [3] 4 5 6 7 8').
1300
1300
1301 Arguments:
1301 Arguments:
1302
1302
1303 regexp_match
1303 regexp_match
1304 A "re" (regular expressions) match object containing the
1304 A "re" (regular expressions) match object containing the
1305 radius of linked pages around the current page in
1305 radius of linked pages around the current page in
1306 regexp_match.group(1) as a string
1306 regexp_match.group(1) as a string
1307
1307
1308 This function is supposed to be called as a callable in
1308 This function is supposed to be called as a callable in
1309 re.sub.
1309 re.sub.
1310
1310
1311 """
1311 """
1312 radius = int(regexp_match.group(1))
1312 radius = int(regexp_match.group(1))
1313
1313
1314 # Compute the first and last page number within the radius
1314 # Compute the first and last page number within the radius
1315 # e.g. '1 .. 5 6 [7] 8 9 .. 12'
1315 # e.g. '1 .. 5 6 [7] 8 9 .. 12'
1316 # -> leftmost_page = 5
1316 # -> leftmost_page = 5
1317 # -> rightmost_page = 9
1317 # -> rightmost_page = 9
1318 leftmost_page, _cur, rightmost_page = self._get_pos(self.page,
1318 leftmost_page, _cur, rightmost_page = self._get_pos(self.page,
1319 self.last_page,
1319 self.last_page,
1320 (radius * 2) + 1)
1320 (radius * 2) + 1)
1321 nav_items = []
1321 nav_items = []
1322
1322
1323 # Create a link to the first page (unless we are on the first page
1323 # Create a link to the first page (unless we are on the first page
1324 # or there would be no need to insert '..' spacers)
1324 # or there would be no need to insert '..' spacers)
1325 if self.page != self.first_page and self.first_page < leftmost_page:
1325 if self.page != self.first_page and self.first_page < leftmost_page:
1326 nav_items.append(self._pagerlink(self.first_page, self.first_page))
1326 nav_items.append(self._pagerlink(self.first_page, self.first_page))
1327
1327
1328 # Insert dots if there are pages between the first page
1328 # Insert dots if there are pages between the first page
1329 # and the currently displayed page range
1329 # and the currently displayed page range
1330 if leftmost_page - self.first_page > 1:
1330 if leftmost_page - self.first_page > 1:
1331 # Wrap in a SPAN tag if nolink_attr is set
1331 # Wrap in a SPAN tag if nolink_attr is set
1332 text = '..'
1332 text = '..'
1333 if self.dotdot_attr:
1333 if self.dotdot_attr:
1334 text = HTML.span(c=text, **self.dotdot_attr)
1334 text = HTML.span(c=text, **self.dotdot_attr)
1335 nav_items.append(text)
1335 nav_items.append(text)
1336
1336
1337 for thispage in xrange(leftmost_page, rightmost_page + 1):
1337 for thispage in xrange(leftmost_page, rightmost_page + 1):
1338 # Highlight the current page number and do not use a link
1338 # Highlight the current page number and do not use a link
1339 if thispage == self.page:
1339 if thispage == self.page:
1340 text = '%s' % (thispage,)
1340 text = '%s' % (thispage,)
1341 # Wrap in a SPAN tag if nolink_attr is set
1341 # Wrap in a SPAN tag if nolink_attr is set
1342 if self.curpage_attr:
1342 if self.curpage_attr:
1343 text = HTML.span(c=text, **self.curpage_attr)
1343 text = HTML.span(c=text, **self.curpage_attr)
1344 nav_items.append(text)
1344 nav_items.append(text)
1345 # Otherwise create just a link to that page
1345 # Otherwise create just a link to that page
1346 else:
1346 else:
1347 text = '%s' % (thispage,)
1347 text = '%s' % (thispage,)
1348 nav_items.append(self._pagerlink(thispage, text))
1348 nav_items.append(self._pagerlink(thispage, text))
1349
1349
1350 # Insert dots if there are pages between the displayed
1350 # Insert dots if there are pages between the displayed
1351 # page numbers and the end of the page range
1351 # page numbers and the end of the page range
1352 if self.last_page - rightmost_page > 1:
1352 if self.last_page - rightmost_page > 1:
1353 text = '..'
1353 text = '..'
1354 # Wrap in a SPAN tag if nolink_attr is set
1354 # Wrap in a SPAN tag if nolink_attr is set
1355 if self.dotdot_attr:
1355 if self.dotdot_attr:
1356 text = HTML.span(c=text, **self.dotdot_attr)
1356 text = HTML.span(c=text, **self.dotdot_attr)
1357 nav_items.append(text)
1357 nav_items.append(text)
1358
1358
1359 # Create a link to the very last page (unless we are on the last
1359 # Create a link to the very last page (unless we are on the last
1360 # page or there would be no need to insert '..' spacers)
1360 # page or there would be no need to insert '..' spacers)
1361 if self.page != self.last_page and rightmost_page < self.last_page:
1361 if self.page != self.last_page and rightmost_page < self.last_page:
1362 nav_items.append(self._pagerlink(self.last_page, self.last_page))
1362 nav_items.append(self._pagerlink(self.last_page, self.last_page))
1363
1363
1364 ## prerender links
1364 ## prerender links
1365 #_page_link = url.current()
1365 #_page_link = url.current()
1366 #nav_items.append(literal('<link rel="prerender" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
1366 #nav_items.append(literal('<link rel="prerender" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
1367 #nav_items.append(literal('<link rel="prefetch" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
1367 #nav_items.append(literal('<link rel="prefetch" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
1368 return self.separator.join(nav_items)
1368 return self.separator.join(nav_items)
1369
1369
1370 def pager(self, format='~2~', page_param='page', partial_param='partial',
1370 def pager(self, format='~2~', page_param='page', partial_param='partial',
1371 show_if_single_page=False, separator=' ', onclick=None,
1371 show_if_single_page=False, separator=' ', onclick=None,
1372 symbol_first='<<', symbol_last='>>',
1372 symbol_first='<<', symbol_last='>>',
1373 symbol_previous='<', symbol_next='>',
1373 symbol_previous='<', symbol_next='>',
1374 link_attr={'class': 'pager_link', 'rel': 'prerender'},
1374 link_attr={'class': 'pager_link', 'rel': 'prerender'},
1375 curpage_attr={'class': 'pager_curpage'},
1375 curpage_attr={'class': 'pager_curpage'},
1376 dotdot_attr={'class': 'pager_dotdot'}, **kwargs):
1376 dotdot_attr={'class': 'pager_dotdot'}, **kwargs):
1377
1377
1378 self.curpage_attr = curpage_attr
1378 self.curpage_attr = curpage_attr
1379 self.separator = separator
1379 self.separator = separator
1380 self.pager_kwargs = kwargs
1380 self.pager_kwargs = kwargs
1381 self.page_param = page_param
1381 self.page_param = page_param
1382 self.partial_param = partial_param
1382 self.partial_param = partial_param
1383 self.onclick = onclick
1383 self.onclick = onclick
1384 self.link_attr = link_attr
1384 self.link_attr = link_attr
1385 self.dotdot_attr = dotdot_attr
1385 self.dotdot_attr = dotdot_attr
1386
1386
1387 # Don't show navigator if there is no more than one page
1387 # Don't show navigator if there is no more than one page
1388 if self.page_count == 0 or (self.page_count == 1 and not show_if_single_page):
1388 if self.page_count == 0 or (self.page_count == 1 and not show_if_single_page):
1389 return ''
1389 return ''
1390
1390
1391 from string import Template
1391 from string import Template
1392 # Replace ~...~ in token format by range of pages
1392 # Replace ~...~ in token format by range of pages
1393 result = re.sub(r'~(\d+)~', self._range, format)
1393 result = re.sub(r'~(\d+)~', self._range, format)
1394
1394
1395 # Interpolate '%' variables
1395 # Interpolate '%' variables
1396 result = Template(result).safe_substitute({
1396 result = Template(result).safe_substitute({
1397 'first_page': self.first_page,
1397 'first_page': self.first_page,
1398 'last_page': self.last_page,
1398 'last_page': self.last_page,
1399 'page': self.page,
1399 'page': self.page,
1400 'page_count': self.page_count,
1400 'page_count': self.page_count,
1401 'items_per_page': self.items_per_page,
1401 'items_per_page': self.items_per_page,
1402 'first_item': self.first_item,
1402 'first_item': self.first_item,
1403 'last_item': self.last_item,
1403 'last_item': self.last_item,
1404 'item_count': self.item_count,
1404 'item_count': self.item_count,
1405 'link_first': self.page > self.first_page and \
1405 'link_first': self.page > self.first_page and \
1406 self._pagerlink(self.first_page, symbol_first) or '',
1406 self._pagerlink(self.first_page, symbol_first) or '',
1407 'link_last': self.page < self.last_page and \
1407 'link_last': self.page < self.last_page and \
1408 self._pagerlink(self.last_page, symbol_last) or '',
1408 self._pagerlink(self.last_page, symbol_last) or '',
1409 'link_previous': self.previous_page and \
1409 'link_previous': self.previous_page and \
1410 self._pagerlink(self.previous_page, symbol_previous) \
1410 self._pagerlink(self.previous_page, symbol_previous) \
1411 or HTML.span(symbol_previous, class_="pg-previous disabled"),
1411 or HTML.span(symbol_previous, class_="pg-previous disabled"),
1412 'link_next': self.next_page and \
1412 'link_next': self.next_page and \
1413 self._pagerlink(self.next_page, symbol_next) \
1413 self._pagerlink(self.next_page, symbol_next) \
1414 or HTML.span(symbol_next, class_="pg-next disabled")
1414 or HTML.span(symbol_next, class_="pg-next disabled")
1415 })
1415 })
1416
1416
1417 return literal(result)
1417 return literal(result)
1418
1418
1419
1419
1420 #==============================================================================
1420 #==============================================================================
1421 # REPO PAGER, PAGER FOR REPOSITORY
1421 # REPO PAGER, PAGER FOR REPOSITORY
1422 #==============================================================================
1422 #==============================================================================
1423 class RepoPage(Page):
1423 class RepoPage(Page):
1424
1424
1425 def __init__(self, collection, page=1, items_per_page=20,
1425 def __init__(self, collection, page=1, items_per_page=20,
1426 item_count=None, url=None, **kwargs):
1426 item_count=None, url=None, **kwargs):
1427
1427
1428 """Create a "RepoPage" instance, a special pager for paging a
1428 """Create a "RepoPage" instance, a special pager for paging a
1429 repository
1429 repository
1430 """
1430 """
1431 self._url_generator = url
1431 self._url_generator = url
1432
1432
1433 # Save the kwargs class-wide so they can be used in the pager() method
1433 # Save the kwargs class-wide so they can be used in the pager() method
1434 self.kwargs = kwargs
1434 self.kwargs = kwargs
1435
1435
1436 # Save a reference to the collection
1436 # Save a reference to the collection
1437 self.original_collection = collection
1437 self.original_collection = collection
1438
1438
1439 self.collection = collection
1439 self.collection = collection
1440
1440
1441 # The self.page is the number of the current page.
1441 # The self.page is the number of the current page.
1442 # The first page has the number 1!
1442 # The first page has the number 1!
1443 try:
1443 try:
1444 self.page = int(page) # make it int() if we get it as a string
1444 self.page = int(page) # make it int() if we get it as a string
1445 except (ValueError, TypeError):
1445 except (ValueError, TypeError):
1446 self.page = 1
1446 self.page = 1
1447
1447
1448 self.items_per_page = items_per_page
1448 self.items_per_page = items_per_page
1449
1449
1450 # Unless the user tells us how many items the collection has
1450 # Unless the user tells us how many items the collection has
1451 # we calculate that ourselves.
1451 # we calculate that ourselves.
1452 if item_count is not None:
1452 if item_count is not None:
1453 self.item_count = item_count
1453 self.item_count = item_count
1454 else:
1454 else:
1455 self.item_count = len(self.collection)
1455 self.item_count = len(self.collection)
1456
1456
1457 # Compute the number of the first and last available page
1457 # Compute the number of the first and last available page
1458 if self.item_count > 0:
1458 if self.item_count > 0:
1459 self.first_page = 1
1459 self.first_page = 1
1460 self.page_count = int(math.ceil(float(self.item_count) /
1460 self.page_count = int(math.ceil(float(self.item_count) /
1461 self.items_per_page))
1461 self.items_per_page))
1462 self.last_page = self.first_page + self.page_count - 1
1462 self.last_page = self.first_page + self.page_count - 1
1463
1463
1464 # Make sure that the requested page number is in the range of
1464 # Make sure that the requested page number is in the range of
1465 # valid pages
1465 # valid pages
1466 if self.page > self.last_page:
1466 if self.page > self.last_page:
1467 self.page = self.last_page
1467 self.page = self.last_page
1468 elif self.page < self.first_page:
1468 elif self.page < self.first_page:
1469 self.page = self.first_page
1469 self.page = self.first_page
1470
1470
1471 # Note: the number of items on this page can be less than
1471 # Note: the number of items on this page can be less than
1472 # items_per_page if the last page is not full
1472 # items_per_page if the last page is not full
1473 self.first_item = max(0, (self.item_count) - (self.page *
1473 self.first_item = max(0, (self.item_count) - (self.page *
1474 items_per_page))
1474 items_per_page))
1475 self.last_item = ((self.item_count - 1) - items_per_page *
1475 self.last_item = ((self.item_count - 1) - items_per_page *
1476 (self.page - 1))
1476 (self.page - 1))
1477
1477
1478 self.items = list(self.collection[self.first_item:self.last_item + 1])
1478 self.items = list(self.collection[self.first_item:self.last_item + 1])
1479
1479
1480 # Links to previous and next page
1480 # Links to previous and next page
1481 if self.page > self.first_page:
1481 if self.page > self.first_page:
1482 self.previous_page = self.page - 1
1482 self.previous_page = self.page - 1
1483 else:
1483 else:
1484 self.previous_page = None
1484 self.previous_page = None
1485
1485
1486 if self.page < self.last_page:
1486 if self.page < self.last_page:
1487 self.next_page = self.page + 1
1487 self.next_page = self.page + 1
1488 else:
1488 else:
1489 self.next_page = None
1489 self.next_page = None
1490
1490
1491 # No items available
1491 # No items available
1492 else:
1492 else:
1493 self.first_page = None
1493 self.first_page = None
1494 self.page_count = 0
1494 self.page_count = 0
1495 self.last_page = None
1495 self.last_page = None
1496 self.first_item = None
1496 self.first_item = None
1497 self.last_item = None
1497 self.last_item = None
1498 self.previous_page = None
1498 self.previous_page = None
1499 self.next_page = None
1499 self.next_page = None
1500 self.items = []
1500 self.items = []
1501
1501
1502 # This is a subclass of the 'list' type. Initialise the list now.
1502 # This is a subclass of the 'list' type. Initialise the list now.
1503 list.__init__(self, reversed(self.items))
1503 list.__init__(self, reversed(self.items))
1504
1504
1505
1505
1506 def changed_tooltip(nodes):
1506 def changed_tooltip(nodes):
1507 """
1507 """
1508 Generates an html string for the changed nodes on a commit page.
1508 Generates an html string for the changed nodes on a commit page.
1509 It limits the output to 30 entries
1509 It limits the output to 30 entries
1510
1510
1511 :param nodes: LazyNodesGenerator
1511 :param nodes: LazyNodesGenerator
1512 """
1512 """
1513 if nodes:
1513 if nodes:
1514 pref = ': <br/> '
1514 pref = ': <br/> '
1515 suf = ''
1515 suf = ''
1516 if len(nodes) > 30:
1516 if len(nodes) > 30:
1517 suf = '<br/>' + _(' and %s more') % (len(nodes) - 30)
1517 suf = '<br/>' + _(' and %s more') % (len(nodes) - 30)
1518 return literal(pref + '<br/> '.join([safe_unicode(x.path)
1518 return literal(pref + '<br/> '.join([safe_unicode(x.path)
1519 for x in nodes[:30]]) + suf)
1519 for x in nodes[:30]]) + suf)
1520 else:
1520 else:
1521 return ': ' + _('No Files')
1521 return ': ' + _('No Files')
1522
1522
1523
1523
1524 def breadcrumb_repo_link(repo):
1524 def breadcrumb_repo_link(repo):
1525 """
1525 """
1526 Makes a breadcrumbs path link to repo
1526 Makes a breadcrumbs path link to repo
1527
1527
1528 ex::
1528 ex::
1529 group >> subgroup >> repo
1529 group >> subgroup >> repo
1530
1530
1531 :param repo: a Repository instance
1531 :param repo: a Repository instance
1532 """
1532 """
1533
1533
1534 path = [
1534 path = [
1535 link_to(group.name, url('repo_group_home', group_name=group.group_name))
1535 link_to(group.name, url('repo_group_home', group_name=group.group_name))
1536 for group in repo.groups_with_parents
1536 for group in repo.groups_with_parents
1537 ] + [
1537 ] + [
1538 link_to(repo.just_name, url('summary_home', repo_name=repo.repo_name))
1538 link_to(repo.just_name, url('summary_home', repo_name=repo.repo_name))
1539 ]
1539 ]
1540
1540
1541 return literal(' &raquo; '.join(path))
1541 return literal(' &raquo; '.join(path))
1542
1542
1543
1543
1544 def format_byte_size_binary(file_size):
1544 def format_byte_size_binary(file_size):
1545 """
1545 """
1546 Formats file/folder sizes to a standard binary representation.
1546 Formats file/folder sizes to a standard binary representation.
1547 """
1547 """
1548 formatted_size = format_byte_size(file_size, binary=True)
1548 formatted_size = format_byte_size(file_size, binary=True)
1549 return formatted_size
1549 return formatted_size
1550
1550
1551
1551
1552 def urlify_text(text_, safe=True):
1552 def urlify_text(text_, safe=True):
1553 """
1553 """
1554 Extract urls from text and make html links out of them
1554 Extract urls from text and make html links out of them
1555
1555
1556 :param text_:
1556 :param text_:
1557 """
1557 """
1558
1558
1559 url_pat = re.compile(r'''(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@#.&+]'''
1559 url_pat = re.compile(r'''(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@#.&+]'''
1560 '''|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)''')
1560 '''|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)''')
1561
1561
1562 def url_func(match_obj):
1562 def url_func(match_obj):
1563 url_full = match_obj.groups()[0]
1563 url_full = match_obj.groups()[0]
1564 return '<a href="%(url)s">%(url)s</a>' % ({'url': url_full})
1564 return '<a href="%(url)s">%(url)s</a>' % ({'url': url_full})
1565 _newtext = url_pat.sub(url_func, text_)
1565 _newtext = url_pat.sub(url_func, text_)
1566 if safe:
1566 if safe:
1567 return literal(_newtext)
1567 return literal(_newtext)
1568 return _newtext
1568 return _newtext
1569
1569
1570
1570
1571 def urlify_commits(text_, repository):
1571 def urlify_commits(text_, repository):
1572 """
1572 """
1573 Extract commit ids from text and make links from them
1573 Extract commit ids from text and make links from them
1574
1574
1575 :param text_:
1575 :param text_:
1576 :param repository: repo name to build the URL with
1576 :param repository: repo name to build the URL with
1577 """
1577 """
1578 from pylons import url # doh, we need to re-import url to mock it later
1578 from pylons import url # doh, we need to re-import url to mock it later
1579 URL_PAT = re.compile(r'(^|\s)([0-9a-fA-F]{12,40})($|\s)')
1579 URL_PAT = re.compile(r'(^|\s)([0-9a-fA-F]{12,40})($|\s)')
1580
1580
1581 def url_func(match_obj):
1581 def url_func(match_obj):
1582 commit_id = match_obj.groups()[1]
1582 commit_id = match_obj.groups()[1]
1583 pref = match_obj.groups()[0]
1583 pref = match_obj.groups()[0]
1584 suf = match_obj.groups()[2]
1584 suf = match_obj.groups()[2]
1585
1585
1586 tmpl = (
1586 tmpl = (
1587 '%(pref)s<a class="%(cls)s" href="%(url)s">'
1587 '%(pref)s<a class="%(cls)s" href="%(url)s">'
1588 '%(commit_id)s</a>%(suf)s'
1588 '%(commit_id)s</a>%(suf)s'
1589 )
1589 )
1590 return tmpl % {
1590 return tmpl % {
1591 'pref': pref,
1591 'pref': pref,
1592 'cls': 'revision-link',
1592 'cls': 'revision-link',
1593 'url': url('changeset_home', repo_name=repository,
1593 'url': url('changeset_home', repo_name=repository,
1594 revision=commit_id, qualified=True),
1594 revision=commit_id, qualified=True),
1595 'commit_id': commit_id,
1595 'commit_id': commit_id,
1596 'suf': suf
1596 'suf': suf
1597 }
1597 }
1598
1598
1599 newtext = URL_PAT.sub(url_func, text_)
1599 newtext = URL_PAT.sub(url_func, text_)
1600
1600
1601 return newtext
1601 return newtext
1602
1602
1603
1603
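# Editor's illustrative sketch (hypothetical values, not part of the original
# changeset): a message like 'merged 1a2b3c4d5e6f into default' has its 12-40
# character hex id wrapped as
#   <a class="revision-link" href="...">1a2b3c4d5e6f</a>
# with the href built by url('changeset_home', repo_name=..., revision=...).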
1604 def _process_url_func(match_obj, repo_name, uid, entry,
1604 def _process_url_func(match_obj, repo_name, uid, entry,
1605 return_raw_data=False):
1605 return_raw_data=False, link_format='html'):
1606 pref = ''
1606 pref = ''
1607 if match_obj.group().startswith(' '):
1607 if match_obj.group().startswith(' '):
1608 pref = ' '
1608 pref = ' '
1609
1609
1610 issue_id = ''.join(match_obj.groups())
1610 issue_id = ''.join(match_obj.groups())
1611
1612 if link_format == 'html':
1611 tmpl = (
1613 tmpl = (
1612 '%(pref)s<a class="%(cls)s" href="%(url)s">'
1614 '%(pref)s<a class="%(cls)s" href="%(url)s">'
1613 '%(issue-prefix)s%(id-repr)s'
1615 '%(issue-prefix)s%(id-repr)s'
1614 '</a>')
1616 '</a>')
1617 elif link_format == 'rst':
1618 tmpl = '`%(issue-prefix)s%(id-repr)s <%(url)s>`_'
1619 elif link_format == 'markdown':
1620 tmpl = '[%(issue-prefix)s%(id-repr)s](%(url)s)'
1621 else:
1622 raise ValueError('Bad link_format:{}'.format(link_format))
1615
1623
1616 (repo_name_cleaned,
1624 (repo_name_cleaned,
1617 parent_group_name) = RepoGroupModel().\
1625 parent_group_name) = RepoGroupModel().\
1618 _get_group_name_and_parent(repo_name)
1626 _get_group_name_and_parent(repo_name)
1619
1627
1620 # variables replacement
1628 # variables replacement
1621 named_vars = {
1629 named_vars = {
1622 'id': issue_id,
1630 'id': issue_id,
1623 'repo': repo_name,
1631 'repo': repo_name,
1624 'repo_name': repo_name_cleaned,
1632 'repo_name': repo_name_cleaned,
1625 'group_name': parent_group_name
1633 'group_name': parent_group_name
1626 }
1634 }
1627 # named regex variables
1635 # named regex variables
1628 named_vars.update(match_obj.groupdict())
1636 named_vars.update(match_obj.groupdict())
1629 _url = string.Template(entry['url']).safe_substitute(**named_vars)
1637 _url = string.Template(entry['url']).safe_substitute(**named_vars)
1630
1638
1631 data = {
1639 data = {
1632 'pref': pref,
1640 'pref': pref,
1633 'cls': 'issue-tracker-link',
1641 'cls': 'issue-tracker-link',
1634 'url': _url,
1642 'url': _url,
1635 'id-repr': issue_id,
1643 'id-repr': issue_id,
1636 'issue-prefix': entry['pref'],
1644 'issue-prefix': entry['pref'],
1637 'serv': entry['url'],
1645 'serv': entry['url'],
1638 }
1646 }
1639 if return_raw_data:
1647 if return_raw_data:
1640 return {
1648 return {
1641 'id': issue_id,
1649 'id': issue_id,
1642 'url': _url
1650 'url': _url
1643 }
1651 }
1644 return tmpl % data
1652 return tmpl % data
1645
1653
1646
1654
1647 def process_patterns(text_string, repo_name, config=None):
1655 def process_patterns(text_string, repo_name, link_format='html'):
1656 allowed_formats = ['html', 'rst', 'markdown']
1657 if link_format not in allowed_formats:
1658 raise ValueError('Link format can be only one of:{} got {}'.format(
1659 allowed_formats, link_format))
1660
1648 repo = None
1661 repo = None
1649 if repo_name:
1662 if repo_name:
1650 # Retrieving repo_name to avoid invalid repo_name to explode on
1650 # Retrieve the repo to avoid an invalid repo_name exploding on
1663 # Retrieve the repo to avoid an invalid repo_name exploding on
1651 # IssueTrackerSettingsModel, but still pass the invalid name further down
1664 # IssueTrackerSettingsModel, but still pass the invalid name further down
1665 repo = Repository.get_by_repo_name(repo_name, cache=True)
1653
1666
1654 settings_model = IssueTrackerSettingsModel(repo=repo)
1667 settings_model = IssueTrackerSettingsModel(repo=repo)
1655 active_entries = settings_model.get_settings(cache=True)
1668 active_entries = settings_model.get_settings(cache=True)
1656
1669
1657 issues_data = []
1670 issues_data = []
1658 newtext = text_string
1671 newtext = text_string
1672
1659 for uid, entry in active_entries.items():
1673 for uid, entry in active_entries.items():
1660 log.debug('found issue tracker entry with uid %s' % (uid,))
1674 log.debug('found issue tracker entry with uid %s' % (uid,))
1661
1675
1662 if not (entry['pat'] and entry['url']):
1676 if not (entry['pat'] and entry['url']):
1663 log.debug('skipping due to missing data')
1677 log.debug('skipping due to missing data')
1664 continue
1678 continue
1665
1679
1666 log.debug('issue tracker entry: uid: `%s` PAT:%s URL:%s PREFIX:%s'
1680 log.debug('issue tracker entry: uid: `%s` PAT:%s URL:%s PREFIX:%s'
1667 % (uid, entry['pat'], entry['url'], entry['pref']))
1681 % (uid, entry['pat'], entry['url'], entry['pref']))
1668
1682
1669 try:
1683 try:
1670 pattern = re.compile(r'%s' % entry['pat'])
1684 pattern = re.compile(r'%s' % entry['pat'])
1671 except re.error:
1685 except re.error:
1672 log.exception(
1686 log.exception(
1673 'issue tracker pattern: `%s` failed to compile',
1687 'issue tracker pattern: `%s` failed to compile',
1674 entry['pat'])
1688 entry['pat'])
1675 continue
1689 continue
1676
1690
1677 data_func = partial(
1691 data_func = partial(
1678 _process_url_func, repo_name=repo_name, entry=entry, uid=uid,
1692 _process_url_func, repo_name=repo_name, entry=entry, uid=uid,
1679 return_raw_data=True)
1693 return_raw_data=True)
1680
1694
1681 for match_obj in pattern.finditer(text_string):
1695 for match_obj in pattern.finditer(text_string):
1682 issues_data.append(data_func(match_obj))
1696 issues_data.append(data_func(match_obj))
1683
1697
1684 url_func = partial(
1698 url_func = partial(
1685 _process_url_func, repo_name=repo_name, entry=entry, uid=uid)
1699 _process_url_func, repo_name=repo_name, entry=entry, uid=uid,
1700 link_format=link_format)
1686
1701
1687 newtext = pattern.sub(url_func, newtext)
1702 newtext = pattern.sub(url_func, newtext)
1688 log.debug('processed prefix:uid `%s`' % (uid,))
1703 log.debug('processed prefix:uid `%s`' % (uid,))
1689
1704
1690 return newtext, issues_data
1705 return newtext, issues_data
1691
1706
1692
1707
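# Editor's illustrative sketch (hypothetical issue-tracker entry, not part of
# the original changeset): given an active entry such as
#
#   entry = {'pat': r'#(?P<issue_id>\d+)',
#            'url': 'https://tracker.example.com/${repo}/issue/${issue_id}',
#            'pref': '#'}
#
# process_patterns('fixes #42', 'myrepo', link_format='html') returns the text
# with an <a class="issue-tracker-link" ...>#42</a> anchor, while
# link_format='rst' yields `#42 <url>`_ and link_format='markdown' yields
# [#42](url), matching the templates in _process_url_func().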
1693 def urlify_commit_message(commit_text, repository=None):
1708 def urlify_commit_message(commit_text, repository=None):
1694 """
1709 """
1695 Parses the given text message and makes proper links.
1710 Parses the given text message and makes proper links.
1696 Issues are linked to the given issue server, and the rest become commit links
1711 Issues are linked to the given issue server, and the rest become commit links
1697
1712
1698 :param commit_text:
1713 :param commit_text:
1699 :param repository:
1714 :param repository:
1700 """
1715 """
1701 from pylons import url # doh, we need to re-import url to mock it later
1716 from pylons import url # doh, we need to re-import url to mock it later
1702
1717
1703 def escaper(string):
1718 def escaper(string):
1704 return string.replace('<', '&lt;').replace('>', '&gt;')
1719 return string.replace('<', '&lt;').replace('>', '&gt;')
1705
1720
1706 newtext = escaper(commit_text)
1721 newtext = escaper(commit_text)
1707
1722
1708 # extract http/https links and make them real urls
1723 # extract http/https links and make them real urls
1709 newtext = urlify_text(newtext, safe=False)
1724 newtext = urlify_text(newtext, safe=False)
1710
1725
1711 # urlify commits - extract commit ids and make link out of them, if we have
1726 # urlify commits - extract commit ids and make link out of them, if we have
1712 # the scope of repository present.
1727 # the scope of repository present.
1713 if repository:
1728 if repository:
1714 newtext = urlify_commits(newtext, repository)
1729 newtext = urlify_commits(newtext, repository)
1715
1730
1716 # process issue tracker patterns
1731 # process issue tracker patterns
1717 newtext, issues = process_patterns(newtext, repository or '')
1732 newtext, issues = process_patterns(newtext, repository or '')
1718
1733
1719 return literal(newtext)
1734 return literal(newtext)
1720
1735
1721
1736
1722 def render_binary(repo_name, file_obj):
1737 def render_binary(repo_name, file_obj):
1723 """
1738 """
1724 Choose how to render a binary file
1739 Choose how to render a binary file
1725 """
1740 """
1726 filename = file_obj.name
1741 filename = file_obj.name
1727
1742
1728 # images
1743 # images
1729 for ext in ['*.png', '*.jpg', '*.ico', '*.gif']:
1744 for ext in ['*.png', '*.jpg', '*.ico', '*.gif']:
1730 if fnmatch.fnmatch(filename, pat=ext):
1745 if fnmatch.fnmatch(filename, pat=ext):
1731 alt = filename
1746 alt = filename
1732 src = url('files_raw_home', repo_name=repo_name,
1747 src = url('files_raw_home', repo_name=repo_name,
1733 revision=file_obj.commit.raw_id, f_path=file_obj.path)
1748 revision=file_obj.commit.raw_id, f_path=file_obj.path)
1734 return literal('<img class="rendered-binary" alt="{}" src="{}">'.format(alt, src))
1749 return literal('<img class="rendered-binary" alt="{}" src="{}">'.format(alt, src))
1735
1750
1736
1751
1737 def renderer_from_filename(filename, exclude=None):
1752 def renderer_from_filename(filename, exclude=None):
1738 """
1753 """
1739 choose a renderer based on filename; this works only for text-based files
1754 choose a renderer based on filename; this works only for text-based files
1740 """
1755 """
1741
1756
1742 # ipython
1757 # ipython
1743 for ext in ['*.ipynb']:
1758 for ext in ['*.ipynb']:
1744 if fnmatch.fnmatch(filename, pat=ext):
1759 if fnmatch.fnmatch(filename, pat=ext):
1745 return 'jupyter'
1760 return 'jupyter'
1746
1761
1747 is_markup = MarkupRenderer.renderer_from_filename(filename, exclude=exclude)
1762 is_markup = MarkupRenderer.renderer_from_filename(filename, exclude=exclude)
1748 if is_markup:
1763 if is_markup:
1749 return is_markup
1764 return is_markup
1750 return None
1765 return None
1751
1766
1752
1767
1753 def render(source, renderer='rst', mentions=False, relative_url=None):
1768 def render(source, renderer='rst', mentions=False, relative_url=None,
1769 repo_name=None):
1754
1770
1755 def maybe_convert_relative_links(html_source):
1771 def maybe_convert_relative_links(html_source):
1756 if relative_url:
1772 if relative_url:
1757 return relative_links(html_source, relative_url)
1773 return relative_links(html_source, relative_url)
1758 return html_source
1774 return html_source
1759
1775
1760 if renderer == 'rst':
1776 if renderer == 'rst':
1777 if repo_name:
1778 # process patterns on comments if we pass in repo name
1779 source, issues = process_patterns(
1780 source, repo_name, link_format='rst')
1781
1761 return literal(
1782 return literal(
1762 '<div class="rst-block">%s</div>' %
1783 '<div class="rst-block">%s</div>' %
1763 maybe_convert_relative_links(
1784 maybe_convert_relative_links(
1764 MarkupRenderer.rst(source, mentions=mentions)))
1785 MarkupRenderer.rst(source, mentions=mentions)))
1765 elif renderer == 'markdown':
1786 elif renderer == 'markdown':
1787 if repo_name:
1788 # process patterns on comments if we pass in repo name
1789 source, issues = process_patterns(
1790 source, repo_name, link_format='markdown')
1791
1766 return literal(
1792 return literal(
1767 '<div class="markdown-block">%s</div>' %
1793 '<div class="markdown-block">%s</div>' %
1768 maybe_convert_relative_links(
1794 maybe_convert_relative_links(
1769 MarkupRenderer.markdown(source, flavored=True,
1795 MarkupRenderer.markdown(source, flavored=True,
1770 mentions=mentions)))
1796 mentions=mentions)))
1771 elif renderer == 'jupyter':
1797 elif renderer == 'jupyter':
1772 return literal(
1798 return literal(
1773 '<div class="ipynb">%s</div>' %
1799 '<div class="ipynb">%s</div>' %
1774 maybe_convert_relative_links(
1800 maybe_convert_relative_links(
1775 MarkupRenderer.jupyter(source)))
1801 MarkupRenderer.jupyter(source)))
1776
1802
1777 # None means just show the file-source
1803 # None means just show the file-source
1778 return None
1804 return None
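The new repo_name parameter is what wires issue-tracker parsing into markup rendering: when it is given, the raw source is first passed through process_patterns() with a link_format matching the target markup, so issue references become native markdown/rst links before MarkupRenderer runs. A rough sketch of what the per-format link text could look like, based on the expected strings in the tests further down; the function below is illustrative, not the actual implementation inside process_patterns():

def format_issue_link(url, issue_id, prefix='#', link_format='html'):
    # Hypothetical helper mirroring the three link formats exercised by the tests.
    text = '%s%s' % (prefix, issue_id)
    if link_format == 'html':
        return '<a class="issue-tracker-link" href="%s">%s</a>' % (url, text)
    elif link_format == 'markdown':
        return '[%s](%s)' % (text, url)
    elif link_format == 'rst':
        return '`%s <%s>`_' % (text, url)
    raise ValueError('unknown link_format: %r' % link_format)

print(format_issue_link('http://r.io/vcs/i/42', '42', link_format='rst'))
# `#42 <http://r.io/vcs/i/42>`_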
1779
1805
1780
1806
1781 def commit_status(repo, commit_id):
1807 def commit_status(repo, commit_id):
1782 return ChangesetStatusModel().get_status(repo, commit_id)
1808 return ChangesetStatusModel().get_status(repo, commit_id)
1783
1809
1784
1810
1785 def commit_status_lbl(commit_status):
1811 def commit_status_lbl(commit_status):
1786 return dict(ChangesetStatus.STATUSES).get(commit_status)
1812 return dict(ChangesetStatus.STATUSES).get(commit_status)
1787
1813
1788
1814
1789 def commit_time(repo_name, commit_id):
1815 def commit_time(repo_name, commit_id):
1790 repo = Repository.get_by_repo_name(repo_name)
1816 repo = Repository.get_by_repo_name(repo_name)
1791 commit = repo.get_commit(commit_id=commit_id)
1817 commit = repo.get_commit(commit_id=commit_id)
1792 return commit.date
1818 return commit.date
1793
1819
1794
1820
1795 def get_permission_name(key):
1821 def get_permission_name(key):
1796 return dict(Permission.PERMS).get(key)
1822 return dict(Permission.PERMS).get(key)
1797
1823
1798
1824
1799 def journal_filter_help():
1825 def journal_filter_help():
1800 return _(
1826 return _(
1801 'Example filter terms:\n' +
1827 'Example filter terms:\n' +
1802 ' repository:vcs\n' +
1828 ' repository:vcs\n' +
1803 ' username:marcin\n' +
1829 ' username:marcin\n' +
1804 ' action:*push*\n' +
1830 ' action:*push*\n' +
1805 ' ip:127.0.0.1\n' +
1831 ' ip:127.0.0.1\n' +
1806 ' date:20120101\n' +
1832 ' date:20120101\n' +
1807 ' date:[20120101100000 TO 20120102]\n' +
1833 ' date:[20120101100000 TO 20120102]\n' +
1808 '\n' +
1834 '\n' +
1809 'Generate wildcards using \'*\' character:\n' +
1835 'Generate wildcards using \'*\' character:\n' +
1810 ' "repository:vcs*" - search everything starting with \'vcs\'\n' +
1836 ' "repository:vcs*" - search everything starting with \'vcs\'\n' +
1811 ' "repository:*vcs*" - search for repository containing \'vcs\'\n' +
1837 ' "repository:*vcs*" - search for repository containing \'vcs\'\n' +
1812 '\n' +
1838 '\n' +
1813 'Optional AND / OR operators in queries\n' +
1839 'Optional AND / OR operators in queries\n' +
1814 ' "repository:vcs OR repository:test"\n' +
1840 ' "repository:vcs OR repository:test"\n' +
1815 ' "username:test AND repository:test*"\n'
1841 ' "username:test AND repository:test*"\n'
1816 )
1842 )
1817
1843
1818
1844
1819 def not_mapped_error(repo_name):
1845 def not_mapped_error(repo_name):
1820 flash(_('%s repository is not mapped to db; perhaps'
1846 flash(_('%s repository is not mapped to db; perhaps'
1821 ' it was created or renamed from the filesystem.'
1847 ' it was created or renamed from the filesystem.'
1822 ' Please run the application again'
1848 ' Please run the application again'
1823 ' in order to rescan repositories') % repo_name, category='error')
1849 ' in order to rescan repositories') % repo_name, category='error')
1824
1850
1825
1851
1826 def ip_range(ip_addr):
1852 def ip_range(ip_addr):
1827 from rhodecode.model.db import UserIpMap
1853 from rhodecode.model.db import UserIpMap
1828 s, e = UserIpMap._get_ip_range(ip_addr)
1854 s, e = UserIpMap._get_ip_range(ip_addr)
1829 return '%s - %s' % (s, e)
1855 return '%s - %s' % (s, e)
1830
1856
1831
1857
1832 def form(url, method='post', needs_csrf_token=True, **attrs):
1858 def form(url, method='post', needs_csrf_token=True, **attrs):
1833 """Wrapper around webhelpers.tags.form to prevent CSRF attacks."""
1859 """Wrapper around webhelpers.tags.form to prevent CSRF attacks."""
1834 if method.lower() != 'get' and needs_csrf_token:
1860 if method.lower() != 'get' and needs_csrf_token:
1835 raise Exception(
1861 raise Exception(
1836 'Forms to POST/PUT/DELETE endpoints should have (in general) a ' +
1862 'Forms to POST/PUT/DELETE endpoints should have (in general) a ' +
1837 'CSRF token. If the endpoint does not require such a token you can ' +
1863 'CSRF token. If the endpoint does not require such a token you can ' +
1838 'explicitly set the parameter needs_csrf_token to false.')
1864 'explicitly set the parameter needs_csrf_token to false.')
1839
1865
1840 return wh_form(url, method=method, **attrs)
1866 return wh_form(url, method=method, **attrs)
1841
1867
1842
1868
1843 def secure_form(url, method="POST", multipart=False, **attrs):
1869 def secure_form(url, method="POST", multipart=False, **attrs):
1844 """Start a form tag that points the action to an url. This
1870 """Start a form tag that points the action to an url. This
1845 form tag will also include the hidden field containing
1871 form tag will also include the hidden field containing
1846 the auth token.
1872 the auth token.
1847
1873
1848 The url options should be given either as a string, or as a
1874 The url options should be given either as a string, or as a
1849 ``url()`` function. The method for the form defaults to POST.
1875 ``url()`` function. The method for the form defaults to POST.
1850
1876
1851 Options:
1877 Options:
1852
1878
1853 ``multipart``
1879 ``multipart``
1854 If set to True, the enctype is set to "multipart/form-data".
1880 If set to True, the enctype is set to "multipart/form-data".
1855 ``method``
1881 ``method``
1856 The method to use when submitting the form, usually either
1882 The method to use when submitting the form, usually either
1857 "GET" or "POST". If "PUT", "DELETE", or another verb is used, a
1883 "GET" or "POST". If "PUT", "DELETE", or another verb is used, a
1858 hidden input with name _method is added to simulate the verb
1884 hidden input with name _method is added to simulate the verb
1859 over POST.
1885 over POST.
1860
1886
1861 """
1887 """
1862 from webhelpers.pylonslib.secure_form import insecure_form
1888 from webhelpers.pylonslib.secure_form import insecure_form
1863 form = insecure_form(url, method, multipart, **attrs)
1889 form = insecure_form(url, method, multipart, **attrs)
1864 token = csrf_input()
1890 token = csrf_input()
1865 return literal("%s\n%s" % (form, token))
1891 return literal("%s\n%s" % (form, token))
1866
1892
1867 def csrf_input():
1893 def csrf_input():
1868 return literal(
1894 return literal(
1869 '<input type="hidden" id="{}" name="{}" value="{}">'.format(
1895 '<input type="hidden" id="{}" name="{}" value="{}">'.format(
1870 csrf_token_key, csrf_token_key, get_csrf_token()))
1896 csrf_token_key, csrf_token_key, get_csrf_token()))
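As a side note, secure_form() above is just the insecure webhelpers form tag with the hidden CSRF field from csrf_input() appended. A standalone sketch of that composition, with a made-up token and field name standing in for get_csrf_token() and csrf_token_key:

CSRF_FIELD = 'csrf_token'  # assumed field name; the real one is csrf_token_key

def fake_csrf_input(token):
    # Mirrors csrf_input() above, but takes the token explicitly instead of
    # reading it from the user session.
    return '<input type="hidden" id="{0}" name="{0}" value="{1}">'.format(
        CSRF_FIELD, token)

form_open = '<form action="/_admin/my_account" method="POST">'
print('%s\n%s' % (form_open, fake_csrf_input('0123456789abcdef')))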
1871
1897
1872 def dropdownmenu(name, selected, options, enable_filter=False, **attrs):
1898 def dropdownmenu(name, selected, options, enable_filter=False, **attrs):
1873 select_html = select(name, selected, options, **attrs)
1899 select_html = select(name, selected, options, **attrs)
1874 select2 = """
1900 select2 = """
1875 <script>
1901 <script>
1876 $(document).ready(function() {
1902 $(document).ready(function() {
1877 $('#%s').select2({
1903 $('#%s').select2({
1878 containerCssClass: 'drop-menu',
1904 containerCssClass: 'drop-menu',
1879 dropdownCssClass: 'drop-menu-dropdown',
1905 dropdownCssClass: 'drop-menu-dropdown',
1880 dropdownAutoWidth: true%s
1906 dropdownAutoWidth: true%s
1881 });
1907 });
1882 });
1908 });
1883 </script>
1909 </script>
1884 """
1910 """
1885 filter_option = """,
1911 filter_option = """,
1886 minimumResultsForSearch: -1
1912 minimumResultsForSearch: -1
1887 """
1913 """
1888 input_id = attrs.get('id') or name
1914 input_id = attrs.get('id') or name
1889 filter_enabled = "" if enable_filter else filter_option
1915 filter_enabled = "" if enable_filter else filter_option
1890 select_script = literal(select2 % (input_id, filter_enabled))
1916 select_script = literal(select2 % (input_id, filter_enabled))
1891
1917
1892 return literal(select_html+select_script)
1918 return literal(select_html+select_script)
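A hedged usage sketch of dropdownmenu(); the field name and options are made up, and the (value, label) tuples follow the convention of webhelpers' select():

from rhodecode.lib import helpers as h

html = h.dropdownmenu(
    'diffmode', 'sideside',
    [('unified', 'Unified diff'), ('sideside', 'Side by side')],
    enable_filter=False)
# `html` contains the <select> markup plus the select2 initialization script;
# with enable_filter=False the search box is suppressed via minimumResultsForSearch.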
1893
1919
1894
1920
1895 def get_visual_attr(tmpl_context_var, attr_name):
1921 def get_visual_attr(tmpl_context_var, attr_name):
1896 """
1922 """
1897 A safe way to get a variable from visual variable of template context
1923 A safe way to get a variable from visual variable of template context
1898
1924
1899 :param tmpl_context_var: instance of tmpl_context, usually present as `c`
1925 :param tmpl_context_var: instance of tmpl_context, usually present as `c`
1900 :param attr_name: name of the attribute we fetch from the c.visual
1926 :param attr_name: name of the attribute we fetch from the c.visual
1901 """
1927 """
1902 visual = getattr(tmpl_context_var, 'visual', None)
1928 visual = getattr(tmpl_context_var, 'visual', None)
1903 if not visual:
1929 if not visual:
1904 return
1930 return
1905 else:
1931 else:
1906 return getattr(visual, attr_name, None)
1932 return getattr(visual, attr_name, None)
1907
1933
1908
1934
1909 def get_last_path_part(file_node):
1935 def get_last_path_part(file_node):
1910 if not file_node.path:
1936 if not file_node.path:
1911 return u''
1937 return u''
1912
1938
1913 path = safe_unicode(file_node.path.split('/')[-1])
1939 path = safe_unicode(file_node.path.split('/')[-1])
1914 return u'../' + path
1940 return u'../' + path
1915
1941
1916
1942
1917 def route_url(*args, **kwds):
1943 def route_url(*args, **kwds):
1918 """
1944 """
1919 Wrapper around pyramid's `route_url` (fully qualified URL) function.
1945 Wrapper around pyramid's `route_url` (fully qualified URL) function.
1920 It is used to generate URLs from within pylons views or templates.
1946 It is used to generate URLs from within pylons views or templates.
1921 This will be removed when the pyramid migration is finished.
1947 This will be removed when the pyramid migration is finished.
1922 """
1948 """
1923 req = get_current_request()
1949 req = get_current_request()
1924 return req.route_url(*args, **kwds)
1950 return req.route_url(*args, **kwds)
1925
1951
1926
1952
1927 def route_path(*args, **kwds):
1953 def route_path(*args, **kwds):
1928 """
1954 """
1929 Wrapper around pyramid's `route_path` function. It is used to generate
1955 Wrapper around pyramid's `route_path` function. It is used to generate
1930 URLs from within pylons views or templates. This will be removed when
1956 URLs from within pylons views or templates. This will be removed when
1931 the pyramid migration is finished.
1957 the pyramid migration is finished.
1932 """
1958 """
1933 req = get_current_request()
1959 req = get_current_request()
1934 return req.route_path(*args, **kwds)
1960 return req.route_path(*args, **kwds)
1935
1961
1936
1962
1937 def route_path_or_none(*args, **kwargs):
1963 def route_path_or_none(*args, **kwargs):
1938 try:
1964 try:
1939 return route_path(*args, **kwargs)
1965 return route_path(*args, **kwargs)
1940 except KeyError:
1966 except KeyError:
1941 return None
1967 return None
1942
1968
1943
1969
1944 def static_url(*args, **kwds):
1970 def static_url(*args, **kwds):
1945 """
1971 """
1946 Wrapper around pyramid's `static_url` function. It is used to generate
1972 Wrapper around pyramid's `static_url` function. It is used to generate
1947 URLs from within pylons views or templates. This will be removed when
1973 URLs from within pylons views or templates. This will be removed when
1948 the pyramid migration is finished.
1974 the pyramid migration is finished.
1949 """
1975 """
1950 req = get_current_request()
1976 req = get_current_request()
1951 return req.static_url(*args, **kwds)
1977 return req.static_url(*args, **kwds)
1952
1978
1953
1979
1954 def resource_path(*args, **kwds):
1980 def resource_path(*args, **kwds):
1955 """
1981 """
1956 Wrapper around pyramid's `resource_path` function. It is used to generate
1982 Wrapper around pyramid's `resource_path` function. It is used to generate
1957 URLs from within pylons views or templates. This will be removed when
1983 URLs from within pylons views or templates. This will be removed when
1958 the pyramid migration is finished.
1984 the pyramid migration is finished.
1959 """
1985 """
1960 req = get_current_request()
1986 req = get_current_request()
1961 return req.resource_path(*args, **kwds)
1987 return req.resource_path(*args, **kwds)
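To make the intent of these thin pyramid wrappers concrete, a usage sketch; the route name and static asset path below are hypothetical and depend on the application's route configuration:

from rhodecode.lib import helpers as h

summary_url = h.route_url('repo_summary', repo_name='vcs-test')    # absolute URL
summary_path = h.route_path('repo_summary', repo_name='vcs-test')  # path only
maybe = h.route_path_or_none('route_that_may_not_exist')           # None instead of KeyError
logo = h.static_url('rhodecode:public/images/rhodecode-logo.png')  # assumed asset spec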
@@ -1,244 +1,251 b''
1 # -*- coding: utf-8 -*-
1 # -*- coding: utf-8 -*-
2
2
3 # Copyright (C) 2010-2017 RhodeCode GmbH
3 # Copyright (C) 2010-2017 RhodeCode GmbH
4 #
4 #
5 # This program is free software: you can redistribute it and/or modify
5 # This program is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU Affero General Public License, version 3
6 # it under the terms of the GNU Affero General Public License, version 3
7 # (only), as published by the Free Software Foundation.
7 # (only), as published by the Free Software Foundation.
8 #
8 #
9 # This program is distributed in the hope that it will be useful,
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU General Public License for more details.
12 # GNU General Public License for more details.
13 #
13 #
14 # You should have received a copy of the GNU Affero General Public License
14 # You should have received a copy of the GNU Affero General Public License
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 #
16 #
17 # This program is dual-licensed. If you wish to learn more about the
17 # This program is dual-licensed. If you wish to learn more about the
18 # RhodeCode Enterprise Edition, including its added features, Support services,
18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 # and proprietary license terms, please see https://rhodecode.com/licenses/
19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20
20
21 import copy
21 import copy
22 import mock
22 import mock
23 import pytest
23 import pytest
24
24
25 from pylons.util import ContextObj
25 from pylons.util import ContextObj
26
26
27 from rhodecode.lib import helpers
27 from rhodecode.lib import helpers
28 from rhodecode.lib.utils2 import AttributeDict
28 from rhodecode.lib.utils2 import AttributeDict
29 from rhodecode.model.settings import IssueTrackerSettingsModel
29 from rhodecode.model.settings import IssueTrackerSettingsModel
30
30
31
31
32 @pytest.mark.parametrize('url, expected_url', [
32 @pytest.mark.parametrize('url, expected_url', [
33 ('http://rc.rc/test', '<a href="http://rc.rc/test">http://rc.rc/test</a>'),
33 ('http://rc.rc/test', '<a href="http://rc.rc/test">http://rc.rc/test</a>'),
34 ('http://rc.rc/@foo', '<a href="http://rc.rc/@foo">http://rc.rc/@foo</a>'),
34 ('http://rc.rc/@foo', '<a href="http://rc.rc/@foo">http://rc.rc/@foo</a>'),
35 ('http://rc.rc/!foo', '<a href="http://rc.rc/!foo">http://rc.rc/!foo</a>'),
35 ('http://rc.rc/!foo', '<a href="http://rc.rc/!foo">http://rc.rc/!foo</a>'),
36 ('http://rc.rc/&foo', '<a href="http://rc.rc/&foo">http://rc.rc/&foo</a>'),
36 ('http://rc.rc/&foo', '<a href="http://rc.rc/&foo">http://rc.rc/&foo</a>'),
37 ('http://rc.rc/#foo', '<a href="http://rc.rc/#foo">http://rc.rc/#foo</a>'),
37 ('http://rc.rc/#foo', '<a href="http://rc.rc/#foo">http://rc.rc/#foo</a>'),
38 ])
38 ])
39 def test_urlify_text(url, expected_url):
39 def test_urlify_text(url, expected_url):
40 assert helpers.urlify_text(url) == expected_url
40 assert helpers.urlify_text(url) == expected_url
41
41
42
42
43 @pytest.mark.parametrize('repo_name, commit_id, path, expected_result', [
43 @pytest.mark.parametrize('repo_name, commit_id, path, expected_result', [
44 ('rX<X', 'cX<X', 'pX<X/aX<X/bX<X',
44 ('rX<X', 'cX<X', 'pX<X/aX<X/bX<X',
45 '<a class="pjax-link" href="/rX%3CX/files/cX%3CX/">rX&lt;X</a>/'
45 '<a class="pjax-link" href="/rX%3CX/files/cX%3CX/">rX&lt;X</a>/'
46 '<a class="pjax-link" href="/rX%3CX/files/cX%3CX/pX%3CX">pX&lt;X</a>/'
46 '<a class="pjax-link" href="/rX%3CX/files/cX%3CX/pX%3CX">pX&lt;X</a>/'
47 '<a class="pjax-link" href="/rX%3CX/files/cX%3CX/pX%3CX/aX%3CX">aX&lt;X'
47 '<a class="pjax-link" href="/rX%3CX/files/cX%3CX/pX%3CX/aX%3CX">aX&lt;X'
48 '</a>/bX&lt;X'),
48 '</a>/bX&lt;X'),
49 # Path with only one segment
49 # Path with only one segment
50 ('rX<X', 'cX<X', 'pX<X',
50 ('rX<X', 'cX<X', 'pX<X',
51 '<a class="pjax-link" href="/rX%3CX/files/cX%3CX/">rX&lt;X</a>/pX&lt;X'),
51 '<a class="pjax-link" href="/rX%3CX/files/cX%3CX/">rX&lt;X</a>/pX&lt;X'),
52 # Empty path
52 # Empty path
53 ('rX<X', 'cX<X', '', 'rX&lt;X'),
53 ('rX<X', 'cX<X', '', 'rX&lt;X'),
54 ('rX"X', 'cX"X', 'pX"X/aX"X/bX"X',
54 ('rX"X', 'cX"X', 'pX"X/aX"X/bX"X',
55 '<a class="pjax-link" href="/rX%22X/files/cX%22X/">rX&#34;X</a>/'
55 '<a class="pjax-link" href="/rX%22X/files/cX%22X/">rX&#34;X</a>/'
56 '<a class="pjax-link" href="/rX%22X/files/cX%22X/pX%22X">pX&#34;X</a>/'
56 '<a class="pjax-link" href="/rX%22X/files/cX%22X/pX%22X">pX&#34;X</a>/'
57 '<a class="pjax-link" href="/rX%22X/files/cX%22X/pX%22X/aX%22X">aX&#34;X'
57 '<a class="pjax-link" href="/rX%22X/files/cX%22X/pX%22X/aX%22X">aX&#34;X'
58 '</a>/bX&#34;X'),
58 '</a>/bX&#34;X'),
59 ], ids=['simple', 'one_segment', 'empty_path', 'simple_quote'])
59 ], ids=['simple', 'one_segment', 'empty_path', 'simple_quote'])
60 def test_files_breadcrumbs_xss(
60 def test_files_breadcrumbs_xss(
61 repo_name, commit_id, path, pylonsapp, expected_result):
61 repo_name, commit_id, path, pylonsapp, expected_result):
62 result = helpers.files_breadcrumbs(repo_name, commit_id, path)
62 result = helpers.files_breadcrumbs(repo_name, commit_id, path)
63 # Expect it to encode all path fragments properly. This is important
63 # Expect it to encode all path fragments properly. This is important
64 # because it returns an instance of `literal`.
64 # because it returns an instance of `literal`.
65 assert result == expected_result
65 assert result == expected_result
66
66
67
67
68 def test_format_binary():
68 def test_format_binary():
69 assert helpers.format_byte_size_binary(298489462784) == '278.0 GiB'
69 assert helpers.format_byte_size_binary(298489462784) == '278.0 GiB'
70
70
71
71
72 @pytest.mark.parametrize('text_string, pattern, expected', [
72 @pytest.mark.parametrize('text_string, pattern, expected', [
73 ('No issue here', '(?:#)(?P<issue_id>\d+)', []),
73 ('No issue here', '(?:#)(?P<issue_id>\d+)', []),
74 ('Fix #42', '(?:#)(?P<issue_id>\d+)',
74 ('Fix #42', '(?:#)(?P<issue_id>\d+)',
75 [{'url': 'http://r.io/{repo}/i/42', 'id': '42'}]),
75 [{'url': 'http://r.io/{repo}/i/42', 'id': '42'}]),
76 ('Fix #42, #53', '(?:#)(?P<issue_id>\d+)', [
76 ('Fix #42, #53', '(?:#)(?P<issue_id>\d+)', [
77 {'url': 'http://r.io/{repo}/i/42', 'id': '42'},
77 {'url': 'http://r.io/{repo}/i/42', 'id': '42'},
78 {'url': 'http://r.io/{repo}/i/53', 'id': '53'}]),
78 {'url': 'http://r.io/{repo}/i/53', 'id': '53'}]),
79 ('Fix #42', '(?:#)?<issue_id>\d+)', []), # Broken regex
79 ('Fix #42', '(?:#)?<issue_id>\d+)', []), # Broken regex
80 ])
80 ])
81 def test_extract_issues(backend, text_string, pattern, expected):
81 def test_extract_issues(backend, text_string, pattern, expected):
82 repo = backend.create_repo()
82 repo = backend.create_repo()
83 config = {
83 config = {
84 '123': {
84 '123': {
85 'uid': '123',
85 'uid': '123',
86 'pat': pattern,
86 'pat': pattern,
87 'url': 'http://r.io/${repo}/i/${issue_id}',
87 'url': 'http://r.io/${repo}/i/${issue_id}',
88 'pref': '#',
88 'pref': '#',
89 }
89 }
90 }
90 }
91
91
92 def get_settings_mock(self, cache=True):
92 def get_settings_mock(self, cache=True):
93 return config
93 return config
94
94
95 with mock.patch.object(IssueTrackerSettingsModel,
95 with mock.patch.object(IssueTrackerSettingsModel,
96 'get_settings', get_settings_mock):
96 'get_settings', get_settings_mock):
97 text, issues = helpers.process_patterns(text_string, repo.repo_name)
97 text, issues = helpers.process_patterns(text_string, repo.repo_name)
98
98
99 expected = copy.deepcopy(expected)
99 expected = copy.deepcopy(expected)
100 for item in expected:
100 for item in expected:
101 item['url'] = item['url'].format(repo=repo.repo_name)
101 item['url'] = item['url'].format(repo=repo.repo_name)
102
102
103 assert issues == expected
103 assert issues == expected
104
104
105
105
106 @pytest.mark.parametrize('text_string, pattern, expected_text', [
106 @pytest.mark.parametrize('text_string, pattern, link_format, expected_text', [
107 ('Fix #42', '(?:#)(?P<issue_id>\d+)',
107 ('Fix #42', '(?:#)(?P<issue_id>\d+)', 'html',
108 'Fix <a class="issue-tracker-link" href="http://r.io/{repo}/i/42">#42</a>'
108 'Fix <a class="issue-tracker-link" href="http://r.io/{repo}/i/42">#42</a>'),
109 ),
109
110 ('Fix #42', '(?:#)?<issue_id>\d+)', 'Fix #42'), # Broken regex
110 ('Fix #42', '(?:#)(?P<issue_id>\d+)', 'markdown',
111 'Fix [#42](http://r.io/{repo}/i/42)'),
112
113 ('Fix #42', '(?:#)(?P<issue_id>\d+)', 'rst',
114 'Fix `#42 <http://r.io/{repo}/i/42>`_'),
115
116 ('Fix #42', '(?:#)?<issue_id>\d+)', 'html',
117 'Fix #42'), # Broken regex
111 ])
118 ])
112 def test_process_patterns_repo(backend, text_string, pattern, expected_text):
119 def test_process_patterns_repo(backend, text_string, pattern, expected_text, link_format):
113 repo = backend.create_repo()
120 repo = backend.create_repo()
114 config = {'123': {
121
122 def get_settings_mock(self, cache=True):
123 return {
124 '123': {
115 'uid': '123',
125 'uid': '123',
116 'pat': pattern,
126 'pat': pattern,
117 'url': 'http://r.io/${repo}/i/${issue_id}',
127 'url': 'http://r.io/${repo}/i/${issue_id}',
118 'pref': '#',
128 'pref': '#',
119 }
129 }
120 }
130 }
121
131
122 def get_settings_mock(self, cache=True):
123 return config
124
125 with mock.patch.object(IssueTrackerSettingsModel,
132 with mock.patch.object(IssueTrackerSettingsModel,
126 'get_settings', get_settings_mock):
133 'get_settings', get_settings_mock):
127 processed_text, issues = helpers.process_patterns(
134 processed_text, issues = helpers.process_patterns(
128 text_string, repo.repo_name, config)
135 text_string, repo.repo_name, link_format)
129
136
130 assert processed_text == expected_text.format(repo=repo.repo_name)
137 assert processed_text == expected_text.format(repo=repo.repo_name)
131
138
132
139
133 @pytest.mark.parametrize('text_string, pattern, expected_text', [
140 @pytest.mark.parametrize('text_string, pattern, expected_text', [
134 ('Fix #42', '(?:#)(?P<issue_id>\d+)',
141 ('Fix #42', '(?:#)(?P<issue_id>\d+)',
135 'Fix <a class="issue-tracker-link" href="http://r.io/i/42">#42</a>'
142 'Fix <a class="issue-tracker-link" href="http://r.io/i/42">#42</a>'),
136 ),
143 ('Fix #42', '(?:#)?<issue_id>\d+)',
137 ('Fix #42', '(?:#)?<issue_id>\d+)', 'Fix #42'), # Broken regex
144 'Fix #42'), # Broken regex
138 ])
145 ])
139 def test_process_patterns_no_repo(text_string, pattern, expected_text):
146 def test_process_patterns_no_repo(text_string, pattern, expected_text):
140 config = {'123': {
147
148 def get_settings_mock(self, cache=True):
149 return {
150 '123': {
141 'uid': '123',
151 'uid': '123',
142 'pat': pattern,
152 'pat': pattern,
143 'url': 'http://r.io/i/${issue_id}',
153 'url': 'http://r.io/i/${issue_id}',
144 'pref': '#',
154 'pref': '#',
145 }
155 }
146 }
156 }
147
157
148 def get_settings_mock(self, cache=True):
149 return config
150
151 with mock.patch.object(IssueTrackerSettingsModel,
158 with mock.patch.object(IssueTrackerSettingsModel,
152 'get_global_settings', get_settings_mock):
159 'get_global_settings', get_settings_mock):
153 processed_text, issues = helpers.process_patterns(
160 processed_text, issues = helpers.process_patterns(
154 text_string, '', config)
161 text_string, '')
155
162
156 assert processed_text == expected_text
163 assert processed_text == expected_text
157
164
158
165
159 def test_process_patterns_non_existent_repo_name(backend):
166 def test_process_patterns_non_existent_repo_name(backend):
160 text_string = 'Fix #42'
167 text_string = 'Fix #42'
161 pattern = '(?:#)(?P<issue_id>\d+)'
168 pattern = '(?:#)(?P<issue_id>\d+)'
162 expected_text = ('Fix <a class="issue-tracker-link" '
169 expected_text = ('Fix <a class="issue-tracker-link" '
163 'href="http://r.io/do-not-exist/i/42">#42</a>')
170 'href="http://r.io/do-not-exist/i/42">#42</a>')
164 config = {'123': {
171
172 def get_settings_mock(self, cache=True):
173 return {
174 '123': {
165 'uid': '123',
175 'uid': '123',
166 'pat': pattern,
176 'pat': pattern,
167 'url': 'http://r.io/${repo}/i/${issue_id}',
177 'url': 'http://r.io/${repo}/i/${issue_id}',
168 'pref': '#',
178 'pref': '#',
169 }
179 }
170 }
180 }
171
181
172 def get_settings_mock(self, cache=True):
173 return config
174
175 with mock.patch.object(IssueTrackerSettingsModel,
182 with mock.patch.object(IssueTrackerSettingsModel,
176 'get_global_settings', get_settings_mock):
183 'get_global_settings', get_settings_mock):
177 processed_text, issues = helpers.process_patterns(
184 processed_text, issues = helpers.process_patterns(
178 text_string, 'do-not-exist', config)
185 text_string, 'do-not-exist')
179
186
180 assert processed_text == expected_text
187 assert processed_text == expected_text
181
188
182
189
183 def test_get_visual_attr(pylonsapp):
190 def test_get_visual_attr(pylonsapp):
184 c = ContextObj()
191 c = ContextObj()
185 assert None is helpers.get_visual_attr(c, 'fake')
192 assert None is helpers.get_visual_attr(c, 'fake')
186
193
187 # emulate the c.visual behaviour
194 # emulate the c.visual behaviour
188 c.visual = AttributeDict({})
195 c.visual = AttributeDict({})
189 assert None is helpers.get_visual_attr(c, 'some_var')
196 assert None is helpers.get_visual_attr(c, 'some_var')
190
197
191 c.visual.some_var = 'foobar'
198 c.visual.some_var = 'foobar'
192 assert 'foobar' == helpers.get_visual_attr(c, 'some_var')
199 assert 'foobar' == helpers.get_visual_attr(c, 'some_var')
193
200
194
201
195 @pytest.mark.parametrize('test_text, inclusive, expected_text', [
202 @pytest.mark.parametrize('test_text, inclusive, expected_text', [
196 ('just a string', False, 'just a string'),
203 ('just a string', False, 'just a string'),
197 ('just a string\n', False, 'just a string'),
204 ('just a string\n', False, 'just a string'),
198 ('just a string\n next line', False, 'just a string...'),
205 ('just a string\n next line', False, 'just a string...'),
199 ('just a string\n next line', True, 'just a string\n...'),
206 ('just a string\n next line', True, 'just a string\n...'),
200 ])
207 ])
201 def test_chop_at(test_text, inclusive, expected_text):
208 def test_chop_at(test_text, inclusive, expected_text):
202 assert helpers.chop_at_smart(
209 assert helpers.chop_at_smart(
203 test_text, '\n', inclusive, '...') == expected_text
210 test_text, '\n', inclusive, '...') == expected_text
204
211
205
212
206 @pytest.mark.parametrize('test_text, expected_output', [
213 @pytest.mark.parametrize('test_text, expected_output', [
207 ('some text', ['some', 'text']),
214 ('some text', ['some', 'text']),
208 ('some text', ['some', 'text']),
215 ('some text', ['some', 'text']),
209 ('some text "with a phrase"', ['some', 'text', 'with a phrase']),
216 ('some text "with a phrase"', ['some', 'text', 'with a phrase']),
210 ('"a phrase" "another phrase"', ['a phrase', 'another phrase']),
217 ('"a phrase" "another phrase"', ['a phrase', 'another phrase']),
211 ('"justphrase"', ['justphrase']),
218 ('"justphrase"', ['justphrase']),
212 ('""', []),
219 ('""', []),
213 ('', []),
220 ('', []),
214 (' ', []),
221 (' ', []),
215 ('" "', []),
222 ('" "', []),
216 ])
223 ])
217 def test_extract_phrases(test_text, expected_output):
224 def test_extract_phrases(test_text, expected_output):
218 assert helpers.extract_phrases(test_text) == expected_output
225 assert helpers.extract_phrases(test_text) == expected_output
219
226
220
227
221 @pytest.mark.parametrize('test_text, text_phrases, expected_output', [
228 @pytest.mark.parametrize('test_text, text_phrases, expected_output', [
222 ('some text here', ['some', 'here'], [(0, 4), (10, 14)]),
229 ('some text here', ['some', 'here'], [(0, 4), (10, 14)]),
223 ('here here there', ['here'], [(0, 4), (5, 9), (11, 15)]),
230 ('here here there', ['here'], [(0, 4), (5, 9), (11, 15)]),
224 ('irrelevant', ['not found'], []),
231 ('irrelevant', ['not found'], []),
225 ('irrelevant', ['not found'], []),
232 ('irrelevant', ['not found'], []),
226 ])
233 ])
227 def test_get_matching_offsets(test_text, text_phrases, expected_output):
234 def test_get_matching_offsets(test_text, text_phrases, expected_output):
228 assert helpers.get_matching_offsets(
235 assert helpers.get_matching_offsets(
229 test_text, text_phrases) == expected_output
236 test_text, text_phrases) == expected_output
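For readers unfamiliar with the helper under test, a naive re-implementation that reproduces the expectations above; helpers.get_matching_offsets is the authoritative version and may be implemented differently:

def naive_matching_offsets(text, phrases):
    # Collect (start, end) offsets of every occurrence of every phrase,
    # sorted by position in the text.
    offsets = []
    for phrase in phrases:
        start = text.find(phrase)
        while start != -1:
            offsets.append((start, start + len(phrase)))
            start = text.find(phrase, start + 1)
    return sorted(offsets)

assert naive_matching_offsets('here here there', ['here']) == [(0, 4), (5, 9), (11, 15)]
assert naive_matching_offsets('irrelevant', ['not found']) == []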
230
237
231
238
232 def test_normalize_text_for_matching():
239 def test_normalize_text_for_matching():
233 assert helpers.normalize_text_for_matching(
240 assert helpers.normalize_text_for_matching(
234 'OJjfe)*#$*@)$JF*)3r2f80h') == 'ojjfe jf 3r2f80h'
241 'OJjfe)*#$*@)$JF*)3r2f80h') == 'ojjfe jf 3r2f80h'
235
242
236
243
237 def test_get_matching_line_offsets():
244 def test_get_matching_line_offsets():
238 assert helpers.get_matching_line_offsets([
245 assert helpers.get_matching_line_offsets([
239 'words words words',
246 'words words words',
240 'words words words',
247 'words words words',
241 'some text some',
248 'some text some',
242 'words words words',
249 'words words words',
243 'words words words',
250 'words words words',
244 'text here what'], 'text') == {3: [(5, 9)], 6: [(0, 4)]}
251 'text here what'], 'text') == {3: [(5, 9)], 6: [(0, 4)]}