helpers: fix metatags extraction in case description is empty
marcink
r2226:c2059633 stable
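The change below makes extract_metatags() return a (tags, value) pair even when the description is empty, instead of a bare string. A minimal sketch of the behaviour, assuming a caller that unpacks the result (the repo.description caller shown here is hypothetical, not part of this commit):

    # Callers unpack the helper's result as a pair; with the old early
    # `return ''` an empty description returned a bare string, so this
    # unpacking raised ValueError.
    tags, description = extract_metatags(repo.description)  # hypothetical caller

    # After the fix, empty or missing descriptions unpack cleanly:
    assert extract_metatags('') == ([], '')
    assert extract_metatags(None) == ([], '')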
@@ -1,2110 +1,2110 @@
1 # -*- coding: utf-8 -*-
1 # -*- coding: utf-8 -*-
2
2
3 # Copyright (C) 2010-2017 RhodeCode GmbH
3 # Copyright (C) 2010-2017 RhodeCode GmbH
4 #
4 #
5 # This program is free software: you can redistribute it and/or modify
5 # This program is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU Affero General Public License, version 3
6 # it under the terms of the GNU Affero General Public License, version 3
7 # (only), as published by the Free Software Foundation.
7 # (only), as published by the Free Software Foundation.
8 #
8 #
9 # This program is distributed in the hope that it will be useful,
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU General Public License for more details.
12 # GNU General Public License for more details.
13 #
13 #
14 # You should have received a copy of the GNU Affero General Public License
14 # You should have received a copy of the GNU Affero General Public License
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 #
16 #
17 # This program is dual-licensed. If you wish to learn more about the
17 # This program is dual-licensed. If you wish to learn more about the
18 # RhodeCode Enterprise Edition, including its added features, Support services,
18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 # and proprietary license terms, please see https://rhodecode.com/licenses/
19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20
20
21 """
21 """
22 Helper functions
22 Helper functions
23
23
24 Consists of functions to typically be used within templates, but also
24 Consists of functions to typically be used within templates, but also
25 available to Controllers. This module is available to both as 'h'.
25 available to Controllers. This module is available to both as 'h'.
26 """
26 """
27
27
28 import random
28 import random
29 import hashlib
29 import hashlib
30 import StringIO
30 import StringIO
31 import urllib
31 import urllib
32 import math
32 import math
33 import logging
33 import logging
34 import re
34 import re
35 import urlparse
35 import urlparse
36 import time
36 import time
37 import string
37 import string
38 import hashlib
38 import hashlib
39 from collections import OrderedDict
39 from collections import OrderedDict
40
40
41 import pygments
41 import pygments
42 import itertools
42 import itertools
43 import fnmatch
43 import fnmatch
44
44
45 from datetime import datetime
45 from datetime import datetime
46 from functools import partial
46 from functools import partial
47 from pygments.formatters.html import HtmlFormatter
47 from pygments.formatters.html import HtmlFormatter
48 from pygments import highlight as code_highlight
48 from pygments import highlight as code_highlight
49 from pygments.lexers import (
49 from pygments.lexers import (
50 get_lexer_by_name, get_lexer_for_filename, get_lexer_for_mimetype)
50 get_lexer_by_name, get_lexer_for_filename, get_lexer_for_mimetype)
51
51
52 from pyramid.threadlocal import get_current_request
52 from pyramid.threadlocal import get_current_request
53
53
54 from webhelpers.html import literal, HTML, escape
54 from webhelpers.html import literal, HTML, escape
55 from webhelpers.html.tools import *
55 from webhelpers.html.tools import *
56 from webhelpers.html.builder import make_tag
56 from webhelpers.html.builder import make_tag
57 from webhelpers.html.tags import auto_discovery_link, checkbox, css_classes, \
57 from webhelpers.html.tags import auto_discovery_link, checkbox, css_classes, \
58 end_form, file, form as wh_form, hidden, image, javascript_link, link_to, \
58 end_form, file, form as wh_form, hidden, image, javascript_link, link_to, \
59 link_to_if, link_to_unless, ol, required_legend, select, stylesheet_link, \
59 link_to_if, link_to_unless, ol, required_legend, select, stylesheet_link, \
60 submit, text, password, textarea, title, ul, xml_declaration, radio
60 submit, text, password, textarea, title, ul, xml_declaration, radio
61 from webhelpers.html.tools import auto_link, button_to, highlight, \
61 from webhelpers.html.tools import auto_link, button_to, highlight, \
62 js_obfuscate, mail_to, strip_links, strip_tags, tag_re
62 js_obfuscate, mail_to, strip_links, strip_tags, tag_re
63 from webhelpers.text import chop_at, collapse, convert_accented_entities, \
63 from webhelpers.text import chop_at, collapse, convert_accented_entities, \
64 convert_misc_entities, lchop, plural, rchop, remove_formatting, \
64 convert_misc_entities, lchop, plural, rchop, remove_formatting, \
65 replace_whitespace, urlify, truncate, wrap_paragraphs
65 replace_whitespace, urlify, truncate, wrap_paragraphs
66 from webhelpers.date import time_ago_in_words
66 from webhelpers.date import time_ago_in_words
67 from webhelpers.paginate import Page as _Page
67 from webhelpers.paginate import Page as _Page
68 from webhelpers.html.tags import _set_input_attrs, _set_id_attr, \
68 from webhelpers.html.tags import _set_input_attrs, _set_id_attr, \
69 convert_boolean_attrs, NotGiven, _make_safe_id_component
69 convert_boolean_attrs, NotGiven, _make_safe_id_component
70 from webhelpers2.number import format_byte_size
70 from webhelpers2.number import format_byte_size
71
71
72 from rhodecode.lib.action_parser import action_parser
72 from rhodecode.lib.action_parser import action_parser
73 from rhodecode.lib.ext_json import json
73 from rhodecode.lib.ext_json import json
74 from rhodecode.lib.utils import repo_name_slug, get_custom_lexer
74 from rhodecode.lib.utils import repo_name_slug, get_custom_lexer
75 from rhodecode.lib.utils2 import str2bool, safe_unicode, safe_str, \
75 from rhodecode.lib.utils2 import str2bool, safe_unicode, safe_str, \
76 get_commit_safe, datetime_to_time, time_to_datetime, time_to_utcdatetime, \
76 get_commit_safe, datetime_to_time, time_to_datetime, time_to_utcdatetime, \
77 AttributeDict, safe_int, md5, md5_safe
77 AttributeDict, safe_int, md5, md5_safe
78 from rhodecode.lib.markup_renderer import MarkupRenderer, relative_links
78 from rhodecode.lib.markup_renderer import MarkupRenderer, relative_links
79 from rhodecode.lib.vcs.exceptions import CommitDoesNotExistError
79 from rhodecode.lib.vcs.exceptions import CommitDoesNotExistError
80 from rhodecode.lib.vcs.backends.base import BaseChangeset, EmptyCommit
80 from rhodecode.lib.vcs.backends.base import BaseChangeset, EmptyCommit
81 from rhodecode.config.conf import DATE_FORMAT, DATETIME_FORMAT
81 from rhodecode.config.conf import DATE_FORMAT, DATETIME_FORMAT
82 from rhodecode.model.changeset_status import ChangesetStatusModel
82 from rhodecode.model.changeset_status import ChangesetStatusModel
83 from rhodecode.model.db import Permission, User, Repository
83 from rhodecode.model.db import Permission, User, Repository
84 from rhodecode.model.repo_group import RepoGroupModel
84 from rhodecode.model.repo_group import RepoGroupModel
85 from rhodecode.model.settings import IssueTrackerSettingsModel
85 from rhodecode.model.settings import IssueTrackerSettingsModel
86
86
87 log = logging.getLogger(__name__)
87 log = logging.getLogger(__name__)
88
88
89
89
90 DEFAULT_USER = User.DEFAULT_USER
90 DEFAULT_USER = User.DEFAULT_USER
91 DEFAULT_USER_EMAIL = User.DEFAULT_USER_EMAIL
91 DEFAULT_USER_EMAIL = User.DEFAULT_USER_EMAIL
92
92
93
93
94 def url(*args, **kw):
94 def url(*args, **kw):
95 from pylons import url as pylons_url
95 from pylons import url as pylons_url
96 return pylons_url(*args, **kw)
96 return pylons_url(*args, **kw)
97
97
98
98
99 def url_replace(**qargs):
99 def url_replace(**qargs):
100 """ Returns the current request url while replacing query string args """
100 """ Returns the current request url while replacing query string args """
101
101
102 request = get_current_request()
102 request = get_current_request()
103 new_args = request.GET.mixed()
103 new_args = request.GET.mixed()
104 new_args.update(qargs)
104 new_args.update(qargs)
105 return url('', **new_args)
105 return url('', **new_args)
106
106
107
107
108 def asset(path, ver=None, **kwargs):
108 def asset(path, ver=None, **kwargs):
109 """
109 """
110 Helper to generate a static asset file path for rhodecode assets
110 Helper to generate a static asset file path for rhodecode assets
111
111
112 eg. h.asset('images/image.png', ver='3923')
112 eg. h.asset('images/image.png', ver='3923')
113
113
114 :param path: path of asset
114 :param path: path of asset
115 :param ver: optional version query param to append as ?ver=
115 :param ver: optional version query param to append as ?ver=
116 """
116 """
117 request = get_current_request()
117 request = get_current_request()
118 query = {}
118 query = {}
119 query.update(kwargs)
119 query.update(kwargs)
120 if ver:
120 if ver:
121 query = {'ver': ver}
121 query = {'ver': ver}
122 return request.static_path(
122 return request.static_path(
123 'rhodecode:public/{}'.format(path), _query=query)
123 'rhodecode:public/{}'.format(path), _query=query)
124
124
125
125
126 default_html_escape_table = {
126 default_html_escape_table = {
127 ord('&'): u'&amp;',
127 ord('&'): u'&amp;',
128 ord('<'): u'&lt;',
128 ord('<'): u'&lt;',
129 ord('>'): u'&gt;',
129 ord('>'): u'&gt;',
130 ord('"'): u'&quot;',
130 ord('"'): u'&quot;',
131 ord("'"): u'&#39;',
131 ord("'"): u'&#39;',
132 }
132 }
133
133
134
134
135 def html_escape(text, html_escape_table=default_html_escape_table):
135 def html_escape(text, html_escape_table=default_html_escape_table):
136 """Produce entities within text."""
136 """Produce entities within text."""
137 return text.translate(html_escape_table)
137 return text.translate(html_escape_table)
138
138
139
139
140 def chop_at_smart(s, sub, inclusive=False, suffix_if_chopped=None):
140 def chop_at_smart(s, sub, inclusive=False, suffix_if_chopped=None):
141 """
141 """
142 Truncate string ``s`` at the first occurrence of ``sub``.
142 Truncate string ``s`` at the first occurrence of ``sub``.
143
143
144 If ``inclusive`` is true, truncate just after ``sub`` rather than at it.
144 If ``inclusive`` is true, truncate just after ``sub`` rather than at it.
145 """
145 """
146 suffix_if_chopped = suffix_if_chopped or ''
146 suffix_if_chopped = suffix_if_chopped or ''
147 pos = s.find(sub)
147 pos = s.find(sub)
148 if pos == -1:
148 if pos == -1:
149 return s
149 return s
150
150
151 if inclusive:
151 if inclusive:
152 pos += len(sub)
152 pos += len(sub)
153
153
154 chopped = s[:pos]
154 chopped = s[:pos]
155 left = s[pos:].strip()
155 left = s[pos:].strip()
156
156
157 if left and suffix_if_chopped:
157 if left and suffix_if_chopped:
158 chopped += suffix_if_chopped
158 chopped += suffix_if_chopped
159
159
160 return chopped
160 return chopped
161
161
162
162
163 def shorter(text, size=20):
163 def shorter(text, size=20):
164 postfix = '...'
164 postfix = '...'
165 if len(text) > size:
165 if len(text) > size:
166 return text[:size - len(postfix)] + postfix
166 return text[:size - len(postfix)] + postfix
167 return text
167 return text
168
168
169
169
170 def _reset(name, value=None, id=NotGiven, type="reset", **attrs):
170 def _reset(name, value=None, id=NotGiven, type="reset", **attrs):
171 """
171 """
172 Reset button
172 Reset button
173 """
173 """
174 _set_input_attrs(attrs, type, name, value)
174 _set_input_attrs(attrs, type, name, value)
175 _set_id_attr(attrs, id, name)
175 _set_id_attr(attrs, id, name)
176 convert_boolean_attrs(attrs, ["disabled"])
176 convert_boolean_attrs(attrs, ["disabled"])
177 return HTML.input(**attrs)
177 return HTML.input(**attrs)
178
178
179 reset = _reset
179 reset = _reset
180 safeid = _make_safe_id_component
180 safeid = _make_safe_id_component
181
181
182
182
183 def branding(name, length=40):
183 def branding(name, length=40):
184 return truncate(name, length, indicator="")
184 return truncate(name, length, indicator="")
185
185
186
186
187 def FID(raw_id, path):
187 def FID(raw_id, path):
188 """
188 """
189 Creates a unique ID for filenode based on it's hash of path and commit
189 Creates a unique ID for filenode based on it's hash of path and commit
190 it's safe to use in urls
190 it's safe to use in urls
191
191
192 :param raw_id:
192 :param raw_id:
193 :param path:
193 :param path:
194 """
194 """
195
195
196 return 'c-%s-%s' % (short_id(raw_id), md5_safe(path)[:12])
196 return 'c-%s-%s' % (short_id(raw_id), md5_safe(path)[:12])
197
197
198
198
199 class _GetError(object):
199 class _GetError(object):
200 """Get error from form_errors, and represent it as span wrapped error
200 """Get error from form_errors, and represent it as span wrapped error
201 message
201 message
202
202
203 :param field_name: field to fetch errors for
203 :param field_name: field to fetch errors for
204 :param form_errors: form errors dict
204 :param form_errors: form errors dict
205 """
205 """
206
206
207 def __call__(self, field_name, form_errors):
207 def __call__(self, field_name, form_errors):
208 tmpl = """<span class="error_msg">%s</span>"""
208 tmpl = """<span class="error_msg">%s</span>"""
209 if form_errors and field_name in form_errors:
209 if form_errors and field_name in form_errors:
210 return literal(tmpl % form_errors.get(field_name))
210 return literal(tmpl % form_errors.get(field_name))
211
211
212 get_error = _GetError()
212 get_error = _GetError()
213
213
214
214
215 class _ToolTip(object):
215 class _ToolTip(object):
216
216
217 def __call__(self, tooltip_title, trim_at=50):
217 def __call__(self, tooltip_title, trim_at=50):
218 """
218 """
219 Special function just to wrap our text into nice formatted
219 Special function just to wrap our text into nice formatted
220 autowrapped text
220 autowrapped text
221
221
222 :param tooltip_title:
222 :param tooltip_title:
223 """
223 """
224 tooltip_title = escape(tooltip_title)
224 tooltip_title = escape(tooltip_title)
225 tooltip_title = tooltip_title.replace('<', '&lt;').replace('>', '&gt;')
225 tooltip_title = tooltip_title.replace('<', '&lt;').replace('>', '&gt;')
226 return tooltip_title
226 return tooltip_title
227 tooltip = _ToolTip()
227 tooltip = _ToolTip()
228
228
229
229
230 def files_breadcrumbs(repo_name, commit_id, file_path):
230 def files_breadcrumbs(repo_name, commit_id, file_path):
231 if isinstance(file_path, str):
231 if isinstance(file_path, str):
232 file_path = safe_unicode(file_path)
232 file_path = safe_unicode(file_path)
233
233
234 # TODO: johbo: Is this always a url like path, or is this operating
234 # TODO: johbo: Is this always a url like path, or is this operating
235 # system dependent?
235 # system dependent?
236 path_segments = file_path.split('/')
236 path_segments = file_path.split('/')
237
237
238 repo_name_html = escape(repo_name)
238 repo_name_html = escape(repo_name)
239 if len(path_segments) == 1 and path_segments[0] == '':
239 if len(path_segments) == 1 and path_segments[0] == '':
240 url_segments = [repo_name_html]
240 url_segments = [repo_name_html]
241 else:
241 else:
242 url_segments = [
242 url_segments = [
243 link_to(
243 link_to(
244 repo_name_html,
244 repo_name_html,
245 route_path(
245 route_path(
246 'repo_files',
246 'repo_files',
247 repo_name=repo_name,
247 repo_name=repo_name,
248 commit_id=commit_id,
248 commit_id=commit_id,
249 f_path=''),
249 f_path=''),
250 class_='pjax-link')]
250 class_='pjax-link')]
251
251
252 last_cnt = len(path_segments) - 1
252 last_cnt = len(path_segments) - 1
253 for cnt, segment in enumerate(path_segments):
253 for cnt, segment in enumerate(path_segments):
254 if not segment:
254 if not segment:
255 continue
255 continue
256 segment_html = escape(segment)
256 segment_html = escape(segment)
257
257
258 if cnt != last_cnt:
258 if cnt != last_cnt:
259 url_segments.append(
259 url_segments.append(
260 link_to(
260 link_to(
261 segment_html,
261 segment_html,
262 route_path(
262 route_path(
263 'repo_files',
263 'repo_files',
264 repo_name=repo_name,
264 repo_name=repo_name,
265 commit_id=commit_id,
265 commit_id=commit_id,
266 f_path='/'.join(path_segments[:cnt + 1])),
266 f_path='/'.join(path_segments[:cnt + 1])),
267 class_='pjax-link'))
267 class_='pjax-link'))
268 else:
268 else:
269 url_segments.append(segment_html)
269 url_segments.append(segment_html)
270
270
271 return literal('/'.join(url_segments))
271 return literal('/'.join(url_segments))
272
272
273
273
274 class CodeHtmlFormatter(HtmlFormatter):
274 class CodeHtmlFormatter(HtmlFormatter):
275 """
275 """
276 My code Html Formatter for source codes
276 My code Html Formatter for source codes
277 """
277 """
278
278
279 def wrap(self, source, outfile):
279 def wrap(self, source, outfile):
280 return self._wrap_div(self._wrap_pre(self._wrap_code(source)))
280 return self._wrap_div(self._wrap_pre(self._wrap_code(source)))
281
281
282 def _wrap_code(self, source):
282 def _wrap_code(self, source):
283 for cnt, it in enumerate(source):
283 for cnt, it in enumerate(source):
284 i, t = it
284 i, t = it
285 t = '<div id="L%s">%s</div>' % (cnt + 1, t)
285 t = '<div id="L%s">%s</div>' % (cnt + 1, t)
286 yield i, t
286 yield i, t
287
287
288 def _wrap_tablelinenos(self, inner):
288 def _wrap_tablelinenos(self, inner):
289 dummyoutfile = StringIO.StringIO()
289 dummyoutfile = StringIO.StringIO()
290 lncount = 0
290 lncount = 0
291 for t, line in inner:
291 for t, line in inner:
292 if t:
292 if t:
293 lncount += 1
293 lncount += 1
294 dummyoutfile.write(line)
294 dummyoutfile.write(line)
295
295
296 fl = self.linenostart
296 fl = self.linenostart
297 mw = len(str(lncount + fl - 1))
297 mw = len(str(lncount + fl - 1))
298 sp = self.linenospecial
298 sp = self.linenospecial
299 st = self.linenostep
299 st = self.linenostep
300 la = self.lineanchors
300 la = self.lineanchors
301 aln = self.anchorlinenos
301 aln = self.anchorlinenos
302 nocls = self.noclasses
302 nocls = self.noclasses
303 if sp:
303 if sp:
304 lines = []
304 lines = []
305
305
306 for i in range(fl, fl + lncount):
306 for i in range(fl, fl + lncount):
307 if i % st == 0:
307 if i % st == 0:
308 if i % sp == 0:
308 if i % sp == 0:
309 if aln:
309 if aln:
310 lines.append('<a href="#%s%d" class="special">%*d</a>' %
310 lines.append('<a href="#%s%d" class="special">%*d</a>' %
311 (la, i, mw, i))
311 (la, i, mw, i))
312 else:
312 else:
313 lines.append('<span class="special">%*d</span>' % (mw, i))
313 lines.append('<span class="special">%*d</span>' % (mw, i))
314 else:
314 else:
315 if aln:
315 if aln:
316 lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
316 lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
317 else:
317 else:
318 lines.append('%*d' % (mw, i))
318 lines.append('%*d' % (mw, i))
319 else:
319 else:
320 lines.append('')
320 lines.append('')
321 ls = '\n'.join(lines)
321 ls = '\n'.join(lines)
322 else:
322 else:
323 lines = []
323 lines = []
324 for i in range(fl, fl + lncount):
324 for i in range(fl, fl + lncount):
325 if i % st == 0:
325 if i % st == 0:
326 if aln:
326 if aln:
327 lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
327 lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
328 else:
328 else:
329 lines.append('%*d' % (mw, i))
329 lines.append('%*d' % (mw, i))
330 else:
330 else:
331 lines.append('')
331 lines.append('')
332 ls = '\n'.join(lines)
332 ls = '\n'.join(lines)
333
333
334 # in case you wonder about the seemingly redundant <div> here: since the
334 # in case you wonder about the seemingly redundant <div> here: since the
335 # content in the other cell also is wrapped in a div, some browsers in
335 # content in the other cell also is wrapped in a div, some browsers in
336 # some configurations seem to mess up the formatting...
336 # some configurations seem to mess up the formatting...
337 if nocls:
337 if nocls:
338 yield 0, ('<table class="%stable">' % self.cssclass +
338 yield 0, ('<table class="%stable">' % self.cssclass +
339 '<tr><td><div class="linenodiv" '
339 '<tr><td><div class="linenodiv" '
340 'style="background-color: #f0f0f0; padding-right: 10px">'
340 'style="background-color: #f0f0f0; padding-right: 10px">'
341 '<pre style="line-height: 125%">' +
341 '<pre style="line-height: 125%">' +
342 ls + '</pre></div></td><td id="hlcode" class="code">')
342 ls + '</pre></div></td><td id="hlcode" class="code">')
343 else:
343 else:
344 yield 0, ('<table class="%stable">' % self.cssclass +
344 yield 0, ('<table class="%stable">' % self.cssclass +
345 '<tr><td class="linenos"><div class="linenodiv"><pre>' +
345 '<tr><td class="linenos"><div class="linenodiv"><pre>' +
346 ls + '</pre></div></td><td id="hlcode" class="code">')
346 ls + '</pre></div></td><td id="hlcode" class="code">')
347 yield 0, dummyoutfile.getvalue()
347 yield 0, dummyoutfile.getvalue()
348 yield 0, '</td></tr></table>'
348 yield 0, '</td></tr></table>'
349
349
350
350
351 class SearchContentCodeHtmlFormatter(CodeHtmlFormatter):
351 class SearchContentCodeHtmlFormatter(CodeHtmlFormatter):
352 def __init__(self, **kw):
352 def __init__(self, **kw):
353 # only show these line numbers if set
353 # only show these line numbers if set
354 self.only_lines = kw.pop('only_line_numbers', [])
354 self.only_lines = kw.pop('only_line_numbers', [])
355 self.query_terms = kw.pop('query_terms', [])
355 self.query_terms = kw.pop('query_terms', [])
356 self.max_lines = kw.pop('max_lines', 5)
356 self.max_lines = kw.pop('max_lines', 5)
357 self.line_context = kw.pop('line_context', 3)
357 self.line_context = kw.pop('line_context', 3)
358 self.url = kw.pop('url', None)
358 self.url = kw.pop('url', None)
359
359
360 super(CodeHtmlFormatter, self).__init__(**kw)
360 super(CodeHtmlFormatter, self).__init__(**kw)
361
361
362 def _wrap_code(self, source):
362 def _wrap_code(self, source):
363 for cnt, it in enumerate(source):
363 for cnt, it in enumerate(source):
364 i, t = it
364 i, t = it
365 t = '<pre>%s</pre>' % t
365 t = '<pre>%s</pre>' % t
366 yield i, t
366 yield i, t
367
367
368 def _wrap_tablelinenos(self, inner):
368 def _wrap_tablelinenos(self, inner):
369 yield 0, '<table class="code-highlight %stable">' % self.cssclass
369 yield 0, '<table class="code-highlight %stable">' % self.cssclass
370
370
371 last_shown_line_number = 0
371 last_shown_line_number = 0
372 current_line_number = 1
372 current_line_number = 1
373
373
374 for t, line in inner:
374 for t, line in inner:
375 if not t:
375 if not t:
376 yield t, line
376 yield t, line
377 continue
377 continue
378
378
379 if current_line_number in self.only_lines:
379 if current_line_number in self.only_lines:
380 if last_shown_line_number + 1 != current_line_number:
380 if last_shown_line_number + 1 != current_line_number:
381 yield 0, '<tr>'
381 yield 0, '<tr>'
382 yield 0, '<td class="line">...</td>'
382 yield 0, '<td class="line">...</td>'
383 yield 0, '<td id="hlcode" class="code"></td>'
383 yield 0, '<td id="hlcode" class="code"></td>'
384 yield 0, '</tr>'
384 yield 0, '</tr>'
385
385
386 yield 0, '<tr>'
386 yield 0, '<tr>'
387 if self.url:
387 if self.url:
388 yield 0, '<td class="line"><a href="%s#L%i">%i</a></td>' % (
388 yield 0, '<td class="line"><a href="%s#L%i">%i</a></td>' % (
389 self.url, current_line_number, current_line_number)
389 self.url, current_line_number, current_line_number)
390 else:
390 else:
391 yield 0, '<td class="line"><a href="">%i</a></td>' % (
391 yield 0, '<td class="line"><a href="">%i</a></td>' % (
392 current_line_number)
392 current_line_number)
393 yield 0, '<td id="hlcode" class="code">' + line + '</td>'
393 yield 0, '<td id="hlcode" class="code">' + line + '</td>'
394 yield 0, '</tr>'
394 yield 0, '</tr>'
395
395
396 last_shown_line_number = current_line_number
396 last_shown_line_number = current_line_number
397
397
398 current_line_number += 1
398 current_line_number += 1
399
399
400
400
401 yield 0, '</table>'
401 yield 0, '</table>'
402
402
403
403
404 def extract_phrases(text_query):
404 def extract_phrases(text_query):
405 """
405 """
406 Extracts phrases from search term string making sure phrases
406 Extracts phrases from search term string making sure phrases
407 contained in double quotes are kept together - and discarding empty values
407 contained in double quotes are kept together - and discarding empty values
408 or fully whitespace values eg.
408 or fully whitespace values eg.
409
409
410 'some text "a phrase" more' => ['some', 'text', 'a phrase', 'more']
410 'some text "a phrase" more' => ['some', 'text', 'a phrase', 'more']
411
411
412 """
412 """
413
413
414 in_phrase = False
414 in_phrase = False
415 buf = ''
415 buf = ''
416 phrases = []
416 phrases = []
417 for char in text_query:
417 for char in text_query:
418 if in_phrase:
418 if in_phrase:
419 if char == '"': # end phrase
419 if char == '"': # end phrase
420 phrases.append(buf)
420 phrases.append(buf)
421 buf = ''
421 buf = ''
422 in_phrase = False
422 in_phrase = False
423 continue
423 continue
424 else:
424 else:
425 buf += char
425 buf += char
426 continue
426 continue
427 else:
427 else:
428 if char == '"': # start phrase
428 if char == '"': # start phrase
429 in_phrase = True
429 in_phrase = True
430 phrases.append(buf)
430 phrases.append(buf)
431 buf = ''
431 buf = ''
432 continue
432 continue
433 elif char == ' ':
433 elif char == ' ':
434 phrases.append(buf)
434 phrases.append(buf)
435 buf = ''
435 buf = ''
436 continue
436 continue
437 else:
437 else:
438 buf += char
438 buf += char
439
439
440 phrases.append(buf)
440 phrases.append(buf)
441 phrases = [phrase.strip() for phrase in phrases if phrase.strip()]
441 phrases = [phrase.strip() for phrase in phrases if phrase.strip()]
442 return phrases
442 return phrases
443
443
444
444
445 def get_matching_offsets(text, phrases):
445 def get_matching_offsets(text, phrases):
446 """
446 """
447 Returns a list of string offsets in `text` that the list of `terms` match
447 Returns a list of string offsets in `text` that the list of `terms` match
448
448
449 >>> get_matching_offsets('some text here', ['some', 'here'])
449 >>> get_matching_offsets('some text here', ['some', 'here'])
450 [(0, 4), (10, 14)]
450 [(0, 4), (10, 14)]
451
451
452 """
452 """
453 offsets = []
453 offsets = []
454 for phrase in phrases:
454 for phrase in phrases:
455 for match in re.finditer(phrase, text):
455 for match in re.finditer(phrase, text):
456 offsets.append((match.start(), match.end()))
456 offsets.append((match.start(), match.end()))
457
457
458 return offsets
458 return offsets
459
459
460
460
461 def normalize_text_for_matching(x):
461 def normalize_text_for_matching(x):
462 """
462 """
463 Replaces all non alnum characters to spaces and lower cases the string,
463 Replaces all non alnum characters to spaces and lower cases the string,
464 useful for comparing two text strings without punctuation
464 useful for comparing two text strings without punctuation
465 """
465 """
466 return re.sub(r'[^\w]', ' ', x.lower())
466 return re.sub(r'[^\w]', ' ', x.lower())
467
467
468
468
469 def get_matching_line_offsets(lines, terms):
469 def get_matching_line_offsets(lines, terms):
470 """ Return a set of `lines` indices (starting from 1) matching a
470 """ Return a set of `lines` indices (starting from 1) matching a
471 text search query, along with `context` lines above/below matching lines
471 text search query, along with `context` lines above/below matching lines
472
472
473 :param lines: list of strings representing lines
473 :param lines: list of strings representing lines
474 :param terms: search term string to match in lines eg. 'some text'
474 :param terms: search term string to match in lines eg. 'some text'
475 :param context: number of lines above/below a matching line to add to result
475 :param context: number of lines above/below a matching line to add to result
476 :param max_lines: cut off for lines of interest
476 :param max_lines: cut off for lines of interest
477 eg.
477 eg.
478
478
479 text = '''
479 text = '''
480 words words words
480 words words words
481 words words words
481 words words words
482 some text some
482 some text some
483 words words words
483 words words words
484 words words words
484 words words words
485 text here what
485 text here what
486 '''
486 '''
487 get_matching_line_offsets(text, 'text', context=1)
487 get_matching_line_offsets(text, 'text', context=1)
488 {3: [(5, 9)], 6: [(0, 4)]]
488 {3: [(5, 9)], 6: [(0, 4)]]
489
489
490 """
490 """
491 matching_lines = {}
491 matching_lines = {}
492 phrases = [normalize_text_for_matching(phrase)
492 phrases = [normalize_text_for_matching(phrase)
493 for phrase in extract_phrases(terms)]
493 for phrase in extract_phrases(terms)]
494
494
495 for line_index, line in enumerate(lines, start=1):
495 for line_index, line in enumerate(lines, start=1):
496 match_offsets = get_matching_offsets(
496 match_offsets = get_matching_offsets(
497 normalize_text_for_matching(line), phrases)
497 normalize_text_for_matching(line), phrases)
498 if match_offsets:
498 if match_offsets:
499 matching_lines[line_index] = match_offsets
499 matching_lines[line_index] = match_offsets
500
500
501 return matching_lines
501 return matching_lines
502
502
503
503
504 def hsv_to_rgb(h, s, v):
504 def hsv_to_rgb(h, s, v):
505 """ Convert hsv color values to rgb """
505 """ Convert hsv color values to rgb """
506
506
507 if s == 0.0:
507 if s == 0.0:
508 return v, v, v
508 return v, v, v
509 i = int(h * 6.0) # XXX assume int() truncates!
509 i = int(h * 6.0) # XXX assume int() truncates!
510 f = (h * 6.0) - i
510 f = (h * 6.0) - i
511 p = v * (1.0 - s)
511 p = v * (1.0 - s)
512 q = v * (1.0 - s * f)
512 q = v * (1.0 - s * f)
513 t = v * (1.0 - s * (1.0 - f))
513 t = v * (1.0 - s * (1.0 - f))
514 i = i % 6
514 i = i % 6
515 if i == 0:
515 if i == 0:
516 return v, t, p
516 return v, t, p
517 if i == 1:
517 if i == 1:
518 return q, v, p
518 return q, v, p
519 if i == 2:
519 if i == 2:
520 return p, v, t
520 return p, v, t
521 if i == 3:
521 if i == 3:
522 return p, q, v
522 return p, q, v
523 if i == 4:
523 if i == 4:
524 return t, p, v
524 return t, p, v
525 if i == 5:
525 if i == 5:
526 return v, p, q
526 return v, p, q
527
527
528
528
529 def unique_color_generator(n=10000, saturation=0.10, lightness=0.95):
529 def unique_color_generator(n=10000, saturation=0.10, lightness=0.95):
530 """
530 """
531 Generator for getting n of evenly distributed colors using
531 Generator for getting n of evenly distributed colors using
532 hsv color and golden ratio. It always return same order of colors
532 hsv color and golden ratio. It always return same order of colors
533
533
534 :param n: number of colors to generate
534 :param n: number of colors to generate
535 :param saturation: saturation of returned colors
535 :param saturation: saturation of returned colors
536 :param lightness: lightness of returned colors
536 :param lightness: lightness of returned colors
537 :returns: RGB tuple
537 :returns: RGB tuple
538 """
538 """
539
539
540 golden_ratio = 0.618033988749895
540 golden_ratio = 0.618033988749895
541 h = 0.22717784590367374
541 h = 0.22717784590367374
542
542
543 for _ in xrange(n):
543 for _ in xrange(n):
544 h += golden_ratio
544 h += golden_ratio
545 h %= 1
545 h %= 1
546 HSV_tuple = [h, saturation, lightness]
546 HSV_tuple = [h, saturation, lightness]
547 RGB_tuple = hsv_to_rgb(*HSV_tuple)
547 RGB_tuple = hsv_to_rgb(*HSV_tuple)
548 yield map(lambda x: str(int(x * 256)), RGB_tuple)
548 yield map(lambda x: str(int(x * 256)), RGB_tuple)
549
549
550
550
551 def color_hasher(n=10000, saturation=0.10, lightness=0.95):
551 def color_hasher(n=10000, saturation=0.10, lightness=0.95):
552 """
552 """
553 Returns a function which when called with an argument returns a unique
553 Returns a function which when called with an argument returns a unique
554 color for that argument, eg.
554 color for that argument, eg.
555
555
556 :param n: number of colors to generate
556 :param n: number of colors to generate
557 :param saturation: saturation of returned colors
557 :param saturation: saturation of returned colors
558 :param lightness: lightness of returned colors
558 :param lightness: lightness of returned colors
559 :returns: css RGB string
559 :returns: css RGB string
560
560
561 >>> color_hash = color_hasher()
561 >>> color_hash = color_hasher()
562 >>> color_hash('hello')
562 >>> color_hash('hello')
563 'rgb(34, 12, 59)'
563 'rgb(34, 12, 59)'
564 >>> color_hash('hello')
564 >>> color_hash('hello')
565 'rgb(34, 12, 59)'
565 'rgb(34, 12, 59)'
566 >>> color_hash('other')
566 >>> color_hash('other')
567 'rgb(90, 224, 159)'
567 'rgb(90, 224, 159)'
568 """
568 """
569
569
570 color_dict = {}
570 color_dict = {}
571 cgenerator = unique_color_generator(
571 cgenerator = unique_color_generator(
572 saturation=saturation, lightness=lightness)
572 saturation=saturation, lightness=lightness)
573
573
574 def get_color_string(thing):
574 def get_color_string(thing):
575 if thing in color_dict:
575 if thing in color_dict:
576 col = color_dict[thing]
576 col = color_dict[thing]
577 else:
577 else:
578 col = color_dict[thing] = cgenerator.next()
578 col = color_dict[thing] = cgenerator.next()
579 return "rgb(%s)" % (', '.join(col))
579 return "rgb(%s)" % (', '.join(col))
580
580
581 return get_color_string
581 return get_color_string
582
582
583
583
584 def get_lexer_safe(mimetype=None, filepath=None):
584 def get_lexer_safe(mimetype=None, filepath=None):
585 """
585 """
586 Tries to return a relevant pygments lexer using mimetype/filepath name,
586 Tries to return a relevant pygments lexer using mimetype/filepath name,
587 defaulting to plain text if none could be found
587 defaulting to plain text if none could be found
588 """
588 """
589 lexer = None
589 lexer = None
590 try:
590 try:
591 if mimetype:
591 if mimetype:
592 lexer = get_lexer_for_mimetype(mimetype)
592 lexer = get_lexer_for_mimetype(mimetype)
593 if not lexer:
593 if not lexer:
594 lexer = get_lexer_for_filename(filepath)
594 lexer = get_lexer_for_filename(filepath)
595 except pygments.util.ClassNotFound:
595 except pygments.util.ClassNotFound:
596 pass
596 pass
597
597
598 if not lexer:
598 if not lexer:
599 lexer = get_lexer_by_name('text')
599 lexer = get_lexer_by_name('text')
600
600
601 return lexer
601 return lexer
602
602
603
603
604 def get_lexer_for_filenode(filenode):
604 def get_lexer_for_filenode(filenode):
605 lexer = get_custom_lexer(filenode.extension) or filenode.lexer
605 lexer = get_custom_lexer(filenode.extension) or filenode.lexer
606 return lexer
606 return lexer
607
607
608
608
609 def pygmentize(filenode, **kwargs):
609 def pygmentize(filenode, **kwargs):
610 """
610 """
611 pygmentize function using pygments
611 pygmentize function using pygments
612
612
613 :param filenode:
613 :param filenode:
614 """
614 """
615 lexer = get_lexer_for_filenode(filenode)
615 lexer = get_lexer_for_filenode(filenode)
616 return literal(code_highlight(filenode.content, lexer,
616 return literal(code_highlight(filenode.content, lexer,
617 CodeHtmlFormatter(**kwargs)))
617 CodeHtmlFormatter(**kwargs)))
618
618
619
619
620 def is_following_repo(repo_name, user_id):
620 def is_following_repo(repo_name, user_id):
621 from rhodecode.model.scm import ScmModel
621 from rhodecode.model.scm import ScmModel
622 return ScmModel().is_following_repo(repo_name, user_id)
622 return ScmModel().is_following_repo(repo_name, user_id)
623
623
624
624
625 class _Message(object):
625 class _Message(object):
626 """A message returned by ``Flash.pop_messages()``.
626 """A message returned by ``Flash.pop_messages()``.
627
627
628 Converting the message to a string returns the message text. Instances
628 Converting the message to a string returns the message text. Instances
629 also have the following attributes:
629 also have the following attributes:
630
630
631 * ``message``: the message text.
631 * ``message``: the message text.
632 * ``category``: the category specified when the message was created.
632 * ``category``: the category specified when the message was created.
633 """
633 """
634
634
635 def __init__(self, category, message):
635 def __init__(self, category, message):
636 self.category = category
636 self.category = category
637 self.message = message
637 self.message = message
638
638
639 def __str__(self):
639 def __str__(self):
640 return self.message
640 return self.message
641
641
642 __unicode__ = __str__
642 __unicode__ = __str__
643
643
644 def __html__(self):
644 def __html__(self):
645 return escape(safe_unicode(self.message))
645 return escape(safe_unicode(self.message))
646
646
647
647
648 class Flash(object):
648 class Flash(object):
649 # List of allowed categories. If None, allow any category.
649 # List of allowed categories. If None, allow any category.
650 categories = ["warning", "notice", "error", "success"]
650 categories = ["warning", "notice", "error", "success"]
651
651
652 # Default category if none is specified.
652 # Default category if none is specified.
653 default_category = "notice"
653 default_category = "notice"
654
654
655 def __init__(self, session_key="flash", categories=None,
655 def __init__(self, session_key="flash", categories=None,
656 default_category=None):
656 default_category=None):
657 """
657 """
658 Instantiate a ``Flash`` object.
658 Instantiate a ``Flash`` object.
659
659
660 ``session_key`` is the key to save the messages under in the user's
660 ``session_key`` is the key to save the messages under in the user's
661 session.
661 session.
662
662
663 ``categories`` is an optional list which overrides the default list
663 ``categories`` is an optional list which overrides the default list
664 of categories.
664 of categories.
665
665
666 ``default_category`` overrides the default category used for messages
666 ``default_category`` overrides the default category used for messages
667 when none is specified.
667 when none is specified.
668 """
668 """
669 self.session_key = session_key
669 self.session_key = session_key
670 if categories is not None:
670 if categories is not None:
671 self.categories = categories
671 self.categories = categories
672 if default_category is not None:
672 if default_category is not None:
673 self.default_category = default_category
673 self.default_category = default_category
674 if self.categories and self.default_category not in self.categories:
674 if self.categories and self.default_category not in self.categories:
675 raise ValueError(
675 raise ValueError(
676 "unrecognized default category %r" % (self.default_category,))
676 "unrecognized default category %r" % (self.default_category,))
677
677
678 def pop_messages(self, session=None, request=None):
678 def pop_messages(self, session=None, request=None):
679 """
679 """
680 Return all accumulated messages and delete them from the session.
680 Return all accumulated messages and delete them from the session.
681
681
682 The return value is a list of ``Message`` objects.
682 The return value is a list of ``Message`` objects.
683 """
683 """
684 messages = []
684 messages = []
685
685
686 if not session:
686 if not session:
687 if not request:
687 if not request:
688 request = get_current_request()
688 request = get_current_request()
689 session = request.session
689 session = request.session
690
690
691 # Pop the 'old' pylons flash messages. They are tuples of the form
691 # Pop the 'old' pylons flash messages. They are tuples of the form
692 # (category, message)
692 # (category, message)
693 for cat, msg in session.pop(self.session_key, []):
693 for cat, msg in session.pop(self.session_key, []):
694 messages.append(_Message(cat, msg))
694 messages.append(_Message(cat, msg))
695
695
696 # Pop the 'new' pyramid flash messages for each category as list
696 # Pop the 'new' pyramid flash messages for each category as list
697 # of strings.
697 # of strings.
698 for cat in self.categories:
698 for cat in self.categories:
699 for msg in session.pop_flash(queue=cat):
699 for msg in session.pop_flash(queue=cat):
700 messages.append(_Message(cat, msg))
700 messages.append(_Message(cat, msg))
701 # Map messages from the default queue to the 'notice' category.
701 # Map messages from the default queue to the 'notice' category.
702 for msg in session.pop_flash():
702 for msg in session.pop_flash():
703 messages.append(_Message('notice', msg))
703 messages.append(_Message('notice', msg))
704
704
705 session.save()
705 session.save()
706 return messages
706 return messages
707
707
708 def json_alerts(self, session=None, request=None):
708 def json_alerts(self, session=None, request=None):
709 payloads = []
709 payloads = []
710 messages = flash.pop_messages(session=session, request=request)
710 messages = flash.pop_messages(session=session, request=request)
711 if messages:
711 if messages:
712 for message in messages:
712 for message in messages:
713 subdata = {}
713 subdata = {}
714 if hasattr(message.message, 'rsplit'):
714 if hasattr(message.message, 'rsplit'):
715 flash_data = message.message.rsplit('|DELIM|', 1)
715 flash_data = message.message.rsplit('|DELIM|', 1)
716 org_message = flash_data[0]
716 org_message = flash_data[0]
717 if len(flash_data) > 1:
717 if len(flash_data) > 1:
718 subdata = json.loads(flash_data[1])
718 subdata = json.loads(flash_data[1])
719 else:
719 else:
720 org_message = message.message
720 org_message = message.message
721 payloads.append({
721 payloads.append({
722 'message': {
722 'message': {
723 'message': u'{}'.format(org_message),
723 'message': u'{}'.format(org_message),
724 'level': message.category,
724 'level': message.category,
725 'force': True,
725 'force': True,
726 'subdata': subdata
726 'subdata': subdata
727 }
727 }
728 })
728 })
729 return json.dumps(payloads)
729 return json.dumps(payloads)
730
730
731 def __call__(self, message, category=None, ignore_duplicate=False,
731 def __call__(self, message, category=None, ignore_duplicate=False,
732 session=None, request=None):
732 session=None, request=None):
733
733
734 if not session:
734 if not session:
735 if not request:
735 if not request:
736 request = get_current_request()
736 request = get_current_request()
737 session = request.session
737 session = request.session
738
738
739 session.flash(
739 session.flash(
740 message, queue=category, allow_duplicate=not ignore_duplicate)
740 message, queue=category, allow_duplicate=not ignore_duplicate)
741
741
742
742
743 flash = Flash()
743 flash = Flash()
744
744
745 #==============================================================================
745 #==============================================================================
746 # SCM FILTERS available via h.
746 # SCM FILTERS available via h.
747 #==============================================================================
747 #==============================================================================
748 from rhodecode.lib.vcs.utils import author_name, author_email
748 from rhodecode.lib.vcs.utils import author_name, author_email
749 from rhodecode.lib.utils2 import credentials_filter, age as _age
749 from rhodecode.lib.utils2 import credentials_filter, age as _age
750 from rhodecode.model.db import User, ChangesetStatus
750 from rhodecode.model.db import User, ChangesetStatus
751
751
752 age = _age
752 age = _age
753 capitalize = lambda x: x.capitalize()
753 capitalize = lambda x: x.capitalize()
754 email = author_email
754 email = author_email
755 short_id = lambda x: x[:12]
755 short_id = lambda x: x[:12]
756 hide_credentials = lambda x: ''.join(credentials_filter(x))
756 hide_credentials = lambda x: ''.join(credentials_filter(x))
757
757
758
758
759 def age_component(datetime_iso, value=None, time_is_local=False):
759 def age_component(datetime_iso, value=None, time_is_local=False):
760 title = value or format_date(datetime_iso)
760 title = value or format_date(datetime_iso)
761 tzinfo = '+00:00'
761 tzinfo = '+00:00'
762
762
763 # detect if we have a timezone info, otherwise, add it
763 # detect if we have a timezone info, otherwise, add it
764 if isinstance(datetime_iso, datetime) and not datetime_iso.tzinfo:
764 if isinstance(datetime_iso, datetime) and not datetime_iso.tzinfo:
765 if time_is_local:
765 if time_is_local:
766 tzinfo = time.strftime("+%H:%M",
766 tzinfo = time.strftime("+%H:%M",
767 time.gmtime(
767 time.gmtime(
768 (datetime.now() - datetime.utcnow()).seconds + 1
768 (datetime.now() - datetime.utcnow()).seconds + 1
769 )
769 )
770 )
770 )
771
771
772 return literal(
772 return literal(
773 '<time class="timeago tooltip" '
773 '<time class="timeago tooltip" '
774 'title="{1}{2}" datetime="{0}{2}">{1}</time>'.format(
774 'title="{1}{2}" datetime="{0}{2}">{1}</time>'.format(
775 datetime_iso, title, tzinfo))
775 datetime_iso, title, tzinfo))
776
776
777
777
778 def _shorten_commit_id(commit_id):
778 def _shorten_commit_id(commit_id):
779 from rhodecode import CONFIG
779 from rhodecode import CONFIG
780 def_len = safe_int(CONFIG.get('rhodecode_show_sha_length', 12))
780 def_len = safe_int(CONFIG.get('rhodecode_show_sha_length', 12))
781 return commit_id[:def_len]
781 return commit_id[:def_len]
782
782
783
783
784 def show_id(commit):
784 def show_id(commit):
785 """
785 """
786 Configurable function that shows ID
786 Configurable function that shows ID
787 by default it's r123:fffeeefffeee
787 by default it's r123:fffeeefffeee
788
788
789 :param commit: commit instance
789 :param commit: commit instance
790 """
790 """
791 from rhodecode import CONFIG
791 from rhodecode import CONFIG
792 show_idx = str2bool(CONFIG.get('rhodecode_show_revision_number', True))
792 show_idx = str2bool(CONFIG.get('rhodecode_show_revision_number', True))
793
793
794 raw_id = _shorten_commit_id(commit.raw_id)
794 raw_id = _shorten_commit_id(commit.raw_id)
795 if show_idx:
795 if show_idx:
796 return 'r%s:%s' % (commit.idx, raw_id)
796 return 'r%s:%s' % (commit.idx, raw_id)
797 else:
797 else:
798 return '%s' % (raw_id, )
798 return '%s' % (raw_id, )
799
799
800
800
801 def format_date(date):
801 def format_date(date):
802 """
802 """
803 use a standardized formatting for dates used in RhodeCode
803 use a standardized formatting for dates used in RhodeCode
804
804
805 :param date: date/datetime object
805 :param date: date/datetime object
806 :return: formatted date
806 :return: formatted date
807 """
807 """
808
808
809 if date:
809 if date:
810 _fmt = "%a, %d %b %Y %H:%M:%S"
810 _fmt = "%a, %d %b %Y %H:%M:%S"
811 return safe_unicode(date.strftime(_fmt))
811 return safe_unicode(date.strftime(_fmt))
812
812
813 return u""
813 return u""
814
814
815
815
816 class _RepoChecker(object):
816 class _RepoChecker(object):
817
817
818 def __init__(self, backend_alias):
818 def __init__(self, backend_alias):
819 self._backend_alias = backend_alias
819 self._backend_alias = backend_alias
820
820
821 def __call__(self, repository):
821 def __call__(self, repository):
822 if hasattr(repository, 'alias'):
822 if hasattr(repository, 'alias'):
823 _type = repository.alias
823 _type = repository.alias
824 elif hasattr(repository, 'repo_type'):
824 elif hasattr(repository, 'repo_type'):
825 _type = repository.repo_type
825 _type = repository.repo_type
826 else:
826 else:
827 _type = repository
827 _type = repository
828 return _type == self._backend_alias
828 return _type == self._backend_alias
829
829
830 is_git = _RepoChecker('git')
830 is_git = _RepoChecker('git')
831 is_hg = _RepoChecker('hg')
831 is_hg = _RepoChecker('hg')
832 is_svn = _RepoChecker('svn')
832 is_svn = _RepoChecker('svn')
833
833
834
834
835 def get_repo_type_by_name(repo_name):
835 def get_repo_type_by_name(repo_name):
836 repo = Repository.get_by_repo_name(repo_name)
836 repo = Repository.get_by_repo_name(repo_name)
837 return repo.repo_type
837 return repo.repo_type
838
838
839
839
840 def is_svn_without_proxy(repository):
840 def is_svn_without_proxy(repository):
841 if is_svn(repository):
841 if is_svn(repository):
842 from rhodecode.model.settings import VcsSettingsModel
842 from rhodecode.model.settings import VcsSettingsModel
843 conf = VcsSettingsModel().get_ui_settings_as_config_obj()
843 conf = VcsSettingsModel().get_ui_settings_as_config_obj()
844 return not str2bool(conf.get('vcs_svn_proxy', 'http_requests_enabled'))
844 return not str2bool(conf.get('vcs_svn_proxy', 'http_requests_enabled'))
845 return False
845 return False
846
846
847
847
848 def discover_user(author):
848 def discover_user(author):
849 """
849 """
850 Tries to discover RhodeCode User based on the autho string. Author string
850 Tries to discover RhodeCode User based on the autho string. Author string
851 is typically `FirstName LastName <email@address.com>`
851 is typically `FirstName LastName <email@address.com>`
852 """
852 """
853
853
854 # if author is already an instance use it for extraction
854 # if author is already an instance use it for extraction
855 if isinstance(author, User):
855 if isinstance(author, User):
856 return author
856 return author
857
857
858 # Valid email in the attribute passed, see if they're in the system
858 # Valid email in the attribute passed, see if they're in the system
859 _email = author_email(author)
859 _email = author_email(author)
860 if _email != '':
860 if _email != '':
861 user = User.get_by_email(_email, case_insensitive=True, cache=True)
861 user = User.get_by_email(_email, case_insensitive=True, cache=True)
862 if user is not None:
862 if user is not None:
863 return user
863 return user
864
864
865 # Maybe it's a username, we try to extract it and fetch by username ?
865 # Maybe it's a username, we try to extract it and fetch by username ?
866 _author = author_name(author)
866 _author = author_name(author)
867 user = User.get_by_username(_author, case_insensitive=True, cache=True)
867 user = User.get_by_username(_author, case_insensitive=True, cache=True)
868 if user is not None:
868 if user is not None:
869 return user
869 return user
870
870
871 return None
871 return None
872
872
873
873
874 def email_or_none(author):
874 def email_or_none(author):
875 # extract email from the commit string
875 # extract email from the commit string
876 _email = author_email(author)
876 _email = author_email(author)
877
877
878 # If we have an email, use it, otherwise
878 # If we have an email, use it, otherwise
879 # see if it contains a username we can get an email from
879 # see if it contains a username we can get an email from
880 if _email != '':
880 if _email != '':
881 return _email
881 return _email
882 else:
882 else:
883 user = User.get_by_username(
883 user = User.get_by_username(
884 author_name(author), case_insensitive=True, cache=True)
884 author_name(author), case_insensitive=True, cache=True)
885
885
886 if user is not None:
886 if user is not None:
887 return user.email
887 return user.email
888
888
889 # No valid email, not a valid user in the system, none!
889 # No valid email, not a valid user in the system, none!
890 return None
890 return None
891
891
892
892
893 def link_to_user(author, length=0, **kwargs):
893 def link_to_user(author, length=0, **kwargs):
894 user = discover_user(author)
894 user = discover_user(author)
895 # user can be None, but if we have it already it means we can re-use it
895 # user can be None, but if we have it already it means we can re-use it
896 # in the person() function, so we save 1 intensive-query
896 # in the person() function, so we save 1 intensive-query
897 if user:
897 if user:
898 author = user
898 author = user
899
899
900 display_person = person(author, 'username_or_name_or_email')
900 display_person = person(author, 'username_or_name_or_email')
901 if length:
901 if length:
902 display_person = shorter(display_person, length)
902 display_person = shorter(display_person, length)
903
903
904 if user:
904 if user:
905 return link_to(
905 return link_to(
906 escape(display_person),
906 escape(display_person),
907 route_path('user_profile', username=user.username),
907 route_path('user_profile', username=user.username),
908 **kwargs)
908 **kwargs)
909 else:
909 else:
910 return escape(display_person)
910 return escape(display_person)
911
911
912
912
913 def person(author, show_attr="username_and_name"):
913 def person(author, show_attr="username_and_name"):
914 user = discover_user(author)
914 user = discover_user(author)
915 if user:
915 if user:
916 return getattr(user, show_attr)
916 return getattr(user, show_attr)
917 else:
917 else:
918 _author = author_name(author)
918 _author = author_name(author)
919 _email = email(author)
919 _email = email(author)
920 return _author or _email
920 return _author or _email
921
921
922
922
923 def author_string(email):
923 def author_string(email):
924 if email:
924 if email:
925 user = User.get_by_email(email, case_insensitive=True, cache=True)
925 user = User.get_by_email(email, case_insensitive=True, cache=True)
926 if user:
926 if user:
927 if user.first_name or user.last_name:
927 if user.first_name or user.last_name:
928 return '%s %s &lt;%s&gt;' % (
928 return '%s %s &lt;%s&gt;' % (
929 user.first_name, user.last_name, email)
929 user.first_name, user.last_name, email)
930 else:
930 else:
931 return email
931 return email
932 else:
932 else:
933 return email
933 return email
934 else:
934 else:
935 return None
935 return None
936
936
937
937
938 def person_by_id(id_, show_attr="username_and_name"):
938 def person_by_id(id_, show_attr="username_and_name"):
939 # attr to return from fetched user
939 # attr to return from fetched user
940 person_getter = lambda usr: getattr(usr, show_attr)
940 person_getter = lambda usr: getattr(usr, show_attr)
941
941
942 #maybe it's an ID ?
942 #maybe it's an ID ?
943 if str(id_).isdigit() or isinstance(id_, int):
943 if str(id_).isdigit() or isinstance(id_, int):
944 id_ = int(id_)
944 id_ = int(id_)
945 user = User.get(id_)
945 user = User.get(id_)
946 if user is not None:
946 if user is not None:
947 return person_getter(user)
947 return person_getter(user)
948 return id_
948 return id_
949
949
950
950
951 def gravatar_with_user(request, author, show_disabled=False):
951 def gravatar_with_user(request, author, show_disabled=False):
952 _render = request.get_partial_renderer('base/base.mako')
952 _render = request.get_partial_renderer('base/base.mako')
953 return _render('gravatar_with_user', author, show_disabled=show_disabled)
953 return _render('gravatar_with_user', author, show_disabled=show_disabled)
954
954
955
955
956 tags_paterns = OrderedDict((
956 tags_paterns = OrderedDict((
957 ('lang', (re.compile(r'\[(lang|language)\ \=\&gt;\ *([a-zA-Z\-\/\#\+\.]*)\]'),
957 ('lang', (re.compile(r'\[(lang|language)\ \=\&gt;\ *([a-zA-Z\-\/\#\+\.]*)\]'),
958 '<div class="metatag" tag="lang">\\2</div>')),
958 '<div class="metatag" tag="lang">\\2</div>')),
959
959
960 ('see', (re.compile(r'\[see\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]'),
960 ('see', (re.compile(r'\[see\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]'),
961 '<div class="metatag" tag="see">see: \\1 </div>')),
961 '<div class="metatag" tag="see">see: \\1 </div>')),
962
962
963 ('url', (re.compile(r'\[url\ \=\&gt;\ \[([a-zA-Z0-9\ \.\-\_]+)\]\((.*?)\)\]'),
963 ('url', (re.compile(r'\[url\ \=\&gt;\ \[([a-zA-Z0-9\ \.\-\_]+)\]\((.*?)\)\]'),
964 '<div class="metatag" tag="url"> <a href="\\2">\\1</a> </div>')),
964 '<div class="metatag" tag="url"> <a href="\\2">\\1</a> </div>')),
965
965
966 ('license', (re.compile(r'\[license\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]'),
966 ('license', (re.compile(r'\[license\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]'),
967 '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/\\1">\\1</a></div>')),
967 '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/\\1">\\1</a></div>')),
968
968
969 ('ref', (re.compile(r'\[(requires|recommends|conflicts|base)\ \=\&gt;\ *([a-zA-Z0-9\-\/]*)\]'),
969 ('ref', (re.compile(r'\[(requires|recommends|conflicts|base)\ \=\&gt;\ *([a-zA-Z0-9\-\/]*)\]'),
970 '<div class="metatag" tag="ref \\1">\\1: <a href="/\\2">\\2</a></div>')),
970 '<div class="metatag" tag="ref \\1">\\1: <a href="/\\2">\\2</a></div>')),
971
971
972 ('state', (re.compile(r'\[(stable|featured|stale|dead|dev|deprecated)\]'),
972 ('state', (re.compile(r'\[(stable|featured|stale|dead|dev|deprecated)\]'),
973 '<div class="metatag" tag="state \\1">\\1</div>')),
973 '<div class="metatag" tag="state \\1">\\1</div>')),
974
974
975 # label in grey
975 # label in grey
976 ('label', (re.compile(r'\[([a-z]+)\]'),
976 ('label', (re.compile(r'\[([a-z]+)\]'),
977 '<div class="metatag" tag="label">\\1</div>')),
977 '<div class="metatag" tag="label">\\1</div>')),
978
978
979 # generic catch all in grey
979 # generic catch all in grey
980 ('generic', (re.compile(r'\[([a-zA-Z0-9\.\-\_]+)\]'),
980 ('generic', (re.compile(r'\[([a-zA-Z0-9\.\-\_]+)\]'),
981 '<div class="metatag" tag="generic">\\1</div>')),
981 '<div class="metatag" tag="generic">\\1</div>')),
982 ))
982 ))
983
983
984
984
985 def extract_metatags(value):
985 def extract_metatags(value):
986 """
986 """
987 Extract supported meta-tags from given text value
987 Extract supported meta-tags from given text value
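Illustrative example (behaviour inferred from the patterns above; the input
strings are hypothetical)::

    extract_metatags('[stable] [lang =&gt; python] elixir repo')
    # -> ([('lang', '[lang =&gt; python]'), ('state', '[stable]')], '  elixir repo')
    extract_metatags(None)
    # -> ([], '')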
988 """
988 """
989 tags = []
989 if not value:
990 if not value:
990 return ''
991 return tags, ''
991
992
992 tags = []
993 for key, val in tags_paterns.items():
993 for key, val in tags_paterns.items():
994 pat, replace_html = val
994 pat, replace_html = val
995 tags.extend([(key, x.group()) for x in pat.finditer(value)])
995 tags.extend([(key, x.group()) for x in pat.finditer(value)])
996 value = pat.sub('', value)
996 value = pat.sub('', value)
997
997
998 return tags, value
998 return tags, value
999
999
1000
1000
1001 def style_metatag(tag_type, value):
1001 def style_metatag(tag_type, value):
1002 """
1002 """
1003 converts tags found in the value into their html equivalent
1003 converts tags found in the value into their html equivalent
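Illustrative example (assuming the `state` pattern defined in tags_paterns above)::

    style_metatag('state', '[stable]')
    # -> '<div class="metatag" tag="state stable">stable</div>'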
1004 """
1004 """
1005 if not value:
1005 if not value:
1006 return ''
1006 return ''
1007
1007
1008 html_value = value
1008 html_value = value
1009 tag_data = tags_paterns.get(tag_type)
1009 tag_data = tags_paterns.get(tag_type)
1010 if tag_data:
1010 if tag_data:
1011 pat, replace_html = tag_data
1011 pat, replace_html = tag_data
1012 # convert to plain `unicode` instead of a markup tag to be used in
1012 # convert to plain `unicode` instead of a markup tag to be used in
1013 # regex expressions. safe_unicode doesn't work here
1013 # regex expressions. safe_unicode doesn't work here
1014 html_value = pat.sub(replace_html, unicode(value))
1014 html_value = pat.sub(replace_html, unicode(value))
1015
1015
1016 return html_value
1016 return html_value
1017
1017
1018
1018
1019 def bool2icon(value):
1019 def bool2icon(value):
1020 """
1020 """
1021 Returns the boolean value of a given value, rendered as an html element with
1021 Returns the boolean value of a given value, rendered as an html element with
1022 classes that represent icons
1022 classes that represent icons
1023
1023
1024 :param value: given value to convert to html node
1024 :param value: given value to convert to html node
1025 """
1025 """
1026
1026
1027 if value: # does bool conversion
1027 if value: # does bool conversion
1028 return HTML.tag('i', class_="icon-true")
1028 return HTML.tag('i', class_="icon-true")
1029 else: # not true as bool
1029 else: # not true as bool
1030 return HTML.tag('i', class_="icon-false")
1030 return HTML.tag('i', class_="icon-false")
1031
1031
1032
1032
1033 #==============================================================================
1033 #==============================================================================
1034 # PERMS
1034 # PERMS
1035 #==============================================================================
1035 #==============================================================================
1036 from rhodecode.lib.auth import HasPermissionAny, HasPermissionAll, \
1036 from rhodecode.lib.auth import HasPermissionAny, HasPermissionAll, \
1037 HasRepoPermissionAny, HasRepoPermissionAll, HasRepoGroupPermissionAll, \
1037 HasRepoPermissionAny, HasRepoPermissionAll, HasRepoGroupPermissionAll, \
1038 HasRepoGroupPermissionAny, HasRepoPermissionAnyApi, get_csrf_token, \
1038 HasRepoGroupPermissionAny, HasRepoPermissionAnyApi, get_csrf_token, \
1039 csrf_token_key
1039 csrf_token_key
1040
1040
1041
1041
1042 #==============================================================================
1042 #==============================================================================
1043 # GRAVATAR URL
1043 # GRAVATAR URL
1044 #==============================================================================
1044 #==============================================================================
1045 class InitialsGravatar(object):
1045 class InitialsGravatar(object):
1046 def __init__(self, email_address, first_name, last_name, size=30,
1046 def __init__(self, email_address, first_name, last_name, size=30,
1047 background=None, text_color='#fff'):
1047 background=None, text_color='#fff'):
1048 self.size = size
1048 self.size = size
1049 self.first_name = first_name
1049 self.first_name = first_name
1050 self.last_name = last_name
1050 self.last_name = last_name
1051 self.email_address = email_address
1051 self.email_address = email_address
1052 self.background = background or self.str2color(email_address)
1052 self.background = background or self.str2color(email_address)
1053 self.text_color = text_color
1053 self.text_color = text_color
1054
1054
1055 def get_color_bank(self):
1055 def get_color_bank(self):
1056 """
1056 """
1057 returns a predefined list of colors that gravatars can use.
1057 returns a predefined list of colors that gravatars can use.
1058 Those are randomized distinct colors that guarantee readability and
1058 Those are randomized distinct colors that guarantee readability and
1059 uniqueness.
1059 uniqueness.
1060
1060
1061 generated with: http://phrogz.net/css/distinct-colors.html
1061 generated with: http://phrogz.net/css/distinct-colors.html
1062 """
1062 """
1063 return [
1063 return [
1064 '#bf3030', '#a67f53', '#00ff00', '#5989b3', '#392040', '#d90000',
1064 '#bf3030', '#a67f53', '#00ff00', '#5989b3', '#392040', '#d90000',
1065 '#402910', '#204020', '#79baf2', '#a700b3', '#bf6060', '#7f5320',
1065 '#402910', '#204020', '#79baf2', '#a700b3', '#bf6060', '#7f5320',
1066 '#008000', '#003059', '#ee00ff', '#ff0000', '#8c4b00', '#007300',
1066 '#008000', '#003059', '#ee00ff', '#ff0000', '#8c4b00', '#007300',
1067 '#005fb3', '#de73e6', '#ff4040', '#ffaa00', '#3df255', '#203140',
1067 '#005fb3', '#de73e6', '#ff4040', '#ffaa00', '#3df255', '#203140',
1068 '#47004d', '#591616', '#664400', '#59b365', '#0d2133', '#83008c',
1068 '#47004d', '#591616', '#664400', '#59b365', '#0d2133', '#83008c',
1069 '#592d2d', '#bf9f60', '#73e682', '#1d3f73', '#73006b', '#402020',
1069 '#592d2d', '#bf9f60', '#73e682', '#1d3f73', '#73006b', '#402020',
1070 '#b2862d', '#397341', '#597db3', '#e600d6', '#a60000', '#736039',
1070 '#b2862d', '#397341', '#597db3', '#e600d6', '#a60000', '#736039',
1071 '#00b318', '#79aaf2', '#330d30', '#ff8080', '#403010', '#16591f',
1071 '#00b318', '#79aaf2', '#330d30', '#ff8080', '#403010', '#16591f',
1072 '#002459', '#8c4688', '#e50000', '#ffbf40', '#00732e', '#102340',
1072 '#002459', '#8c4688', '#e50000', '#ffbf40', '#00732e', '#102340',
1073 '#bf60ac', '#8c4646', '#cc8800', '#00a642', '#1d3473', '#b32d98',
1073 '#bf60ac', '#8c4646', '#cc8800', '#00a642', '#1d3473', '#b32d98',
1074 '#660e00', '#ffd580', '#80ffb2', '#7391e6', '#733967', '#d97b6c',
1074 '#660e00', '#ffd580', '#80ffb2', '#7391e6', '#733967', '#d97b6c',
1075 '#8c5e00', '#59b389', '#3967e6', '#590047', '#73281d', '#665200',
1075 '#8c5e00', '#59b389', '#3967e6', '#590047', '#73281d', '#665200',
1076 '#00e67a', '#2d50b3', '#8c2377', '#734139', '#b2982d', '#16593a',
1076 '#00e67a', '#2d50b3', '#8c2377', '#734139', '#b2982d', '#16593a',
1077 '#001859', '#ff00aa', '#a65e53', '#ffcc00', '#0d3321', '#2d3959',
1077 '#001859', '#ff00aa', '#a65e53', '#ffcc00', '#0d3321', '#2d3959',
1078 '#731d56', '#401610', '#4c3d00', '#468c6c', '#002ca6', '#d936a3',
1078 '#731d56', '#401610', '#4c3d00', '#468c6c', '#002ca6', '#d936a3',
1079 '#d94c36', '#403920', '#36d9a3', '#0d1733', '#592d4a', '#993626',
1079 '#d94c36', '#403920', '#36d9a3', '#0d1733', '#592d4a', '#993626',
1080 '#cca300', '#00734d', '#46598c', '#8c005e', '#7f1100', '#8c7000',
1080 '#cca300', '#00734d', '#46598c', '#8c005e', '#7f1100', '#8c7000',
1081 '#00a66f', '#7382e6', '#b32d74', '#d9896c', '#ffe680', '#1d7362',
1081 '#00a66f', '#7382e6', '#b32d74', '#d9896c', '#ffe680', '#1d7362',
1082 '#364cd9', '#73003d', '#d93a00', '#998a4d', '#59b3a1', '#5965b3',
1082 '#364cd9', '#73003d', '#d93a00', '#998a4d', '#59b3a1', '#5965b3',
1083 '#e5007a', '#73341d', '#665f00', '#00b38f', '#0018b3', '#59163a',
1083 '#e5007a', '#73341d', '#665f00', '#00b38f', '#0018b3', '#59163a',
1084 '#b2502d', '#bfb960', '#00ffcc', '#23318c', '#a6537f', '#734939',
1084 '#b2502d', '#bfb960', '#00ffcc', '#23318c', '#a6537f', '#734939',
1085 '#b2a700', '#104036', '#3d3df2', '#402031', '#e56739', '#736f39',
1085 '#b2a700', '#104036', '#3d3df2', '#402031', '#e56739', '#736f39',
1086 '#79f2ea', '#000059', '#401029', '#4c1400', '#ffee00', '#005953',
1086 '#79f2ea', '#000059', '#401029', '#4c1400', '#ffee00', '#005953',
1087 '#101040', '#990052', '#402820', '#403d10', '#00ffee', '#0000d9',
1087 '#101040', '#990052', '#402820', '#403d10', '#00ffee', '#0000d9',
1088 '#ff80c4', '#a66953', '#eeff00', '#00ccbe', '#8080ff', '#e673a1',
1088 '#ff80c4', '#a66953', '#eeff00', '#00ccbe', '#8080ff', '#e673a1',
1089 '#a62c00', '#474d00', '#1a3331', '#46468c', '#733950', '#662900',
1089 '#a62c00', '#474d00', '#1a3331', '#46468c', '#733950', '#662900',
1090 '#858c23', '#238c85', '#0f0073', '#b20047', '#d9986c', '#becc00',
1090 '#858c23', '#238c85', '#0f0073', '#b20047', '#d9986c', '#becc00',
1091 '#396f73', '#281d73', '#ff0066', '#ff6600', '#dee673', '#59adb3',
1091 '#396f73', '#281d73', '#ff0066', '#ff6600', '#dee673', '#59adb3',
1092 '#6559b3', '#590024', '#b2622d', '#98b32d', '#36ced9', '#332d59',
1092 '#6559b3', '#590024', '#b2622d', '#98b32d', '#36ced9', '#332d59',
1093 '#40001a', '#733f1d', '#526600', '#005359', '#242040', '#bf6079',
1093 '#40001a', '#733f1d', '#526600', '#005359', '#242040', '#bf6079',
1094 '#735039', '#cef23d', '#007780', '#5630bf', '#66001b', '#b24700',
1094 '#735039', '#cef23d', '#007780', '#5630bf', '#66001b', '#b24700',
1095 '#acbf60', '#1d6273', '#25008c', '#731d34', '#a67453', '#50592d',
1095 '#acbf60', '#1d6273', '#25008c', '#731d34', '#a67453', '#50592d',
1096 '#00ccff', '#6600ff', '#ff0044', '#4c1f00', '#8a994d', '#79daf2',
1096 '#00ccff', '#6600ff', '#ff0044', '#4c1f00', '#8a994d', '#79daf2',
1097 '#a173e6', '#d93662', '#402310', '#aaff00', '#2d98b3', '#8c40ff',
1097 '#a173e6', '#d93662', '#402310', '#aaff00', '#2d98b3', '#8c40ff',
1098 '#592d39', '#ff8c40', '#354020', '#103640', '#1a0040', '#331a20',
1098 '#592d39', '#ff8c40', '#354020', '#103640', '#1a0040', '#331a20',
1099 '#331400', '#334d00', '#1d5673', '#583973', '#7f0022', '#4c3626',
1099 '#331400', '#334d00', '#1d5673', '#583973', '#7f0022', '#4c3626',
1100 '#88cc00', '#36a3d9', '#3d0073', '#d9364c', '#33241a', '#698c23',
1100 '#88cc00', '#36a3d9', '#3d0073', '#d9364c', '#33241a', '#698c23',
1101 '#5995b3', '#300059', '#e57382', '#7f3300', '#366600', '#00aaff',
1101 '#5995b3', '#300059', '#e57382', '#7f3300', '#366600', '#00aaff',
1102 '#3a1659', '#733941', '#663600', '#74b32d', '#003c59', '#7f53a6',
1102 '#3a1659', '#733941', '#663600', '#74b32d', '#003c59', '#7f53a6',
1103 '#73000f', '#ff8800', '#baf279', '#79caf2', '#291040', '#a6293a',
1103 '#73000f', '#ff8800', '#baf279', '#79caf2', '#291040', '#a6293a',
1104 '#b2742d', '#587339', '#0077b3', '#632699', '#400009', '#d9a66c',
1104 '#b2742d', '#587339', '#0077b3', '#632699', '#400009', '#d9a66c',
1105 '#294010', '#2d4a59', '#aa00ff', '#4c131b', '#b25f00', '#5ce600',
1105 '#294010', '#2d4a59', '#aa00ff', '#4c131b', '#b25f00', '#5ce600',
1106 '#267399', '#a336d9', '#990014', '#664e33', '#86bf60', '#0088ff',
1106 '#267399', '#a336d9', '#990014', '#664e33', '#86bf60', '#0088ff',
1107 '#7700b3', '#593a16', '#073300', '#1d4b73', '#ac60bf', '#e59539',
1107 '#7700b3', '#593a16', '#073300', '#1d4b73', '#ac60bf', '#e59539',
1108 '#4f8c46', '#368dd9', '#5c0073'
1108 '#4f8c46', '#368dd9', '#5c0073'
1109 ]
1109 ]
1110
1110
1111 def rgb_to_hex_color(self, rgb_tuple):
1111 def rgb_to_hex_color(self, rgb_tuple):
1112 """
1112 """
1113 Converts a passed rgb_tuple into a hex color string.
1113 Converts a passed rgb_tuple into a hex color string.
1114
1114
1115 :param rgb_tuple: tuple with 3 ints represents rgb color space
1115 :param rgb_tuple: tuple with 3 ints represents rgb color space
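
Illustrative example (Python 2 semantics, matching the implementation below)::

    rgb_to_hex_color((191, 48, 48))  # -> '#bf3030'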
1116 """
1116 """
1117 return '#' + ("".join(map(chr, rgb_tuple)).encode('hex'))
1117 return '#' + ("".join(map(chr, rgb_tuple)).encode('hex'))
1118
1118
1119 def email_to_int_list(self, email_str):
1119 def email_to_int_list(self, email_str):
1120 """
1120 """
1121 Get every byte of the hex digest value of the email and turn it into an integer.
1121 Get every byte of the hex digest value of the email and turn it into an integer.
1122 Each value is always between 0-255.
1122 Each value is always between 0-255.
1123 """
1123 """
1124 digest = md5_safe(email_str.lower())
1124 digest = md5_safe(email_str.lower())
1125 return [int(digest[i * 2:i * 2 + 2], 16) for i in range(16)]
1125 return [int(digest[i * 2:i * 2 + 2], 16) for i in range(16)]
1126
1126
1127 def pick_color_bank_index(self, email_str, color_bank):
1127 def pick_color_bank_index(self, email_str, color_bank):
1128 return self.email_to_int_list(email_str)[0] % len(color_bank)
1128 return self.email_to_int_list(email_str)[0] % len(color_bank)
1129
1129
1130 def str2color(self, email_str):
1130 def str2color(self, email_str):
1131 """
1131 """
1132 Tries to map an email to a color using a stable algorithm
1132 Tries to map an email to a color using a stable algorithm
1133
1133
1134 :param email_str:
1134 :param email_str:
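
Illustrative note (hypothetical address): the first byte of the md5 digest of
the address (see email_to_int_list) indexes the bank modulo its size, so
str2color('user@example.com') always yields the same entry of get_color_bank()
for that address.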
1135 """
1135 """
1136 color_bank = self.get_color_bank()
1136 color_bank = self.get_color_bank()
1137 # pick position (modulo its length so we always find it in the
1137 # pick position (modulo its length so we always find it in the
1138 # bank even if it's smaller than 256 values)
1138 # bank even if it's smaller than 256 values)
1139 pos = self.pick_color_bank_index(email_str, color_bank)
1139 pos = self.pick_color_bank_index(email_str, color_bank)
1140 return color_bank[pos]
1140 return color_bank[pos]
1141
1141
1142 def normalize_email(self, email_address):
1142 def normalize_email(self, email_address):
1143 import unicodedata
1143 import unicodedata
1144 # default host used to fill in the fake/missing email
1144 # default host used to fill in the fake/missing email
1145 default_host = u'localhost'
1145 default_host = u'localhost'
1146
1146
1147 if not email_address:
1147 if not email_address:
1148 email_address = u'%s@%s' % (User.DEFAULT_USER, default_host)
1148 email_address = u'%s@%s' % (User.DEFAULT_USER, default_host)
1149
1149
1150 email_address = safe_unicode(email_address)
1150 email_address = safe_unicode(email_address)
1151
1151
1152 if u'@' not in email_address:
1152 if u'@' not in email_address:
1153 email_address = u'%s@%s' % (email_address, default_host)
1153 email_address = u'%s@%s' % (email_address, default_host)
1154
1154
1155 if email_address.endswith(u'@'):
1155 if email_address.endswith(u'@'):
1156 email_address = u'%s%s' % (email_address, default_host)
1156 email_address = u'%s%s' % (email_address, default_host)
1157
1157
1158 email_address = unicodedata.normalize('NFKD', email_address)\
1158 email_address = unicodedata.normalize('NFKD', email_address)\
1159 .encode('ascii', 'ignore')
1159 .encode('ascii', 'ignore')
1160 return email_address
1160 return email_address
1161
1161
1162 def get_initials(self):
1162 def get_initials(self):
1163 """
1163 """
1164 Returns 2 letter initials calculated based on the input.
1164 Returns 2 letter initials calculated based on the input.
1165 The algorithm picks the given email address, and takes the first letter
1165 The algorithm picks the given email address, and takes the first letter
1166 of the part before @, and then the first letter of the server name. In case
1166 of the part before @, and then the first letter of the server name. In case
1167 the part before @ is in a format of `somestring.somestring2` it replaces
1167 the part before @ is in a format of `somestring.somestring2` it replaces
1168 the server letter with the first letter of somestring2
1168 the server letter with the first letter of somestring2
1169
1169
1170 In case the function was initialized with both first and last name, this
1170 In case the function was initialized with both first and last name, this
1171 overrides the extraction from email by the first letter of the first and
1171 overrides the extraction from email by the first letter of the first and
1172 last name. We add special logic to that functionality: in case the full name
1172 last name. We add special logic to that functionality: in case the full name
1173 is compound, like Guido Von Rossum, we use the last part of the last name
1173 is compound, like Guido Von Rossum, we use the last part of the last name
1174 (Von Rossum), picking `R`.
1174 (Von Rossum), picking `R`.
1175
1175
1176 The function also normalizes non-ascii characters to their ascii
1176 The function also normalizes non-ascii characters to their ascii
1177 representation, eg Ą => A
1177 representation, eg Ą => A
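
Illustrative examples (hypothetical addresses and names)::

    InitialsGravatar('john.doe@example.com', '', '').get_initials()
    # -> 'JD' (taken from the dotted email prefix)
    InitialsGravatar('admin@server.com', 'Guido', 'Von Rossum').get_initials()
    # -> 'GR' (first name letter + last part of the compound last name)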
1178 """
1178 """
1179 import unicodedata
1179 import unicodedata
1180 # replace non-ascii to ascii
1180 # replace non-ascii to ascii
1181 first_name = unicodedata.normalize(
1181 first_name = unicodedata.normalize(
1182 'NFKD', safe_unicode(self.first_name)).encode('ascii', 'ignore')
1182 'NFKD', safe_unicode(self.first_name)).encode('ascii', 'ignore')
1183 last_name = unicodedata.normalize(
1183 last_name = unicodedata.normalize(
1184 'NFKD', safe_unicode(self.last_name)).encode('ascii', 'ignore')
1184 'NFKD', safe_unicode(self.last_name)).encode('ascii', 'ignore')
1185
1185
1186 # do NFKD encoding, and also make sure email has proper format
1186 # do NFKD encoding, and also make sure email has proper format
1187 email_address = self.normalize_email(self.email_address)
1187 email_address = self.normalize_email(self.email_address)
1188
1188
1189 # first push the email initials
1189 # first push the email initials
1190 prefix, server = email_address.split('@', 1)
1190 prefix, server = email_address.split('@', 1)
1191
1191
1192 # check if prefix is maybe a 'first_name.last_name' syntax
1192 # check if prefix is maybe a 'first_name.last_name' syntax
1193 _dot_split = prefix.rsplit('.', 1)
1193 _dot_split = prefix.rsplit('.', 1)
1194 if len(_dot_split) == 2 and _dot_split[1]:
1194 if len(_dot_split) == 2 and _dot_split[1]:
1195 initials = [_dot_split[0][0], _dot_split[1][0]]
1195 initials = [_dot_split[0][0], _dot_split[1][0]]
1196 else:
1196 else:
1197 initials = [prefix[0], server[0]]
1197 initials = [prefix[0], server[0]]
1198
1198
1199 # then try to replace either first_name or last_name
1199 # then try to replace either first_name or last_name
1200 fn_letter = (first_name or " ")[0].strip()
1200 fn_letter = (first_name or " ")[0].strip()
1201 ln_letter = (last_name.split(' ', 1)[-1] or " ")[0].strip()
1201 ln_letter = (last_name.split(' ', 1)[-1] or " ")[0].strip()
1202
1202
1203 if fn_letter:
1203 if fn_letter:
1204 initials[0] = fn_letter
1204 initials[0] = fn_letter
1205
1205
1206 if ln_letter:
1206 if ln_letter:
1207 initials[1] = ln_letter
1207 initials[1] = ln_letter
1208
1208
1209 return ''.join(initials).upper()
1209 return ''.join(initials).upper()
1210
1210
1211 def get_img_data_by_type(self, font_family, img_type):
1211 def get_img_data_by_type(self, font_family, img_type):
1212 default_user = """
1212 default_user = """
1213 <svg xmlns="http://www.w3.org/2000/svg"
1213 <svg xmlns="http://www.w3.org/2000/svg"
1214 version="1.1" x="0px" y="0px" width="{size}" height="{size}"
1214 version="1.1" x="0px" y="0px" width="{size}" height="{size}"
1215 viewBox="-15 -10 439.165 429.164"
1215 viewBox="-15 -10 439.165 429.164"
1216
1216
1217 xml:space="preserve"
1217 xml:space="preserve"
1218 style="background:{background};" >
1218 style="background:{background};" >
1219
1219
1220 <path d="M204.583,216.671c50.664,0,91.74-48.075,
1220 <path d="M204.583,216.671c50.664,0,91.74-48.075,
1221 91.74-107.378c0-82.237-41.074-107.377-91.74-107.377
1221 91.74-107.378c0-82.237-41.074-107.377-91.74-107.377
1222 c-50.668,0-91.74,25.14-91.74,107.377C112.844,
1222 c-50.668,0-91.74,25.14-91.74,107.377C112.844,
1223 168.596,153.916,216.671,
1223 168.596,153.916,216.671,
1224 204.583,216.671z" fill="{text_color}"/>
1224 204.583,216.671z" fill="{text_color}"/>
1225 <path d="M407.164,374.717L360.88,
1225 <path d="M407.164,374.717L360.88,
1226 270.454c-2.117-4.771-5.836-8.728-10.465-11.138l-71.83-37.392
1226 270.454c-2.117-4.771-5.836-8.728-10.465-11.138l-71.83-37.392
1227 c-1.584-0.823-3.502-0.663-4.926,0.415c-20.316,
1227 c-1.584-0.823-3.502-0.663-4.926,0.415c-20.316,
1228 15.366-44.203,23.488-69.076,23.488c-24.877,
1228 15.366-44.203,23.488-69.076,23.488c-24.877,
1229 0-48.762-8.122-69.078-23.488
1229 0-48.762-8.122-69.078-23.488
1230 c-1.428-1.078-3.346-1.238-4.93-0.415L58.75,
1230 c-1.428-1.078-3.346-1.238-4.93-0.415L58.75,
1231 259.316c-4.631,2.41-8.346,6.365-10.465,11.138L2.001,374.717
1231 259.316c-4.631,2.41-8.346,6.365-10.465,11.138L2.001,374.717
1232 c-3.191,7.188-2.537,15.412,1.75,22.005c4.285,
1232 c-3.191,7.188-2.537,15.412,1.75,22.005c4.285,
1233 6.592,11.537,10.526,19.4,10.526h362.861c7.863,0,15.117-3.936,
1233 6.592,11.537,10.526,19.4,10.526h362.861c7.863,0,15.117-3.936,
1234 19.402-10.527 C409.699,390.129,
1234 19.402-10.527 C409.699,390.129,
1235 410.355,381.902,407.164,374.717z" fill="{text_color}"/>
1235 410.355,381.902,407.164,374.717z" fill="{text_color}"/>
1236 </svg>""".format(
1236 </svg>""".format(
1237 size=self.size,
1237 size=self.size,
1238 background='#979797', # @grey4
1238 background='#979797', # @grey4
1239 text_color=self.text_color,
1239 text_color=self.text_color,
1240 font_family=font_family)
1240 font_family=font_family)
1241
1241
1242 return {
1242 return {
1243 "default_user": default_user
1243 "default_user": default_user
1244 }[img_type]
1244 }[img_type]
1245
1245
1246 def get_img_data(self, svg_type=None):
1246 def get_img_data(self, svg_type=None):
1247 """
1247 """
1248 generates the svg metadata for the image
1248 generates the svg metadata for the image
1249 """
1249 """
1250
1250
1251 font_family = ','.join([
1251 font_family = ','.join([
1252 'proximanovaregular',
1252 'proximanovaregular',
1253 'Proxima Nova Regular',
1253 'Proxima Nova Regular',
1254 'Proxima Nova',
1254 'Proxima Nova',
1255 'Arial',
1255 'Arial',
1256 'Lucida Grande',
1256 'Lucida Grande',
1257 'sans-serif'
1257 'sans-serif'
1258 ])
1258 ])
1259 if svg_type:
1259 if svg_type:
1260 return self.get_img_data_by_type(font_family, svg_type)
1260 return self.get_img_data_by_type(font_family, svg_type)
1261
1261
1262 initials = self.get_initials()
1262 initials = self.get_initials()
1263 img_data = """
1263 img_data = """
1264 <svg xmlns="http://www.w3.org/2000/svg" pointer-events="none"
1264 <svg xmlns="http://www.w3.org/2000/svg" pointer-events="none"
1265 width="{size}" height="{size}"
1265 width="{size}" height="{size}"
1266 style="width: 100%; height: 100%; background-color: {background}"
1266 style="width: 100%; height: 100%; background-color: {background}"
1267 viewBox="0 0 {size} {size}">
1267 viewBox="0 0 {size} {size}">
1268 <text text-anchor="middle" y="50%" x="50%" dy="0.35em"
1268 <text text-anchor="middle" y="50%" x="50%" dy="0.35em"
1269 pointer-events="auto" fill="{text_color}"
1269 pointer-events="auto" fill="{text_color}"
1270 font-family="{font_family}"
1270 font-family="{font_family}"
1271 style="font-weight: 400; font-size: {f_size}px;">{text}
1271 style="font-weight: 400; font-size: {f_size}px;">{text}
1272 </text>
1272 </text>
1273 </svg>""".format(
1273 </svg>""".format(
1274 size=self.size,
1274 size=self.size,
1275 f_size=self.size/1.85, # scale the text inside the box nicely
1275 f_size=self.size/1.85, # scale the text inside the box nicely
1276 background=self.background,
1276 background=self.background,
1277 text_color=self.text_color,
1277 text_color=self.text_color,
1278 text=initials.upper(),
1278 text=initials.upper(),
1279 font_family=font_family)
1279 font_family=font_family)
1280
1280
1281 return img_data
1281 return img_data
1282
1282
1283 def generate_svg(self, svg_type=None):
1283 def generate_svg(self, svg_type=None):
1284 img_data = self.get_img_data(svg_type)
1284 img_data = self.get_img_data(svg_type)
1285 return "data:image/svg+xml;base64,%s" % img_data.encode('base64')
1285 return "data:image/svg+xml;base64,%s" % img_data.encode('base64')
1286
1286
1287
1287
1288 def initials_gravatar(email_address, first_name, last_name, size=30):
1288 def initials_gravatar(email_address, first_name, last_name, size=30):
1289 svg_type = None
1289 svg_type = None
1290 if email_address == User.DEFAULT_USER_EMAIL:
1290 if email_address == User.DEFAULT_USER_EMAIL:
1291 svg_type = 'default_user'
1291 svg_type = 'default_user'
1292 klass = InitialsGravatar(email_address, first_name, last_name, size)
1292 klass = InitialsGravatar(email_address, first_name, last_name, size)
1293 return klass.generate_svg(svg_type=svg_type)
1293 return klass.generate_svg(svg_type=svg_type)
1294
1294
1295
1295
1296 def gravatar_url(email_address, size=30, request=None):
1296 def gravatar_url(email_address, size=30, request=None):
1297 request = get_current_request()
1297 request = get_current_request()
1298 if request and hasattr(request, 'call_context'):
1298 if request and hasattr(request, 'call_context'):
1299 _use_gravatar = request.call_context.visual.use_gravatar
1299 _use_gravatar = request.call_context.visual.use_gravatar
1300 _gravatar_url = request.call_context.visual.gravatar_url
1300 _gravatar_url = request.call_context.visual.gravatar_url
1301 else:
1301 else:
1302 # doh, we need to re-import those to mock it later
1302 # doh, we need to re-import those to mock it later
1303 from pylons import tmpl_context as c
1303 from pylons import tmpl_context as c
1304
1304
1305 _use_gravatar = c.visual.use_gravatar
1305 _use_gravatar = c.visual.use_gravatar
1306 _gravatar_url = c.visual.gravatar_url
1306 _gravatar_url = c.visual.gravatar_url
1307
1307
1308 _gravatar_url = _gravatar_url or User.DEFAULT_GRAVATAR_URL
1308 _gravatar_url = _gravatar_url or User.DEFAULT_GRAVATAR_URL
1309
1309
1310 email_address = email_address or User.DEFAULT_USER_EMAIL
1310 email_address = email_address or User.DEFAULT_USER_EMAIL
1311 if isinstance(email_address, unicode):
1311 if isinstance(email_address, unicode):
1312 # hashlib crashes on unicode items
1312 # hashlib crashes on unicode items
1313 email_address = safe_str(email_address)
1313 email_address = safe_str(email_address)
1314
1314
1315 # empty email or default user
1315 # empty email or default user
1316 if not email_address or email_address == User.DEFAULT_USER_EMAIL:
1316 if not email_address or email_address == User.DEFAULT_USER_EMAIL:
1317 return initials_gravatar(User.DEFAULT_USER_EMAIL, '', '', size=size)
1317 return initials_gravatar(User.DEFAULT_USER_EMAIL, '', '', size=size)
1318
1318
1319 if _use_gravatar:
1319 if _use_gravatar:
1320 # TODO: Disuse pyramid thread locals. Think about another solution to
1320 # TODO: Disuse pyramid thread locals. Think about another solution to
1321 # get the host and schema here.
1321 # get the host and schema here.
1322 request = get_current_request()
1322 request = get_current_request()
1323 tmpl = safe_str(_gravatar_url)
1323 tmpl = safe_str(_gravatar_url)
1324 tmpl = tmpl.replace('{email}', email_address)\
1324 tmpl = tmpl.replace('{email}', email_address)\
1325 .replace('{md5email}', md5_safe(email_address.lower())) \
1325 .replace('{md5email}', md5_safe(email_address.lower())) \
1326 .replace('{netloc}', request.host)\
1326 .replace('{netloc}', request.host)\
1327 .replace('{scheme}', request.scheme)\
1327 .replace('{scheme}', request.scheme)\
1328 .replace('{size}', safe_str(size))
1328 .replace('{size}', safe_str(size))
1329 return tmpl
1329 return tmpl
1330 else:
1330 else:
1331 return initials_gravatar(email_address, '', '', size=size)
1331 return initials_gravatar(email_address, '', '', size=size)
1332
1332
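# Illustrative note on gravatar_url (template and address below are hypothetical):
# with a gravatar template such as 'https://grav.example.com/{md5email}?s={size}',
# gravatar_url('user@example.com', 24) substitutes the md5 of the lower-cased
# address and the requested size; an empty or default email instead falls back
# to the generated initials SVG via initials_gravatar().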
1333
1333
1334 class Page(_Page):
1334 class Page(_Page):
1335 """
1335 """
1336 Custom pager to match rendering style with paginator
1336 Custom pager to match rendering style with paginator
1337 """
1337 """
1338
1338
1339 def _get_pos(self, cur_page, max_page, items):
1339 def _get_pos(self, cur_page, max_page, items):
1340 edge = (items / 2) + 1
1340 edge = (items / 2) + 1
1341 if (cur_page <= edge):
1341 if (cur_page <= edge):
1342 radius = max(items / 2, items - cur_page)
1342 radius = max(items / 2, items - cur_page)
1343 elif (max_page - cur_page) < edge:
1343 elif (max_page - cur_page) < edge:
1344 radius = (items - 1) - (max_page - cur_page)
1344 radius = (items - 1) - (max_page - cur_page)
1345 else:
1345 else:
1346 radius = items / 2
1346 radius = items / 2
1347
1347
1348 left = max(1, (cur_page - (radius)))
1348 left = max(1, (cur_page - (radius)))
1349 right = min(max_page, cur_page + (radius))
1349 right = min(max_page, cur_page + (radius))
1350 return left, cur_page, right
1350 return left, cur_page, right
1351
1351
1352 def _range(self, regexp_match):
1352 def _range(self, regexp_match):
1353 """
1353 """
1354 Return range of linked pages (e.g. '1 2 [3] 4 5 6 7 8').
1354 Return range of linked pages (e.g. '1 2 [3] 4 5 6 7 8').
1355
1355
1356 Arguments:
1356 Arguments:
1357
1357
1358 regexp_match
1358 regexp_match
1359 A "re" (regular expressions) match object containing the
1359 A "re" (regular expressions) match object containing the
1360 radius of linked pages around the current page in
1360 radius of linked pages around the current page in
1361 regexp_match.group(1) as a string
1361 regexp_match.group(1) as a string
1362
1362
1363 This function is supposed to be called as a callable in
1363 This function is supposed to be called as a callable in
1364 re.sub.
1364 re.sub.
1365
1365
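Illustrative example: with page=7, last_page=12 and radius 2, the helper
_get_pos returns (5, 7, 9), which renders as '1 .. 5 6 [7] 8 9 .. 12'.
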
1366 """
1366 """
1367 radius = int(regexp_match.group(1))
1367 radius = int(regexp_match.group(1))
1368
1368
1369 # Compute the first and last page number within the radius
1369 # Compute the first and last page number within the radius
1370 # e.g. '1 .. 5 6 [7] 8 9 .. 12'
1370 # e.g. '1 .. 5 6 [7] 8 9 .. 12'
1371 # -> leftmost_page = 5
1371 # -> leftmost_page = 5
1372 # -> rightmost_page = 9
1372 # -> rightmost_page = 9
1373 leftmost_page, _cur, rightmost_page = self._get_pos(self.page,
1373 leftmost_page, _cur, rightmost_page = self._get_pos(self.page,
1374 self.last_page,
1374 self.last_page,
1375 (radius * 2) + 1)
1375 (radius * 2) + 1)
1376 nav_items = []
1376 nav_items = []
1377
1377
1378 # Create a link to the first page (unless we are on the first page
1378 # Create a link to the first page (unless we are on the first page
1379 # or there would be no need to insert '..' spacers)
1379 # or there would be no need to insert '..' spacers)
1380 if self.page != self.first_page and self.first_page < leftmost_page:
1380 if self.page != self.first_page and self.first_page < leftmost_page:
1381 nav_items.append(self._pagerlink(self.first_page, self.first_page))
1381 nav_items.append(self._pagerlink(self.first_page, self.first_page))
1382
1382
1383 # Insert dots if there are pages between the first page
1383 # Insert dots if there are pages between the first page
1384 # and the currently displayed page range
1384 # and the currently displayed page range
1385 if leftmost_page - self.first_page > 1:
1385 if leftmost_page - self.first_page > 1:
1386 # Wrap in a SPAN tag if dotdot_attr is set
1386 # Wrap in a SPAN tag if dotdot_attr is set
1387 text = '..'
1387 text = '..'
1388 if self.dotdot_attr:
1388 if self.dotdot_attr:
1389 text = HTML.span(c=text, **self.dotdot_attr)
1389 text = HTML.span(c=text, **self.dotdot_attr)
1390 nav_items.append(text)
1390 nav_items.append(text)
1391
1391
1392 for thispage in xrange(leftmost_page, rightmost_page + 1):
1392 for thispage in xrange(leftmost_page, rightmost_page + 1):
1393 # Highlight the current page number and do not use a link
1393 # Highlight the current page number and do not use a link
1394 if thispage == self.page:
1394 if thispage == self.page:
1395 text = '%s' % (thispage,)
1395 text = '%s' % (thispage,)
1396 # Wrap in a SPAN tag if curpage_attr is set
1396 # Wrap in a SPAN tag if curpage_attr is set
1397 if self.curpage_attr:
1397 if self.curpage_attr:
1398 text = HTML.span(c=text, **self.curpage_attr)
1398 text = HTML.span(c=text, **self.curpage_attr)
1399 nav_items.append(text)
1399 nav_items.append(text)
1400 # Otherwise create just a link to that page
1400 # Otherwise create just a link to that page
1401 else:
1401 else:
1402 text = '%s' % (thispage,)
1402 text = '%s' % (thispage,)
1403 nav_items.append(self._pagerlink(thispage, text))
1403 nav_items.append(self._pagerlink(thispage, text))
1404
1404
1405 # Insert dots if there are pages between the displayed
1405 # Insert dots if there are pages between the displayed
1406 # page numbers and the end of the page range
1406 # page numbers and the end of the page range
1407 if self.last_page - rightmost_page > 1:
1407 if self.last_page - rightmost_page > 1:
1408 text = '..'
1408 text = '..'
1409 # Wrap in a SPAN tag if dotdot_attr is set
1409 # Wrap in a SPAN tag if dotdot_attr is set
1410 if self.dotdot_attr:
1410 if self.dotdot_attr:
1411 text = HTML.span(c=text, **self.dotdot_attr)
1411 text = HTML.span(c=text, **self.dotdot_attr)
1412 nav_items.append(text)
1412 nav_items.append(text)
1413
1413
1414 # Create a link to the very last page (unless we are on the last
1414 # Create a link to the very last page (unless we are on the last
1415 # page or there would be no need to insert '..' spacers)
1415 # page or there would be no need to insert '..' spacers)
1416 if self.page != self.last_page and rightmost_page < self.last_page:
1416 if self.page != self.last_page and rightmost_page < self.last_page:
1417 nav_items.append(self._pagerlink(self.last_page, self.last_page))
1417 nav_items.append(self._pagerlink(self.last_page, self.last_page))
1418
1418
1419 ## prerender links
1419 ## prerender links
1420 #_page_link = url.current()
1420 #_page_link = url.current()
1421 #nav_items.append(literal('<link rel="prerender" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
1421 #nav_items.append(literal('<link rel="prerender" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
1422 #nav_items.append(literal('<link rel="prefetch" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
1422 #nav_items.append(literal('<link rel="prefetch" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
1423 return self.separator.join(nav_items)
1423 return self.separator.join(nav_items)
1424
1424
1425 def pager(self, format='~2~', page_param='page', partial_param='partial',
1425 def pager(self, format='~2~', page_param='page', partial_param='partial',
1426 show_if_single_page=False, separator=' ', onclick=None,
1426 show_if_single_page=False, separator=' ', onclick=None,
1427 symbol_first='<<', symbol_last='>>',
1427 symbol_first='<<', symbol_last='>>',
1428 symbol_previous='<', symbol_next='>',
1428 symbol_previous='<', symbol_next='>',
1429 link_attr={'class': 'pager_link', 'rel': 'prerender'},
1429 link_attr={'class': 'pager_link', 'rel': 'prerender'},
1430 curpage_attr={'class': 'pager_curpage'},
1430 curpage_attr={'class': 'pager_curpage'},
1431 dotdot_attr={'class': 'pager_dotdot'}, **kwargs):
1431 dotdot_attr={'class': 'pager_dotdot'}, **kwargs):
1432
1432
1433 self.curpage_attr = curpage_attr
1433 self.curpage_attr = curpage_attr
1434 self.separator = separator
1434 self.separator = separator
1435 self.pager_kwargs = kwargs
1435 self.pager_kwargs = kwargs
1436 self.page_param = page_param
1436 self.page_param = page_param
1437 self.partial_param = partial_param
1437 self.partial_param = partial_param
1438 self.onclick = onclick
1438 self.onclick = onclick
1439 self.link_attr = link_attr
1439 self.link_attr = link_attr
1440 self.dotdot_attr = dotdot_attr
1440 self.dotdot_attr = dotdot_attr
1441
1441
1442 # Don't show navigator if there is no more than one page
1442 # Don't show navigator if there is no more than one page
1443 if self.page_count == 0 or (self.page_count == 1 and not show_if_single_page):
1443 if self.page_count == 0 or (self.page_count == 1 and not show_if_single_page):
1444 return ''
1444 return ''
1445
1445
1446 from string import Template
1446 from string import Template
1447 # Replace ~...~ in token format by range of pages
1447 # Replace ~...~ in token format by range of pages
1448 result = re.sub(r'~(\d+)~', self._range, format)
1448 result = re.sub(r'~(\d+)~', self._range, format)
1449
1449
1450 # Interpolate '%' variables
1450 # Interpolate '%' variables
1451 result = Template(result).safe_substitute({
1451 result = Template(result).safe_substitute({
1452 'first_page': self.first_page,
1452 'first_page': self.first_page,
1453 'last_page': self.last_page,
1453 'last_page': self.last_page,
1454 'page': self.page,
1454 'page': self.page,
1455 'page_count': self.page_count,
1455 'page_count': self.page_count,
1456 'items_per_page': self.items_per_page,
1456 'items_per_page': self.items_per_page,
1457 'first_item': self.first_item,
1457 'first_item': self.first_item,
1458 'last_item': self.last_item,
1458 'last_item': self.last_item,
1459 'item_count': self.item_count,
1459 'item_count': self.item_count,
1460 'link_first': self.page > self.first_page and \
1460 'link_first': self.page > self.first_page and \
1461 self._pagerlink(self.first_page, symbol_first) or '',
1461 self._pagerlink(self.first_page, symbol_first) or '',
1462 'link_last': self.page < self.last_page and \
1462 'link_last': self.page < self.last_page and \
1463 self._pagerlink(self.last_page, symbol_last) or '',
1463 self._pagerlink(self.last_page, symbol_last) or '',
1464 'link_previous': self.previous_page and \
1464 'link_previous': self.previous_page and \
1465 self._pagerlink(self.previous_page, symbol_previous) \
1465 self._pagerlink(self.previous_page, symbol_previous) \
1466 or HTML.span(symbol_previous, class_="pg-previous disabled"),
1466 or HTML.span(symbol_previous, class_="pg-previous disabled"),
1467 'link_next': self.next_page and \
1467 'link_next': self.next_page and \
1468 self._pagerlink(self.next_page, symbol_next) \
1468 self._pagerlink(self.next_page, symbol_next) \
1469 or HTML.span(symbol_next, class_="pg-next disabled")
1469 or HTML.span(symbol_next, class_="pg-next disabled")
1470 })
1470 })
1471
1471
1472 return literal(result)
1472 return literal(result)
1473
1473
1474
1474
1475 #==============================================================================
1475 #==============================================================================
1476 # REPO PAGER, PAGER FOR REPOSITORY
1476 # REPO PAGER, PAGER FOR REPOSITORY
1477 #==============================================================================
1477 #==============================================================================
1478 class RepoPage(Page):
1478 class RepoPage(Page):
1479
1479
1480 def __init__(self, collection, page=1, items_per_page=20,
1480 def __init__(self, collection, page=1, items_per_page=20,
1481 item_count=None, url=None, **kwargs):
1481 item_count=None, url=None, **kwargs):
1482
1482
1483 """Create a "RepoPage" instance. special pager for paging
1483 """Create a "RepoPage" instance. special pager for paging
1484 repository
1484 repository
1485 """
1485 """
1486 self._url_generator = url
1486 self._url_generator = url
1487
1487
1488 # Save the kwargs class-wide so they can be used in the pager() method
1488 # Save the kwargs class-wide so they can be used in the pager() method
1489 self.kwargs = kwargs
1489 self.kwargs = kwargs
1490
1490
1491 # Save a reference to the collection
1491 # Save a reference to the collection
1492 self.original_collection = collection
1492 self.original_collection = collection
1493
1493
1494 self.collection = collection
1494 self.collection = collection
1495
1495
1496 # The self.page is the number of the current page.
1496 # The self.page is the number of the current page.
1497 # The first page has the number 1!
1497 # The first page has the number 1!
1498 try:
1498 try:
1499 self.page = int(page) # make it int() if we get it as a string
1499 self.page = int(page) # make it int() if we get it as a string
1500 except (ValueError, TypeError):
1500 except (ValueError, TypeError):
1501 self.page = 1
1501 self.page = 1
1502
1502
1503 self.items_per_page = items_per_page
1503 self.items_per_page = items_per_page
1504
1504
1505 # Unless the user tells us how many items the collection has
1505 # Unless the user tells us how many items the collection has
1506 # we calculate that ourselves.
1506 # we calculate that ourselves.
1507 if item_count is not None:
1507 if item_count is not None:
1508 self.item_count = item_count
1508 self.item_count = item_count
1509 else:
1509 else:
1510 self.item_count = len(self.collection)
1510 self.item_count = len(self.collection)
1511
1511
1512 # Compute the number of the first and last available page
1512 # Compute the number of the first and last available page
1513 if self.item_count > 0:
1513 if self.item_count > 0:
1514 self.first_page = 1
1514 self.first_page = 1
1515 self.page_count = int(math.ceil(float(self.item_count) /
1515 self.page_count = int(math.ceil(float(self.item_count) /
1516 self.items_per_page))
1516 self.items_per_page))
1517 self.last_page = self.first_page + self.page_count - 1
1517 self.last_page = self.first_page + self.page_count - 1
1518
1518
1519 # Make sure that the requested page number is in the range of
1519 # Make sure that the requested page number is in the range of
1520 # valid pages
1520 # valid pages
1521 if self.page > self.last_page:
1521 if self.page > self.last_page:
1522 self.page = self.last_page
1522 self.page = self.last_page
1523 elif self.page < self.first_page:
1523 elif self.page < self.first_page:
1524 self.page = self.first_page
1524 self.page = self.first_page
1525
1525
1526 # Note: the number of items on this page can be less than
1526 # Note: the number of items on this page can be less than
1527 # items_per_page if the last page is not full
1527 # items_per_page if the last page is not full
1528 self.first_item = max(0, (self.item_count) - (self.page *
1528 self.first_item = max(0, (self.item_count) - (self.page *
1529 items_per_page))
1529 items_per_page))
1530 self.last_item = ((self.item_count - 1) - items_per_page *
1530 self.last_item = ((self.item_count - 1) - items_per_page *
1531 (self.page - 1))
1531 (self.page - 1))
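# illustrative numbers: item_count=45, items_per_page=20, page=1 gives
# first_item=25 and last_item=44, i.e. a 20-element slice taken from the
# end of the collection, which is then reversed below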
1532
1532
1533 self.items = list(self.collection[self.first_item:self.last_item + 1])
1533 self.items = list(self.collection[self.first_item:self.last_item + 1])
1534
1534
1535 # Links to previous and next page
1535 # Links to previous and next page
1536 if self.page > self.first_page:
1536 if self.page > self.first_page:
1537 self.previous_page = self.page - 1
1537 self.previous_page = self.page - 1
1538 else:
1538 else:
1539 self.previous_page = None
1539 self.previous_page = None
1540
1540
1541 if self.page < self.last_page:
1541 if self.page < self.last_page:
1542 self.next_page = self.page + 1
1542 self.next_page = self.page + 1
1543 else:
1543 else:
1544 self.next_page = None
1544 self.next_page = None
1545
1545
1546 # No items available
1546 # No items available
1547 else:
1547 else:
1548 self.first_page = None
1548 self.first_page = None
1549 self.page_count = 0
1549 self.page_count = 0
1550 self.last_page = None
1550 self.last_page = None
1551 self.first_item = None
1551 self.first_item = None
1552 self.last_item = None
1552 self.last_item = None
1553 self.previous_page = None
1553 self.previous_page = None
1554 self.next_page = None
1554 self.next_page = None
1555 self.items = []
1555 self.items = []
1556
1556
1557 # This is a subclass of the 'list' type. Initialise the list now.
1557 # This is a subclass of the 'list' type. Initialise the list now.
1558 list.__init__(self, reversed(self.items))
1558 list.__init__(self, reversed(self.items))
1559
1559
1560
1560
1561 def breadcrumb_repo_link(repo):
1561 def breadcrumb_repo_link(repo):
1562 """
1562 """
1563 Makes a breadcrumbs path link to repo
1563 Makes a breadcrumbs path link to repo
1564
1564
1565 ex::
1565 ex::
1566 group >> subgroup >> repo
1566 group >> subgroup >> repo
1567
1567
1568 :param repo: a Repository instance
1568 :param repo: a Repository instance
1569 """
1569 """
1570
1570
1571 path = [
1571 path = [
1572 link_to(group.name, route_path('repo_group_home', repo_group_name=group.group_name))
1572 link_to(group.name, route_path('repo_group_home', repo_group_name=group.group_name))
1573 for group in repo.groups_with_parents
1573 for group in repo.groups_with_parents
1574 ] + [
1574 ] + [
1575 link_to(repo.just_name, route_path('repo_summary', repo_name=repo.repo_name))
1575 link_to(repo.just_name, route_path('repo_summary', repo_name=repo.repo_name))
1576 ]
1576 ]
1577
1577
1578 return literal(' &raquo; '.join(path))
1578 return literal(' &raquo; '.join(path))
1579
1579
1580
1580
1581 def format_byte_size_binary(file_size):
1581 def format_byte_size_binary(file_size):
1582 """
1582 """
1583 Formats file/folder sizes using standard binary units.
1583 Formats file/folder sizes using standard binary units.
1584 """
1584 """
1585 if file_size is None:
1585 if file_size is None:
1586 file_size = 0
1586 file_size = 0
1587
1587
1588 formatted_size = format_byte_size(file_size, binary=True)
1588 formatted_size = format_byte_size(file_size, binary=True)
1589 return formatted_size
1589 return formatted_size
1590
1590
1591
1591
1592 def urlify_text(text_, safe=True):
1592 def urlify_text(text_, safe=True):
1593 """
1593 """
1594 Extract urls from text and make html links out of them
1594 Extract urls from text and make html links out of them
1595
1595
1596 :param text_:
1596 :param text_:
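
Illustrative example (hypothetical input)::

    urlify_text('docs at http://example.com here')
    # -> literal('docs at <a href="http://example.com">http://example.com</a> here')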
1597 """
1597 """
1598
1598
1599 url_pat = re.compile(r'''(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@#.&+]'''
1599 url_pat = re.compile(r'''(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@#.&+]'''
1600 '''|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)''')
1600 '''|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)''')
1601
1601
1602 def url_func(match_obj):
1602 def url_func(match_obj):
1603 url_full = match_obj.groups()[0]
1603 url_full = match_obj.groups()[0]
1604 return '<a href="%(url)s">%(url)s</a>' % ({'url': url_full})
1604 return '<a href="%(url)s">%(url)s</a>' % ({'url': url_full})
1605 _newtext = url_pat.sub(url_func, text_)
1605 _newtext = url_pat.sub(url_func, text_)
1606 if safe:
1606 if safe:
1607 return literal(_newtext)
1607 return literal(_newtext)
1608 return _newtext
1608 return _newtext
1609
1609
1610
1610
1611 def urlify_commits(text_, repository):
1611 def urlify_commits(text_, repository):
1612 """
1612 """
1613 Extract commit ids from text and make links out of them
1613 Extract commit ids from text and make links out of them
1614
1614
1615 :param text_:
1615 :param text_:
1616 :param repository: repo name to build the URL with
1616 :param repository: repo name to build the URL with
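
Illustrative example (hypothetical input; the href is elided here and points
at the `repo_commit` route for the given repository)::

    urlify_commits('see deadbeefcafe1234 for details', 'my-repo')
    # -> 'see <a class="revision-link" href="...">deadbeefcafe1234</a> for details'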
1617 """
1617 """
1618
1618
1619 URL_PAT = re.compile(r'(^|\s)([0-9a-fA-F]{12,40})($|\s)')
1619 URL_PAT = re.compile(r'(^|\s)([0-9a-fA-F]{12,40})($|\s)')
1620
1620
1621 def url_func(match_obj):
1621 def url_func(match_obj):
1622 commit_id = match_obj.groups()[1]
1622 commit_id = match_obj.groups()[1]
1623 pref = match_obj.groups()[0]
1623 pref = match_obj.groups()[0]
1624 suf = match_obj.groups()[2]
1624 suf = match_obj.groups()[2]
1625
1625
1626 tmpl = (
1626 tmpl = (
1627 '%(pref)s<a class="%(cls)s" href="%(url)s">'
1627 '%(pref)s<a class="%(cls)s" href="%(url)s">'
1628 '%(commit_id)s</a>%(suf)s'
1628 '%(commit_id)s</a>%(suf)s'
1629 )
1629 )
1630 return tmpl % {
1630 return tmpl % {
1631 'pref': pref,
1631 'pref': pref,
1632 'cls': 'revision-link',
1632 'cls': 'revision-link',
1633 'url': route_url('repo_commit', repo_name=repository,
1633 'url': route_url('repo_commit', repo_name=repository,
1634 commit_id=commit_id),
1634 commit_id=commit_id),
1635 'commit_id': commit_id,
1635 'commit_id': commit_id,
1636 'suf': suf
1636 'suf': suf
1637 }
1637 }
1638
1638
1639 newtext = URL_PAT.sub(url_func, text_)
1639 newtext = URL_PAT.sub(url_func, text_)
1640
1640
1641 return newtext
1641 return newtext
1642
1642
1643
1643
1644 def _process_url_func(match_obj, repo_name, uid, entry,
1644 def _process_url_func(match_obj, repo_name, uid, entry,
1645 return_raw_data=False, link_format='html'):
1645 return_raw_data=False, link_format='html'):
1646 pref = ''
1646 pref = ''
1647 if match_obj.group().startswith(' '):
1647 if match_obj.group().startswith(' '):
1648 pref = ' '
1648 pref = ' '
1649
1649
1650 issue_id = ''.join(match_obj.groups())
1650 issue_id = ''.join(match_obj.groups())
1651
1651
1652 if link_format == 'html':
1652 if link_format == 'html':
1653 tmpl = (
1653 tmpl = (
1654 '%(pref)s<a class="%(cls)s" href="%(url)s">'
1654 '%(pref)s<a class="%(cls)s" href="%(url)s">'
1655 '%(issue-prefix)s%(id-repr)s'
1655 '%(issue-prefix)s%(id-repr)s'
1656 '</a>')
1656 '</a>')
1657 elif link_format == 'rst':
1657 elif link_format == 'rst':
1658 tmpl = '`%(issue-prefix)s%(id-repr)s <%(url)s>`_'
1658 tmpl = '`%(issue-prefix)s%(id-repr)s <%(url)s>`_'
1659 elif link_format == 'markdown':
1659 elif link_format == 'markdown':
1660 tmpl = '[%(issue-prefix)s%(id-repr)s](%(url)s)'
1660 tmpl = '[%(issue-prefix)s%(id-repr)s](%(url)s)'
1661 else:
1661 else:
1662 raise ValueError('Bad link_format:{}'.format(link_format))
1662 raise ValueError('Bad link_format:{}'.format(link_format))
1663
1663
1664 (repo_name_cleaned,
1664 (repo_name_cleaned,
1665 parent_group_name) = RepoGroupModel().\
1665 parent_group_name) = RepoGroupModel().\
1666 _get_group_name_and_parent(repo_name)
1666 _get_group_name_and_parent(repo_name)
1667
1667
1668 # variables replacement
1668 # variables replacement
1669 named_vars = {
1669 named_vars = {
1670 'id': issue_id,
1670 'id': issue_id,
1671 'repo': repo_name,
1671 'repo': repo_name,
1672 'repo_name': repo_name_cleaned,
1672 'repo_name': repo_name_cleaned,
1673 'group_name': parent_group_name
1673 'group_name': parent_group_name
1674 }
1674 }
1675 # named regex variables
1675 # named regex variables
1676 named_vars.update(match_obj.groupdict())
1676 named_vars.update(match_obj.groupdict())
1677 _url = string.Template(entry['url']).safe_substitute(**named_vars)
1677 _url = string.Template(entry['url']).safe_substitute(**named_vars)
1678
1678
1679 data = {
1679 data = {
1680 'pref': pref,
1680 'pref': pref,
1681 'cls': 'issue-tracker-link',
1681 'cls': 'issue-tracker-link',
1682 'url': _url,
1682 'url': _url,
1683 'id-repr': issue_id,
1683 'id-repr': issue_id,
1684 'issue-prefix': entry['pref'],
1684 'issue-prefix': entry['pref'],
1685 'serv': entry['url'],
1685 'serv': entry['url'],
1686 }
1686 }
1687 if return_raw_data:
1687 if return_raw_data:
1688 return {
1688 return {
1689 'id': issue_id,
1689 'id': issue_id,
1690 'url': _url
1690 'url': _url
1691 }
1691 }
1692 return tmpl % data
1692 return tmpl % data
1693
1693
1694
1694
1695 def process_patterns(text_string, repo_name, link_format='html'):
1695 def process_patterns(text_string, repo_name, link_format='html'):
1696 allowed_formats = ['html', 'rst', 'markdown']
1696 allowed_formats = ['html', 'rst', 'markdown']
1697 if link_format not in allowed_formats:
1697 if link_format not in allowed_formats:
1698 raise ValueError('Link format can be only one of:{} got {}'.format(
1698 raise ValueError('Link format can be only one of:{} got {}'.format(
1699 allowed_formats, link_format))
1699 allowed_formats, link_format))
1700
1700
1701 repo = None
1701 repo = None
1702 if repo_name:
1702 if repo_name:
1703 # Retrieve the repo to avoid an invalid repo_name exploding on
1703 # Retrieve the repo to avoid an invalid repo_name exploding on
1704 # IssueTrackerSettingsModel, while still passing the invalid name further down
1704 # IssueTrackerSettingsModel, while still passing the invalid name further down
1705 repo = Repository.get_by_repo_name(repo_name, cache=True)
1705 repo = Repository.get_by_repo_name(repo_name, cache=True)
1706
1706
1707 settings_model = IssueTrackerSettingsModel(repo=repo)
1707 settings_model = IssueTrackerSettingsModel(repo=repo)
1708 active_entries = settings_model.get_settings(cache=True)
1708 active_entries = settings_model.get_settings(cache=True)
1709
1709
1710 issues_data = []
1710 issues_data = []
1711 newtext = text_string
1711 newtext = text_string
1712
1712
1713 for uid, entry in active_entries.items():
1713 for uid, entry in active_entries.items():
1714 log.debug('found issue tracker entry with uid %s' % (uid,))
1714 log.debug('found issue tracker entry with uid %s' % (uid,))
1715
1715
1716 if not (entry['pat'] and entry['url']):
1716 if not (entry['pat'] and entry['url']):
1717 log.debug('skipping due to missing data')
1717 log.debug('skipping due to missing data')
1718 continue
1718 continue
1719
1719
1720 log.debug('issue tracker entry: uid: `%s` PAT:%s URL:%s PREFIX:%s'
1720 log.debug('issue tracker entry: uid: `%s` PAT:%s URL:%s PREFIX:%s'
1721 % (uid, entry['pat'], entry['url'], entry['pref']))
1721 % (uid, entry['pat'], entry['url'], entry['pref']))
1722
1722
1723 try:
1723 try:
1724 pattern = re.compile(r'%s' % entry['pat'])
1724 pattern = re.compile(r'%s' % entry['pat'])
1725 except re.error:
1725 except re.error:
1726 log.exception(
1726 log.exception(
1727 'issue tracker pattern: `%s` failed to compile',
1727 'issue tracker pattern: `%s` failed to compile',
1728 entry['pat'])
1728 entry['pat'])
1729 continue
1729 continue
1730
1730
1731 data_func = partial(
1731 data_func = partial(
1732 _process_url_func, repo_name=repo_name, entry=entry, uid=uid,
1732 _process_url_func, repo_name=repo_name, entry=entry, uid=uid,
1733 return_raw_data=True)
1733 return_raw_data=True)
1734
1734
1735 for match_obj in pattern.finditer(text_string):
1735 for match_obj in pattern.finditer(text_string):
1736 issues_data.append(data_func(match_obj))
1736 issues_data.append(data_func(match_obj))
1737
1737
1738 url_func = partial(
1738 url_func = partial(
1739 _process_url_func, repo_name=repo_name, entry=entry, uid=uid,
1739 _process_url_func, repo_name=repo_name, entry=entry, uid=uid,
1740 link_format=link_format)
1740 link_format=link_format)
1741
1741
1742 newtext = pattern.sub(url_func, newtext)
1742 newtext = pattern.sub(url_func, newtext)
1743 log.debug('processed prefix:uid `%s`' % (uid,))
1743 log.debug('processed prefix:uid `%s`' % (uid,))
1744
1744
1745 return newtext, issues_data
1745 return newtext, issues_data
1746
1746
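# Illustrative example for process_patterns (the issue-tracker entry shown here
# is hypothetical): with an active entry of pat '#(\d+)', url
# 'https://tracker.example.com/${id}' and pref '#', calling
# process_patterns('fixes #42', 'my-repo', link_format='markdown') returns
# ('fixes [#42](https://tracker.example.com/42)',
#  [{'id': '42', 'url': 'https://tracker.example.com/42'}])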
1747
1747
1748 def urlify_commit_message(commit_text, repository=None):
1748 def urlify_commit_message(commit_text, repository=None):
1749 """
1749 """
1750 Parses the given text message and makes proper links.
1750 Parses the given text message and makes proper links.
1751 Issues are linked to the given issue server, and commit ids become commit links.
1751 Issues are linked to the given issue server, and commit ids become commit links.
1752
1752
1753 :param commit_text:
1753 :param commit_text:
1754 :param repository:
1754 :param repository:
1755 """
1755 """
1756 from pylons import url # doh, we need to re-import url to mock it later
1756 from pylons import url # doh, we need to re-import url to mock it later
1757
1757
1758 def escaper(string):
1758 def escaper(string):
1759 return string.replace('<', '&lt;').replace('>', '&gt;')
1759 return string.replace('<', '&lt;').replace('>', '&gt;')
1760
1760
1761 newtext = escaper(commit_text)
1761 newtext = escaper(commit_text)
1762
1762
1763 # extract http/https links and make them real urls
1763 # extract http/https links and make them real urls
1764 newtext = urlify_text(newtext, safe=False)
1764 newtext = urlify_text(newtext, safe=False)
1765
1765
1766 # urlify commits - extract commit ids and make link out of them, if we have
1766 # urlify commits - extract commit ids and make link out of them, if we have
1767 # the scope of repository present.
1767 # the scope of repository present.
1768 if repository:
1768 if repository:
1769 newtext = urlify_commits(newtext, repository)
1769 newtext = urlify_commits(newtext, repository)
1770
1770
1771 # process issue tracker patterns
1771 # process issue tracker patterns
1772 newtext, issues = process_patterns(newtext, repository or '')
1772 newtext, issues = process_patterns(newtext, repository or '')
1773
1773
1774 return literal(newtext)
1774 return literal(newtext)
1775
1775
1776
1776
1777 def render_binary(repo_name, file_obj):
1777 def render_binary(repo_name, file_obj):
1778 """
1778 """
1779 Choose how to render a binary file
1779 Choose how to render a binary file
1780 """
1780 """
1781 filename = file_obj.name
1781 filename = file_obj.name
1782
1782
1783 # images
1783 # images
1784 for ext in ['*.png', '*.jpg', '*.ico', '*.gif']:
1784 for ext in ['*.png', '*.jpg', '*.ico', '*.gif']:
1785 if fnmatch.fnmatch(filename, pat=ext):
1785 if fnmatch.fnmatch(filename, pat=ext):
1786 alt = filename
1786 alt = filename
1787 src = route_path(
1787 src = route_path(
1788 'repo_file_raw', repo_name=repo_name,
1788 'repo_file_raw', repo_name=repo_name,
1789 commit_id=file_obj.commit.raw_id, f_path=file_obj.path)
1789 commit_id=file_obj.commit.raw_id, f_path=file_obj.path)
1790 return literal('<img class="rendered-binary" alt="{}" src="{}">'.format(alt, src))
1790 return literal('<img class="rendered-binary" alt="{}" src="{}">'.format(alt, src))
1791
1791
1792
1792
1793 def renderer_from_filename(filename, exclude=None):
1793 def renderer_from_filename(filename, exclude=None):
1794 """
1794 """
1795 choose a renderer based on filename, this works only for text based files
1795 choose a renderer based on filename, this works only for text based files
1796 """
1796 """
1797
1797
1798 # ipython
1798 # ipython
1799 for ext in ['*.ipynb']:
1799 for ext in ['*.ipynb']:
1800 if fnmatch.fnmatch(filename, pat=ext):
1800 if fnmatch.fnmatch(filename, pat=ext):
1801 return 'jupyter'
1801 return 'jupyter'
1802
1802
1803 is_markup = MarkupRenderer.renderer_from_filename(filename, exclude=exclude)
1803 is_markup = MarkupRenderer.renderer_from_filename(filename, exclude=exclude)
1804 if is_markup:
1804 if is_markup:
1805 return is_markup
1805 return is_markup
1806 return None
1806 return None
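# Illustrative dispatch sketch (renderer names other than 'jupyter' depend on the
# MarkupRenderer configuration and are assumptions):
#
#   renderer_from_filename('analysis.ipynb')   # -> 'jupyter'
#   renderer_from_filename('README.rst')       # -> e.g. 'rst'
#   renderer_from_filename('main.c')            # -> None, i.e. show the raw file source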
1807
1807
1808
1808
1809 def render(source, renderer='rst', mentions=False, relative_urls=None,
1809 def render(source, renderer='rst', mentions=False, relative_urls=None,
1810 repo_name=None):
1810 repo_name=None):
1811
1811
1812 def maybe_convert_relative_links(html_source):
1812 def maybe_convert_relative_links(html_source):
1813 if relative_urls:
1813 if relative_urls:
1814 return relative_links(html_source, relative_urls)
1814 return relative_links(html_source, relative_urls)
1815 return html_source
1815 return html_source
1816
1816
1817 if renderer == 'rst':
1817 if renderer == 'rst':
1818 if repo_name:
1818 if repo_name:
1819 # process patterns on comments if we pass in repo name
1819 # process patterns on comments if we pass in repo name
1820 source, issues = process_patterns(
1820 source, issues = process_patterns(
1821 source, repo_name, link_format='rst')
1821 source, repo_name, link_format='rst')
1822
1822
1823 return literal(
1823 return literal(
1824 '<div class="rst-block">%s</div>' %
1824 '<div class="rst-block">%s</div>' %
1825 maybe_convert_relative_links(
1825 maybe_convert_relative_links(
1826 MarkupRenderer.rst(source, mentions=mentions)))
1826 MarkupRenderer.rst(source, mentions=mentions)))
1827 elif renderer == 'markdown':
1827 elif renderer == 'markdown':
1828 if repo_name:
1828 if repo_name:
1829 # process patterns on comments if we pass in repo name
1829 # process patterns on comments if we pass in repo name
1830 source, issues = process_patterns(
1830 source, issues = process_patterns(
1831 source, repo_name, link_format='markdown')
1831 source, repo_name, link_format='markdown')
1832
1832
1833 return literal(
1833 return literal(
1834 '<div class="markdown-block">%s</div>' %
1834 '<div class="markdown-block">%s</div>' %
1835 maybe_convert_relative_links(
1835 maybe_convert_relative_links(
1836 MarkupRenderer.markdown(source, flavored=True,
1836 MarkupRenderer.markdown(source, flavored=True,
1837 mentions=mentions)))
1837 mentions=mentions)))
1838 elif renderer == 'jupyter':
1838 elif renderer == 'jupyter':
1839 return literal(
1839 return literal(
1840 '<div class="ipynb">%s</div>' %
1840 '<div class="ipynb">%s</div>' %
1841 maybe_convert_relative_links(
1841 maybe_convert_relative_links(
1842 MarkupRenderer.jupyter(source)))
1842 MarkupRenderer.jupyter(source)))
1843
1843
1844 # None means just show the file-source
1844 # None means just show the file-source
1845 return None
1845 return None
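# Illustrative usage sketch (the repository name is a made-up example):
#
#   render('See issue #12', renderer='markdown', repo_name='group/repo1')
#   # -> literal('<div class="markdown-block">...</div>'); an unknown renderer
#   #    returns None, which callers treat as "show the plain file source".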
1846
1846
1847
1847
1848 def commit_status(repo, commit_id):
1848 def commit_status(repo, commit_id):
1849 return ChangesetStatusModel().get_status(repo, commit_id)
1849 return ChangesetStatusModel().get_status(repo, commit_id)
1850
1850
1851
1851
1852 def commit_status_lbl(commit_status):
1852 def commit_status_lbl(commit_status):
1853 return dict(ChangesetStatus.STATUSES).get(commit_status)
1853 return dict(ChangesetStatus.STATUSES).get(commit_status)
1854
1854
1855
1855
1856 def commit_time(repo_name, commit_id):
1856 def commit_time(repo_name, commit_id):
1857 repo = Repository.get_by_repo_name(repo_name)
1857 repo = Repository.get_by_repo_name(repo_name)
1858 commit = repo.get_commit(commit_id=commit_id)
1858 commit = repo.get_commit(commit_id=commit_id)
1859 return commit.date
1859 return commit.date
1860
1860
1861
1861
1862 def get_permission_name(key):
1862 def get_permission_name(key):
1863 return dict(Permission.PERMS).get(key)
1863 return dict(Permission.PERMS).get(key)
1864
1864
1865
1865
1866 def journal_filter_help(request):
1866 def journal_filter_help(request):
1867 _ = request.translate
1867 _ = request.translate
1868
1868
1869 return _(
1869 return _(
1870 'Example filter terms:\n' +
1870 'Example filter terms:\n' +
1871 ' repository:vcs\n' +
1871 ' repository:vcs\n' +
1872 ' username:marcin\n' +
1872 ' username:marcin\n' +
1873 ' username:(NOT marcin)\n' +
1873 ' username:(NOT marcin)\n' +
1874 ' action:*push*\n' +
1874 ' action:*push*\n' +
1875 ' ip:127.0.0.1\n' +
1875 ' ip:127.0.0.1\n' +
1876 ' date:20120101\n' +
1876 ' date:20120101\n' +
1877 ' date:[20120101100000 TO 20120102]\n' +
1877 ' date:[20120101100000 TO 20120102]\n' +
1878 '\n' +
1878 '\n' +
1879 'Generate wildcards using \'*\' character:\n' +
1879 'Generate wildcards using \'*\' character:\n' +
1880 ' "repository:vcs*" - search everything starting with \'vcs\'\n' +
1880 ' "repository:vcs*" - search everything starting with \'vcs\'\n' +
1881 ' "repository:*vcs*" - search for repository containing \'vcs\'\n' +
1881 ' "repository:*vcs*" - search for repository containing \'vcs\'\n' +
1882 '\n' +
1882 '\n' +
1883 'Optional AND / OR operators in queries\n' +
1883 'Optional AND / OR operators in queries\n' +
1884 ' "repository:vcs OR repository:test"\n' +
1884 ' "repository:vcs OR repository:test"\n' +
1885 ' "username:test AND repository:test*"\n'
1885 ' "username:test AND repository:test*"\n'
1886 )
1886 )
1887
1887
1888
1888
1889 def search_filter_help(searcher, request):
1889 def search_filter_help(searcher, request):
1890 _ = request.translate
1890 _ = request.translate
1891
1891
1892 terms = ''
1892 terms = ''
1893 return _(
1893 return _(
1894 'Example filter terms for `{searcher}` search:\n' +
1894 'Example filter terms for `{searcher}` search:\n' +
1895 '{terms}\n' +
1895 '{terms}\n' +
1896 'Generate wildcards using \'*\' character:\n' +
1896 'Generate wildcards using \'*\' character:\n' +
1897 ' "repo_name:vcs*" - search everything starting with \'vcs\'\n' +
1897 ' "repo_name:vcs*" - search everything starting with \'vcs\'\n' +
1898 ' "repo_name:*vcs*" - search for repository containing \'vcs\'\n' +
1898 ' "repo_name:*vcs*" - search for repository containing \'vcs\'\n' +
1899 '\n' +
1899 '\n' +
1900 'Optional AND / OR operators in queries\n' +
1900 'Optional AND / OR operators in queries\n' +
1901 ' "repo_name:vcs OR repo_name:test"\n' +
1901 ' "repo_name:vcs OR repo_name:test"\n' +
1902 ' "owner:test AND repo_name:test*"\n' +
1902 ' "owner:test AND repo_name:test*"\n' +
1903 'More: {search_doc}'
1903 'More: {search_doc}'
1904 ).format(searcher=searcher.name,
1904 ).format(searcher=searcher.name,
1905 terms=terms, search_doc=searcher.query_lang_doc)
1905 terms=terms, search_doc=searcher.query_lang_doc)
1906
1906
1907
1907
1908 def not_mapped_error(repo_name):
1908 def not_mapped_error(repo_name):
1909 from rhodecode.translation import _
1909 from rhodecode.translation import _
1910 flash(_('%s repository is not mapped to the database; perhaps'
1910 flash(_('%s repository is not mapped to the database; perhaps'
1911 ' it was created or renamed on the filesystem.'
1911 ' it was created or renamed on the filesystem.'
1912 ' Please run the application again'
1912 ' Please run the application again'
1913 ' in order to rescan repositories.') % repo_name, category='error')
1913 ' in order to rescan repositories.') % repo_name, category='error')
1914
1914
1915
1915
1916 def ip_range(ip_addr):
1916 def ip_range(ip_addr):
1917 from rhodecode.model.db import UserIpMap
1917 from rhodecode.model.db import UserIpMap
1918 s, e = UserIpMap._get_ip_range(ip_addr)
1918 s, e = UserIpMap._get_ip_range(ip_addr)
1919 return '%s - %s' % (s, e)
1919 return '%s - %s' % (s, e)
1920
1920
1921
1921
1922 def form(url, method='post', needs_csrf_token=True, **attrs):
1922 def form(url, method='post', needs_csrf_token=True, **attrs):
1923 """Wrapper around webhelpers.tags.form to prevent CSRF attacks."""
1923 """Wrapper around webhelpers.tags.form to prevent CSRF attacks."""
1924 if method.lower() != 'get' and needs_csrf_token:
1924 if method.lower() != 'get' and needs_csrf_token:
1925 raise Exception(
1925 raise Exception(
1926 'Forms to POST/PUT/DELETE endpoints should have (in general) a ' +
1926 'Forms to POST/PUT/DELETE endpoints should have (in general) a ' +
1927 'CSRF token. If the endpoint does not require such a token you can ' +
1927 'CSRF token. If the endpoint does not require such a token you can ' +
1928 'explicitly set the parameter needs_csrf_token to false.')
1928 'explicitly set the parameter needs_csrf_token to false.')
1929
1929
1930 return wh_form(url, method=method, **attrs)
1930 return wh_form(url, method=method, **attrs)
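# Illustrative sketch of the CSRF guard above (action_url stands for whatever
# action URL the caller passes in):
#
#   form(action_url, method='get')                           # ok, GET needs no token
#   form(action_url, method='post', needs_csrf_token=False)  # explicit opt-out
#   form(action_url, method='post')                          # raises Exception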
1931
1931
1932
1932
1933 def secure_form(form_url, method="POST", multipart=False, **attrs):
1933 def secure_form(form_url, method="POST", multipart=False, **attrs):
1934 """Start a form tag that points the action to an url. This
1934 """Start a form tag that points the action to an url. This
1935 form tag will also include the hidden field containing
1935 form tag will also include the hidden field containing
1936 the auth token.
1936 the auth token.
1937
1937
1938 The url options should be given either as a string, or as a
1938 The url options should be given either as a string, or as a
1939 ``url()`` function. The method for the form defaults to POST.
1939 ``url()`` function. The method for the form defaults to POST.
1940
1940
1941 Options:
1941 Options:
1942
1942
1943 ``multipart``
1943 ``multipart``
1944 If set to True, the enctype is set to "multipart/form-data".
1944 If set to True, the enctype is set to "multipart/form-data".
1945 ``method``
1945 ``method``
1946 The method to use when submitting the form, usually either
1946 The method to use when submitting the form, usually either
1947 "GET" or "POST". If "PUT", "DELETE", or another verb is used, a
1947 "GET" or "POST". If "PUT", "DELETE", or another verb is used, a
1948 hidden input with name _method is added to simulate the verb
1948 hidden input with name _method is added to simulate the verb
1949 over POST.
1949 over POST.
1950
1950
1951 """
1951 """
1952 from webhelpers.pylonslib.secure_form import insecure_form
1952 from webhelpers.pylonslib.secure_form import insecure_form
1953
1953
1954 session = None
1954 session = None
1955
1955
1956 # TODO(marcink): after pyramid migration require request variable ALWAYS
1956 # TODO(marcink): after pyramid migration require request variable ALWAYS
1957 if 'request' in attrs:
1957 if 'request' in attrs:
1958 session = attrs['request'].session
1958 session = attrs['request'].session
1959 del attrs['request']
1959 del attrs['request']
1960
1960
1961 form = insecure_form(form_url, method, multipart, **attrs)
1961 form = insecure_form(form_url, method, multipart, **attrs)
1962 token = literal(
1962 token = literal(
1963 '<input type="hidden" id="{}" name="{}" value="{}">'.format(
1963 '<input type="hidden" id="{}" name="{}" value="{}">'.format(
1964 csrf_token_key, csrf_token_key, get_csrf_token(session)))
1964 csrf_token_key, csrf_token_key, get_csrf_token(session)))
1965
1965
1966 return literal("%s\n%s" % (form, token))
1966 return literal("%s\n%s" % (form, token))
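# Illustrative usage sketch (the route name is a made-up example); passing the
# pyramid request lets the helper read the CSRF token from the session:
#
#   secure_form(route_path('my_account_password'), method='POST', request=request)
#   # -> the opening <form ...> tag followed by a hidden input named after
#   #    csrf_token_key carrying the session's token.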
1967
1967
1968
1968
1969 def dropdownmenu(name, selected, options, enable_filter=False, **attrs):
1969 def dropdownmenu(name, selected, options, enable_filter=False, **attrs):
1970 select_html = select(name, selected, options, **attrs)
1970 select_html = select(name, selected, options, **attrs)
1971 select2 = """
1971 select2 = """
1972 <script>
1972 <script>
1973 $(document).ready(function() {
1973 $(document).ready(function() {
1974 $('#%s').select2({
1974 $('#%s').select2({
1975 containerCssClass: 'drop-menu',
1975 containerCssClass: 'drop-menu',
1976 dropdownCssClass: 'drop-menu-dropdown',
1976 dropdownCssClass: 'drop-menu-dropdown',
1977 dropdownAutoWidth: true%s
1977 dropdownAutoWidth: true%s
1978 });
1978 });
1979 });
1979 });
1980 </script>
1980 </script>
1981 """
1981 """
1982 filter_option = """,
1982 filter_option = """,
1983 minimumResultsForSearch: -1
1983 minimumResultsForSearch: -1
1984 """
1984 """
1985 input_id = attrs.get('id') or name
1985 input_id = attrs.get('id') or name
1986 filter_enabled = "" if enable_filter else filter_option
1986 filter_enabled = "" if enable_filter else filter_option
1987 select_script = literal(select2 % (input_id, filter_enabled))
1987 select_script = literal(select2 % (input_id, filter_enabled))
1988
1988
1989 return literal(select_html+select_script)
1989 return literal(select_html+select_script)
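# Illustrative usage sketch (field name and options are made up); the helper emits
# the <select> markup plus a script that upgrades it to a select2 drop-down:
#
#   dropdownmenu('landing_rev', 'rev:tip', [('rev:tip', 'latest tip')])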
1990
1990
1991
1991
1992 def get_visual_attr(tmpl_context_var, attr_name):
1992 def get_visual_attr(tmpl_context_var, attr_name):
1993 """
1993 """
1994 A safe way to get an attribute from the `visual` variable of the template context
1994 A safe way to get an attribute from the `visual` variable of the template context
1995
1995
1996 :param tmpl_context_var: instance of tmpl_context, usually present as `c`
1996 :param tmpl_context_var: instance of tmpl_context, usually present as `c`
1997 :param attr_name: name of the attribute to fetch from `c.visual`
1997 :param attr_name: name of the attribute to fetch from `c.visual`
1998 """
1998 """
1999 visual = getattr(tmpl_context_var, 'visual', None)
1999 visual = getattr(tmpl_context_var, 'visual', None)
2000 if not visual:
2000 if not visual:
2001 return
2001 return
2002 else:
2002 else:
2003 return getattr(visual, attr_name, None)
2003 return getattr(visual, attr_name, None)
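# Illustrative usage sketch, typically from a template:
#
#   h.get_visual_attr(c, 'use_gravatar')
#   # -> the setting value, or None when `c.visual` is not populated, instead of
#   #    raising AttributeError.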
2004
2004
2005
2005
2006 def get_last_path_part(file_node):
2006 def get_last_path_part(file_node):
2007 if not file_node.path:
2007 if not file_node.path:
2008 return u''
2008 return u''
2009
2009
2010 path = safe_unicode(file_node.path.split('/')[-1])
2010 path = safe_unicode(file_node.path.split('/')[-1])
2011 return u'../' + path
2011 return u'../' + path
2012
2012
2013
2013
2014 def route_url(*args, **kwargs):
2014 def route_url(*args, **kwargs):
2015 """
2015 """
2016 Wrapper around Pyramid's `route_url` (fully qualified URL) function.
2016 Wrapper around Pyramid's `route_url` (fully qualified URL) function.
2017 It is used to generate URLs from within pylons views or templates.
2017 It is used to generate URLs from within pylons views or templates.
2018 This will be removed when the pyramid migration is finished.
2018 This will be removed when the pyramid migration is finished.
2019 """
2019 """
2020 req = get_current_request()
2020 req = get_current_request()
2021 return req.route_url(*args, **kwargs)
2021 return req.route_url(*args, **kwargs)
2022
2022
2023
2023
2024 def route_path(*args, **kwargs):
2024 def route_path(*args, **kwargs):
2025 """
2025 """
2026 Wrapper around Pyramid's `route_path` function. It is used to generate
2026 Wrapper around Pyramid's `route_path` function. It is used to generate
2027 URLs from within pylons views or templates. This will be removed when
2027 URLs from within pylons views or templates. This will be removed when
2028 the pyramid migration is finished.
2028 the pyramid migration is finished.
2029 """
2029 """
2030 req = get_current_request()
2030 req = get_current_request()
2031 return req.route_path(*args, **kwargs)
2031 return req.route_path(*args, **kwargs)
2032
2032
2033
2033
2034 def route_path_or_none(*args, **kwargs):
2034 def route_path_or_none(*args, **kwargs):
2035 try:
2035 try:
2036 return route_path(*args, **kwargs)
2036 return route_path(*args, **kwargs)
2037 except KeyError:
2037 except KeyError:
2038 return None
2038 return None
2039
2039
2040
2040
2041 def current_route_path(request, **kw):
2041 def current_route_path(request, **kw):
2042 new_args = request.GET.mixed()
2042 new_args = request.GET.mixed()
2043 new_args.update(kw)
2043 new_args.update(kw)
2044 return request.current_route_path(_query=new_args)
2044 return request.current_route_path(_query=new_args)
2045
2045
2046
2046
2047 def static_url(*args, **kwds):
2047 def static_url(*args, **kwds):
2048 """
2048 """
2049 Wrapper around Pyramid's `static_url` function. It is used to generate
2049 Wrapper around Pyramid's `static_url` function. It is used to generate
2050 static asset URLs from within pylons views or templates. This will be removed
2050 static asset URLs from within pylons views or templates. This will be removed
2051 when the pyramid migration is finished.
2051 when the pyramid migration is finished.
2052 """
2052 """
2053 req = get_current_request()
2053 req = get_current_request()
2054 return req.static_url(*args, **kwds)
2054 return req.static_url(*args, **kwds)
2055
2055
2056
2056
2057 def resource_path(*args, **kwds):
2057 def resource_path(*args, **kwds):
2058 """
2058 """
2059 Wrapper around Pyramid's `resource_path` function. It is used to generate
2059 Wrapper around Pyramid's `resource_path` function. It is used to generate
2060 resource URLs from within pylons views or templates. This will be removed
2060 resource URLs from within pylons views or templates. This will be removed
2061 when the pyramid migration is finished.
2061 when the pyramid migration is finished.
2062 """
2062 """
2063 req = get_current_request()
2063 req = get_current_request()
2064 return req.resource_path(*args, **kwds)
2064 return req.resource_path(*args, **kwds)
2065
2065
2066
2066
2067 def api_call_example(method, args):
2067 def api_call_example(method, args):
2068 """
2068 """
2069 Generates an example API call using cURL
2069 Generates an example API call using cURL
2070 """
2070 """
2071 args_json = json.dumps(OrderedDict([
2071 args_json = json.dumps(OrderedDict([
2072 ('id', 1),
2072 ('id', 1),
2073 ('auth_token', 'SECRET'),
2073 ('auth_token', 'SECRET'),
2074 ('method', method),
2074 ('method', method),
2075 ('args', args)
2075 ('args', args)
2076 ]))
2076 ]))
2077 return literal(
2077 return literal(
2078 "curl {api_url} -X POST -H 'content-type:text/plain' --data-binary '{data}'"
2078 "curl {api_url} -X POST -H 'content-type:text/plain' --data-binary '{data}'"
2079 "<br/><br/>SECRET can be found in <a href=\"{token_url}\">auth-tokens</a> page, "
2079 "<br/><br/>SECRET can be found in <a href=\"{token_url}\">auth-tokens</a> page, "
2080 "and needs to be of `api calls` role."
2080 "and needs to be of `api calls` role."
2081 .format(
2081 .format(
2082 api_url=route_url('apiv2'),
2082 api_url=route_url('apiv2'),
2083 token_url=route_url('my_account_auth_tokens'),
2083 token_url=route_url('my_account_auth_tokens'),
2084 data=args_json))
2084 data=args_json))
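# Illustrative shape of the generated snippet (the endpoint URL and method name
# are placeholders for whatever the running instance and caller provide):
#
#   curl <apiv2-url> -X POST -H 'content-type:text/plain' \
#       --data-binary '{"id": 1, "auth_token": "SECRET", "method": "<method>", "args": {...}}'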
2085
2085
2086
2086
2087 def notification_description(notification, request):
2087 def notification_description(notification, request):
2088 """
2088 """
2089 Generate a human-readable notification description based on the notification type
2089 Generate a human-readable notification description based on the notification type
2090 """
2090 """
2091 from rhodecode.model.notification import NotificationModel
2091 from rhodecode.model.notification import NotificationModel
2092 return NotificationModel().make_description(
2092 return NotificationModel().make_description(
2093 notification, translate=request.translate)
2093 notification, translate=request.translate)
2094
2094
2095
2095
2096 def go_import_header(request, db_repo=None):
2096 def go_import_header(request, db_repo=None):
2097 """
2097 """
2098 Creates a go-import meta header used by the Go toolchain to resolve remote imports
2098 Creates a go-import meta header used by the Go toolchain to resolve remote imports
2099 """
2099 """
2100
2100
2101 if not db_repo:
2101 if not db_repo:
2102 return
2102 return
2103 if 'go-get' not in request.GET:
2103 if 'go-get' not in request.GET:
2104 return
2104 return
2105
2105
2106 clone_url = db_repo.clone_url()
2106 clone_url = db_repo.clone_url()
2107 prefix = re.split(r'^https?:\/\/', clone_url)[-1]
2107 prefix = re.split(r'^https?:\/\/', clone_url)[-1]
2108 # we have a repo and the go-get flag, so emit the go-import meta tag
2108 # we have a repo and the go-get flag, so emit the go-import meta tag
2109 return literal('<meta name="go-import" content="{} {} {}">'.format(
2109 return literal('<meta name="go-import" content="{} {} {}">'.format(
2110 prefix, db_repo.repo_type, clone_url))
2110 prefix, db_repo.repo_type, clone_url))
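# Illustrative output sketch for a hypothetical hg repository cloned from
# https://code.example.com/group/repo:
#
#   <meta name="go-import" content="code.example.com/group/repo hg https://code.example.com/group/repo">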
@@ -1,624 +1,632 b''
1 # -*- coding: utf-8 -*-
1 # -*- coding: utf-8 -*-
2
2
3 # Copyright (C) 2010-2017 RhodeCode GmbH
3 # Copyright (C) 2010-2017 RhodeCode GmbH
4 #
4 #
5 # This program is free software: you can redistribute it and/or modify
5 # This program is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU Affero General Public License, version 3
6 # it under the terms of the GNU Affero General Public License, version 3
7 # (only), as published by the Free Software Foundation.
7 # (only), as published by the Free Software Foundation.
8 #
8 #
9 # This program is distributed in the hope that it will be useful,
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU General Public License for more details.
12 # GNU General Public License for more details.
13 #
13 #
14 # You should have received a copy of the GNU Affero General Public License
14 # You should have received a copy of the GNU Affero General Public License
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 #
16 #
17 # This program is dual-licensed. If you wish to learn more about the
17 # This program is dual-licensed. If you wish to learn more about the
18 # RhodeCode Enterprise Edition, including its added features, Support services,
18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 # and proprietary license terms, please see https://rhodecode.com/licenses/
19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20
20
21
21
22 """
22 """
23 Package for testing various lib/helper functions in rhodecode
23 Package for testing various lib/helper functions in rhodecode
24 """
24 """
25
25
26 import datetime
26 import datetime
27 import string
27 import string
28 import mock
28 import mock
29 import pytest
29 import pytest
30
30
31 from rhodecode.tests import no_newline_id_generator
31 from rhodecode.tests import no_newline_id_generator
32 from rhodecode.tests.utils import run_test_concurrently
32 from rhodecode.tests.utils import run_test_concurrently
33 from rhodecode.lib.helpers import InitialsGravatar
33 from rhodecode.lib.helpers import InitialsGravatar
34
34
35 from rhodecode.lib.utils2 import AttributeDict
35 from rhodecode.lib.utils2 import AttributeDict
36 from rhodecode.model.db import Repository
36 from rhodecode.model.db import Repository
37
37
38
38
39 def _urls_for_proto(proto):
39 def _urls_for_proto(proto):
40 return [
40 return [
41 ('%s://127.0.0.1' % proto, ['%s://' % proto, '127.0.0.1'],
41 ('%s://127.0.0.1' % proto, ['%s://' % proto, '127.0.0.1'],
42 '%s://127.0.0.1' % proto),
42 '%s://127.0.0.1' % proto),
43 ('%s://marcink@127.0.0.1' % proto, ['%s://' % proto, '127.0.0.1'],
43 ('%s://marcink@127.0.0.1' % proto, ['%s://' % proto, '127.0.0.1'],
44 '%s://127.0.0.1' % proto),
44 '%s://127.0.0.1' % proto),
45 ('%s://marcink:pass@127.0.0.1' % proto, ['%s://' % proto, '127.0.0.1'],
45 ('%s://marcink:pass@127.0.0.1' % proto, ['%s://' % proto, '127.0.0.1'],
46 '%s://127.0.0.1' % proto),
46 '%s://127.0.0.1' % proto),
47 ('%s://127.0.0.1:8080' % proto, ['%s://' % proto, '127.0.0.1', '8080'],
47 ('%s://127.0.0.1:8080' % proto, ['%s://' % proto, '127.0.0.1', '8080'],
48 '%s://127.0.0.1:8080' % proto),
48 '%s://127.0.0.1:8080' % proto),
49 ('%s://domain.org' % proto, ['%s://' % proto, 'domain.org'],
49 ('%s://domain.org' % proto, ['%s://' % proto, 'domain.org'],
50 '%s://domain.org' % proto),
50 '%s://domain.org' % proto),
51 ('%s://user:pass@domain.org:8080' % proto,
51 ('%s://user:pass@domain.org:8080' % proto,
52 ['%s://' % proto, 'domain.org', '8080'],
52 ['%s://' % proto, 'domain.org', '8080'],
53 '%s://domain.org:8080' % proto),
53 '%s://domain.org:8080' % proto),
54 ]
54 ]
55
55
56 TEST_URLS = _urls_for_proto('http') + _urls_for_proto('https')
56 TEST_URLS = _urls_for_proto('http') + _urls_for_proto('https')
57
57
58
58
59 @pytest.mark.parametrize("test_url, expected, expected_creds", TEST_URLS)
59 @pytest.mark.parametrize("test_url, expected, expected_creds", TEST_URLS)
60 def test_uri_filter(test_url, expected, expected_creds):
60 def test_uri_filter(test_url, expected, expected_creds):
61 from rhodecode.lib.utils2 import uri_filter
61 from rhodecode.lib.utils2 import uri_filter
62 assert uri_filter(test_url) == expected
62 assert uri_filter(test_url) == expected
63
63
64
64
65 @pytest.mark.parametrize("test_url, expected, expected_creds", TEST_URLS)
65 @pytest.mark.parametrize("test_url, expected, expected_creds", TEST_URLS)
66 def test_credentials_filter(test_url, expected, expected_creds):
66 def test_credentials_filter(test_url, expected, expected_creds):
67 from rhodecode.lib.utils2 import credentials_filter
67 from rhodecode.lib.utils2 import credentials_filter
68 assert credentials_filter(test_url) == expected_creds
68 assert credentials_filter(test_url) == expected_creds
69
69
70
70
71 @pytest.mark.parametrize("str_bool, expected", [
71 @pytest.mark.parametrize("str_bool, expected", [
72 ('t', True),
72 ('t', True),
73 ('true', True),
73 ('true', True),
74 ('y', True),
74 ('y', True),
75 ('yes', True),
75 ('yes', True),
76 ('on', True),
76 ('on', True),
77 ('1', True),
77 ('1', True),
78 ('Y', True),
78 ('Y', True),
79 ('yeS', True),
79 ('yeS', True),
80 ('Y', True),
80 ('Y', True),
81 ('TRUE', True),
81 ('TRUE', True),
82 ('T', True),
82 ('T', True),
83 ('False', False),
83 ('False', False),
84 ('F', False),
84 ('F', False),
85 ('FALSE', False),
85 ('FALSE', False),
86 ('0', False),
86 ('0', False),
87 ('-1', False),
87 ('-1', False),
88 ('', False)
88 ('', False)
89 ])
89 ])
90 def test_str2bool(str_bool, expected):
90 def test_str2bool(str_bool, expected):
91 from rhodecode.lib.utils2 import str2bool
91 from rhodecode.lib.utils2 import str2bool
92 assert str2bool(str_bool) == expected
92 assert str2bool(str_bool) == expected
93
93
94
94
95 @pytest.mark.parametrize("text, expected", reduce(lambda a1,a2:a1+a2, [
95 @pytest.mark.parametrize("text, expected", reduce(lambda a1,a2:a1+a2, [
96 [
96 [
97 (pref+"", []),
97 (pref+"", []),
98 (pref+"Hi there @marcink", ['marcink']),
98 (pref+"Hi there @marcink", ['marcink']),
99 (pref+"Hi there @marcink and @bob", ['bob', 'marcink']),
99 (pref+"Hi there @marcink and @bob", ['bob', 'marcink']),
100 (pref+"Hi there @marcink\n", ['marcink']),
100 (pref+"Hi there @marcink\n", ['marcink']),
101 (pref+"Hi there @marcink and @bob\n", ['bob', 'marcink']),
101 (pref+"Hi there @marcink and @bob\n", ['bob', 'marcink']),
102 (pref+"Hi there marcin@rhodecode.com", []),
102 (pref+"Hi there marcin@rhodecode.com", []),
103 (pref+"Hi there @john.malcovic and @bob\n", ['bob', 'john.malcovic']),
103 (pref+"Hi there @john.malcovic and @bob\n", ['bob', 'john.malcovic']),
104 (pref+"This needs to be reviewed: (@marcink,@john)", ["john", "marcink"]),
104 (pref+"This needs to be reviewed: (@marcink,@john)", ["john", "marcink"]),
105 (pref+"This needs to be reviewed: (@marcink, @john)", ["john", "marcink"]),
105 (pref+"This needs to be reviewed: (@marcink, @john)", ["john", "marcink"]),
106 (pref+"This needs to be reviewed: [@marcink,@john]", ["john", "marcink"]),
106 (pref+"This needs to be reviewed: [@marcink,@john]", ["john", "marcink"]),
107 (pref+"This needs to be reviewed: (@marcink @john)", ["john", "marcink"]),
107 (pref+"This needs to be reviewed: (@marcink @john)", ["john", "marcink"]),
108 (pref+"@john @mary, please review", ["john", "mary"]),
108 (pref+"@john @mary, please review", ["john", "mary"]),
109 (pref+"@john,@mary, please review", ["john", "mary"]),
109 (pref+"@john,@mary, please review", ["john", "mary"]),
110 (pref+"Hej @123, @22john,@mary, please review", ['123', '22john', 'mary']),
110 (pref+"Hej @123, @22john,@mary, please review", ['123', '22john', 'mary']),
111 (pref+"@first hi there @marcink here's my email marcin@email.com "
111 (pref+"@first hi there @marcink here's my email marcin@email.com "
112 "@lukaszb check @one_more22 it pls @ ttwelve @D[] @one@two@three ", ['first', 'lukaszb', 'marcink', 'one', 'one_more22']),
112 "@lukaszb check @one_more22 it pls @ ttwelve @D[] @one@two@three ", ['first', 'lukaszb', 'marcink', 'one', 'one_more22']),
113 (pref+"@MARCIN @maRCiN @2one_more22 @john please see this http://org.pl", ['2one_more22', 'john', 'MARCIN', 'maRCiN']),
113 (pref+"@MARCIN @maRCiN @2one_more22 @john please see this http://org.pl", ['2one_more22', 'john', 'MARCIN', 'maRCiN']),
114 (pref+"@marian.user just do it @marco-polo and next extract @marco_polo", ['marco-polo', 'marco_polo', 'marian.user']),
114 (pref+"@marian.user just do it @marco-polo and next extract @marco_polo", ['marco-polo', 'marco_polo', 'marian.user']),
115 (pref+"user.dot hej ! not-needed maril@domain.org", []),
115 (pref+"user.dot hej ! not-needed maril@domain.org", []),
116 (pref+"\n@marcin", ['marcin']),
116 (pref+"\n@marcin", ['marcin']),
117 ]
117 ]
118 for pref in ['', '\n', 'hi !', '\t', '\n\n']]), ids=no_newline_id_generator)
118 for pref in ['', '\n', 'hi !', '\t', '\n\n']]), ids=no_newline_id_generator)
119 def test_mention_extractor(text, expected):
119 def test_mention_extractor(text, expected):
120 from rhodecode.lib.utils2 import extract_mentioned_users
120 from rhodecode.lib.utils2 import extract_mentioned_users
121 got = extract_mentioned_users(text)
121 got = extract_mentioned_users(text)
122 assert sorted(got, key=lambda x: x.lower()) == got
122 assert sorted(got, key=lambda x: x.lower()) == got
123 assert set(expected) == set(got)
123 assert set(expected) == set(got)
124
124
125 @pytest.mark.parametrize("age_args, expected, kw", [
125 @pytest.mark.parametrize("age_args, expected, kw", [
126 ({}, u'just now', {}),
126 ({}, u'just now', {}),
127 ({'seconds': -1}, u'1 second ago', {}),
127 ({'seconds': -1}, u'1 second ago', {}),
128 ({'seconds': -60 * 2}, u'2 minutes ago', {}),
128 ({'seconds': -60 * 2}, u'2 minutes ago', {}),
129 ({'hours': -1}, u'1 hour ago', {}),
129 ({'hours': -1}, u'1 hour ago', {}),
130 ({'hours': -24}, u'1 day ago', {}),
130 ({'hours': -24}, u'1 day ago', {}),
131 ({'hours': -24 * 5}, u'5 days ago', {}),
131 ({'hours': -24 * 5}, u'5 days ago', {}),
132 ({'months': -1}, u'1 month ago', {}),
132 ({'months': -1}, u'1 month ago', {}),
133 ({'months': -1, 'days': -2}, u'1 month and 2 days ago', {}),
133 ({'months': -1, 'days': -2}, u'1 month and 2 days ago', {}),
134 ({'years': -1, 'months': -1}, u'1 year and 1 month ago', {}),
134 ({'years': -1, 'months': -1}, u'1 year and 1 month ago', {}),
135 ({}, u'just now', {'short_format': True}),
135 ({}, u'just now', {'short_format': True}),
136 ({'seconds': -1}, u'1sec ago', {'short_format': True}),
136 ({'seconds': -1}, u'1sec ago', {'short_format': True}),
137 ({'seconds': -60 * 2}, u'2min ago', {'short_format': True}),
137 ({'seconds': -60 * 2}, u'2min ago', {'short_format': True}),
138 ({'hours': -1}, u'1h ago', {'short_format': True}),
138 ({'hours': -1}, u'1h ago', {'short_format': True}),
139 ({'hours': -24}, u'1d ago', {'short_format': True}),
139 ({'hours': -24}, u'1d ago', {'short_format': True}),
140 ({'hours': -24 * 5}, u'5d ago', {'short_format': True}),
140 ({'hours': -24 * 5}, u'5d ago', {'short_format': True}),
141 ({'months': -1}, u'1m ago', {'short_format': True}),
141 ({'months': -1}, u'1m ago', {'short_format': True}),
142 ({'months': -1, 'days': -2}, u'1m, 2d ago', {'short_format': True}),
142 ({'months': -1, 'days': -2}, u'1m, 2d ago', {'short_format': True}),
143 ({'years': -1, 'months': -1}, u'1y, 1m ago', {'short_format': True}),
143 ({'years': -1, 'months': -1}, u'1y, 1m ago', {'short_format': True}),
144 ])
144 ])
145 def test_age(age_args, expected, kw, pylonsapp):
145 def test_age(age_args, expected, kw, pylonsapp):
146 from rhodecode.lib.utils2 import age
146 from rhodecode.lib.utils2 import age
147 from dateutil import relativedelta
147 from dateutil import relativedelta
148 n = datetime.datetime(year=2012, month=5, day=17)
148 n = datetime.datetime(year=2012, month=5, day=17)
149 delt = lambda *args, **kwargs: relativedelta.relativedelta(*args, **kwargs)
149 delt = lambda *args, **kwargs: relativedelta.relativedelta(*args, **kwargs)
150
150
151 def translate(elem):
151 def translate(elem):
152 return elem.interpolate()
152 return elem.interpolate()
153
153
154 assert translate(age(n + delt(**age_args), now=n, **kw)) == expected
154 assert translate(age(n + delt(**age_args), now=n, **kw)) == expected
155
155
156
156
157 @pytest.mark.parametrize("age_args, expected, kw", [
157 @pytest.mark.parametrize("age_args, expected, kw", [
158 ({}, u'just now', {}),
158 ({}, u'just now', {}),
159 ({'seconds': 1}, u'in 1 second', {}),
159 ({'seconds': 1}, u'in 1 second', {}),
160 ({'seconds': 60 * 2}, u'in 2 minutes', {}),
160 ({'seconds': 60 * 2}, u'in 2 minutes', {}),
161 ({'hours': 1}, u'in 1 hour', {}),
161 ({'hours': 1}, u'in 1 hour', {}),
162 ({'hours': 24}, u'in 1 day', {}),
162 ({'hours': 24}, u'in 1 day', {}),
163 ({'hours': 24 * 5}, u'in 5 days', {}),
163 ({'hours': 24 * 5}, u'in 5 days', {}),
164 ({'months': 1}, u'in 1 month', {}),
164 ({'months': 1}, u'in 1 month', {}),
165 ({'months': 1, 'days': 1}, u'in 1 month and 1 day', {}),
165 ({'months': 1, 'days': 1}, u'in 1 month and 1 day', {}),
166 ({'years': 1, 'months': 1}, u'in 1 year and 1 month', {}),
166 ({'years': 1, 'months': 1}, u'in 1 year and 1 month', {}),
167 ({}, u'just now', {'short_format': True}),
167 ({}, u'just now', {'short_format': True}),
168 ({'seconds': 1}, u'in 1sec', {'short_format': True}),
168 ({'seconds': 1}, u'in 1sec', {'short_format': True}),
169 ({'seconds': 60 * 2}, u'in 2min', {'short_format': True}),
169 ({'seconds': 60 * 2}, u'in 2min', {'short_format': True}),
170 ({'hours': 1}, u'in 1h', {'short_format': True}),
170 ({'hours': 1}, u'in 1h', {'short_format': True}),
171 ({'hours': 24}, u'in 1d', {'short_format': True}),
171 ({'hours': 24}, u'in 1d', {'short_format': True}),
172 ({'hours': 24 * 5}, u'in 5d', {'short_format': True}),
172 ({'hours': 24 * 5}, u'in 5d', {'short_format': True}),
173 ({'months': 1}, u'in 1m', {'short_format': True}),
173 ({'months': 1}, u'in 1m', {'short_format': True}),
174 ({'months': 1, 'days': 1}, u'in 1m, 1d', {'short_format': True}),
174 ({'months': 1, 'days': 1}, u'in 1m, 1d', {'short_format': True}),
175 ({'years': 1, 'months': 1}, u'in 1y, 1m', {'short_format': True}),
175 ({'years': 1, 'months': 1}, u'in 1y, 1m', {'short_format': True}),
176 ])
176 ])
177 def test_age_in_future(age_args, expected, kw, pylonsapp):
177 def test_age_in_future(age_args, expected, kw, pylonsapp):
178 from rhodecode.lib.utils2 import age
178 from rhodecode.lib.utils2 import age
179 from dateutil import relativedelta
179 from dateutil import relativedelta
180 n = datetime.datetime(year=2012, month=5, day=17)
180 n = datetime.datetime(year=2012, month=5, day=17)
181 delt = lambda *args, **kwargs: relativedelta.relativedelta(*args, **kwargs)
181 delt = lambda *args, **kwargs: relativedelta.relativedelta(*args, **kwargs)
182
182
183 def translate(elem):
183 def translate(elem):
184 return elem.interpolate()
184 return elem.interpolate()
185
185
186 assert translate(age(n + delt(**age_args), now=n, **kw)) == expected
186 assert translate(age(n + delt(**age_args), now=n, **kw)) == expected
187
187
188
188
189 @pytest.mark.parametrize("sample, expected_tags", [
189 @pytest.mark.parametrize("sample, expected_tags", [
190 # entry
191 ((
192 ""
193 ),
194 [
195
196 ]),
197 # entry
190 ((
198 ((
191 "hello world [stale]"
199 "hello world [stale]"
192 ),
200 ),
193 [
201 [
194 ('state', '[stale]'),
202 ('state', '[stale]'),
195 ]),
203 ]),
196 # entry
204 # entry
197 ((
205 ((
198 "hello world [v2.0.0] [v1.0.0]"
206 "hello world [v2.0.0] [v1.0.0]"
199 ),
207 ),
200 [
208 [
201 ('generic', '[v2.0.0]'),
209 ('generic', '[v2.0.0]'),
202 ('generic', '[v1.0.0]'),
210 ('generic', '[v1.0.0]'),
203 ]),
211 ]),
204 # entry
212 # entry
205 ((
213 ((
206 "he[ll]o wo[rl]d"
214 "he[ll]o wo[rl]d"
207 ),
215 ),
208 [
216 [
209 ('label', '[ll]'),
217 ('label', '[ll]'),
210 ('label', '[rl]'),
218 ('label', '[rl]'),
211 ]),
219 ]),
212 # entry
220 # entry
213 ((
221 ((
214 "hello world [stale]\n[featured]\n[stale] [dead] [dev]"
222 "hello world [stale]\n[featured]\n[stale] [dead] [dev]"
215 ),
223 ),
216 [
224 [
217 ('state', '[stale]'),
225 ('state', '[stale]'),
218 ('state', '[featured]'),
226 ('state', '[featured]'),
219 ('state', '[stale]'),
227 ('state', '[stale]'),
220 ('state', '[dead]'),
228 ('state', '[dead]'),
221 ('state', '[dev]'),
229 ('state', '[dev]'),
222 ]),
230 ]),
223 # entry
231 # entry
224 ((
232 ((
225 "hello world \n\n [stale] \n [url =&gt; [name](http://rc.com)]"
233 "hello world \n\n [stale] \n [url =&gt; [name](http://rc.com)]"
226 ),
234 ),
227 [
235 [
228 ('state', '[stale]'),
236 ('state', '[stale]'),
229 ('url', '[url =&gt; [name](http://rc.com)]'),
237 ('url', '[url =&gt; [name](http://rc.com)]'),
230 ]),
238 ]),
231 # entry
239 # entry
232 ((
240 ((
233 "hello pta[tag] gog [[]] [[] sda ero[or]d [me =&gt;>< sa]"
241 "hello pta[tag] gog [[]] [[] sda ero[or]d [me =&gt;>< sa]"
234 "[requires] [stale] [see<>=&gt;] [see =&gt; http://url.com]"
242 "[requires] [stale] [see<>=&gt;] [see =&gt; http://url.com]"
235 "[requires =&gt; url] [lang =&gt; python] [just a tag] "
243 "[requires =&gt; url] [lang =&gt; python] [just a tag] "
236 "<html_tag first='abc' attr=\"my.url?attr=&another=\"></html_tag>"
244 "<html_tag first='abc' attr=\"my.url?attr=&another=\"></html_tag>"
237 "[,d] [ =&gt; ULR ] [obsolete] [desc]]"
245 "[,d] [ =&gt; ULR ] [obsolete] [desc]]"
238 ),
246 ),
239 [
247 [
240 ('label', '[desc]'),
248 ('label', '[desc]'),
241 ('label', '[obsolete]'),
249 ('label', '[obsolete]'),
242 ('label', '[or]'),
250 ('label', '[or]'),
243 ('label', '[requires]'),
251 ('label', '[requires]'),
244 ('label', '[tag]'),
252 ('label', '[tag]'),
245 ('state', '[stale]'),
253 ('state', '[stale]'),
246 ('lang', '[lang =&gt; python]'),
254 ('lang', '[lang =&gt; python]'),
247 ('ref', '[requires =&gt; url]'),
255 ('ref', '[requires =&gt; url]'),
248 ('see', '[see =&gt; http://url.com]'),
256 ('see', '[see =&gt; http://url.com]'),
249
257
250 ]),
258 ]),
251
259
252 ], ids=no_newline_id_generator)
260 ], ids=no_newline_id_generator)
253 def test_metatag_extraction(sample, expected_tags):
261 def test_metatag_extraction(sample, expected_tags):
254 from rhodecode.lib.helpers import extract_metatags
262 from rhodecode.lib.helpers import extract_metatags
255 tags, value = extract_metatags(sample)
263 tags, value = extract_metatags(sample)
256 assert sorted(tags) == sorted(expected_tags)
264 assert sorted(tags) == sorted(expected_tags)
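# Illustrative note on the first (empty) entry above: after this fix the helper is
# expected to behave roughly like
#
#   extract_metatags("")   # -> no tags extracted, and the text handed back
#
# rather than misbehaving on an empty description.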
257
265
258
266
259 @pytest.mark.parametrize("tag_data, expected_html", [
267 @pytest.mark.parametrize("tag_data, expected_html", [
260
268
261 (('state', '[stable]'), '<div class="metatag" tag="state stable">stable</div>'),
269 (('state', '[stable]'), '<div class="metatag" tag="state stable">stable</div>'),
262 (('state', '[stale]'), '<div class="metatag" tag="state stale">stale</div>'),
270 (('state', '[stale]'), '<div class="metatag" tag="state stale">stale</div>'),
263 (('state', '[featured]'), '<div class="metatag" tag="state featured">featured</div>'),
271 (('state', '[featured]'), '<div class="metatag" tag="state featured">featured</div>'),
264 (('state', '[dev]'), '<div class="metatag" tag="state dev">dev</div>'),
272 (('state', '[dev]'), '<div class="metatag" tag="state dev">dev</div>'),
265 (('state', '[dead]'), '<div class="metatag" tag="state dead">dead</div>'),
273 (('state', '[dead]'), '<div class="metatag" tag="state dead">dead</div>'),
266
274
267 (('label', '[personal]'), '<div class="metatag" tag="label">personal</div>'),
275 (('label', '[personal]'), '<div class="metatag" tag="label">personal</div>'),
268 (('generic', '[v2.0.0]'), '<div class="metatag" tag="generic">v2.0.0</div>'),
276 (('generic', '[v2.0.0]'), '<div class="metatag" tag="generic">v2.0.0</div>'),
269
277
270 (('lang', '[lang =&gt; JavaScript]'), '<div class="metatag" tag="lang">JavaScript</div>'),
278 (('lang', '[lang =&gt; JavaScript]'), '<div class="metatag" tag="lang">JavaScript</div>'),
271 (('lang', '[lang =&gt; C++]'), '<div class="metatag" tag="lang">C++</div>'),
279 (('lang', '[lang =&gt; C++]'), '<div class="metatag" tag="lang">C++</div>'),
272 (('lang', '[lang =&gt; C#]'), '<div class="metatag" tag="lang">C#</div>'),
280 (('lang', '[lang =&gt; C#]'), '<div class="metatag" tag="lang">C#</div>'),
273 (('lang', '[lang =&gt; Delphi/Object]'), '<div class="metatag" tag="lang">Delphi/Object</div>'),
281 (('lang', '[lang =&gt; Delphi/Object]'), '<div class="metatag" tag="lang">Delphi/Object</div>'),
274 (('lang', '[lang =&gt; Objective-C]'), '<div class="metatag" tag="lang">Objective-C</div>'),
282 (('lang', '[lang =&gt; Objective-C]'), '<div class="metatag" tag="lang">Objective-C</div>'),
275 (('lang', '[lang =&gt; .NET]'), '<div class="metatag" tag="lang">.NET</div>'),
283 (('lang', '[lang =&gt; .NET]'), '<div class="metatag" tag="lang">.NET</div>'),
276
284
277 (('license', '[license =&gt; BSD 3-clause]'), '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/BSD 3-clause">BSD 3-clause</a></div>'),
285 (('license', '[license =&gt; BSD 3-clause]'), '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/BSD 3-clause">BSD 3-clause</a></div>'),
278 (('license', '[license =&gt; GPLv3]'), '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/GPLv3">GPLv3</a></div>'),
286 (('license', '[license =&gt; GPLv3]'), '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/GPLv3">GPLv3</a></div>'),
279 (('license', '[license =&gt; MIT]'), '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/MIT">MIT</a></div>'),
287 (('license', '[license =&gt; MIT]'), '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/MIT">MIT</a></div>'),
280 (('license', '[license =&gt; AGPLv3]'), '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/AGPLv3">AGPLv3</a></div>'),
288 (('license', '[license =&gt; AGPLv3]'), '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/AGPLv3">AGPLv3</a></div>'),
281
289
282 (('ref', '[requires =&gt; RepoName]'), '<div class="metatag" tag="ref requires">requires: <a href="/RepoName">RepoName</a></div>'),
290 (('ref', '[requires =&gt; RepoName]'), '<div class="metatag" tag="ref requires">requires: <a href="/RepoName">RepoName</a></div>'),
283 (('ref', '[recommends =&gt; GroupName]'), '<div class="metatag" tag="ref recommends">recommends: <a href="/GroupName">GroupName</a></div>'),
291 (('ref', '[recommends =&gt; GroupName]'), '<div class="metatag" tag="ref recommends">recommends: <a href="/GroupName">GroupName</a></div>'),
284 (('ref', '[conflicts =&gt; SomeName]'), '<div class="metatag" tag="ref conflicts">conflicts: <a href="/SomeName">SomeName</a></div>'),
292 (('ref', '[conflicts =&gt; SomeName]'), '<div class="metatag" tag="ref conflicts">conflicts: <a href="/SomeName">SomeName</a></div>'),
285 (('ref', '[base =&gt; SomeName]'), '<div class="metatag" tag="ref base">base: <a href="/SomeName">SomeName</a></div>'),
293 (('ref', '[base =&gt; SomeName]'), '<div class="metatag" tag="ref base">base: <a href="/SomeName">SomeName</a></div>'),
286
294
287 (('see', '[see =&gt; http://rhodecode.com]'), '<div class="metatag" tag="see">see: http://rhodecode.com </div>'),
295 (('see', '[see =&gt; http://rhodecode.com]'), '<div class="metatag" tag="see">see: http://rhodecode.com </div>'),
288
296
289 (('url', '[url =&gt; [linkName](https://rhodecode.com)]'), '<div class="metatag" tag="url"> <a href="https://rhodecode.com">linkName</a> </div>'),
297 (('url', '[url =&gt; [linkName](https://rhodecode.com)]'), '<div class="metatag" tag="url"> <a href="https://rhodecode.com">linkName</a> </div>'),
290 (('url', '[url =&gt; [example link](https://rhodecode.com)]'), '<div class="metatag" tag="url"> <a href="https://rhodecode.com">example link</a> </div>'),
298 (('url', '[url =&gt; [example link](https://rhodecode.com)]'), '<div class="metatag" tag="url"> <a href="https://rhodecode.com">example link</a> </div>'),
291 (('url', '[url =&gt; [v1.0.0](https://rhodecode.com)]'), '<div class="metatag" tag="url"> <a href="https://rhodecode.com">v1.0.0</a> </div>'),
299 (('url', '[url =&gt; [v1.0.0](https://rhodecode.com)]'), '<div class="metatag" tag="url"> <a href="https://rhodecode.com">v1.0.0</a> </div>'),
292
300
293 ])
301 ])
294 def test_metatags_stylize(tag_data, expected_html):
302 def test_metatags_stylize(tag_data, expected_html):
295 from rhodecode.lib.helpers import style_metatag
303 from rhodecode.lib.helpers import style_metatag
296 tag_type,value = tag_data
304 tag_type,value = tag_data
297 assert style_metatag(tag_type, value) == expected_html
305 assert style_metatag(tag_type, value) == expected_html
298
306
299
307
300 @pytest.mark.parametrize("tmpl_url, email, expected", [
308 @pytest.mark.parametrize("tmpl_url, email, expected", [
301 ('http://test.com/{email}', 'test@foo.com', 'http://test.com/test@foo.com'),
309 ('http://test.com/{email}', 'test@foo.com', 'http://test.com/test@foo.com'),
302
310
303 ('http://test.com/{md5email}', 'test@foo.com', 'http://test.com/3cb7232fcc48743000cb86d0d5022bd9'),
311 ('http://test.com/{md5email}', 'test@foo.com', 'http://test.com/3cb7232fcc48743000cb86d0d5022bd9'),
304 ('http://test.com/{md5email}', 'testΔ…Δ‡@foo.com', 'http://test.com/978debb907a3c55cd741872ab293ef30'),
312 ('http://test.com/{md5email}', 'testΔ…Δ‡@foo.com', 'http://test.com/978debb907a3c55cd741872ab293ef30'),
305
313
306 ('http://testX.com/{md5email}?s={size}', 'test@foo.com', 'http://testX.com/3cb7232fcc48743000cb86d0d5022bd9?s=24'),
314 ('http://testX.com/{md5email}?s={size}', 'test@foo.com', 'http://testX.com/3cb7232fcc48743000cb86d0d5022bd9?s=24'),
307 ('http://testX.com/{md5email}?s={size}', 'testΔ…Δ‡@foo.com', 'http://testX.com/978debb907a3c55cd741872ab293ef30?s=24'),
315 ('http://testX.com/{md5email}?s={size}', 'testΔ…Δ‡@foo.com', 'http://testX.com/978debb907a3c55cd741872ab293ef30?s=24'),
308
316
309 ('{scheme}://{netloc}/{md5email}/{size}', 'test@foo.com', 'https://server.com/3cb7232fcc48743000cb86d0d5022bd9/24'),
317 ('{scheme}://{netloc}/{md5email}/{size}', 'test@foo.com', 'https://server.com/3cb7232fcc48743000cb86d0d5022bd9/24'),
310 ('{scheme}://{netloc}/{md5email}/{size}', 'testΔ…Δ‡@foo.com', 'https://server.com/978debb907a3c55cd741872ab293ef30/24'),
318 ('{scheme}://{netloc}/{md5email}/{size}', 'testΔ…Δ‡@foo.com', 'https://server.com/978debb907a3c55cd741872ab293ef30/24'),
311
319
312 ('http://test.com/{email}', 'testΔ…Δ‡@foo.com', 'http://test.com/testΔ…Δ‡@foo.com'),
320 ('http://test.com/{email}', 'testΔ…Δ‡@foo.com', 'http://test.com/testΔ…Δ‡@foo.com'),
313 ('http://test.com/{email}?size={size}', 'test@foo.com', 'http://test.com/test@foo.com?size=24'),
321 ('http://test.com/{email}?size={size}', 'test@foo.com', 'http://test.com/test@foo.com?size=24'),
314 ('http://test.com/{email}?size={size}', 'testΔ…Δ‡@foo.com', 'http://test.com/testΔ…Δ‡@foo.com?size=24'),
322 ('http://test.com/{email}?size={size}', 'testΔ…Δ‡@foo.com', 'http://test.com/testΔ…Δ‡@foo.com?size=24'),
315 ])
323 ])
316 def test_gravatar_url_builder(tmpl_url, email, expected, request_stub):
324 def test_gravatar_url_builder(tmpl_url, email, expected, request_stub):
317 from rhodecode.lib.helpers import gravatar_url
325 from rhodecode.lib.helpers import gravatar_url
318
326
319 # mock pyramid.threadlocals
327 # mock pyramid.threadlocals
320 def fake_get_current_request():
328 def fake_get_current_request():
321 request_stub.scheme = 'https'
329 request_stub.scheme = 'https'
322 request_stub.host = 'server.com'
330 request_stub.host = 'server.com'
323 return request_stub
331 return request_stub
324
332
325 # mock pylons.tmpl_context
333 # mock pylons.tmpl_context
326 def fake_tmpl_context(_url):
334 def fake_tmpl_context(_url):
327 _c = AttributeDict()
335 _c = AttributeDict()
328 _c.visual = AttributeDict()
336 _c.visual = AttributeDict()
329 _c.visual.use_gravatar = True
337 _c.visual.use_gravatar = True
330 _c.visual.gravatar_url = _url
338 _c.visual.gravatar_url = _url
331
339
332 return _c
340 return _c
333
341
334 with mock.patch('rhodecode.lib.helpers.get_current_request',
342 with mock.patch('rhodecode.lib.helpers.get_current_request',
335 fake_get_current_request):
343 fake_get_current_request):
336 fake = fake_tmpl_context(_url=tmpl_url)
344 fake = fake_tmpl_context(_url=tmpl_url)
337 with mock.patch('pylons.tmpl_context', fake):
345 with mock.patch('pylons.tmpl_context', fake):
338 grav = gravatar_url(email_address=email, size=24)
346 grav = gravatar_url(email_address=email, size=24)
339 assert grav == expected
347 assert grav == expected
340
348
341
349
342 @pytest.mark.parametrize(
350 @pytest.mark.parametrize(
343 "email, first_name, last_name, expected_initials, expected_color", [
351 "email, first_name, last_name, expected_initials, expected_color", [
344
352
345 ('test@rhodecode.com', '', '', 'TR', '#8a994d'),
353 ('test@rhodecode.com', '', '', 'TR', '#8a994d'),
346 ('marcin.kuzminski@rhodecode.com', '', '', 'MK', '#6559b3'),
354 ('marcin.kuzminski@rhodecode.com', '', '', 'MK', '#6559b3'),
347 # special cases of email
355 # special cases of email
348 ('john.van.dam@rhodecode.com', '', '', 'JD', '#526600'),
356 ('john.van.dam@rhodecode.com', '', '', 'JD', '#526600'),
349 ('Guido.van.Rossum@rhodecode.com', '', '', 'GR', '#990052'),
357 ('Guido.van.Rossum@rhodecode.com', '', '', 'GR', '#990052'),
350 ('Guido.van.Rossum@rhodecode.com', 'Guido', 'Van Rossum', 'GR', '#990052'),
358 ('Guido.van.Rossum@rhodecode.com', 'Guido', 'Van Rossum', 'GR', '#990052'),
351
359
352 ('rhodecode+Guido.van.Rossum@rhodecode.com', '', '', 'RR', '#46598c'),
360 ('rhodecode+Guido.van.Rossum@rhodecode.com', '', '', 'RR', '#46598c'),
353 ('pclouds@rhodecode.com', 'Nguyα»…n ThΓ‘i', 'Tgọc Duy', 'ND', '#665200'),
361 ('pclouds@rhodecode.com', 'Nguyα»…n ThΓ‘i', 'Tgọc Duy', 'ND', '#665200'),
354
362
355 ('john-brown@foo.com', '', '', 'JF', '#73006b'),
363 ('john-brown@foo.com', '', '', 'JF', '#73006b'),
356 ('admin@rhodecode.com', 'Marcin', 'Kuzminski', 'MK', '#104036'),
364 ('admin@rhodecode.com', 'Marcin', 'Kuzminski', 'MK', '#104036'),
357 # partials
365 # partials
358 ('admin@rhodecode.com', 'Marcin', '', 'MR', '#104036'), # fn+email
366 ('admin@rhodecode.com', 'Marcin', '', 'MR', '#104036'), # fn+email
359 ('admin@rhodecode.com', '', 'Kuzminski', 'AK', '#104036'), # em+ln
367 ('admin@rhodecode.com', '', 'Kuzminski', 'AK', '#104036'), # em+ln
360 # non-ascii
368 # non-ascii
361 ('admin@rhodecode.com', 'Marcin', 'Śuzminski', 'MS', '#104036'),
369 ('admin@rhodecode.com', 'Marcin', 'Śuzminski', 'MS', '#104036'),
362 ('marcin.Ε›uzminski@rhodecode.com', '', '', 'MS', '#73000f'),
370 ('marcin.Ε›uzminski@rhodecode.com', '', '', 'MS', '#73000f'),
363
371
364 # special cases, LDAP can provide those...
372 # special cases, LDAP can provide those...
365 ('admin@', 'Marcin', 'Śuzminski', 'MS', '#aa00ff'),
373 ('admin@', 'Marcin', 'Śuzminski', 'MS', '#aa00ff'),
366 ('marcin.Ε›uzminski', '', '', 'MS', '#402020'),
374 ('marcin.Ε›uzminski', '', '', 'MS', '#402020'),
367 ('null', '', '', 'NL', '#8c4646'),
375 ('null', '', '', 'NL', '#8c4646'),
368 ('some.@abc.com', 'some', '', 'SA', '#664e33')
376 ('some.@abc.com', 'some', '', 'SA', '#664e33')
369 ])
377 ])
370 def test_initials_gravatar_pick_of_initials_and_color_algo(
378 def test_initials_gravatar_pick_of_initials_and_color_algo(
371 email, first_name, last_name, expected_initials, expected_color):
379 email, first_name, last_name, expected_initials, expected_color):
372 instance = InitialsGravatar(email, first_name, last_name)
380 instance = InitialsGravatar(email, first_name, last_name)
373 assert instance.get_initials() == expected_initials
381 assert instance.get_initials() == expected_initials
374 assert instance.str2color(email) == expected_color
382 assert instance.str2color(email) == expected_color
375
383
376
384
377 def test_initials_gravatar_mapping_algo():
385 def test_initials_gravatar_mapping_algo():
378 pos = set()
386 pos = set()
379 instance = InitialsGravatar('', '', '')
387 instance = InitialsGravatar('', '', '')
380 iterations = 0
388 iterations = 0
381
389
382 variations = []
390 variations = []
383 for letter1 in string.ascii_letters:
391 for letter1 in string.ascii_letters:
384 for letter2 in string.ascii_letters[::-1][:10]:
392 for letter2 in string.ascii_letters[::-1][:10]:
385 for letter3 in string.ascii_letters[:10]:
393 for letter3 in string.ascii_letters[:10]:
386 variations.append(
394 variations.append(
387 '%s@rhodecode.com' % (letter1+letter2+letter3))
395 '%s@rhodecode.com' % (letter1+letter2+letter3))
388
396
389 max_variations = 4096
397 max_variations = 4096
390 for email in variations[:max_variations]:
398 for email in variations[:max_variations]:
391 iterations += 1
399 iterations += 1
392 pos.add(
400 pos.add(
393 instance.pick_color_bank_index(email,
401 instance.pick_color_bank_index(email,
394 instance.get_color_bank()))
402 instance.get_color_bank()))
395
403
396 # we assume that we have matched all 256 possible positions
404 # we assume that we have matched all 256 possible positions
397 # within a reasonable number of different email addresses
405 # within a reasonable number of different email addresses
398 assert len(pos) == 256
406 assert len(pos) == 256
399 assert iterations == max_variations
407 assert iterations == max_variations
400
408
401
409
@pytest.mark.parametrize("tmpl, repo_name, overrides, prefix, expected", [
    (Repository.DEFAULT_CLONE_URI, 'group/repo1', {}, '', 'http://vps1:8000/group/repo1'),
    (Repository.DEFAULT_CLONE_URI, 'group/repo1', {'user': 'marcink'}, '', 'http://marcink@vps1:8000/group/repo1'),
    (Repository.DEFAULT_CLONE_URI, 'group/repo1', {}, '/rc', 'http://vps1:8000/rc/group/repo1'),
    (Repository.DEFAULT_CLONE_URI, 'group/repo1', {'user': 'user'}, '/rc', 'http://user@vps1:8000/rc/group/repo1'),
    (Repository.DEFAULT_CLONE_URI, 'group/repo1', {'user': 'marcink'}, '/rc', 'http://marcink@vps1:8000/rc/group/repo1'),
    (Repository.DEFAULT_CLONE_URI, 'group/repo1', {'user': 'user'}, '/rc/', 'http://user@vps1:8000/rc/group/repo1'),
    (Repository.DEFAULT_CLONE_URI, 'group/repo1', {'user': 'marcink'}, '/rc/', 'http://marcink@vps1:8000/rc/group/repo1'),
    ('{scheme}://{user}@{netloc}/_{repoid}', 'group/repo1', {}, '', 'http://vps1:8000/_23'),
    ('{scheme}://{user}@{netloc}/_{repoid}', 'group/repo1', {'user': 'marcink'}, '', 'http://marcink@vps1:8000/_23'),
    ('http://{user}@{netloc}/_{repoid}', 'group/repo1', {'user': 'marcink'}, '', 'http://marcink@vps1:8000/_23'),
    ('http://{netloc}/_{repoid}', 'group/repo1', {'user': 'marcink'}, '', 'http://vps1:8000/_23'),
    ('https://{user}@proxy1.server.com/{repo}', 'group/repo1', {'user': 'marcink'}, '', 'https://marcink@proxy1.server.com/group/repo1'),
    ('https://{user}@proxy1.server.com/{repo}', 'group/repo1', {}, '', 'https://proxy1.server.com/group/repo1'),
    ('https://proxy1.server.com/{user}/{repo}', 'group/repo1', {'user': 'marcink'}, '', 'https://proxy1.server.com/marcink/group/repo1'),
])
def test_clone_url_generator(tmpl, repo_name, overrides, prefix, expected):
    from rhodecode.lib.utils2 import get_clone_url

    class RequestStub(object):
        def request_url(self, name):
            return 'http://vps1:8000' + prefix

        def route_url(self, name):
            return self.request_url(name)

    clone_url = get_clone_url(
        request=RequestStub(),
        uri_tmpl=tmpl,
        repo_name=repo_name, repo_id=23, **overrides)
    assert clone_url == expected


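# Usage sketch for the helper exercised above, outside the test harness;
# `request` is assumed to be any object exposing route_url(), as RequestStub
# shows, and to resolve to 'http://vps1:8000'.
from rhodecode.lib.utils2 import get_clone_url

clone_url = get_clone_url(
    request=request, uri_tmpl='{scheme}://{user}@{netloc}/_{repoid}',
    repo_name='group/repo1', repo_id=23, user='marcink')
# per the parametrized cases above: 'http://marcink@vps1:8000/_23'
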
def _quick_url(text, tmpl="""<a class="revision-link" href="%s">%s</a>""", url_=None):
    """
    Changes `some text url[foo]` => `some text <a href="/">foo</a>`

    :param text:
    """
    import re
    # quickly change expected url[] into a link
    URL_PAT = re.compile(r'(?:url\[)(.+?)(?:\])')

    def url_func(match_obj):
        _url = match_obj.groups()[0]
        return tmpl % (url_ or '/some-url', _url)
    return URL_PAT.sub(url_func, text)


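# Quick usage examples for _quick_url, derived from its defaults;
# the URLs below are placeholders only.
_quick_url("from rev url[deadbeef]")
# -> 'from rev <a class="revision-link" href="/some-url">deadbeef</a>'
_quick_url("see url[docs]", tmpl="""<a href="%s">%s</a>""", url_="https://example.com")
# -> 'see <a href="https://example.com">docs</a>'
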
@pytest.mark.parametrize("sample, expected", [
    ("",
     ""),
    ("git-svn-id: https://svn.apache.org/repos/asf/libcloud/trunk@1441655 13f79535-47bb-0310-9956-ffa450edef68",
     "git-svn-id: https://svn.apache.org/repos/asf/libcloud/trunk@1441655 13f79535-47bb-0310-9956-ffa450edef68"),
    ("from rev 000000000000",
     "from rev url[000000000000]"),
    ("from rev 000000000000123123 also rev 000000000000",
     "from rev url[000000000000123123] also rev url[000000000000]"),
    ("this should-000 00",
     "this should-000 00"),
    ("longtextffffffffff rev 123123123123",
     "longtextffffffffff rev url[123123123123]"),
    ("rev ffffffffffffffffffffffffffffffffffffffffffffffffff",
     "rev ffffffffffffffffffffffffffffffffffffffffffffffffff"),
    ("ffffffffffff some text traalaa",
     "url[ffffffffffff] some text traalaa"),
    ("""Multi line
123123123123
some text 123123123123
sometimes !
""",
     """Multi line
url[123123123123]
some text url[123123123123]
sometimes !
""")
], ids=no_newline_id_generator)
def test_urlify_commits(sample, expected):
    def fake_url(self, *args, **kwargs):
        return '/some-url'

    expected = _quick_url(expected)

    with mock.patch('rhodecode.lib.helpers.route_url', fake_url):
        from rhodecode.lib.helpers import urlify_commits
        assert urlify_commits(sample, 'repo_name') == expected


@pytest.mark.parametrize("sample, expected, url_", [
    ("",
     "",
     ""),
    ("https://svn.apache.org/repos",
     "url[https://svn.apache.org/repos]",
     "https://svn.apache.org/repos"),
    ("http://svn.apache.org/repos",
     "url[http://svn.apache.org/repos]",
     "http://svn.apache.org/repos"),
    ("from rev a also rev http://google.com",
     "from rev a also rev url[http://google.com]",
     "http://google.com"),
    ("""Multi line
https://foo.bar.com
some text lalala""",
     """Multi line
url[https://foo.bar.com]
some text lalala""",
     "https://foo.bar.com")
], ids=no_newline_id_generator)
def test_urlify_test(sample, expected, url_):
    from rhodecode.lib.helpers import urlify_text
    expected = _quick_url(expected, tmpl="""<a href="%s">%s</a>""", url_=url_)
    assert urlify_text(sample) == expected


@pytest.mark.parametrize("test, expected", [
    ("", None),
    ("/_2", '2'),
    ("_2", '2'),
    ("/_2/", '2'),
    ("_2/", '2'),

    ("/_21", '21'),
    ("_21", '21'),
    ("/_21/", '21'),
    ("_21/", '21'),

    ("/_21/foobar", '21'),
    ("_21/121", '21'),
    ("/_21/_12", '21'),
    ("_21/rc/foo", '21'),

])
def test_get_repo_by_id(test, expected):
    from rhodecode.model.repo import RepoModel
    _test = RepoModel()._extract_id_from_repo_name(test)
    assert _test == expected


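# Illustration of the id extraction these cases describe; a sketch of the
# expected behaviour only, not RhodeCode's actual _extract_id_from_repo_name.
import re

def extract_id_from_repo_name_sketch(repo_name):
    # pull the numeric id out of '_<id>' / '/_<id>/...' style names
    match = re.match(r'^/?_(\d+)', repo_name or '')
    return match.group(1) if match else None
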
@pytest.mark.parametrize("test_repo_name, repo_type", [
    ("test_repo_1", None),
    ("repo_group/foobar", None),
    ("test_non_asci_ąćę", None),
    (u"test_non_asci_unicode_ąćę", None),
])
def test_invalidation_context(pylonsapp, test_repo_name, repo_type):
    from beaker.cache import cache_region
    from rhodecode.lib import caches
    from rhodecode.model.db import CacheKey

    @cache_region('long_term')
    def _dummy_func(cache_key):
        return 'result'

    invalidator_context = CacheKey.repo_context_cache(
        _dummy_func, test_repo_name, 'repo')

    with invalidator_context as context:
        invalidated = context.invalidate()
        result = context.compute()

    assert invalidated == True
    assert 'result' == result
    assert isinstance(context, caches.FreshRegionCache)

    assert 'InvalidationContext' in repr(invalidator_context)

    with invalidator_context as context:
        context.invalidate()
        result = context.compute()

    assert 'result' == result
    assert isinstance(context, caches.ActiveRegionCache)


def test_invalidation_context_exception_in_compute(pylonsapp):
    from rhodecode.model.db import CacheKey
    from beaker.cache import cache_region

    @cache_region('long_term')
    def _dummy_func(cache_key):
        # this causes an error since it doesn't get any params
        raise Exception('ups')

    invalidator_context = CacheKey.repo_context_cache(
        _dummy_func, 'test_repo_2', 'repo')

    with pytest.raises(Exception):
        with invalidator_context as context:
            context.invalidate()
            context.compute()


@pytest.mark.parametrize('execution_number', range(5))
def test_cache_invalidation_race_condition(execution_number, pylonsapp):
    import time
    from beaker.cache import cache_region
    from rhodecode.model.db import CacheKey

    if CacheKey.metadata.bind.url.get_backend_name() == "mysql":
        reason = (
            'Fails on MariaDB due to some locking issues. Investigation'
            ' needed')
        pytest.xfail(reason=reason)

    @run_test_concurrently(25)
    def test_create_and_delete_cache_keys():
        time.sleep(0.2)

        @cache_region('long_term')
        def _dummy_func(cache_key):
            return 'result'

        invalidator_context = CacheKey.repo_context_cache(
            _dummy_func, 'test_repo_1', 'repo')

        with invalidator_context as context:
            context.invalidate()
            context.compute()

        CacheKey.set_invalidate('test_repo_1', delete=True)

    test_create_and_delete_cache_keys()

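# For orientation, a thread-based sketch of what a decorator like
# run_test_concurrently could look like; this is an assumption for
# illustration, not the project's actual helper.
import threading

def run_concurrently_sketch(count):
    def decorator(func):
        def wrapper(*args, **kwargs):
            # start `count` copies of the wrapped callable and wait for all
            threads = [threading.Thread(target=func, args=args, kwargs=kwargs)
                       for _ in range(count)]
            for t in threads:
                t.start()
            for t in threads:
                t.join()
        return wrapper
    return decorator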