metatags: limit the scope of url => metatag to http, https and / links....
marcink
r2381:39239f6c default
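For illustration only, not part of the changeset itself: a minimal standalone Python sketch of what this change to the 'url' metatag pattern does. The two regexes are copied from the old and new versions of tags_paterns at lines 950-951 of the diff below; the OLD_URL_PAT / NEW_URL_PAT names and the sample inputs are assumptions made just for this example. The patterns operate on text that is already HTML-escaped, so => appears as =&gt;.

# -*- coding: utf-8 -*-
# Sketch (assumption: run standalone, outside RhodeCode) comparing the old and
# new 'url' metatag regexes. The new pattern only accepts link targets that
# start with http://, https:// or /, so other schemes (e.g. javascript:) no
# longer become anchors. Input is assumed to be HTML-escaped already.
import re

OLD_URL_PAT = re.compile(r'\[url\ \=\&gt;\ \[([a-zA-Z0-9\ \.\-\_]+)\]\((.*?)\)\]')
NEW_URL_PAT = re.compile(r'\[url\ \=\&gt;\ \[([a-zA-Z0-9\ \.\-\_]+)\]\((http://|https://|/)(.*?)\)\]')

samples = [
    '[url =&gt; [docs](https://rhodecode.com/docs)]',  # http(s) target
    '[url =&gt; [local](/some/repo/path)]',            # root-relative target
    '[url =&gt; [evil](javascript:alert(1))]',         # other scheme
]

for text in samples:
    print('old=%-5s new=%-5s %s' % (
        bool(OLD_URL_PAT.search(text)),
        bool(NEW_URL_PAT.search(text)),
        text))

Running this prints old=True for all three samples but new=True only for the first two, which is the point of the change: only http, https and root-relative ('/') targets are still rendered as links by the url metatag.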
@@ -1,2064 +1,2064 b''
1 # -*- coding: utf-8 -*-
1 # -*- coding: utf-8 -*-
2
2
3 # Copyright (C) 2010-2017 RhodeCode GmbH
3 # Copyright (C) 2010-2017 RhodeCode GmbH
4 #
4 #
5 # This program is free software: you can redistribute it and/or modify
5 # This program is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU Affero General Public License, version 3
6 # it under the terms of the GNU Affero General Public License, version 3
7 # (only), as published by the Free Software Foundation.
7 # (only), as published by the Free Software Foundation.
8 #
8 #
9 # This program is distributed in the hope that it will be useful,
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU General Public License for more details.
12 # GNU General Public License for more details.
13 #
13 #
14 # You should have received a copy of the GNU Affero General Public License
14 # You should have received a copy of the GNU Affero General Public License
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 #
16 #
17 # This program is dual-licensed. If you wish to learn more about the
17 # This program is dual-licensed. If you wish to learn more about the
18 # RhodeCode Enterprise Edition, including its added features, Support services,
18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 # and proprietary license terms, please see https://rhodecode.com/licenses/
19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20
20
21 """
21 """
22 Helper functions
22 Helper functions
23
23
24 Consists of functions to typically be used within templates, but also
24 Consists of functions to typically be used within templates, but also
25 available to Controllers. This module is available to both as 'h'.
25 available to Controllers. This module is available to both as 'h'.
26 """
26 """
27
27
28 import random
28 import random
29 import hashlib
29 import hashlib
30 import StringIO
30 import StringIO
31 import urllib
31 import urllib
32 import math
32 import math
33 import logging
33 import logging
34 import re
34 import re
35 import urlparse
35 import urlparse
36 import time
36 import time
37 import string
37 import string
38 import hashlib
38 import hashlib
39 from collections import OrderedDict
39 from collections import OrderedDict
40
40
41 import pygments
41 import pygments
42 import itertools
42 import itertools
43 import fnmatch
43 import fnmatch
44
44
45 from datetime import datetime
45 from datetime import datetime
46 from functools import partial
46 from functools import partial
47 from pygments.formatters.html import HtmlFormatter
47 from pygments.formatters.html import HtmlFormatter
48 from pygments import highlight as code_highlight
48 from pygments import highlight as code_highlight
49 from pygments.lexers import (
49 from pygments.lexers import (
50 get_lexer_by_name, get_lexer_for_filename, get_lexer_for_mimetype)
50 get_lexer_by_name, get_lexer_for_filename, get_lexer_for_mimetype)
51
51
52 from pyramid.threadlocal import get_current_request
52 from pyramid.threadlocal import get_current_request
53
53
54 from webhelpers.html import literal, HTML, escape
54 from webhelpers.html import literal, HTML, escape
55 from webhelpers.html.tools import *
55 from webhelpers.html.tools import *
56 from webhelpers.html.builder import make_tag
56 from webhelpers.html.builder import make_tag
57 from webhelpers.html.tags import auto_discovery_link, checkbox, css_classes, \
57 from webhelpers.html.tags import auto_discovery_link, checkbox, css_classes, \
58 end_form, file, form as wh_form, hidden, image, javascript_link, link_to, \
58 end_form, file, form as wh_form, hidden, image, javascript_link, link_to, \
59 link_to_if, link_to_unless, ol, required_legend, select, stylesheet_link, \
59 link_to_if, link_to_unless, ol, required_legend, select, stylesheet_link, \
60 submit, text, password, textarea, title, ul, xml_declaration, radio
60 submit, text, password, textarea, title, ul, xml_declaration, radio
61 from webhelpers.html.tools import auto_link, button_to, highlight, \
61 from webhelpers.html.tools import auto_link, button_to, highlight, \
62 js_obfuscate, mail_to, strip_links, strip_tags, tag_re
62 js_obfuscate, mail_to, strip_links, strip_tags, tag_re
63 from webhelpers.text import chop_at, collapse, convert_accented_entities, \
63 from webhelpers.text import chop_at, collapse, convert_accented_entities, \
64 convert_misc_entities, lchop, plural, rchop, remove_formatting, \
64 convert_misc_entities, lchop, plural, rchop, remove_formatting, \
65 replace_whitespace, urlify, truncate, wrap_paragraphs
65 replace_whitespace, urlify, truncate, wrap_paragraphs
66 from webhelpers.date import time_ago_in_words
66 from webhelpers.date import time_ago_in_words
67 from webhelpers.paginate import Page as _Page
67 from webhelpers.paginate import Page as _Page
68 from webhelpers.html.tags import _set_input_attrs, _set_id_attr, \
68 from webhelpers.html.tags import _set_input_attrs, _set_id_attr, \
69 convert_boolean_attrs, NotGiven, _make_safe_id_component
69 convert_boolean_attrs, NotGiven, _make_safe_id_component
70 from webhelpers2.number import format_byte_size
70 from webhelpers2.number import format_byte_size
71
71
72 from rhodecode.lib.action_parser import action_parser
72 from rhodecode.lib.action_parser import action_parser
73 from rhodecode.lib.ext_json import json
73 from rhodecode.lib.ext_json import json
74 from rhodecode.lib.utils import repo_name_slug, get_custom_lexer
74 from rhodecode.lib.utils import repo_name_slug, get_custom_lexer
75 from rhodecode.lib.utils2 import str2bool, safe_unicode, safe_str, \
75 from rhodecode.lib.utils2 import str2bool, safe_unicode, safe_str, \
76 get_commit_safe, datetime_to_time, time_to_datetime, time_to_utcdatetime, \
76 get_commit_safe, datetime_to_time, time_to_datetime, time_to_utcdatetime, \
77 AttributeDict, safe_int, md5, md5_safe
77 AttributeDict, safe_int, md5, md5_safe
78 from rhodecode.lib.markup_renderer import MarkupRenderer, relative_links
78 from rhodecode.lib.markup_renderer import MarkupRenderer, relative_links
79 from rhodecode.lib.vcs.exceptions import CommitDoesNotExistError
79 from rhodecode.lib.vcs.exceptions import CommitDoesNotExistError
80 from rhodecode.lib.vcs.backends.base import BaseChangeset, EmptyCommit
80 from rhodecode.lib.vcs.backends.base import BaseChangeset, EmptyCommit
81 from rhodecode.config.conf import DATE_FORMAT, DATETIME_FORMAT
81 from rhodecode.config.conf import DATE_FORMAT, DATETIME_FORMAT
82 from rhodecode.model.changeset_status import ChangesetStatusModel
82 from rhodecode.model.changeset_status import ChangesetStatusModel
83 from rhodecode.model.db import Permission, User, Repository
83 from rhodecode.model.db import Permission, User, Repository
84 from rhodecode.model.repo_group import RepoGroupModel
84 from rhodecode.model.repo_group import RepoGroupModel
85 from rhodecode.model.settings import IssueTrackerSettingsModel
85 from rhodecode.model.settings import IssueTrackerSettingsModel
86
86
87 log = logging.getLogger(__name__)
87 log = logging.getLogger(__name__)
88
88
89
89
90 DEFAULT_USER = User.DEFAULT_USER
90 DEFAULT_USER = User.DEFAULT_USER
91 DEFAULT_USER_EMAIL = User.DEFAULT_USER_EMAIL
91 DEFAULT_USER_EMAIL = User.DEFAULT_USER_EMAIL
92
92
93
93
94 def asset(path, ver=None, **kwargs):
94 def asset(path, ver=None, **kwargs):
95 """
95 """
96 Helper to generate a static asset file path for rhodecode assets
96 Helper to generate a static asset file path for rhodecode assets
97
97
98 eg. h.asset('images/image.png', ver='3923')
98 eg. h.asset('images/image.png', ver='3923')
99
99
100 :param path: path of asset
100 :param path: path of asset
101 :param ver: optional version query param to append as ?ver=
101 :param ver: optional version query param to append as ?ver=
102 """
102 """
103 request = get_current_request()
103 request = get_current_request()
104 query = {}
104 query = {}
105 query.update(kwargs)
105 query.update(kwargs)
106 if ver:
106 if ver:
107 query = {'ver': ver}
107 query = {'ver': ver}
108 return request.static_path(
108 return request.static_path(
109 'rhodecode:public/{}'.format(path), _query=query)
109 'rhodecode:public/{}'.format(path), _query=query)
110
110
111
111
112 default_html_escape_table = {
112 default_html_escape_table = {
113 ord('&'): u'&amp;',
113 ord('&'): u'&amp;',
114 ord('<'): u'&lt;',
114 ord('<'): u'&lt;',
115 ord('>'): u'&gt;',
115 ord('>'): u'&gt;',
116 ord('"'): u'&quot;',
116 ord('"'): u'&quot;',
117 ord("'"): u'&#39;',
117 ord("'"): u'&#39;',
118 }
118 }
119
119
120
120
121 def html_escape(text, html_escape_table=default_html_escape_table):
121 def html_escape(text, html_escape_table=default_html_escape_table):
122 """Produce entities within text."""
122 """Produce entities within text."""
123 return text.translate(html_escape_table)
123 return text.translate(html_escape_table)
124
124
125
125
126 def chop_at_smart(s, sub, inclusive=False, suffix_if_chopped=None):
126 def chop_at_smart(s, sub, inclusive=False, suffix_if_chopped=None):
127 """
127 """
128 Truncate string ``s`` at the first occurrence of ``sub``.
128 Truncate string ``s`` at the first occurrence of ``sub``.
129
129
130 If ``inclusive`` is true, truncate just after ``sub`` rather than at it.
130 If ``inclusive`` is true, truncate just after ``sub`` rather than at it.
131 """
131 """
132 suffix_if_chopped = suffix_if_chopped or ''
132 suffix_if_chopped = suffix_if_chopped or ''
133 pos = s.find(sub)
133 pos = s.find(sub)
134 if pos == -1:
134 if pos == -1:
135 return s
135 return s
136
136
137 if inclusive:
137 if inclusive:
138 pos += len(sub)
138 pos += len(sub)
139
139
140 chopped = s[:pos]
140 chopped = s[:pos]
141 left = s[pos:].strip()
141 left = s[pos:].strip()
142
142
143 if left and suffix_if_chopped:
143 if left and suffix_if_chopped:
144 chopped += suffix_if_chopped
144 chopped += suffix_if_chopped
145
145
146 return chopped
146 return chopped
147
147
148
148
149 def shorter(text, size=20):
149 def shorter(text, size=20):
150 postfix = '...'
150 postfix = '...'
151 if len(text) > size:
151 if len(text) > size:
152 return text[:size - len(postfix)] + postfix
152 return text[:size - len(postfix)] + postfix
153 return text
153 return text
154
154
155
155
156 def _reset(name, value=None, id=NotGiven, type="reset", **attrs):
156 def _reset(name, value=None, id=NotGiven, type="reset", **attrs):
157 """
157 """
158 Reset button
158 Reset button
159 """
159 """
160 _set_input_attrs(attrs, type, name, value)
160 _set_input_attrs(attrs, type, name, value)
161 _set_id_attr(attrs, id, name)
161 _set_id_attr(attrs, id, name)
162 convert_boolean_attrs(attrs, ["disabled"])
162 convert_boolean_attrs(attrs, ["disabled"])
163 return HTML.input(**attrs)
163 return HTML.input(**attrs)
164
164
165 reset = _reset
165 reset = _reset
166 safeid = _make_safe_id_component
166 safeid = _make_safe_id_component
167
167
168
168
169 def branding(name, length=40):
169 def branding(name, length=40):
170 return truncate(name, length, indicator="")
170 return truncate(name, length, indicator="")
171
171
172
172
173 def FID(raw_id, path):
173 def FID(raw_id, path):
174 """
174 """
175 Creates a unique ID for filenode based on its hash of path and commit
175 Creates a unique ID for filenode based on its hash of path and commit
176 it's safe to use in urls
176 it's safe to use in urls
177
177
178 :param raw_id:
178 :param raw_id:
179 :param path:
179 :param path:
180 """
180 """
181
181
182 return 'c-%s-%s' % (short_id(raw_id), md5_safe(path)[:12])
182 return 'c-%s-%s' % (short_id(raw_id), md5_safe(path)[:12])
183
183
184
184
185 class _GetError(object):
185 class _GetError(object):
186 """Get error from form_errors, and represent it as span wrapped error
186 """Get error from form_errors, and represent it as span wrapped error
187 message
187 message
188
188
189 :param field_name: field to fetch errors for
189 :param field_name: field to fetch errors for
190 :param form_errors: form errors dict
190 :param form_errors: form errors dict
191 """
191 """
192
192
193 def __call__(self, field_name, form_errors):
193 def __call__(self, field_name, form_errors):
194 tmpl = """<span class="error_msg">%s</span>"""
194 tmpl = """<span class="error_msg">%s</span>"""
195 if form_errors and field_name in form_errors:
195 if form_errors and field_name in form_errors:
196 return literal(tmpl % form_errors.get(field_name))
196 return literal(tmpl % form_errors.get(field_name))
197
197
198 get_error = _GetError()
198 get_error = _GetError()
199
199
200
200
201 class _ToolTip(object):
201 class _ToolTip(object):
202
202
203 def __call__(self, tooltip_title, trim_at=50):
203 def __call__(self, tooltip_title, trim_at=50):
204 """
204 """
205 Special function just to wrap our text into nice formatted
205 Special function just to wrap our text into nice formatted
206 autowrapped text
206 autowrapped text
207
207
208 :param tooltip_title:
208 :param tooltip_title:
209 """
209 """
210 tooltip_title = escape(tooltip_title)
210 tooltip_title = escape(tooltip_title)
211 tooltip_title = tooltip_title.replace('<', '&lt;').replace('>', '&gt;')
211 tooltip_title = tooltip_title.replace('<', '&lt;').replace('>', '&gt;')
212 return tooltip_title
212 return tooltip_title
213 tooltip = _ToolTip()
213 tooltip = _ToolTip()
214
214
215
215
216 def files_breadcrumbs(repo_name, commit_id, file_path):
216 def files_breadcrumbs(repo_name, commit_id, file_path):
217 if isinstance(file_path, str):
217 if isinstance(file_path, str):
218 file_path = safe_unicode(file_path)
218 file_path = safe_unicode(file_path)
219
219
220 # TODO: johbo: Is this always a url like path, or is this operating
220 # TODO: johbo: Is this always a url like path, or is this operating
221 # system dependent?
221 # system dependent?
222 path_segments = file_path.split('/')
222 path_segments = file_path.split('/')
223
223
224 repo_name_html = escape(repo_name)
224 repo_name_html = escape(repo_name)
225 if len(path_segments) == 1 and path_segments[0] == '':
225 if len(path_segments) == 1 and path_segments[0] == '':
226 url_segments = [repo_name_html]
226 url_segments = [repo_name_html]
227 else:
227 else:
228 url_segments = [
228 url_segments = [
229 link_to(
229 link_to(
230 repo_name_html,
230 repo_name_html,
231 route_path(
231 route_path(
232 'repo_files',
232 'repo_files',
233 repo_name=repo_name,
233 repo_name=repo_name,
234 commit_id=commit_id,
234 commit_id=commit_id,
235 f_path=''),
235 f_path=''),
236 class_='pjax-link')]
236 class_='pjax-link')]
237
237
238 last_cnt = len(path_segments) - 1
238 last_cnt = len(path_segments) - 1
239 for cnt, segment in enumerate(path_segments):
239 for cnt, segment in enumerate(path_segments):
240 if not segment:
240 if not segment:
241 continue
241 continue
242 segment_html = escape(segment)
242 segment_html = escape(segment)
243
243
244 if cnt != last_cnt:
244 if cnt != last_cnt:
245 url_segments.append(
245 url_segments.append(
246 link_to(
246 link_to(
247 segment_html,
247 segment_html,
248 route_path(
248 route_path(
249 'repo_files',
249 'repo_files',
250 repo_name=repo_name,
250 repo_name=repo_name,
251 commit_id=commit_id,
251 commit_id=commit_id,
252 f_path='/'.join(path_segments[:cnt + 1])),
252 f_path='/'.join(path_segments[:cnt + 1])),
253 class_='pjax-link'))
253 class_='pjax-link'))
254 else:
254 else:
255 url_segments.append(segment_html)
255 url_segments.append(segment_html)
256
256
257 return literal('/'.join(url_segments))
257 return literal('/'.join(url_segments))
258
258
259
259
260 class CodeHtmlFormatter(HtmlFormatter):
260 class CodeHtmlFormatter(HtmlFormatter):
261 """
261 """
262 My code Html Formatter for source codes
262 My code Html Formatter for source codes
263 """
263 """
264
264
265 def wrap(self, source, outfile):
265 def wrap(self, source, outfile):
266 return self._wrap_div(self._wrap_pre(self._wrap_code(source)))
266 return self._wrap_div(self._wrap_pre(self._wrap_code(source)))
267
267
268 def _wrap_code(self, source):
268 def _wrap_code(self, source):
269 for cnt, it in enumerate(source):
269 for cnt, it in enumerate(source):
270 i, t = it
270 i, t = it
271 t = '<div id="L%s">%s</div>' % (cnt + 1, t)
271 t = '<div id="L%s">%s</div>' % (cnt + 1, t)
272 yield i, t
272 yield i, t
273
273
274 def _wrap_tablelinenos(self, inner):
274 def _wrap_tablelinenos(self, inner):
275 dummyoutfile = StringIO.StringIO()
275 dummyoutfile = StringIO.StringIO()
276 lncount = 0
276 lncount = 0
277 for t, line in inner:
277 for t, line in inner:
278 if t:
278 if t:
279 lncount += 1
279 lncount += 1
280 dummyoutfile.write(line)
280 dummyoutfile.write(line)
281
281
282 fl = self.linenostart
282 fl = self.linenostart
283 mw = len(str(lncount + fl - 1))
283 mw = len(str(lncount + fl - 1))
284 sp = self.linenospecial
284 sp = self.linenospecial
285 st = self.linenostep
285 st = self.linenostep
286 la = self.lineanchors
286 la = self.lineanchors
287 aln = self.anchorlinenos
287 aln = self.anchorlinenos
288 nocls = self.noclasses
288 nocls = self.noclasses
289 if sp:
289 if sp:
290 lines = []
290 lines = []
291
291
292 for i in range(fl, fl + lncount):
292 for i in range(fl, fl + lncount):
293 if i % st == 0:
293 if i % st == 0:
294 if i % sp == 0:
294 if i % sp == 0:
295 if aln:
295 if aln:
296 lines.append('<a href="#%s%d" class="special">%*d</a>' %
296 lines.append('<a href="#%s%d" class="special">%*d</a>' %
297 (la, i, mw, i))
297 (la, i, mw, i))
298 else:
298 else:
299 lines.append('<span class="special">%*d</span>' % (mw, i))
299 lines.append('<span class="special">%*d</span>' % (mw, i))
300 else:
300 else:
301 if aln:
301 if aln:
302 lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
302 lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
303 else:
303 else:
304 lines.append('%*d' % (mw, i))
304 lines.append('%*d' % (mw, i))
305 else:
305 else:
306 lines.append('')
306 lines.append('')
307 ls = '\n'.join(lines)
307 ls = '\n'.join(lines)
308 else:
308 else:
309 lines = []
309 lines = []
310 for i in range(fl, fl + lncount):
310 for i in range(fl, fl + lncount):
311 if i % st == 0:
311 if i % st == 0:
312 if aln:
312 if aln:
313 lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
313 lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
314 else:
314 else:
315 lines.append('%*d' % (mw, i))
315 lines.append('%*d' % (mw, i))
316 else:
316 else:
317 lines.append('')
317 lines.append('')
318 ls = '\n'.join(lines)
318 ls = '\n'.join(lines)
319
319
320 # in case you wonder about the seemingly redundant <div> here: since the
320 # in case you wonder about the seemingly redundant <div> here: since the
321 # content in the other cell also is wrapped in a div, some browsers in
321 # content in the other cell also is wrapped in a div, some browsers in
322 # some configurations seem to mess up the formatting...
322 # some configurations seem to mess up the formatting...
323 if nocls:
323 if nocls:
324 yield 0, ('<table class="%stable">' % self.cssclass +
324 yield 0, ('<table class="%stable">' % self.cssclass +
325 '<tr><td><div class="linenodiv" '
325 '<tr><td><div class="linenodiv" '
326 'style="background-color: #f0f0f0; padding-right: 10px">'
326 'style="background-color: #f0f0f0; padding-right: 10px">'
327 '<pre style="line-height: 125%">' +
327 '<pre style="line-height: 125%">' +
328 ls + '</pre></div></td><td id="hlcode" class="code">')
328 ls + '</pre></div></td><td id="hlcode" class="code">')
329 else:
329 else:
330 yield 0, ('<table class="%stable">' % self.cssclass +
330 yield 0, ('<table class="%stable">' % self.cssclass +
331 '<tr><td class="linenos"><div class="linenodiv"><pre>' +
331 '<tr><td class="linenos"><div class="linenodiv"><pre>' +
332 ls + '</pre></div></td><td id="hlcode" class="code">')
332 ls + '</pre></div></td><td id="hlcode" class="code">')
333 yield 0, dummyoutfile.getvalue()
333 yield 0, dummyoutfile.getvalue()
334 yield 0, '</td></tr></table>'
334 yield 0, '</td></tr></table>'
335
335
336
336
337 class SearchContentCodeHtmlFormatter(CodeHtmlFormatter):
337 class SearchContentCodeHtmlFormatter(CodeHtmlFormatter):
338 def __init__(self, **kw):
338 def __init__(self, **kw):
339 # only show these line numbers if set
339 # only show these line numbers if set
340 self.only_lines = kw.pop('only_line_numbers', [])
340 self.only_lines = kw.pop('only_line_numbers', [])
341 self.query_terms = kw.pop('query_terms', [])
341 self.query_terms = kw.pop('query_terms', [])
342 self.max_lines = kw.pop('max_lines', 5)
342 self.max_lines = kw.pop('max_lines', 5)
343 self.line_context = kw.pop('line_context', 3)
343 self.line_context = kw.pop('line_context', 3)
344 self.url = kw.pop('url', None)
344 self.url = kw.pop('url', None)
345
345
346 super(CodeHtmlFormatter, self).__init__(**kw)
346 super(CodeHtmlFormatter, self).__init__(**kw)
347
347
348 def _wrap_code(self, source):
348 def _wrap_code(self, source):
349 for cnt, it in enumerate(source):
349 for cnt, it in enumerate(source):
350 i, t = it
350 i, t = it
351 t = '<pre>%s</pre>' % t
351 t = '<pre>%s</pre>' % t
352 yield i, t
352 yield i, t
353
353
354 def _wrap_tablelinenos(self, inner):
354 def _wrap_tablelinenos(self, inner):
355 yield 0, '<table class="code-highlight %stable">' % self.cssclass
355 yield 0, '<table class="code-highlight %stable">' % self.cssclass
356
356
357 last_shown_line_number = 0
357 last_shown_line_number = 0
358 current_line_number = 1
358 current_line_number = 1
359
359
360 for t, line in inner:
360 for t, line in inner:
361 if not t:
361 if not t:
362 yield t, line
362 yield t, line
363 continue
363 continue
364
364
365 if current_line_number in self.only_lines:
365 if current_line_number in self.only_lines:
366 if last_shown_line_number + 1 != current_line_number:
366 if last_shown_line_number + 1 != current_line_number:
367 yield 0, '<tr>'
367 yield 0, '<tr>'
368 yield 0, '<td class="line">...</td>'
368 yield 0, '<td class="line">...</td>'
369 yield 0, '<td id="hlcode" class="code"></td>'
369 yield 0, '<td id="hlcode" class="code"></td>'
370 yield 0, '</tr>'
370 yield 0, '</tr>'
371
371
372 yield 0, '<tr>'
372 yield 0, '<tr>'
373 if self.url:
373 if self.url:
374 yield 0, '<td class="line"><a href="%s#L%i">%i</a></td>' % (
374 yield 0, '<td class="line"><a href="%s#L%i">%i</a></td>' % (
375 self.url, current_line_number, current_line_number)
375 self.url, current_line_number, current_line_number)
376 else:
376 else:
377 yield 0, '<td class="line"><a href="">%i</a></td>' % (
377 yield 0, '<td class="line"><a href="">%i</a></td>' % (
378 current_line_number)
378 current_line_number)
379 yield 0, '<td id="hlcode" class="code">' + line + '</td>'
379 yield 0, '<td id="hlcode" class="code">' + line + '</td>'
380 yield 0, '</tr>'
380 yield 0, '</tr>'
381
381
382 last_shown_line_number = current_line_number
382 last_shown_line_number = current_line_number
383
383
384 current_line_number += 1
384 current_line_number += 1
385
385
386
386
387 yield 0, '</table>'
387 yield 0, '</table>'
388
388
389
389
390 def extract_phrases(text_query):
390 def extract_phrases(text_query):
391 """
391 """
392 Extracts phrases from search term string making sure phrases
392 Extracts phrases from search term string making sure phrases
393 contained in double quotes are kept together - and discarding empty values
393 contained in double quotes are kept together - and discarding empty values
394 or fully whitespace values eg.
394 or fully whitespace values eg.
395
395
396 'some text "a phrase" more' => ['some', 'text', 'a phrase', 'more']
396 'some text "a phrase" more' => ['some', 'text', 'a phrase', 'more']
397
397
398 """
398 """
399
399
400 in_phrase = False
400 in_phrase = False
401 buf = ''
401 buf = ''
402 phrases = []
402 phrases = []
403 for char in text_query:
403 for char in text_query:
404 if in_phrase:
404 if in_phrase:
405 if char == '"': # end phrase
405 if char == '"': # end phrase
406 phrases.append(buf)
406 phrases.append(buf)
407 buf = ''
407 buf = ''
408 in_phrase = False
408 in_phrase = False
409 continue
409 continue
410 else:
410 else:
411 buf += char
411 buf += char
412 continue
412 continue
413 else:
413 else:
414 if char == '"': # start phrase
414 if char == '"': # start phrase
415 in_phrase = True
415 in_phrase = True
416 phrases.append(buf)
416 phrases.append(buf)
417 buf = ''
417 buf = ''
418 continue
418 continue
419 elif char == ' ':
419 elif char == ' ':
420 phrases.append(buf)
420 phrases.append(buf)
421 buf = ''
421 buf = ''
422 continue
422 continue
423 else:
423 else:
424 buf += char
424 buf += char
425
425
426 phrases.append(buf)
426 phrases.append(buf)
427 phrases = [phrase.strip() for phrase in phrases if phrase.strip()]
427 phrases = [phrase.strip() for phrase in phrases if phrase.strip()]
428 return phrases
428 return phrases
429
429
430
430
431 def get_matching_offsets(text, phrases):
431 def get_matching_offsets(text, phrases):
432 """
432 """
433 Returns a list of string offsets in `text` that the list of `terms` match
433 Returns a list of string offsets in `text` that the list of `terms` match
434
434
435 >>> get_matching_offsets('some text here', ['some', 'here'])
435 >>> get_matching_offsets('some text here', ['some', 'here'])
436 [(0, 4), (10, 14)]
436 [(0, 4), (10, 14)]
437
437
438 """
438 """
439 offsets = []
439 offsets = []
440 for phrase in phrases:
440 for phrase in phrases:
441 for match in re.finditer(phrase, text):
441 for match in re.finditer(phrase, text):
442 offsets.append((match.start(), match.end()))
442 offsets.append((match.start(), match.end()))
443
443
444 return offsets
444 return offsets
445
445
446
446
447 def normalize_text_for_matching(x):
447 def normalize_text_for_matching(x):
448 """
448 """
449 Replaces all non alnum characters to spaces and lower cases the string,
449 Replaces all non alnum characters to spaces and lower cases the string,
450 useful for comparing two text strings without punctuation
450 useful for comparing two text strings without punctuation
451 """
451 """
452 return re.sub(r'[^\w]', ' ', x.lower())
452 return re.sub(r'[^\w]', ' ', x.lower())
453
453
454
454
455 def get_matching_line_offsets(lines, terms):
455 def get_matching_line_offsets(lines, terms):
456 """ Return a set of `lines` indices (starting from 1) matching a
456 """ Return a set of `lines` indices (starting from 1) matching a
457 text search query, along with `context` lines above/below matching lines
457 text search query, along with `context` lines above/below matching lines
458
458
459 :param lines: list of strings representing lines
459 :param lines: list of strings representing lines
460 :param terms: search term string to match in lines eg. 'some text'
460 :param terms: search term string to match in lines eg. 'some text'
461 :param context: number of lines above/below a matching line to add to result
461 :param context: number of lines above/below a matching line to add to result
462 :param max_lines: cut off for lines of interest
462 :param max_lines: cut off for lines of interest
463 eg.
463 eg.
464
464
465 text = '''
465 text = '''
466 words words words
466 words words words
467 words words words
467 words words words
468 some text some
468 some text some
469 words words words
469 words words words
470 words words words
470 words words words
471 text here what
471 text here what
472 '''
472 '''
473 get_matching_line_offsets(text, 'text', context=1)
473 get_matching_line_offsets(text, 'text', context=1)
474 {3: [(5, 9)], 6: [(0, 4)]]
474 {3: [(5, 9)], 6: [(0, 4)]]
475
475
476 """
476 """
477 matching_lines = {}
477 matching_lines = {}
478 phrases = [normalize_text_for_matching(phrase)
478 phrases = [normalize_text_for_matching(phrase)
479 for phrase in extract_phrases(terms)]
479 for phrase in extract_phrases(terms)]
480
480
481 for line_index, line in enumerate(lines, start=1):
481 for line_index, line in enumerate(lines, start=1):
482 match_offsets = get_matching_offsets(
482 match_offsets = get_matching_offsets(
483 normalize_text_for_matching(line), phrases)
483 normalize_text_for_matching(line), phrases)
484 if match_offsets:
484 if match_offsets:
485 matching_lines[line_index] = match_offsets
485 matching_lines[line_index] = match_offsets
486
486
487 return matching_lines
487 return matching_lines
488
488
489
489
490 def hsv_to_rgb(h, s, v):
490 def hsv_to_rgb(h, s, v):
491 """ Convert hsv color values to rgb """
491 """ Convert hsv color values to rgb """
492
492
493 if s == 0.0:
493 if s == 0.0:
494 return v, v, v
494 return v, v, v
495 i = int(h * 6.0) # XXX assume int() truncates!
495 i = int(h * 6.0) # XXX assume int() truncates!
496 f = (h * 6.0) - i
496 f = (h * 6.0) - i
497 p = v * (1.0 - s)
497 p = v * (1.0 - s)
498 q = v * (1.0 - s * f)
498 q = v * (1.0 - s * f)
499 t = v * (1.0 - s * (1.0 - f))
499 t = v * (1.0 - s * (1.0 - f))
500 i = i % 6
500 i = i % 6
501 if i == 0:
501 if i == 0:
502 return v, t, p
502 return v, t, p
503 if i == 1:
503 if i == 1:
504 return q, v, p
504 return q, v, p
505 if i == 2:
505 if i == 2:
506 return p, v, t
506 return p, v, t
507 if i == 3:
507 if i == 3:
508 return p, q, v
508 return p, q, v
509 if i == 4:
509 if i == 4:
510 return t, p, v
510 return t, p, v
511 if i == 5:
511 if i == 5:
512 return v, p, q
512 return v, p, q
513
513
514
514
515 def unique_color_generator(n=10000, saturation=0.10, lightness=0.95):
515 def unique_color_generator(n=10000, saturation=0.10, lightness=0.95):
516 """
516 """
517 Generator for getting n of evenly distributed colors using
517 Generator for getting n of evenly distributed colors using
518 hsv color and golden ratio. It always returns the same order of colors
518 hsv color and golden ratio. It always returns the same order of colors
519
519
520 :param n: number of colors to generate
520 :param n: number of colors to generate
521 :param saturation: saturation of returned colors
521 :param saturation: saturation of returned colors
522 :param lightness: lightness of returned colors
522 :param lightness: lightness of returned colors
523 :returns: RGB tuple
523 :returns: RGB tuple
524 """
524 """
525
525
526 golden_ratio = 0.618033988749895
526 golden_ratio = 0.618033988749895
527 h = 0.22717784590367374
527 h = 0.22717784590367374
528
528
529 for _ in xrange(n):
529 for _ in xrange(n):
530 h += golden_ratio
530 h += golden_ratio
531 h %= 1
531 h %= 1
532 HSV_tuple = [h, saturation, lightness]
532 HSV_tuple = [h, saturation, lightness]
533 RGB_tuple = hsv_to_rgb(*HSV_tuple)
533 RGB_tuple = hsv_to_rgb(*HSV_tuple)
534 yield map(lambda x: str(int(x * 256)), RGB_tuple)
534 yield map(lambda x: str(int(x * 256)), RGB_tuple)
535
535
536
536
537 def color_hasher(n=10000, saturation=0.10, lightness=0.95):
537 def color_hasher(n=10000, saturation=0.10, lightness=0.95):
538 """
538 """
539 Returns a function which when called with an argument returns a unique
539 Returns a function which when called with an argument returns a unique
540 color for that argument, eg.
540 color for that argument, eg.
541
541
542 :param n: number of colors to generate
542 :param n: number of colors to generate
543 :param saturation: saturation of returned colors
543 :param saturation: saturation of returned colors
544 :param lightness: lightness of returned colors
544 :param lightness: lightness of returned colors
545 :returns: css RGB string
545 :returns: css RGB string
546
546
547 >>> color_hash = color_hasher()
547 >>> color_hash = color_hasher()
548 >>> color_hash('hello')
548 >>> color_hash('hello')
549 'rgb(34, 12, 59)'
549 'rgb(34, 12, 59)'
550 >>> color_hash('hello')
550 >>> color_hash('hello')
551 'rgb(34, 12, 59)'
551 'rgb(34, 12, 59)'
552 >>> color_hash('other')
552 >>> color_hash('other')
553 'rgb(90, 224, 159)'
553 'rgb(90, 224, 159)'
554 """
554 """
555
555
556 color_dict = {}
556 color_dict = {}
557 cgenerator = unique_color_generator(
557 cgenerator = unique_color_generator(
558 saturation=saturation, lightness=lightness)
558 saturation=saturation, lightness=lightness)
559
559
560 def get_color_string(thing):
560 def get_color_string(thing):
561 if thing in color_dict:
561 if thing in color_dict:
562 col = color_dict[thing]
562 col = color_dict[thing]
563 else:
563 else:
564 col = color_dict[thing] = cgenerator.next()
564 col = color_dict[thing] = cgenerator.next()
565 return "rgb(%s)" % (', '.join(col))
565 return "rgb(%s)" % (', '.join(col))
566
566
567 return get_color_string
567 return get_color_string
568
568
569
569
570 def get_lexer_safe(mimetype=None, filepath=None):
570 def get_lexer_safe(mimetype=None, filepath=None):
571 """
571 """
572 Tries to return a relevant pygments lexer using mimetype/filepath name,
572 Tries to return a relevant pygments lexer using mimetype/filepath name,
573 defaulting to plain text if none could be found
573 defaulting to plain text if none could be found
574 """
574 """
575 lexer = None
575 lexer = None
576 try:
576 try:
577 if mimetype:
577 if mimetype:
578 lexer = get_lexer_for_mimetype(mimetype)
578 lexer = get_lexer_for_mimetype(mimetype)
579 if not lexer:
579 if not lexer:
580 lexer = get_lexer_for_filename(filepath)
580 lexer = get_lexer_for_filename(filepath)
581 except pygments.util.ClassNotFound:
581 except pygments.util.ClassNotFound:
582 pass
582 pass
583
583
584 if not lexer:
584 if not lexer:
585 lexer = get_lexer_by_name('text')
585 lexer = get_lexer_by_name('text')
586
586
587 return lexer
587 return lexer
588
588
589
589
590 def get_lexer_for_filenode(filenode):
590 def get_lexer_for_filenode(filenode):
591 lexer = get_custom_lexer(filenode.extension) or filenode.lexer
591 lexer = get_custom_lexer(filenode.extension) or filenode.lexer
592 return lexer
592 return lexer
593
593
594
594
595 def pygmentize(filenode, **kwargs):
595 def pygmentize(filenode, **kwargs):
596 """
596 """
597 pygmentize function using pygments
597 pygmentize function using pygments
598
598
599 :param filenode:
599 :param filenode:
600 """
600 """
601 lexer = get_lexer_for_filenode(filenode)
601 lexer = get_lexer_for_filenode(filenode)
602 return literal(code_highlight(filenode.content, lexer,
602 return literal(code_highlight(filenode.content, lexer,
603 CodeHtmlFormatter(**kwargs)))
603 CodeHtmlFormatter(**kwargs)))
604
604
605
605
606 def is_following_repo(repo_name, user_id):
606 def is_following_repo(repo_name, user_id):
607 from rhodecode.model.scm import ScmModel
607 from rhodecode.model.scm import ScmModel
608 return ScmModel().is_following_repo(repo_name, user_id)
608 return ScmModel().is_following_repo(repo_name, user_id)
609
609
610
610
611 class _Message(object):
611 class _Message(object):
612 """A message returned by ``Flash.pop_messages()``.
612 """A message returned by ``Flash.pop_messages()``.
613
613
614 Converting the message to a string returns the message text. Instances
614 Converting the message to a string returns the message text. Instances
615 also have the following attributes:
615 also have the following attributes:
616
616
617 * ``message``: the message text.
617 * ``message``: the message text.
618 * ``category``: the category specified when the message was created.
618 * ``category``: the category specified when the message was created.
619 """
619 """
620
620
621 def __init__(self, category, message):
621 def __init__(self, category, message):
622 self.category = category
622 self.category = category
623 self.message = message
623 self.message = message
624
624
625 def __str__(self):
625 def __str__(self):
626 return self.message
626 return self.message
627
627
628 __unicode__ = __str__
628 __unicode__ = __str__
629
629
630 def __html__(self):
630 def __html__(self):
631 return escape(safe_unicode(self.message))
631 return escape(safe_unicode(self.message))
632
632
633
633
634 class Flash(object):
634 class Flash(object):
635 # List of allowed categories. If None, allow any category.
635 # List of allowed categories. If None, allow any category.
636 categories = ["warning", "notice", "error", "success"]
636 categories = ["warning", "notice", "error", "success"]
637
637
638 # Default category if none is specified.
638 # Default category if none is specified.
639 default_category = "notice"
639 default_category = "notice"
640
640
641 def __init__(self, session_key="flash", categories=None,
641 def __init__(self, session_key="flash", categories=None,
642 default_category=None):
642 default_category=None):
643 """
643 """
644 Instantiate a ``Flash`` object.
644 Instantiate a ``Flash`` object.
645
645
646 ``session_key`` is the key to save the messages under in the user's
646 ``session_key`` is the key to save the messages under in the user's
647 session.
647 session.
648
648
649 ``categories`` is an optional list which overrides the default list
649 ``categories`` is an optional list which overrides the default list
650 of categories.
650 of categories.
651
651
652 ``default_category`` overrides the default category used for messages
652 ``default_category`` overrides the default category used for messages
653 when none is specified.
653 when none is specified.
654 """
654 """
655 self.session_key = session_key
655 self.session_key = session_key
656 if categories is not None:
656 if categories is not None:
657 self.categories = categories
657 self.categories = categories
658 if default_category is not None:
658 if default_category is not None:
659 self.default_category = default_category
659 self.default_category = default_category
660 if self.categories and self.default_category not in self.categories:
660 if self.categories and self.default_category not in self.categories:
661 raise ValueError(
661 raise ValueError(
662 "unrecognized default category %r" % (self.default_category,))
662 "unrecognized default category %r" % (self.default_category,))
663
663
664 def pop_messages(self, session=None, request=None):
664 def pop_messages(self, session=None, request=None):
665 """
665 """
666 Return all accumulated messages and delete them from the session.
666 Return all accumulated messages and delete them from the session.
667
667
668 The return value is a list of ``Message`` objects.
668 The return value is a list of ``Message`` objects.
669 """
669 """
670 messages = []
670 messages = []
671
671
672 if not session:
672 if not session:
673 if not request:
673 if not request:
674 request = get_current_request()
674 request = get_current_request()
675 session = request.session
675 session = request.session
676
676
677 # Pop the 'old' pylons flash messages. They are tuples of the form
677 # Pop the 'old' pylons flash messages. They are tuples of the form
678 # (category, message)
678 # (category, message)
679 for cat, msg in session.pop(self.session_key, []):
679 for cat, msg in session.pop(self.session_key, []):
680 messages.append(_Message(cat, msg))
680 messages.append(_Message(cat, msg))
681
681
682 # Pop the 'new' pyramid flash messages for each category as list
682 # Pop the 'new' pyramid flash messages for each category as list
683 # of strings.
683 # of strings.
684 for cat in self.categories:
684 for cat in self.categories:
685 for msg in session.pop_flash(queue=cat):
685 for msg in session.pop_flash(queue=cat):
686 messages.append(_Message(cat, msg))
686 messages.append(_Message(cat, msg))
687 # Map messages from the default queue to the 'notice' category.
687 # Map messages from the default queue to the 'notice' category.
688 for msg in session.pop_flash():
688 for msg in session.pop_flash():
689 messages.append(_Message('notice', msg))
689 messages.append(_Message('notice', msg))
690
690
691 session.save()
691 session.save()
692 return messages
692 return messages
693
693
694 def json_alerts(self, session=None, request=None):
694 def json_alerts(self, session=None, request=None):
695 payloads = []
695 payloads = []
696 messages = flash.pop_messages(session=session, request=request)
696 messages = flash.pop_messages(session=session, request=request)
697 if messages:
697 if messages:
698 for message in messages:
698 for message in messages:
699 subdata = {}
699 subdata = {}
700 if hasattr(message.message, 'rsplit'):
700 if hasattr(message.message, 'rsplit'):
701 flash_data = message.message.rsplit('|DELIM|', 1)
701 flash_data = message.message.rsplit('|DELIM|', 1)
702 org_message = flash_data[0]
702 org_message = flash_data[0]
703 if len(flash_data) > 1:
703 if len(flash_data) > 1:
704 subdata = json.loads(flash_data[1])
704 subdata = json.loads(flash_data[1])
705 else:
705 else:
706 org_message = message.message
706 org_message = message.message
707 payloads.append({
707 payloads.append({
708 'message': {
708 'message': {
709 'message': u'{}'.format(org_message),
709 'message': u'{}'.format(org_message),
710 'level': message.category,
710 'level': message.category,
711 'force': True,
711 'force': True,
712 'subdata': subdata
712 'subdata': subdata
713 }
713 }
714 })
714 })
715 return json.dumps(payloads)
715 return json.dumps(payloads)
716
716
717 def __call__(self, message, category=None, ignore_duplicate=False,
717 def __call__(self, message, category=None, ignore_duplicate=False,
718 session=None, request=None):
718 session=None, request=None):
719
719
720 if not session:
720 if not session:
721 if not request:
721 if not request:
722 request = get_current_request()
722 request = get_current_request()
723 session = request.session
723 session = request.session
724
724
725 session.flash(
725 session.flash(
726 message, queue=category, allow_duplicate=not ignore_duplicate)
726 message, queue=category, allow_duplicate=not ignore_duplicate)
727
727
728
728
729 flash = Flash()
729 flash = Flash()
730
730
731 #==============================================================================
731 #==============================================================================
732 # SCM FILTERS available via h.
732 # SCM FILTERS available via h.
733 #==============================================================================
733 #==============================================================================
734 from rhodecode.lib.vcs.utils import author_name, author_email
734 from rhodecode.lib.vcs.utils import author_name, author_email
735 from rhodecode.lib.utils2 import credentials_filter, age as _age
735 from rhodecode.lib.utils2 import credentials_filter, age as _age
736 from rhodecode.model.db import User, ChangesetStatus
736 from rhodecode.model.db import User, ChangesetStatus
737
737
738 age = _age
738 age = _age
739 capitalize = lambda x: x.capitalize()
739 capitalize = lambda x: x.capitalize()
740 email = author_email
740 email = author_email
741 short_id = lambda x: x[:12]
741 short_id = lambda x: x[:12]
742 hide_credentials = lambda x: ''.join(credentials_filter(x))
742 hide_credentials = lambda x: ''.join(credentials_filter(x))
743
743
744
744
745 def age_component(datetime_iso, value=None, time_is_local=False):
745 def age_component(datetime_iso, value=None, time_is_local=False):
746 title = value or format_date(datetime_iso)
746 title = value or format_date(datetime_iso)
747 tzinfo = '+00:00'
747 tzinfo = '+00:00'
748
748
749 # detect if we have a timezone info, otherwise, add it
749 # detect if we have a timezone info, otherwise, add it
750 if isinstance(datetime_iso, datetime) and not datetime_iso.tzinfo:
750 if isinstance(datetime_iso, datetime) and not datetime_iso.tzinfo:
751 if time_is_local:
751 if time_is_local:
752 tzinfo = time.strftime("+%H:%M",
752 tzinfo = time.strftime("+%H:%M",
753 time.gmtime(
753 time.gmtime(
754 (datetime.now() - datetime.utcnow()).seconds + 1
754 (datetime.now() - datetime.utcnow()).seconds + 1
755 )
755 )
756 )
756 )
757
757
758 return literal(
758 return literal(
759 '<time class="timeago tooltip" '
759 '<time class="timeago tooltip" '
760 'title="{1}{2}" datetime="{0}{2}">{1}</time>'.format(
760 'title="{1}{2}" datetime="{0}{2}">{1}</time>'.format(
761 datetime_iso, title, tzinfo))
761 datetime_iso, title, tzinfo))
762
762
763
763
764 def _shorten_commit_id(commit_id):
764 def _shorten_commit_id(commit_id):
765 from rhodecode import CONFIG
765 from rhodecode import CONFIG
766 def_len = safe_int(CONFIG.get('rhodecode_show_sha_length', 12))
766 def_len = safe_int(CONFIG.get('rhodecode_show_sha_length', 12))
767 return commit_id[:def_len]
767 return commit_id[:def_len]
768
768
769
769
770 def show_id(commit):
770 def show_id(commit):
771 """
771 """
772 Configurable function that shows ID
772 Configurable function that shows ID
773 by default it's r123:fffeeefffeee
773 by default it's r123:fffeeefffeee
774
774
775 :param commit: commit instance
775 :param commit: commit instance
776 """
776 """
777 from rhodecode import CONFIG
777 from rhodecode import CONFIG
778 show_idx = str2bool(CONFIG.get('rhodecode_show_revision_number', True))
778 show_idx = str2bool(CONFIG.get('rhodecode_show_revision_number', True))
779
779
780 raw_id = _shorten_commit_id(commit.raw_id)
780 raw_id = _shorten_commit_id(commit.raw_id)
781 if show_idx:
781 if show_idx:
782 return 'r%s:%s' % (commit.idx, raw_id)
782 return 'r%s:%s' % (commit.idx, raw_id)
783 else:
783 else:
784 return '%s' % (raw_id, )
784 return '%s' % (raw_id, )
785
785
786
786
787 def format_date(date):
787 def format_date(date):
788 """
788 """
789 use a standardized formatting for dates used in RhodeCode
789 use a standardized formatting for dates used in RhodeCode
790
790
791 :param date: date/datetime object
791 :param date: date/datetime object
792 :return: formatted date
792 :return: formatted date
793 """
793 """
794
794
795 if date:
795 if date:
796 _fmt = "%a, %d %b %Y %H:%M:%S"
796 _fmt = "%a, %d %b %Y %H:%M:%S"
797 return safe_unicode(date.strftime(_fmt))
797 return safe_unicode(date.strftime(_fmt))
798
798
799 return u""
799 return u""
800
800
801
801
802 class _RepoChecker(object):
802 class _RepoChecker(object):
803
803
804 def __init__(self, backend_alias):
804 def __init__(self, backend_alias):
805 self._backend_alias = backend_alias
805 self._backend_alias = backend_alias
806
806
807 def __call__(self, repository):
807 def __call__(self, repository):
808 if hasattr(repository, 'alias'):
808 if hasattr(repository, 'alias'):
809 _type = repository.alias
809 _type = repository.alias
810 elif hasattr(repository, 'repo_type'):
810 elif hasattr(repository, 'repo_type'):
811 _type = repository.repo_type
811 _type = repository.repo_type
812 else:
812 else:
813 _type = repository
813 _type = repository
814 return _type == self._backend_alias
814 return _type == self._backend_alias
815
815
816 is_git = _RepoChecker('git')
816 is_git = _RepoChecker('git')
817 is_hg = _RepoChecker('hg')
817 is_hg = _RepoChecker('hg')
818 is_svn = _RepoChecker('svn')
818 is_svn = _RepoChecker('svn')
819
819
820
820
821 def get_repo_type_by_name(repo_name):
821 def get_repo_type_by_name(repo_name):
822 repo = Repository.get_by_repo_name(repo_name)
822 repo = Repository.get_by_repo_name(repo_name)
823 return repo.repo_type
823 return repo.repo_type
824
824
825
825
826 def is_svn_without_proxy(repository):
826 def is_svn_without_proxy(repository):
827 if is_svn(repository):
827 if is_svn(repository):
828 from rhodecode.model.settings import VcsSettingsModel
828 from rhodecode.model.settings import VcsSettingsModel
829 conf = VcsSettingsModel().get_ui_settings_as_config_obj()
829 conf = VcsSettingsModel().get_ui_settings_as_config_obj()
830 return not str2bool(conf.get('vcs_svn_proxy', 'http_requests_enabled'))
830 return not str2bool(conf.get('vcs_svn_proxy', 'http_requests_enabled'))
831 return False
831 return False
832
832
833
833
834 def discover_user(author):
834 def discover_user(author):
835 """
835 """
836 Tries to discover RhodeCode User based on the author string. Author string
836 Tries to discover RhodeCode User based on the author string. Author string
837 is typically `FirstName LastName <email@address.com>`
837 is typically `FirstName LastName <email@address.com>`
838 """
838 """
839
839
840 # if author is already an instance use it for extraction
840 # if author is already an instance use it for extraction
841 if isinstance(author, User):
841 if isinstance(author, User):
842 return author
842 return author
843
843
844 # Valid email in the attribute passed, see if they're in the system
844 # Valid email in the attribute passed, see if they're in the system
845 _email = author_email(author)
845 _email = author_email(author)
846 if _email != '':
846 if _email != '':
847 user = User.get_by_email(_email, case_insensitive=True, cache=True)
847 user = User.get_by_email(_email, case_insensitive=True, cache=True)
848 if user is not None:
848 if user is not None:
849 return user
849 return user
850
850
851 # Maybe it's a username, we try to extract it and fetch by username ?
851 # Maybe it's a username, we try to extract it and fetch by username ?
852 _author = author_name(author)
852 _author = author_name(author)
853 user = User.get_by_username(_author, case_insensitive=True, cache=True)
853 user = User.get_by_username(_author, case_insensitive=True, cache=True)
854 if user is not None:
854 if user is not None:
855 return user
855 return user
856
856
857 return None
857 return None
858
858
859
859
860 def email_or_none(author):
860 def email_or_none(author):
861 # extract email from the commit string
861 # extract email from the commit string
862 _email = author_email(author)
862 _email = author_email(author)
863
863
864 # If we have an email, use it, otherwise
864 # If we have an email, use it, otherwise
865 # see if it contains a username we can get an email from
865 # see if it contains a username we can get an email from
866 if _email != '':
866 if _email != '':
867 return _email
867 return _email
868 else:
868 else:
869 user = User.get_by_username(
869 user = User.get_by_username(
870 author_name(author), case_insensitive=True, cache=True)
870 author_name(author), case_insensitive=True, cache=True)
871
871
872 if user is not None:
872 if user is not None:
873 return user.email
873 return user.email
874
874
875 # No valid email, not a valid user in the system, none!
875 # No valid email, not a valid user in the system, none!
876 return None
876 return None
877
877
878
878
879 def link_to_user(author, length=0, **kwargs):
879 def link_to_user(author, length=0, **kwargs):
880 user = discover_user(author)
880 user = discover_user(author)
881 # user can be None, but if we have it already it means we can re-use it
881 # user can be None, but if we have it already it means we can re-use it
882 # in the person() function, so we save 1 intensive-query
882 # in the person() function, so we save 1 intensive-query
883 if user:
883 if user:
884 author = user
884 author = user
885
885
886 display_person = person(author, 'username_or_name_or_email')
886 display_person = person(author, 'username_or_name_or_email')
887 if length:
887 if length:
888 display_person = shorter(display_person, length)
888 display_person = shorter(display_person, length)
889
889
890 if user:
890 if user:
891 return link_to(
891 return link_to(
892 escape(display_person),
892 escape(display_person),
893 route_path('user_profile', username=user.username),
893 route_path('user_profile', username=user.username),
894 **kwargs)
894 **kwargs)
895 else:
895 else:
896 return escape(display_person)
896 return escape(display_person)
897
897
898
898
899 def person(author, show_attr="username_and_name"):
899 def person(author, show_attr="username_and_name"):
900 user = discover_user(author)
900 user = discover_user(author)
901 if user:
901 if user:
902 return getattr(user, show_attr)
902 return getattr(user, show_attr)
903 else:
903 else:
904 _author = author_name(author)
904 _author = author_name(author)
905 _email = email(author)
905 _email = email(author)
906 return _author or _email
906 return _author or _email
907
907
908
908
909 def author_string(email):
909 def author_string(email):
910 if email:
910 if email:
911 user = User.get_by_email(email, case_insensitive=True, cache=True)
911 user = User.get_by_email(email, case_insensitive=True, cache=True)
912 if user:
912 if user:
913 if user.first_name or user.last_name:
913 if user.first_name or user.last_name:
914 return '%s %s &lt;%s&gt;' % (
914 return '%s %s &lt;%s&gt;' % (
915 user.first_name, user.last_name, email)
915 user.first_name, user.last_name, email)
916 else:
916 else:
917 return email
917 return email
918 else:
918 else:
919 return email
919 return email
920 else:
920 else:
921 return None
921 return None
922
922
923
923
924 def person_by_id(id_, show_attr="username_and_name"):
924 def person_by_id(id_, show_attr="username_and_name"):
925 # attr to return from fetched user
925 # attr to return from fetched user
926 person_getter = lambda usr: getattr(usr, show_attr)
926 person_getter = lambda usr: getattr(usr, show_attr)
927
927
928 #maybe it's an ID ?
928 #maybe it's an ID ?
929 if str(id_).isdigit() or isinstance(id_, int):
929 if str(id_).isdigit() or isinstance(id_, int):
930 id_ = int(id_)
930 id_ = int(id_)
931 user = User.get(id_)
931 user = User.get(id_)
932 if user is not None:
932 if user is not None:
933 return person_getter(user)
933 return person_getter(user)
934 return id_
934 return id_
935
935
936
936
937 def gravatar_with_user(request, author, show_disabled=False):
937 def gravatar_with_user(request, author, show_disabled=False):
938 _render = request.get_partial_renderer(
938 _render = request.get_partial_renderer(
939 'rhodecode:templates/base/base.mako')
939 'rhodecode:templates/base/base.mako')
940 return _render('gravatar_with_user', author, show_disabled=show_disabled)
940 return _render('gravatar_with_user', author, show_disabled=show_disabled)
941
941
942
942
943 tags_paterns = OrderedDict((
943 tags_paterns = OrderedDict((
944 ('lang', (re.compile(r'\[(lang|language)\ \=\&gt;\ *([a-zA-Z\-\/\#\+\.]*)\]'),
944 ('lang', (re.compile(r'\[(lang|language)\ \=\&gt;\ *([a-zA-Z\-\/\#\+\.]*)\]'),
945 '<div class="metatag" tag="lang">\\2</div>')),
945 '<div class="metatag" tag="lang">\\2</div>')),
946
946
947 ('see', (re.compile(r'\[see\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]'),
947 ('see', (re.compile(r'\[see\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]'),
948 '<div class="metatag" tag="see">see: \\1 </div>')),
948 '<div class="metatag" tag="see">see: \\1 </div>')),
949
949
950 ('url', (re.compile(r'\[url\ \=\&gt;\ \[([a-zA-Z0-9\ \.\-\_]+)\]\((.*?)\)\]'),
950 ('url', (re.compile(r'\[url\ \=\&gt;\ \[([a-zA-Z0-9\ \.\-\_]+)\]\((http://|https://|/)(.*?)\)\]'),
951 '<div class="metatag" tag="url"> <a href="\\2">\\1</a> </div>')),
951 '<div class="metatag" tag="url"> <a href="\\2\\3">\\1</a> </div>')),
952
952
953 ('license', (re.compile(r'\[license\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]'),
953 ('license', (re.compile(r'\[license\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]'),
954 '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/\\1">\\1</a></div>')),
954 '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/\\1">\\1</a></div>')),
955
955
956 ('ref', (re.compile(r'\[(requires|recommends|conflicts|base)\ \=\&gt;\ *([a-zA-Z0-9\-\/]*)\]'),
956 ('ref', (re.compile(r'\[(requires|recommends|conflicts|base)\ \=\&gt;\ *([a-zA-Z0-9\-\/]*)\]'),
957 '<div class="metatag" tag="ref \\1">\\1: <a href="/\\2">\\2</a></div>')),
957 '<div class="metatag" tag="ref \\1">\\1: <a href="/\\2">\\2</a></div>')),
958
958
959 ('state', (re.compile(r'\[(stable|featured|stale|dead|dev|deprecated)\]'),
959 ('state', (re.compile(r'\[(stable|featured|stale|dead|dev|deprecated)\]'),
960 '<div class="metatag" tag="state \\1">\\1</div>')),
960 '<div class="metatag" tag="state \\1">\\1</div>')),
961
961
962 # label in grey
962 # label in grey
963 ('label', (re.compile(r'\[([a-z]+)\]'),
963 ('label', (re.compile(r'\[([a-z]+)\]'),
964 '<div class="metatag" tag="label">\\1</div>')),
964 '<div class="metatag" tag="label">\\1</div>')),
965
965
966 # generic catch all in grey
966 # generic catch all in grey
967 ('generic', (re.compile(r'\[([a-zA-Z0-9\.\-\_]+)\]'),
967 ('generic', (re.compile(r'\[([a-zA-Z0-9\.\-\_]+)\]'),
968 '<div class="metatag" tag="generic">\\1</div>')),
968 '<div class="metatag" tag="generic">\\1</div>')),
969 ))
969 ))
970
970
971
971
972 def extract_metatags(value):
972 def extract_metatags(value):
973 """
973 """
974 Extract supported meta-tags from given text value
974 Extract supported meta-tags from given text value
975 """
975 """
976 tags = []
976 tags = []
977 if not value:
977 if not value:
978 return tags, ''
978 return tags, ''
979
979
980 for key, val in tags_paterns.items():
980 for key, val in tags_paterns.items():
981 pat, replace_html = val
981 pat, replace_html = val
982 tags.extend([(key, x.group()) for x in pat.finditer(value)])
982 tags.extend([(key, x.group()) for x in pat.finditer(value)])
983 value = pat.sub('', value)
983 value = pat.sub('', value)
984
984
985 return tags, value
985 return tags, value
986
986
987
987
988 def style_metatag(tag_type, value):
988 def style_metatag(tag_type, value):
989 """
989 """
990 Converts tags from value into their HTML equivalent
990 Converts tags from value into their HTML equivalent
991 """
991 """
992 if not value:
992 if not value:
993 return ''
993 return ''
994
994
995 html_value = value
995 html_value = value
996 tag_data = tags_paterns.get(tag_type)
996 tag_data = tags_paterns.get(tag_type)
997 if tag_data:
997 if tag_data:
998 pat, replace_html = tag_data
998 pat, replace_html = tag_data
999 # convert to plain `unicode` instead of a markup tag to be used in
999 # convert to plain `unicode` instead of a markup tag to be used in
1000 # regex expressions. safe_unicode doesn't work here
1000 # regex expressions. safe_unicode doesn't work here
1001 html_value = pat.sub(replace_html, unicode(value))
1001 html_value = pat.sub(replace_html, unicode(value))
1002
1002
1003 return html_value
1003 return html_value
1004
1004
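# Illustrative sketch, not part of the original module: how the metatag
# helpers above behave on an HTML-escaped description. The sample text, the
# example URL and the expected values in the inline comments are assumptions
# derived from the patterns above; note that the `url` pattern is now limited
# to http://, https:// and / targets.
def _metatags_example():
    # input is expected to be HTML-escaped already, hence '=&gt;'
    description = 'my repo [lang =&gt; python] [url =&gt; [docs](https://docs.example.com)]'
    tags, remaining = extract_metatags(description)
    # tags -> [('lang', '[lang =&gt; python]'),
    #          ('url', '[url =&gt; [docs](https://docs.example.com)]')]
    # remaining -> 'my repo' plus the leftover whitespace
    html = style_metatag('lang', '[lang =&gt; python]')
    # html -> '<div class="metatag" tag="lang">python</div>'
    return tags, remaining, html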
1005
1005
1006 def bool2icon(value):
1006 def bool2icon(value):
1007 """
1007 """
1008 Returns the boolean value of a given value, represented as an HTML element
1008 Returns the boolean value of a given value, represented as an HTML element
1009 with classes that render true/false icons
1009 with classes that render true/false icons
1010
1010
1011 :param value: given value to convert to html node
1011 :param value: given value to convert to html node
1012 """
1012 """
1013
1013
1014 if value: # does bool conversion
1014 if value: # does bool conversion
1015 return HTML.tag('i', class_="icon-true")
1015 return HTML.tag('i', class_="icon-true")
1016 else: # not true as bool
1016 else: # not true as bool
1017 return HTML.tag('i', class_="icon-false")
1017 return HTML.tag('i', class_="icon-false")
1018
1018
1019
1019
1020 #==============================================================================
1020 #==============================================================================
1021 # PERMS
1021 # PERMS
1022 #==============================================================================
1022 #==============================================================================
1023 from rhodecode.lib.auth import HasPermissionAny, HasPermissionAll, \
1023 from rhodecode.lib.auth import HasPermissionAny, HasPermissionAll, \
1024 HasRepoPermissionAny, HasRepoPermissionAll, HasRepoGroupPermissionAll, \
1024 HasRepoPermissionAny, HasRepoPermissionAll, HasRepoGroupPermissionAll, \
1025 HasRepoGroupPermissionAny, HasRepoPermissionAnyApi, get_csrf_token, \
1025 HasRepoGroupPermissionAny, HasRepoPermissionAnyApi, get_csrf_token, \
1026 csrf_token_key
1026 csrf_token_key
1027
1027
1028
1028
1029 #==============================================================================
1029 #==============================================================================
1030 # GRAVATAR URL
1030 # GRAVATAR URL
1031 #==============================================================================
1031 #==============================================================================
1032 class InitialsGravatar(object):
1032 class InitialsGravatar(object):
1033 def __init__(self, email_address, first_name, last_name, size=30,
1033 def __init__(self, email_address, first_name, last_name, size=30,
1034 background=None, text_color='#fff'):
1034 background=None, text_color='#fff'):
1035 self.size = size
1035 self.size = size
1036 self.first_name = first_name
1036 self.first_name = first_name
1037 self.last_name = last_name
1037 self.last_name = last_name
1038 self.email_address = email_address
1038 self.email_address = email_address
1039 self.background = background or self.str2color(email_address)
1039 self.background = background or self.str2color(email_address)
1040 self.text_color = text_color
1040 self.text_color = text_color
1041
1041
1042 def get_color_bank(self):
1042 def get_color_bank(self):
1043 """
1043 """
1044 returns a predefined list of colors that gravatars can use.
1044 returns a predefined list of colors that gravatars can use.
1045 Those are randomized distinct colors that guarantee readability and
1045 Those are randomized distinct colors that guarantee readability and
1046 uniqueness.
1046 uniqueness.
1047
1047
1048 generated with: http://phrogz.net/css/distinct-colors.html
1048 generated with: http://phrogz.net/css/distinct-colors.html
1049 """
1049 """
1050 return [
1050 return [
1051 '#bf3030', '#a67f53', '#00ff00', '#5989b3', '#392040', '#d90000',
1051 '#bf3030', '#a67f53', '#00ff00', '#5989b3', '#392040', '#d90000',
1052 '#402910', '#204020', '#79baf2', '#a700b3', '#bf6060', '#7f5320',
1052 '#402910', '#204020', '#79baf2', '#a700b3', '#bf6060', '#7f5320',
1053 '#008000', '#003059', '#ee00ff', '#ff0000', '#8c4b00', '#007300',
1053 '#008000', '#003059', '#ee00ff', '#ff0000', '#8c4b00', '#007300',
1054 '#005fb3', '#de73e6', '#ff4040', '#ffaa00', '#3df255', '#203140',
1054 '#005fb3', '#de73e6', '#ff4040', '#ffaa00', '#3df255', '#203140',
1055 '#47004d', '#591616', '#664400', '#59b365', '#0d2133', '#83008c',
1055 '#47004d', '#591616', '#664400', '#59b365', '#0d2133', '#83008c',
1056 '#592d2d', '#bf9f60', '#73e682', '#1d3f73', '#73006b', '#402020',
1056 '#592d2d', '#bf9f60', '#73e682', '#1d3f73', '#73006b', '#402020',
1057 '#b2862d', '#397341', '#597db3', '#e600d6', '#a60000', '#736039',
1057 '#b2862d', '#397341', '#597db3', '#e600d6', '#a60000', '#736039',
1058 '#00b318', '#79aaf2', '#330d30', '#ff8080', '#403010', '#16591f',
1058 '#00b318', '#79aaf2', '#330d30', '#ff8080', '#403010', '#16591f',
1059 '#002459', '#8c4688', '#e50000', '#ffbf40', '#00732e', '#102340',
1059 '#002459', '#8c4688', '#e50000', '#ffbf40', '#00732e', '#102340',
1060 '#bf60ac', '#8c4646', '#cc8800', '#00a642', '#1d3473', '#b32d98',
1060 '#bf60ac', '#8c4646', '#cc8800', '#00a642', '#1d3473', '#b32d98',
1061 '#660e00', '#ffd580', '#80ffb2', '#7391e6', '#733967', '#d97b6c',
1061 '#660e00', '#ffd580', '#80ffb2', '#7391e6', '#733967', '#d97b6c',
1062 '#8c5e00', '#59b389', '#3967e6', '#590047', '#73281d', '#665200',
1062 '#8c5e00', '#59b389', '#3967e6', '#590047', '#73281d', '#665200',
1063 '#00e67a', '#2d50b3', '#8c2377', '#734139', '#b2982d', '#16593a',
1063 '#00e67a', '#2d50b3', '#8c2377', '#734139', '#b2982d', '#16593a',
1064 '#001859', '#ff00aa', '#a65e53', '#ffcc00', '#0d3321', '#2d3959',
1064 '#001859', '#ff00aa', '#a65e53', '#ffcc00', '#0d3321', '#2d3959',
1065 '#731d56', '#401610', '#4c3d00', '#468c6c', '#002ca6', '#d936a3',
1065 '#731d56', '#401610', '#4c3d00', '#468c6c', '#002ca6', '#d936a3',
1066 '#d94c36', '#403920', '#36d9a3', '#0d1733', '#592d4a', '#993626',
1066 '#d94c36', '#403920', '#36d9a3', '#0d1733', '#592d4a', '#993626',
1067 '#cca300', '#00734d', '#46598c', '#8c005e', '#7f1100', '#8c7000',
1067 '#cca300', '#00734d', '#46598c', '#8c005e', '#7f1100', '#8c7000',
1068 '#00a66f', '#7382e6', '#b32d74', '#d9896c', '#ffe680', '#1d7362',
1068 '#00a66f', '#7382e6', '#b32d74', '#d9896c', '#ffe680', '#1d7362',
1069 '#364cd9', '#73003d', '#d93a00', '#998a4d', '#59b3a1', '#5965b3',
1069 '#364cd9', '#73003d', '#d93a00', '#998a4d', '#59b3a1', '#5965b3',
1070 '#e5007a', '#73341d', '#665f00', '#00b38f', '#0018b3', '#59163a',
1070 '#e5007a', '#73341d', '#665f00', '#00b38f', '#0018b3', '#59163a',
1071 '#b2502d', '#bfb960', '#00ffcc', '#23318c', '#a6537f', '#734939',
1071 '#b2502d', '#bfb960', '#00ffcc', '#23318c', '#a6537f', '#734939',
1072 '#b2a700', '#104036', '#3d3df2', '#402031', '#e56739', '#736f39',
1072 '#b2a700', '#104036', '#3d3df2', '#402031', '#e56739', '#736f39',
1073 '#79f2ea', '#000059', '#401029', '#4c1400', '#ffee00', '#005953',
1073 '#79f2ea', '#000059', '#401029', '#4c1400', '#ffee00', '#005953',
1074 '#101040', '#990052', '#402820', '#403d10', '#00ffee', '#0000d9',
1074 '#101040', '#990052', '#402820', '#403d10', '#00ffee', '#0000d9',
1075 '#ff80c4', '#a66953', '#eeff00', '#00ccbe', '#8080ff', '#e673a1',
1075 '#ff80c4', '#a66953', '#eeff00', '#00ccbe', '#8080ff', '#e673a1',
1076 '#a62c00', '#474d00', '#1a3331', '#46468c', '#733950', '#662900',
1076 '#a62c00', '#474d00', '#1a3331', '#46468c', '#733950', '#662900',
1077 '#858c23', '#238c85', '#0f0073', '#b20047', '#d9986c', '#becc00',
1077 '#858c23', '#238c85', '#0f0073', '#b20047', '#d9986c', '#becc00',
1078 '#396f73', '#281d73', '#ff0066', '#ff6600', '#dee673', '#59adb3',
1078 '#396f73', '#281d73', '#ff0066', '#ff6600', '#dee673', '#59adb3',
1079 '#6559b3', '#590024', '#b2622d', '#98b32d', '#36ced9', '#332d59',
1079 '#6559b3', '#590024', '#b2622d', '#98b32d', '#36ced9', '#332d59',
1080 '#40001a', '#733f1d', '#526600', '#005359', '#242040', '#bf6079',
1080 '#40001a', '#733f1d', '#526600', '#005359', '#242040', '#bf6079',
1081 '#735039', '#cef23d', '#007780', '#5630bf', '#66001b', '#b24700',
1081 '#735039', '#cef23d', '#007780', '#5630bf', '#66001b', '#b24700',
1082 '#acbf60', '#1d6273', '#25008c', '#731d34', '#a67453', '#50592d',
1082 '#acbf60', '#1d6273', '#25008c', '#731d34', '#a67453', '#50592d',
1083 '#00ccff', '#6600ff', '#ff0044', '#4c1f00', '#8a994d', '#79daf2',
1083 '#00ccff', '#6600ff', '#ff0044', '#4c1f00', '#8a994d', '#79daf2',
1084 '#a173e6', '#d93662', '#402310', '#aaff00', '#2d98b3', '#8c40ff',
1084 '#a173e6', '#d93662', '#402310', '#aaff00', '#2d98b3', '#8c40ff',
1085 '#592d39', '#ff8c40', '#354020', '#103640', '#1a0040', '#331a20',
1085 '#592d39', '#ff8c40', '#354020', '#103640', '#1a0040', '#331a20',
1086 '#331400', '#334d00', '#1d5673', '#583973', '#7f0022', '#4c3626',
1086 '#331400', '#334d00', '#1d5673', '#583973', '#7f0022', '#4c3626',
1087 '#88cc00', '#36a3d9', '#3d0073', '#d9364c', '#33241a', '#698c23',
1087 '#88cc00', '#36a3d9', '#3d0073', '#d9364c', '#33241a', '#698c23',
1088 '#5995b3', '#300059', '#e57382', '#7f3300', '#366600', '#00aaff',
1088 '#5995b3', '#300059', '#e57382', '#7f3300', '#366600', '#00aaff',
1089 '#3a1659', '#733941', '#663600', '#74b32d', '#003c59', '#7f53a6',
1089 '#3a1659', '#733941', '#663600', '#74b32d', '#003c59', '#7f53a6',
1090 '#73000f', '#ff8800', '#baf279', '#79caf2', '#291040', '#a6293a',
1090 '#73000f', '#ff8800', '#baf279', '#79caf2', '#291040', '#a6293a',
1091 '#b2742d', '#587339', '#0077b3', '#632699', '#400009', '#d9a66c',
1091 '#b2742d', '#587339', '#0077b3', '#632699', '#400009', '#d9a66c',
1092 '#294010', '#2d4a59', '#aa00ff', '#4c131b', '#b25f00', '#5ce600',
1092 '#294010', '#2d4a59', '#aa00ff', '#4c131b', '#b25f00', '#5ce600',
1093 '#267399', '#a336d9', '#990014', '#664e33', '#86bf60', '#0088ff',
1093 '#267399', '#a336d9', '#990014', '#664e33', '#86bf60', '#0088ff',
1094 '#7700b3', '#593a16', '#073300', '#1d4b73', '#ac60bf', '#e59539',
1094 '#7700b3', '#593a16', '#073300', '#1d4b73', '#ac60bf', '#e59539',
1095 '#4f8c46', '#368dd9', '#5c0073'
1095 '#4f8c46', '#368dd9', '#5c0073'
1096 ]
1096 ]
1097
1097
1098 def rgb_to_hex_color(self, rgb_tuple):
1098 def rgb_to_hex_color(self, rgb_tuple):
1099 """
1099 """
1100 Converts a passed rgb_tuple to a hex color string.
1100 Converts a passed rgb_tuple to a hex color string.
1101
1101
1102 :param rgb_tuple: tuple with 3 ints represents rgb color space
1102 :param rgb_tuple: tuple with 3 ints represents rgb color space
1103 """
1103 """
1104 return '#' + ("".join(map(chr, rgb_tuple)).encode('hex'))
1104 return '#' + ("".join(map(chr, rgb_tuple)).encode('hex'))
1105
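    # Example (illustrative, Python 2 semantics assumed): the tuple is turned
    # into raw bytes and hex-encoded, so (191, 48, 48) -> '#bf3030', matching
    # the first entry of the color bank above.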
1105
1106 def email_to_int_list(self, email_str):
1106 def email_to_int_list(self, email_str):
1107 """
1107 """
1108 Get every byte of the hex digest value of the email and turn it into an
1108 Get every byte of the hex digest value of the email and turn it into an
1109 integer. Each value is always between 0-255.
1109 integer. Each value is always between 0-255.
1110 """
1110 """
1111 digest = md5_safe(email_str.lower())
1111 digest = md5_safe(email_str.lower())
1112 return [int(digest[i * 2:i * 2 + 2], 16) for i in range(16)]
1112 return [int(digest[i * 2:i * 2 + 2], 16) for i in range(16)]
1113
1113
1114 def pick_color_bank_index(self, email_str, color_bank):
1114 def pick_color_bank_index(self, email_str, color_bank):
1115 return self.email_to_int_list(email_str)[0] % len(color_bank)
1115 return self.email_to_int_list(email_str)[0] % len(color_bank)
1116
1116
1117 def str2color(self, email_str):
1117 def str2color(self, email_str):
1118 """
1118 """
1119 Tries to map an email to a color using a stable algorithm
1119 Tries to map an email to a color using a stable algorithm
1120
1120
1121 :param email_str:
1121 :param email_str:
1122 """
1122 """
1123 color_bank = self.get_color_bank()
1123 color_bank = self.get_color_bank()
1124 # pick position (modulo its length so we always find it in the
1124 # pick position (modulo its length so we always find it in the
1125 # bank even if it's smaller than 256 values)
1125 # bank even if it's smaller than 256 values)
1126 pos = self.pick_color_bank_index(email_str, color_bank)
1126 pos = self.pick_color_bank_index(email_str, color_bank)
1127 return color_bank[pos]
1127 return color_bank[pos]
1128
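    # Illustrative sketch (assumption, not in the original source): the first
    # byte of md5(email) indexes into the color bank, so the same address
    # always gets the same color, e.g.
    #
    #   bank = self.get_color_bank()
    #   idx = self.email_to_int_list('marcin@example.com')[0] % len(bank)
    #   color = bank[idx]   # stable for this address across calls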
1128
1129 def normalize_email(self, email_address):
1129 def normalize_email(self, email_address):
1130 import unicodedata
1130 import unicodedata
1131 # default host used to fill in the fake/missing email
1131 # default host used to fill in the fake/missing email
1132 default_host = u'localhost'
1132 default_host = u'localhost'
1133
1133
1134 if not email_address:
1134 if not email_address:
1135 email_address = u'%s@%s' % (User.DEFAULT_USER, default_host)
1135 email_address = u'%s@%s' % (User.DEFAULT_USER, default_host)
1136
1136
1137 email_address = safe_unicode(email_address)
1137 email_address = safe_unicode(email_address)
1138
1138
1139 if u'@' not in email_address:
1139 if u'@' not in email_address:
1140 email_address = u'%s@%s' % (email_address, default_host)
1140 email_address = u'%s@%s' % (email_address, default_host)
1141
1141
1142 if email_address.endswith(u'@'):
1142 if email_address.endswith(u'@'):
1143 email_address = u'%s%s' % (email_address, default_host)
1143 email_address = u'%s%s' % (email_address, default_host)
1144
1144
1145 email_address = unicodedata.normalize('NFKD', email_address)\
1145 email_address = unicodedata.normalize('NFKD', email_address)\
1146 .encode('ascii', 'ignore')
1146 .encode('ascii', 'ignore')
1147 return email_address
1147 return email_address
1148
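    # A few illustrative inputs (assumptions, derived from the rules above):
    #   'john'       -> 'john@localhost'   (missing host filled in)
    #   'john@'      -> 'john@localhost'   (dangling '@' completed)
    #   u'jos\xe9@x' -> 'jose@x'           (NFKD-normalized to plain ascii)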
1148
1149 def get_initials(self):
1149 def get_initials(self):
1150 """
1150 """
1151 Returns 2 letter initials calculated based on the input.
1151 Returns 2 letter initials calculated based on the input.
1152 The algorithm first takes the given email address: the first letter of
1152 The algorithm first takes the given email address: the first letter of
1153 the part before @, and then the first letter of the server name. In case
1153 the part before @, and then the first letter of the server name. In case
1154 the part before @ is in a `somestring.somestring2` format, it replaces
1154 the part before @ is in a `somestring.somestring2` format, it replaces
1155 the server letter with the first letter of somestring2.
1155 the server letter with the first letter of somestring2.
1156
1156
1157 In case the function was initialized with both first and last name, this
1157 In case the function was initialized with both first and last name, this
1158 overrides the extraction from email with the first letters of the first and
1158 overrides the extraction from email with the first letters of the first and
1159 last name. We add special logic to that: in case the full name
1159 last name. We add special logic to that: in case the full name
1160 is compound, like Guido Von Rossum, we use the last part of the last name
1160 is compound, like Guido Von Rossum, we use the last part of the last name
1161 (Von Rossum), picking `R`.
1161 (Von Rossum), picking `R`.
1162
1162
1163 The function also normalizes non-ascii characters to their ascii
1163 The function also normalizes non-ascii characters to their ascii
1164 representation, eg Ą => A
1164 representation, eg Ą => A
1165 """
1165 """
1166 import unicodedata
1166 import unicodedata
1167 # replace non-ascii to ascii
1167 # replace non-ascii to ascii
1168 first_name = unicodedata.normalize(
1168 first_name = unicodedata.normalize(
1169 'NFKD', safe_unicode(self.first_name)).encode('ascii', 'ignore')
1169 'NFKD', safe_unicode(self.first_name)).encode('ascii', 'ignore')
1170 last_name = unicodedata.normalize(
1170 last_name = unicodedata.normalize(
1171 'NFKD', safe_unicode(self.last_name)).encode('ascii', 'ignore')
1171 'NFKD', safe_unicode(self.last_name)).encode('ascii', 'ignore')
1172
1172
1173 # do NFKD encoding, and also make sure email has proper format
1173 # do NFKD encoding, and also make sure email has proper format
1174 email_address = self.normalize_email(self.email_address)
1174 email_address = self.normalize_email(self.email_address)
1175
1175
1176 # first push the email initials
1176 # first push the email initials
1177 prefix, server = email_address.split('@', 1)
1177 prefix, server = email_address.split('@', 1)
1178
1178
1179 # check if prefix is maybe a 'first_name.last_name' syntax
1179 # check if prefix is maybe a 'first_name.last_name' syntax
1180 _dot_split = prefix.rsplit('.', 1)
1180 _dot_split = prefix.rsplit('.', 1)
1181 if len(_dot_split) == 2 and _dot_split[1]:
1181 if len(_dot_split) == 2 and _dot_split[1]:
1182 initials = [_dot_split[0][0], _dot_split[1][0]]
1182 initials = [_dot_split[0][0], _dot_split[1][0]]
1183 else:
1183 else:
1184 initials = [prefix[0], server[0]]
1184 initials = [prefix[0], server[0]]
1185
1185
1186 # then try to replace either first_name or last_name
1186 # then try to replace either first_name or last_name
1187 fn_letter = (first_name or " ")[0].strip()
1187 fn_letter = (first_name or " ")[0].strip()
1188 ln_letter = (last_name.split(' ', 1)[-1] or " ")[0].strip()
1188 ln_letter = (last_name.split(' ', 1)[-1] or " ")[0].strip()
1189
1189
1190 if fn_letter:
1190 if fn_letter:
1191 initials[0] = fn_letter
1191 initials[0] = fn_letter
1192
1192
1193 if ln_letter:
1193 if ln_letter:
1194 initials[1] = ln_letter
1194 initials[1] = ln_letter
1195
1195
1196 return ''.join(initials).upper()
1196 return ''.join(initials).upper()
1197
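    # Illustrative results (assumptions, following the algorithm above):
    #   email 'john.doe@example.com', no names   -> 'JD'
    #   email 'john@example.com',     no names   -> 'JE' (user + server letter)
    #   first 'Guido', last 'Von Rossum'         -> 'GR' (last part of last name)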
1197
1198 def get_img_data_by_type(self, font_family, img_type):
1198 def get_img_data_by_type(self, font_family, img_type):
1199 default_user = """
1199 default_user = """
1200 <svg xmlns="http://www.w3.org/2000/svg"
1200 <svg xmlns="http://www.w3.org/2000/svg"
1201 version="1.1" x="0px" y="0px" width="{size}" height="{size}"
1201 version="1.1" x="0px" y="0px" width="{size}" height="{size}"
1202 viewBox="-15 -10 439.165 429.164"
1202 viewBox="-15 -10 439.165 429.164"
1203
1203
1204 xml:space="preserve"
1204 xml:space="preserve"
1205 style="background:{background};" >
1205 style="background:{background};" >
1206
1206
1207 <path d="M204.583,216.671c50.664,0,91.74-48.075,
1207 <path d="M204.583,216.671c50.664,0,91.74-48.075,
1208 91.74-107.378c0-82.237-41.074-107.377-91.74-107.377
1208 91.74-107.378c0-82.237-41.074-107.377-91.74-107.377
1209 c-50.668,0-91.74,25.14-91.74,107.377C112.844,
1209 c-50.668,0-91.74,25.14-91.74,107.377C112.844,
1210 168.596,153.916,216.671,
1210 168.596,153.916,216.671,
1211 204.583,216.671z" fill="{text_color}"/>
1211 204.583,216.671z" fill="{text_color}"/>
1212 <path d="M407.164,374.717L360.88,
1212 <path d="M407.164,374.717L360.88,
1213 270.454c-2.117-4.771-5.836-8.728-10.465-11.138l-71.83-37.392
1213 270.454c-2.117-4.771-5.836-8.728-10.465-11.138l-71.83-37.392
1214 c-1.584-0.823-3.502-0.663-4.926,0.415c-20.316,
1214 c-1.584-0.823-3.502-0.663-4.926,0.415c-20.316,
1215 15.366-44.203,23.488-69.076,23.488c-24.877,
1215 15.366-44.203,23.488-69.076,23.488c-24.877,
1216 0-48.762-8.122-69.078-23.488
1216 0-48.762-8.122-69.078-23.488
1217 c-1.428-1.078-3.346-1.238-4.93-0.415L58.75,
1217 c-1.428-1.078-3.346-1.238-4.93-0.415L58.75,
1218 259.316c-4.631,2.41-8.346,6.365-10.465,11.138L2.001,374.717
1218 259.316c-4.631,2.41-8.346,6.365-10.465,11.138L2.001,374.717
1219 c-3.191,7.188-2.537,15.412,1.75,22.005c4.285,
1219 c-3.191,7.188-2.537,15.412,1.75,22.005c4.285,
1220 6.592,11.537,10.526,19.4,10.526h362.861c7.863,0,15.117-3.936,
1220 6.592,11.537,10.526,19.4,10.526h362.861c7.863,0,15.117-3.936,
1221 19.402-10.527 C409.699,390.129,
1221 19.402-10.527 C409.699,390.129,
1222 410.355,381.902,407.164,374.717z" fill="{text_color}"/>
1222 410.355,381.902,407.164,374.717z" fill="{text_color}"/>
1223 </svg>""".format(
1223 </svg>""".format(
1224 size=self.size,
1224 size=self.size,
1225 background='#979797', # @grey4
1225 background='#979797', # @grey4
1226 text_color=self.text_color,
1226 text_color=self.text_color,
1227 font_family=font_family)
1227 font_family=font_family)
1228
1228
1229 return {
1229 return {
1230 "default_user": default_user
1230 "default_user": default_user
1231 }[img_type]
1231 }[img_type]
1232
1232
1233 def get_img_data(self, svg_type=None):
1233 def get_img_data(self, svg_type=None):
1234 """
1234 """
1235 Generates the SVG markup for the image
1235 Generates the SVG markup for the image
1236 """
1236 """
1237
1237
1238 font_family = ','.join([
1238 font_family = ','.join([
1239 'proximanovaregular',
1239 'proximanovaregular',
1240 'Proxima Nova Regular',
1240 'Proxima Nova Regular',
1241 'Proxima Nova',
1241 'Proxima Nova',
1242 'Arial',
1242 'Arial',
1243 'Lucida Grande',
1243 'Lucida Grande',
1244 'sans-serif'
1244 'sans-serif'
1245 ])
1245 ])
1246 if svg_type:
1246 if svg_type:
1247 return self.get_img_data_by_type(font_family, svg_type)
1247 return self.get_img_data_by_type(font_family, svg_type)
1248
1248
1249 initials = self.get_initials()
1249 initials = self.get_initials()
1250 img_data = """
1250 img_data = """
1251 <svg xmlns="http://www.w3.org/2000/svg" pointer-events="none"
1251 <svg xmlns="http://www.w3.org/2000/svg" pointer-events="none"
1252 width="{size}" height="{size}"
1252 width="{size}" height="{size}"
1253 style="width: 100%; height: 100%; background-color: {background}"
1253 style="width: 100%; height: 100%; background-color: {background}"
1254 viewBox="0 0 {size} {size}">
1254 viewBox="0 0 {size} {size}">
1255 <text text-anchor="middle" y="50%" x="50%" dy="0.35em"
1255 <text text-anchor="middle" y="50%" x="50%" dy="0.35em"
1256 pointer-events="auto" fill="{text_color}"
1256 pointer-events="auto" fill="{text_color}"
1257 font-family="{font_family}"
1257 font-family="{font_family}"
1258 style="font-weight: 400; font-size: {f_size}px;">{text}
1258 style="font-weight: 400; font-size: {f_size}px;">{text}
1259 </text>
1259 </text>
1260 </svg>""".format(
1260 </svg>""".format(
1261 size=self.size,
1261 size=self.size,
1262 f_size=self.size/1.85, # scale the text inside the box nicely
1262 f_size=self.size/1.85, # scale the text inside the box nicely
1263 background=self.background,
1263 background=self.background,
1264 text_color=self.text_color,
1264 text_color=self.text_color,
1265 text=initials.upper(),
1265 text=initials.upper(),
1266 font_family=font_family)
1266 font_family=font_family)
1267
1267
1268 return img_data
1268 return img_data
1269
1269
1270 def generate_svg(self, svg_type=None):
1270 def generate_svg(self, svg_type=None):
1271 img_data = self.get_img_data(svg_type)
1271 img_data = self.get_img_data(svg_type)
1272 return "data:image/svg+xml;base64,%s" % img_data.encode('base64')
1272 return "data:image/svg+xml;base64,%s" % img_data.encode('base64')
1273
1273
1274
1274
1275 def initials_gravatar(email_address, first_name, last_name, size=30):
1275 def initials_gravatar(email_address, first_name, last_name, size=30):
1276 svg_type = None
1276 svg_type = None
1277 if email_address == User.DEFAULT_USER_EMAIL:
1277 if email_address == User.DEFAULT_USER_EMAIL:
1278 svg_type = 'default_user'
1278 svg_type = 'default_user'
1279 klass = InitialsGravatar(email_address, first_name, last_name, size)
1279 klass = InitialsGravatar(email_address, first_name, last_name, size)
1280 return klass.generate_svg(svg_type=svg_type)
1280 return klass.generate_svg(svg_type=svg_type)
1281
1281
1282
1282
1283 def gravatar_url(email_address, size=30, request=None):
1283 def gravatar_url(email_address, size=30, request=None):
1284 request = get_current_request()
1284 request = get_current_request()
1285 _use_gravatar = request.call_context.visual.use_gravatar
1285 _use_gravatar = request.call_context.visual.use_gravatar
1286 _gravatar_url = request.call_context.visual.gravatar_url
1286 _gravatar_url = request.call_context.visual.gravatar_url
1287
1287
1288 _gravatar_url = _gravatar_url or User.DEFAULT_GRAVATAR_URL
1288 _gravatar_url = _gravatar_url or User.DEFAULT_GRAVATAR_URL
1289
1289
1290 email_address = email_address or User.DEFAULT_USER_EMAIL
1290 email_address = email_address or User.DEFAULT_USER_EMAIL
1291 if isinstance(email_address, unicode):
1291 if isinstance(email_address, unicode):
1292 # hashlib crashes on unicode items
1292 # hashlib crashes on unicode items
1293 email_address = safe_str(email_address)
1293 email_address = safe_str(email_address)
1294
1294
1295 # empty email or default user
1295 # empty email or default user
1296 if not email_address or email_address == User.DEFAULT_USER_EMAIL:
1296 if not email_address or email_address == User.DEFAULT_USER_EMAIL:
1297 return initials_gravatar(User.DEFAULT_USER_EMAIL, '', '', size=size)
1297 return initials_gravatar(User.DEFAULT_USER_EMAIL, '', '', size=size)
1298
1298
1299 if _use_gravatar:
1299 if _use_gravatar:
1300 # TODO: Disuse pyramid thread locals. Think about another solution to
1300 # TODO: Disuse pyramid thread locals. Think about another solution to
1301 # get the host and schema here.
1301 # get the host and schema here.
1302 request = get_current_request()
1302 request = get_current_request()
1303 tmpl = safe_str(_gravatar_url)
1303 tmpl = safe_str(_gravatar_url)
1304 tmpl = tmpl.replace('{email}', email_address)\
1304 tmpl = tmpl.replace('{email}', email_address)\
1305 .replace('{md5email}', md5_safe(email_address.lower())) \
1305 .replace('{md5email}', md5_safe(email_address.lower())) \
1306 .replace('{netloc}', request.host)\
1306 .replace('{netloc}', request.host)\
1307 .replace('{scheme}', request.scheme)\
1307 .replace('{scheme}', request.scheme)\
1308 .replace('{size}', safe_str(size))
1308 .replace('{size}', safe_str(size))
1309 return tmpl
1309 return tmpl
1310 else:
1310 else:
1311 return initials_gravatar(email_address, '', '', size=size)
1311 return initials_gravatar(email_address, '', '', size=size)
1312
1312
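# Illustrative sketch of the template expansion above (the template string is
# an assumption, not necessarily User.DEFAULT_GRAVATAR_URL): given
#   _gravatar_url = 'https://secure.gravatar.com/avatar/{md5email}?s={size}'
# a call like gravatar_url('marcin@example.com', size=30) would yield
#   'https://secure.gravatar.com/avatar/<md5 of the lowercased email>?s=30'
# while an empty or default email falls back to the generated initials SVG.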
1313
1313
1314 class Page(_Page):
1314 class Page(_Page):
1315 """
1315 """
1316 Custom pager to match rendering style with paginator
1316 Custom pager to match rendering style with paginator
1317 """
1317 """
1318
1318
1319 def _get_pos(self, cur_page, max_page, items):
1319 def _get_pos(self, cur_page, max_page, items):
1320 edge = (items / 2) + 1
1320 edge = (items / 2) + 1
1321 if (cur_page <= edge):
1321 if (cur_page <= edge):
1322 radius = max(items / 2, items - cur_page)
1322 radius = max(items / 2, items - cur_page)
1323 elif (max_page - cur_page) < edge:
1323 elif (max_page - cur_page) < edge:
1324 radius = (items - 1) - (max_page - cur_page)
1324 radius = (items - 1) - (max_page - cur_page)
1325 else:
1325 else:
1326 radius = items / 2
1326 radius = items / 2
1327
1327
1328 left = max(1, (cur_page - (radius)))
1328 left = max(1, (cur_page - (radius)))
1329 right = min(max_page, cur_page + (radius))
1329 right = min(max_page, cur_page + (radius))
1330 return left, cur_page, right
1330 return left, cur_page, right
1331
1331
1332 def _range(self, regexp_match):
1332 def _range(self, regexp_match):
1333 """
1333 """
1334 Return range of linked pages (e.g. '1 2 [3] 4 5 6 7 8').
1334 Return range of linked pages (e.g. '1 2 [3] 4 5 6 7 8').
1335
1335
1336 Arguments:
1336 Arguments:
1337
1337
1338 regexp_match
1338 regexp_match
1339 A "re" (regular expressions) match object containing the
1339 A "re" (regular expressions) match object containing the
1340 radius of linked pages around the current page in
1340 radius of linked pages around the current page in
1341 regexp_match.group(1) as a string
1341 regexp_match.group(1) as a string
1342
1342
1343 This function is supposed to be called as a callable in
1343 This function is supposed to be called as a callable in
1344 re.sub.
1344 re.sub.
1345
1345
1346 """
1346 """
1347 radius = int(regexp_match.group(1))
1347 radius = int(regexp_match.group(1))
1348
1348
1349 # Compute the first and last page number within the radius
1349 # Compute the first and last page number within the radius
1350 # e.g. '1 .. 5 6 [7] 8 9 .. 12'
1350 # e.g. '1 .. 5 6 [7] 8 9 .. 12'
1351 # -> leftmost_page = 5
1351 # -> leftmost_page = 5
1352 # -> rightmost_page = 9
1352 # -> rightmost_page = 9
1353 leftmost_page, _cur, rightmost_page = self._get_pos(self.page,
1353 leftmost_page, _cur, rightmost_page = self._get_pos(self.page,
1354 self.last_page,
1354 self.last_page,
1355 (radius * 2) + 1)
1355 (radius * 2) + 1)
1356 nav_items = []
1356 nav_items = []
1357
1357
1358 # Create a link to the first page (unless we are on the first page
1358 # Create a link to the first page (unless we are on the first page
1359 # or there would be no need to insert '..' spacers)
1359 # or there would be no need to insert '..' spacers)
1360 if self.page != self.first_page and self.first_page < leftmost_page:
1360 if self.page != self.first_page and self.first_page < leftmost_page:
1361 nav_items.append(self._pagerlink(self.first_page, self.first_page))
1361 nav_items.append(self._pagerlink(self.first_page, self.first_page))
1362
1362
1363 # Insert dots if there are pages between the first page
1363 # Insert dots if there are pages between the first page
1364 # and the currently displayed page range
1364 # and the currently displayed page range
1365 if leftmost_page - self.first_page > 1:
1365 if leftmost_page - self.first_page > 1:
1366 # Wrap in a SPAN tag if nolink_attr is set
1366 # Wrap in a SPAN tag if nolink_attr is set
1367 text = '..'
1367 text = '..'
1368 if self.dotdot_attr:
1368 if self.dotdot_attr:
1369 text = HTML.span(c=text, **self.dotdot_attr)
1369 text = HTML.span(c=text, **self.dotdot_attr)
1370 nav_items.append(text)
1370 nav_items.append(text)
1371
1371
1372 for thispage in xrange(leftmost_page, rightmost_page + 1):
1372 for thispage in xrange(leftmost_page, rightmost_page + 1):
1373 # Highlight the current page number and do not use a link
1373 # Highlight the current page number and do not use a link
1374 if thispage == self.page:
1374 if thispage == self.page:
1375 text = '%s' % (thispage,)
1375 text = '%s' % (thispage,)
1376 # Wrap in a SPAN tag if nolink_attr is set
1376 # Wrap in a SPAN tag if nolink_attr is set
1377 if self.curpage_attr:
1377 if self.curpage_attr:
1378 text = HTML.span(c=text, **self.curpage_attr)
1378 text = HTML.span(c=text, **self.curpage_attr)
1379 nav_items.append(text)
1379 nav_items.append(text)
1380 # Otherwise create just a link to that page
1380 # Otherwise create just a link to that page
1381 else:
1381 else:
1382 text = '%s' % (thispage,)
1382 text = '%s' % (thispage,)
1383 nav_items.append(self._pagerlink(thispage, text))
1383 nav_items.append(self._pagerlink(thispage, text))
1384
1384
1385 # Insert dots if there are pages between the displayed
1385 # Insert dots if there are pages between the displayed
1386 # page numbers and the end of the page range
1386 # page numbers and the end of the page range
1387 if self.last_page - rightmost_page > 1:
1387 if self.last_page - rightmost_page > 1:
1388 text = '..'
1388 text = '..'
1389 # Wrap in a SPAN tag if nolink_attr is set
1389 # Wrap in a SPAN tag if nolink_attr is set
1390 if self.dotdot_attr:
1390 if self.dotdot_attr:
1391 text = HTML.span(c=text, **self.dotdot_attr)
1391 text = HTML.span(c=text, **self.dotdot_attr)
1392 nav_items.append(text)
1392 nav_items.append(text)
1393
1393
1394 # Create a link to the very last page (unless we are on the last
1394 # Create a link to the very last page (unless we are on the last
1395 # page or there would be no need to insert '..' spacers)
1395 # page or there would be no need to insert '..' spacers)
1396 if self.page != self.last_page and rightmost_page < self.last_page:
1396 if self.page != self.last_page and rightmost_page < self.last_page:
1397 nav_items.append(self._pagerlink(self.last_page, self.last_page))
1397 nav_items.append(self._pagerlink(self.last_page, self.last_page))
1398
1398
1399 ## prerender links
1399 ## prerender links
1400 #_page_link = url.current()
1400 #_page_link = url.current()
1401 #nav_items.append(literal('<link rel="prerender" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
1401 #nav_items.append(literal('<link rel="prerender" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
1402 #nav_items.append(literal('<link rel="prefetch" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
1402 #nav_items.append(literal('<link rel="prefetch" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
1403 return self.separator.join(nav_items)
1403 return self.separator.join(nav_items)
1404
1404
1405 def pager(self, format='~2~', page_param='page', partial_param='partial',
1405 def pager(self, format='~2~', page_param='page', partial_param='partial',
1406 show_if_single_page=False, separator=' ', onclick=None,
1406 show_if_single_page=False, separator=' ', onclick=None,
1407 symbol_first='<<', symbol_last='>>',
1407 symbol_first='<<', symbol_last='>>',
1408 symbol_previous='<', symbol_next='>',
1408 symbol_previous='<', symbol_next='>',
1409 link_attr={'class': 'pager_link', 'rel': 'prerender'},
1409 link_attr={'class': 'pager_link', 'rel': 'prerender'},
1410 curpage_attr={'class': 'pager_curpage'},
1410 curpage_attr={'class': 'pager_curpage'},
1411 dotdot_attr={'class': 'pager_dotdot'}, **kwargs):
1411 dotdot_attr={'class': 'pager_dotdot'}, **kwargs):
1412
1412
1413 self.curpage_attr = curpage_attr
1413 self.curpage_attr = curpage_attr
1414 self.separator = separator
1414 self.separator = separator
1415 self.pager_kwargs = kwargs
1415 self.pager_kwargs = kwargs
1416 self.page_param = page_param
1416 self.page_param = page_param
1417 self.partial_param = partial_param
1417 self.partial_param = partial_param
1418 self.onclick = onclick
1418 self.onclick = onclick
1419 self.link_attr = link_attr
1419 self.link_attr = link_attr
1420 self.dotdot_attr = dotdot_attr
1420 self.dotdot_attr = dotdot_attr
1421
1421
1422 # Don't show navigator if there is no more than one page
1422 # Don't show navigator if there is no more than one page
1423 if self.page_count == 0 or (self.page_count == 1 and not show_if_single_page):
1423 if self.page_count == 0 or (self.page_count == 1 and not show_if_single_page):
1424 return ''
1424 return ''
1425
1425
1426 from string import Template
1426 from string import Template
1427 # Replace ~...~ in token format by range of pages
1427 # Replace ~...~ in token format by range of pages
1428 result = re.sub(r'~(\d+)~', self._range, format)
1428 result = re.sub(r'~(\d+)~', self._range, format)
1429
1429
1430 # Interpolate '%' variables
1430 # Interpolate '%' variables
1431 result = Template(result).safe_substitute({
1431 result = Template(result).safe_substitute({
1432 'first_page': self.first_page,
1432 'first_page': self.first_page,
1433 'last_page': self.last_page,
1433 'last_page': self.last_page,
1434 'page': self.page,
1434 'page': self.page,
1435 'page_count': self.page_count,
1435 'page_count': self.page_count,
1436 'items_per_page': self.items_per_page,
1436 'items_per_page': self.items_per_page,
1437 'first_item': self.first_item,
1437 'first_item': self.first_item,
1438 'last_item': self.last_item,
1438 'last_item': self.last_item,
1439 'item_count': self.item_count,
1439 'item_count': self.item_count,
1440 'link_first': self.page > self.first_page and \
1440 'link_first': self.page > self.first_page and \
1441 self._pagerlink(self.first_page, symbol_first) or '',
1441 self._pagerlink(self.first_page, symbol_first) or '',
1442 'link_last': self.page < self.last_page and \
1442 'link_last': self.page < self.last_page and \
1443 self._pagerlink(self.last_page, symbol_last) or '',
1443 self._pagerlink(self.last_page, symbol_last) or '',
1444 'link_previous': self.previous_page and \
1444 'link_previous': self.previous_page and \
1445 self._pagerlink(self.previous_page, symbol_previous) \
1445 self._pagerlink(self.previous_page, symbol_previous) \
1446 or HTML.span(symbol_previous, class_="pg-previous disabled"),
1446 or HTML.span(symbol_previous, class_="pg-previous disabled"),
1447 'link_next': self.next_page and \
1447 'link_next': self.next_page and \
1448 self._pagerlink(self.next_page, symbol_next) \
1448 self._pagerlink(self.next_page, symbol_next) \
1449 or HTML.span(symbol_next, class_="pg-next disabled")
1449 or HTML.span(symbol_next, class_="pg-next disabled")
1450 })
1450 })
1451
1451
1452 return literal(result)
1452 return literal(result)
1453
1453
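# Usage sketch (assumption, mirroring how the template tokens above expand):
# '~N~' is replaced by a page-number range with radius N, and '$'-variables
# come from the safe_substitute() mapping, e.g.
#
#   page_obj.pager('$link_previous ~2~ $link_next')
#   # -> '<' 1 .. 4 5 [6] 7 8 .. 12 '>' style navigation markup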
1454
1454
1455 #==============================================================================
1455 #==============================================================================
1456 # REPO PAGER, PAGER FOR REPOSITORY
1456 # REPO PAGER, PAGER FOR REPOSITORY
1457 #==============================================================================
1457 #==============================================================================
1458 class RepoPage(Page):
1458 class RepoPage(Page):
1459
1459
1460 def __init__(self, collection, page=1, items_per_page=20,
1460 def __init__(self, collection, page=1, items_per_page=20,
1461 item_count=None, url=None, **kwargs):
1461 item_count=None, url=None, **kwargs):
1462
1462
1463 """Create a "RepoPage" instance: a special pager for paging
1463 """Create a "RepoPage" instance: a special pager for paging
1464 repository items
1464 repository items
1465 """
1465 """
1466 self._url_generator = url
1466 self._url_generator = url
1467
1467
1468 # Save the kwargs class-wide so they can be used in the pager() method
1468 # Save the kwargs class-wide so they can be used in the pager() method
1469 self.kwargs = kwargs
1469 self.kwargs = kwargs
1470
1470
1471 # Save a reference to the collection
1471 # Save a reference to the collection
1472 self.original_collection = collection
1472 self.original_collection = collection
1473
1473
1474 self.collection = collection
1474 self.collection = collection
1475
1475
1476 # The self.page is the number of the current page.
1476 # The self.page is the number of the current page.
1477 # The first page has the number 1!
1477 # The first page has the number 1!
1478 try:
1478 try:
1479 self.page = int(page) # make it int() if we get it as a string
1479 self.page = int(page) # make it int() if we get it as a string
1480 except (ValueError, TypeError):
1480 except (ValueError, TypeError):
1481 self.page = 1
1481 self.page = 1
1482
1482
1483 self.items_per_page = items_per_page
1483 self.items_per_page = items_per_page
1484
1484
1485 # Unless the user tells us how many items the collection has,
1485 # Unless the user tells us how many items the collection has,
1486 # we calculate that ourselves.
1486 # we calculate that ourselves.
1487 if item_count is not None:
1487 if item_count is not None:
1488 self.item_count = item_count
1488 self.item_count = item_count
1489 else:
1489 else:
1490 self.item_count = len(self.collection)
1490 self.item_count = len(self.collection)
1491
1491
1492 # Compute the number of the first and last available page
1492 # Compute the number of the first and last available page
1493 if self.item_count > 0:
1493 if self.item_count > 0:
1494 self.first_page = 1
1494 self.first_page = 1
1495 self.page_count = int(math.ceil(float(self.item_count) /
1495 self.page_count = int(math.ceil(float(self.item_count) /
1496 self.items_per_page))
1496 self.items_per_page))
1497 self.last_page = self.first_page + self.page_count - 1
1497 self.last_page = self.first_page + self.page_count - 1
1498
1498
1499 # Make sure that the requested page number is in the range of
1499 # Make sure that the requested page number is in the range of
1500 # valid pages
1500 # valid pages
1501 if self.page > self.last_page:
1501 if self.page > self.last_page:
1502 self.page = self.last_page
1502 self.page = self.last_page
1503 elif self.page < self.first_page:
1503 elif self.page < self.first_page:
1504 self.page = self.first_page
1504 self.page = self.first_page
1505
1505
1506 # Note: the number of items on this page can be less than
1506 # Note: the number of items on this page can be less than
1507 # items_per_page if the last page is not full
1507 # items_per_page if the last page is not full
1508 self.first_item = max(0, (self.item_count) - (self.page *
1508 self.first_item = max(0, (self.item_count) - (self.page *
1509 items_per_page))
1509 items_per_page))
1510 self.last_item = ((self.item_count - 1) - items_per_page *
1510 self.last_item = ((self.item_count - 1) - items_per_page *
1511 (self.page - 1))
1511 (self.page - 1))
1512
1512
1513 self.items = list(self.collection[self.first_item:self.last_item + 1])
1513 self.items = list(self.collection[self.first_item:self.last_item + 1])
1514
1514
1515 # Links to previous and next page
1515 # Links to previous and next page
1516 if self.page > self.first_page:
1516 if self.page > self.first_page:
1517 self.previous_page = self.page - 1
1517 self.previous_page = self.page - 1
1518 else:
1518 else:
1519 self.previous_page = None
1519 self.previous_page = None
1520
1520
1521 if self.page < self.last_page:
1521 if self.page < self.last_page:
1522 self.next_page = self.page + 1
1522 self.next_page = self.page + 1
1523 else:
1523 else:
1524 self.next_page = None
1524 self.next_page = None
1525
1525
1526 # No items available
1526 # No items available
1527 else:
1527 else:
1528 self.first_page = None
1528 self.first_page = None
1529 self.page_count = 0
1529 self.page_count = 0
1530 self.last_page = None
1530 self.last_page = None
1531 self.first_item = None
1531 self.first_item = None
1532 self.last_item = None
1532 self.last_item = None
1533 self.previous_page = None
1533 self.previous_page = None
1534 self.next_page = None
1534 self.next_page = None
1535 self.items = []
1535 self.items = []
1536
1536
1537 # This is a subclass of the 'list' type. Initialise the list now.
1537 # This is a subclass of the 'list' type. Initialise the list now.
1538 list.__init__(self, reversed(self.items))
1538 list.__init__(self, reversed(self.items))
1539
1539
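# Worked example of the slicing above (illustrative numbers): with
# item_count=45 and items_per_page=20, page 1 gets
#   first_item = max(0, 45 - 1 * 20) = 25
#   last_item  = (45 - 1) - 20 * (1 - 1) = 44
# i.e. collection[25:45] reversed, so the newest 20 entries come first.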
1540
1540
1541 def breadcrumb_repo_link(repo):
1541 def breadcrumb_repo_link(repo):
1542 """
1542 """
1543 Makes a breadcrumbs path link to repo
1543 Makes a breadcrumbs path link to repo
1544
1544
1545 ex::
1545 ex::
1546 group >> subgroup >> repo
1546 group >> subgroup >> repo
1547
1547
1548 :param repo: a Repository instance
1548 :param repo: a Repository instance
1549 """
1549 """
1550
1550
1551 path = [
1551 path = [
1552 link_to(group.name, route_path('repo_group_home', repo_group_name=group.group_name))
1552 link_to(group.name, route_path('repo_group_home', repo_group_name=group.group_name))
1553 for group in repo.groups_with_parents
1553 for group in repo.groups_with_parents
1554 ] + [
1554 ] + [
1555 link_to(repo.just_name, route_path('repo_summary', repo_name=repo.repo_name))
1555 link_to(repo.just_name, route_path('repo_summary', repo_name=repo.repo_name))
1556 ]
1556 ]
1557
1557
1558 return literal(' &raquo; '.join(path))
1558 return literal(' &raquo; '.join(path))
1559
1559
1560
1560
1561 def format_byte_size_binary(file_size):
1561 def format_byte_size_binary(file_size):
1562 """
1562 """
1563 Formats file/folder sizes to a standard, using binary (1024-based) units.
1563 Formats file/folder sizes to a standard, using binary (1024-based) units.
1564 """
1564 """
1565 if file_size is None:
1565 if file_size is None:
1566 file_size = 0
1566 file_size = 0
1567
1567
1568 formatted_size = format_byte_size(file_size, binary=True)
1568 formatted_size = format_byte_size(file_size, binary=True)
1569 return formatted_size
1569 return formatted_size
1570
1570
1571
1571
1572 def urlify_text(text_, safe=True):
1572 def urlify_text(text_, safe=True):
1573 """
1573 """
1574 Extract URLs from text and make HTML links out of them
1574 Extract URLs from text and make HTML links out of them
1575
1575
1576 :param text_:
1576 :param text_:
1577 """
1577 """
1578
1578
1579 url_pat = re.compile(r'''(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@#.&+]'''
1579 url_pat = re.compile(r'''(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@#.&+]'''
1580 '''|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)''')
1580 '''|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)''')
1581
1581
1582 def url_func(match_obj):
1582 def url_func(match_obj):
1583 url_full = match_obj.groups()[0]
1583 url_full = match_obj.groups()[0]
1584 return '<a href="%(url)s">%(url)s</a>' % ({'url': url_full})
1584 return '<a href="%(url)s">%(url)s</a>' % ({'url': url_full})
1585 _newtext = url_pat.sub(url_func, text_)
1585 _newtext = url_pat.sub(url_func, text_)
1586 if safe:
1586 if safe:
1587 return literal(_newtext)
1587 return literal(_newtext)
1588 return _newtext
1588 return _newtext
1589
1589
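# Illustrative example (the input text is an assumption):
#   urlify_text('docs at https://example.com/guide')
#   # -> literal('docs at <a href="https://example.com/guide">https://example.com/guide</a>')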
1590
1590
1591 def urlify_commits(text_, repository):
1591 def urlify_commits(text_, repository):
1592 """
1592 """
1593 Extract commit ids from text and make links from them
1593 Extract commit ids from text and make links from them
1594
1594
1595 :param text_:
1595 :param text_:
1596 :param repository: repo name to build the URL with
1596 :param repository: repo name to build the URL with
1597 """
1597 """
1598
1598
1599 URL_PAT = re.compile(r'(^|\s)([0-9a-fA-F]{12,40})($|\s)')
1599 URL_PAT = re.compile(r'(^|\s)([0-9a-fA-F]{12,40})($|\s)')
1600
1600
1601 def url_func(match_obj):
1601 def url_func(match_obj):
1602 commit_id = match_obj.groups()[1]
1602 commit_id = match_obj.groups()[1]
1603 pref = match_obj.groups()[0]
1603 pref = match_obj.groups()[0]
1604 suf = match_obj.groups()[2]
1604 suf = match_obj.groups()[2]
1605
1605
1606 tmpl = (
1606 tmpl = (
1607 '%(pref)s<a class="%(cls)s" href="%(url)s">'
1607 '%(pref)s<a class="%(cls)s" href="%(url)s">'
1608 '%(commit_id)s</a>%(suf)s'
1608 '%(commit_id)s</a>%(suf)s'
1609 )
1609 )
1610 return tmpl % {
1610 return tmpl % {
1611 'pref': pref,
1611 'pref': pref,
1612 'cls': 'revision-link',
1612 'cls': 'revision-link',
1613 'url': route_url('repo_commit', repo_name=repository,
1613 'url': route_url('repo_commit', repo_name=repository,
1614 commit_id=commit_id),
1614 commit_id=commit_id),
1615 'commit_id': commit_id,
1615 'commit_id': commit_id,
1616 'suf': suf
1616 'suf': suf
1617 }
1617 }
1618
1618
1619 newtext = URL_PAT.sub(url_func, text_)
1619 newtext = URL_PAT.sub(url_func, text_)
1620
1620
1621 return newtext
1621 return newtext
1622
1622
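# Illustrative behaviour (the commit id below is made up): a 12-40 character
# hex string surrounded by whitespace, e.g. 'fixed in 0a1b2c3d4e5f', becomes
#   ... <a class="revision-link" href="<route to repo_commit>">0a1b2c3d4e5f</a> ...
# where the href is whatever route_url('repo_commit', ...) resolves to.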
1623
1623
1624 def _process_url_func(match_obj, repo_name, uid, entry,
1624 def _process_url_func(match_obj, repo_name, uid, entry,
1625 return_raw_data=False, link_format='html'):
1625 return_raw_data=False, link_format='html'):
1626 pref = ''
1626 pref = ''
1627 if match_obj.group().startswith(' '):
1627 if match_obj.group().startswith(' '):
1628 pref = ' '
1628 pref = ' '
1629
1629
1630 issue_id = ''.join(match_obj.groups())
1630 issue_id = ''.join(match_obj.groups())
1631
1631
1632 if link_format == 'html':
1632 if link_format == 'html':
1633 tmpl = (
1633 tmpl = (
1634 '%(pref)s<a class="%(cls)s" href="%(url)s">'
1634 '%(pref)s<a class="%(cls)s" href="%(url)s">'
1635 '%(issue-prefix)s%(id-repr)s'
1635 '%(issue-prefix)s%(id-repr)s'
1636 '</a>')
1636 '</a>')
1637 elif link_format == 'rst':
1637 elif link_format == 'rst':
1638 tmpl = '`%(issue-prefix)s%(id-repr)s <%(url)s>`_'
1638 tmpl = '`%(issue-prefix)s%(id-repr)s <%(url)s>`_'
1639 elif link_format == 'markdown':
1639 elif link_format == 'markdown':
1640 tmpl = '[%(issue-prefix)s%(id-repr)s](%(url)s)'
1640 tmpl = '[%(issue-prefix)s%(id-repr)s](%(url)s)'
1641 else:
1641 else:
1642 raise ValueError('Bad link_format:{}'.format(link_format))
1642 raise ValueError('Bad link_format:{}'.format(link_format))
1643
1643
1644 (repo_name_cleaned,
1644 (repo_name_cleaned,
1645 parent_group_name) = RepoGroupModel().\
1645 parent_group_name) = RepoGroupModel().\
1646 _get_group_name_and_parent(repo_name)
1646 _get_group_name_and_parent(repo_name)
1647
1647
1648 # variables replacement
1648 # variables replacement
1649 named_vars = {
1649 named_vars = {
1650 'id': issue_id,
1650 'id': issue_id,
1651 'repo': repo_name,
1651 'repo': repo_name,
1652 'repo_name': repo_name_cleaned,
1652 'repo_name': repo_name_cleaned,
1653 'group_name': parent_group_name
1653 'group_name': parent_group_name
1654 }
1654 }
1655 # named regex variables
1655 # named regex variables
1656 named_vars.update(match_obj.groupdict())
1656 named_vars.update(match_obj.groupdict())
1657 _url = string.Template(entry['url']).safe_substitute(**named_vars)
1657 _url = string.Template(entry['url']).safe_substitute(**named_vars)
1658
1658
1659 data = {
1659 data = {
1660 'pref': pref,
1660 'pref': pref,
1661 'cls': 'issue-tracker-link',
1661 'cls': 'issue-tracker-link',
1662 'url': _url,
1662 'url': _url,
1663 'id-repr': issue_id,
1663 'id-repr': issue_id,
1664 'issue-prefix': entry['pref'],
1664 'issue-prefix': entry['pref'],
1665 'serv': entry['url'],
1665 'serv': entry['url'],
1666 }
1666 }
1667 if return_raw_data:
1667 if return_raw_data:
1668 return {
1668 return {
1669 'id': issue_id,
1669 'id': issue_id,
1670 'url': _url
1670 'url': _url
1671 }
1671 }
1672 return tmpl % data
1672 return tmpl % data
1673
1673
1674
1674
1675 def process_patterns(text_string, repo_name, link_format='html'):
1675 def process_patterns(text_string, repo_name, link_format='html'):
1676 allowed_formats = ['html', 'rst', 'markdown']
1676 allowed_formats = ['html', 'rst', 'markdown']
1677 if link_format not in allowed_formats:
1677 if link_format not in allowed_formats:
1678 raise ValueError('Link format can be only one of:{} got {}'.format(
1678 raise ValueError('Link format can be only one of:{} got {}'.format(
1679 allowed_formats, link_format))
1679 allowed_formats, link_format))
1680
1680
1681 repo = None
1681 repo = None
1682 if repo_name:
1682 if repo_name:
1683 # Retrieve the repo to avoid an invalid repo_name exploding in
1683 # Retrieve the repo to avoid an invalid repo_name exploding in
1684 # IssueTrackerSettingsModel, while still passing the invalid name further down
1684 # IssueTrackerSettingsModel, while still passing the invalid name further down
1685 repo = Repository.get_by_repo_name(repo_name, cache=True)
1685 repo = Repository.get_by_repo_name(repo_name, cache=True)
1686
1686
1687 settings_model = IssueTrackerSettingsModel(repo=repo)
1687 settings_model = IssueTrackerSettingsModel(repo=repo)
1688 active_entries = settings_model.get_settings(cache=True)
1688 active_entries = settings_model.get_settings(cache=True)
1689
1689
1690 issues_data = []
1690 issues_data = []
1691 newtext = text_string
1691 newtext = text_string
1692
1692
1693 for uid, entry in active_entries.items():
1693 for uid, entry in active_entries.items():
1694 log.debug('found issue tracker entry with uid %s' % (uid,))
1694 log.debug('found issue tracker entry with uid %s' % (uid,))
1695
1695
1696 if not (entry['pat'] and entry['url']):
1696 if not (entry['pat'] and entry['url']):
1697 log.debug('skipping due to missing data')
1697 log.debug('skipping due to missing data')
1698 continue
1698 continue
1699
1699
1700 log.debug('issue tracker entry: uid: `%s` PAT:%s URL:%s PREFIX:%s'
1700 log.debug('issue tracker entry: uid: `%s` PAT:%s URL:%s PREFIX:%s'
1701 % (uid, entry['pat'], entry['url'], entry['pref']))
1701 % (uid, entry['pat'], entry['url'], entry['pref']))
1702
1702
1703 try:
1703 try:
1704 pattern = re.compile(r'%s' % entry['pat'])
1704 pattern = re.compile(r'%s' % entry['pat'])
1705 except re.error:
1705 except re.error:
1706 log.exception(
1706 log.exception(
1707 'issue tracker pattern: `%s` failed to compile',
1707 'issue tracker pattern: `%s` failed to compile',
1708 entry['pat'])
1708 entry['pat'])
1709 continue
1709 continue
1710
1710
1711 data_func = partial(
1711 data_func = partial(
1712 _process_url_func, repo_name=repo_name, entry=entry, uid=uid,
1712 _process_url_func, repo_name=repo_name, entry=entry, uid=uid,
1713 return_raw_data=True)
1713 return_raw_data=True)
1714
1714
1715 for match_obj in pattern.finditer(text_string):
1715 for match_obj in pattern.finditer(text_string):
1716 issues_data.append(data_func(match_obj))
1716 issues_data.append(data_func(match_obj))
1717
1717
1718 url_func = partial(
1718 url_func = partial(
1719 _process_url_func, repo_name=repo_name, entry=entry, uid=uid,
1719 _process_url_func, repo_name=repo_name, entry=entry, uid=uid,
1720 link_format=link_format)
1720 link_format=link_format)
1721
1721
1722 newtext = pattern.sub(url_func, newtext)
1722 newtext = pattern.sub(url_func, newtext)
1723 log.debug('processed prefix:uid `%s`' % (uid,))
1723 log.debug('processed prefix:uid `%s`' % (uid,))
1724
1724
1725 return newtext, issues_data
1725 return newtext, issues_data
1726
1726
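# Sketch of an issue-tracker entry as consumed above (the entry itself is a
# hypothetical example; real ones come from IssueTrackerSettingsModel):
#   entry = {'pat': r'#(\d+)',
#            'url': 'https://tracker.example.com/browse/${id}',
#            'pref': '#'}
# With that entry, '#123' in a message turns into a link to
# 'https://tracker.example.com/browse/123', and issues_data collects
# {'id': '123', 'url': 'https://tracker.example.com/browse/123'}.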
1727
1727
1728 def urlify_commit_message(commit_text, repository=None):
1728 def urlify_commit_message(commit_text, repository=None):
1729 """
1729 """
1730 Parses the given text message and makes proper links.
1730 Parses the given text message and makes proper links.
1731 Issues are linked to the given issue server, and the rest become commit links.
1731 Issues are linked to the given issue server, and the rest become commit links.
1732
1732
1733 :param commit_text:
1733 :param commit_text:
1734 :param repository:
1734 :param repository:
1735 """
1735 """
1736 def escaper(string):
1736 def escaper(string):
1737 return string.replace('<', '&lt;').replace('>', '&gt;')
1737 return string.replace('<', '&lt;').replace('>', '&gt;')
1738
1738
1739 newtext = escaper(commit_text)
1739 newtext = escaper(commit_text)
1740
1740
1741 # extract http/https links and make them real urls
1741 # extract http/https links and make them real urls
1742 newtext = urlify_text(newtext, safe=False)
1742 newtext = urlify_text(newtext, safe=False)
1743
1743
1744 # urlify commits - extract commit ids and make link out of them, if we have
1744 # urlify commits - extract commit ids and make link out of them, if we have
1745 # the scope of repository present.
1745 # the scope of repository present.
1746 if repository:
1746 if repository:
1747 newtext = urlify_commits(newtext, repository)
1747 newtext = urlify_commits(newtext, repository)
1748
1748
1749 # process issue tracker patterns
1749 # process issue tracker patterns
1750 newtext, issues = process_patterns(newtext, repository or '')
1750 newtext, issues = process_patterns(newtext, repository or '')
1751
1751
1752 return literal(newtext)
1752 return literal(newtext)
1753
1753
1754
1754
1755 def render_binary(repo_name, file_obj):
1755 def render_binary(repo_name, file_obj):
1756 """
1756 """
1757 Choose how to render a binary file
1757 Choose how to render a binary file
1758 """
1758 """
1759 filename = file_obj.name
1759 filename = file_obj.name
1760
1760
1761 # images
1761 # images
1762 for ext in ['*.png', '*.jpg', '*.ico', '*.gif']:
1762 for ext in ['*.png', '*.jpg', '*.ico', '*.gif']:
1763 if fnmatch.fnmatch(filename, pat=ext):
1763 if fnmatch.fnmatch(filename, pat=ext):
1764 alt = filename
1764 alt = filename
1765 src = route_path(
1765 src = route_path(
1766 'repo_file_raw', repo_name=repo_name,
1766 'repo_file_raw', repo_name=repo_name,
1767 commit_id=file_obj.commit.raw_id, f_path=file_obj.path)
1767 commit_id=file_obj.commit.raw_id, f_path=file_obj.path)
1768 return literal('<img class="rendered-binary" alt="{}" src="{}">'.format(alt, src))
1768 return literal('<img class="rendered-binary" alt="{}" src="{}">'.format(alt, src))
1769
1769
1770
1770
1771 def renderer_from_filename(filename, exclude=None):
1771 def renderer_from_filename(filename, exclude=None):
1772 """
1772 """
1773 Choose a renderer based on filename; this works only for text-based files.
1773 Choose a renderer based on filename; this works only for text-based files.
1774 """
1774 """
1775
1775
1776 # ipython
1776 # ipython
1777 for ext in ['*.ipynb']:
1777 for ext in ['*.ipynb']:
1778 if fnmatch.fnmatch(filename, pat=ext):
1778 if fnmatch.fnmatch(filename, pat=ext):
1779 return 'jupyter'
1779 return 'jupyter'
1780
1780
1781 is_markup = MarkupRenderer.renderer_from_filename(filename, exclude=exclude)
1781 is_markup = MarkupRenderer.renderer_from_filename(filename, exclude=exclude)
1782 if is_markup:
1782 if is_markup:
1783 return is_markup
1783 return is_markup
1784 return None
1784 return None
1785
1785
1786
1786
def render(source, renderer='rst', mentions=False, relative_urls=None,
           repo_name=None):

    def maybe_convert_relative_links(html_source):
        if relative_urls:
            return relative_links(html_source, relative_urls)
        return html_source

    if renderer == 'rst':
        if repo_name:
            # process patterns on comments if we pass in repo name
            source, issues = process_patterns(
                source, repo_name, link_format='rst')

        return literal(
            '<div class="rst-block">%s</div>' %
            maybe_convert_relative_links(
                MarkupRenderer.rst(source, mentions=mentions)))
    elif renderer == 'markdown':
        if repo_name:
            # process patterns on comments if we pass in repo name
            source, issues = process_patterns(
                source, repo_name, link_format='markdown')

        return literal(
            '<div class="markdown-block">%s</div>' %
            maybe_convert_relative_links(
                MarkupRenderer.markdown(source, flavored=True,
                                        mentions=mentions)))
    elif renderer == 'jupyter':
        return literal(
            '<div class="ipynb">%s</div>' %
            maybe_convert_relative_links(
                MarkupRenderer.jupyter(source)))

    # None means just show the file-source
    return None


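# Illustrative usage sketch (assumed call results, not part of the original
# file): a markdown source is wrapped in a markdown-block div, and an unknown
# renderer returns None so callers fall back to the plain file source:
#
#   >>> render('# Title', renderer='markdown')
#   literal(u'<div class="markdown-block"><h1>Title</h1>\n</div>')
#   >>> render('some text', renderer='unknown') is None
#   True
#
# The exact HTML emitted by MarkupRenderer.markdown may differ slightly.

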
def commit_status(repo, commit_id):
    return ChangesetStatusModel().get_status(repo, commit_id)


def commit_status_lbl(commit_status):
    return dict(ChangesetStatus.STATUSES).get(commit_status)


def commit_time(repo_name, commit_id):
    repo = Repository.get_by_repo_name(repo_name)
    commit = repo.get_commit(commit_id=commit_id)
    return commit.date


def get_permission_name(key):
    return dict(Permission.PERMS).get(key)


def journal_filter_help(request):
    _ = request.translate

    return _(
        'Example filter terms:\n' +
        ' repository:vcs\n' +
        ' username:marcin\n' +
        ' username:(NOT marcin)\n' +
        ' action:*push*\n' +
        ' ip:127.0.0.1\n' +
        ' date:20120101\n' +
        ' date:[20120101100000 TO 20120102]\n' +
        '\n' +
        'Generate wildcards using \'*\' character:\n' +
        ' "repository:vcs*" - search everything starting with \'vcs\'\n' +
        ' "repository:*vcs*" - search for repository containing \'vcs\'\n' +
        '\n' +
        'Optional AND / OR operators in queries\n' +
        ' "repository:vcs OR repository:test"\n' +
        ' "username:test AND repository:test*"\n'
    )


def search_filter_help(searcher, request):
    _ = request.translate

    terms = ''
    return _(
        'Example filter terms for `{searcher}` search:\n' +
        '{terms}\n' +
        'Generate wildcards using \'*\' character:\n' +
        ' "repo_name:vcs*" - search everything starting with \'vcs\'\n' +
        ' "repo_name:*vcs*" - search for repository containing \'vcs\'\n' +
        '\n' +
        'Optional AND / OR operators in queries\n' +
        ' "repo_name:vcs OR repo_name:test"\n' +
        ' "owner:test AND repo_name:test*"\n' +
        'More: {search_doc}'
    ).format(searcher=searcher.name,
             terms=terms, search_doc=searcher.query_lang_doc)


def not_mapped_error(repo_name):
    from rhodecode.translation import _
    flash(_('%s repository is not mapped to db perhaps'
            ' it was created or renamed from the filesystem'
            ' please run the application again'
            ' in order to rescan repositories') % repo_name, category='error')


def ip_range(ip_addr):
    from rhodecode.model.db import UserIpMap
    s, e = UserIpMap._get_ip_range(ip_addr)
    return '%s - %s' % (s, e)


def form(url, method='post', needs_csrf_token=True, **attrs):
    """Wrapper around webhelpers.tags.form to prevent CSRF attacks."""
    if method.lower() != 'get' and needs_csrf_token:
        raise Exception(
            'Forms to POST/PUT/DELETE endpoints should have (in general) a ' +
            'CSRF token. If the endpoint does not require such token you can ' +
            'explicitly set the parameter needs_csrf_token to false.')

    return wh_form(url, method=method, **attrs)


def secure_form(form_url, method="POST", multipart=False, **attrs):
    """Start a form tag that points the action to a URL. This
    form tag will also include the hidden field containing
    the auth token.

    The url options should be given either as a string, or as a
    ``url()`` function. The method for the form defaults to POST.

    Options:

    ``multipart``
        If set to True, the enctype is set to "multipart/form-data".
    ``method``
        The method to use when submitting the form, usually either
        "GET" or "POST". If "PUT", "DELETE", or another verb is used, a
        hidden input with name _method is added to simulate the verb
        over POST.

    """
    from webhelpers.pylonslib.secure_form import insecure_form

    if 'request' in attrs:
        session = attrs['request'].session
        del attrs['request']
    else:
        raise ValueError(
            'Calling this form requires request= to be passed as argument')

    form = insecure_form(form_url, method, multipart, **attrs)
    token = literal(
        '<input type="hidden" id="{}" name="{}" value="{}">'.format(
            csrf_token_key, csrf_token_key, get_csrf_token(session)))

    return literal("%s\n%s" % (form, token))


def dropdownmenu(name, selected, options, enable_filter=False, **attrs):
    select_html = select(name, selected, options, **attrs)
    select2 = """
    <script>
    $(document).ready(function() {
        $('#%s').select2({
            containerCssClass: 'drop-menu',
            dropdownCssClass: 'drop-menu-dropdown',
            dropdownAutoWidth: true%s
        });
    });
    </script>
    """
    filter_option = """,
        minimumResultsForSearch: -1
    """
    input_id = attrs.get('id') or name
    filter_enabled = "" if enable_filter else filter_option
    select_script = literal(select2 % (input_id, filter_enabled))

    return literal(select_html+select_script)


def get_visual_attr(tmpl_context_var, attr_name):
    """
    A safe way to get an attribute from the `visual` variable of the template context

    :param tmpl_context_var: instance of tmpl_context, usually present as `c`
    :param attr_name: name of the attribute we fetch from the c.visual
    """
    visual = getattr(tmpl_context_var, 'visual', None)
    if not visual:
        return
    else:
        return getattr(visual, attr_name, None)


def get_last_path_part(file_node):
    if not file_node.path:
        return u''

    path = safe_unicode(file_node.path.split('/')[-1])
    return u'../' + path


def route_url(*args, **kwargs):
    """
    Wrapper around Pyramid's `route_url` (fully qualified URL) function.
    """
    req = get_current_request()
    return req.route_url(*args, **kwargs)


def route_path(*args, **kwargs):
    """
    Wrapper around Pyramid's `route_path` function.
    """
    req = get_current_request()
    return req.route_path(*args, **kwargs)


def route_path_or_none(*args, **kwargs):
    try:
        return route_path(*args, **kwargs)
    except KeyError:
        return None


def current_route_path(request, **kw):
    new_args = request.GET.mixed()
    new_args.update(kw)
    return request.current_route_path(_query=new_args)


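# Illustrative sketch (assumed route name, not part of the original file):
#
#   >>> route_path('repo_summary', repo_name='group/repo1')
#   '/group/repo1'
#
# route_url() returns the same route fully qualified, e.g.
# 'https://server.com/group/repo1', depending on the current request.

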
def api_call_example(method, args):
    """
    Generates an API call example via curl.
    """
    args_json = json.dumps(OrderedDict([
        ('id', 1),
        ('auth_token', 'SECRET'),
        ('method', method),
        ('args', args)
    ]))
    return literal(
        "curl {api_url} -X POST -H 'content-type:text/plain' --data-binary '{data}'"
        "<br/><br/>SECRET can be found in <a href=\"{token_url}\">auth-tokens</a> page, "
        "and needs to be of `api calls` role."
        .format(
            api_url=route_url('apiv2'),
            token_url=route_url('my_account_auth_tokens'),
            data=args_json))


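# Illustrative output (hypothetical host and method, not part of the original
# file): api_call_example('get_repo', {'repoid': 'my-repo'}) renders roughly
#
#   curl https://code.example.com/_admin/api -X POST -H 'content-type:text/plain'
#     --data-binary '{"id": 1, "auth_token": "SECRET", "method": "get_repo", "args": {"repoid": "my-repo"}}'
#
# followed by the note that SECRET comes from the auth-tokens page and needs
# the `api calls` role.

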
def notification_description(notification, request):
    """
    Generate a human-readable notification description based on the notification type.
    """
    from rhodecode.model.notification import NotificationModel
    return NotificationModel().make_description(
        notification, translate=request.translate)


def go_import_header(request, db_repo=None):
    """
    Creates a go-import meta header used by the Go toolchain ("go get").
    """

    if not db_repo:
        return
    if 'go-get' not in request.GET:
        return

    clone_url = db_repo.clone_url()
    prefix = re.split(r'^https?:\/\/', clone_url)[-1]
    # we have a repo and the go-get flag, emit the meta tag
    return literal('<meta name="go-import" content="{} {} {}">'.format(
        prefix, db_repo.repo_type, clone_url))
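# Illustrative result (hypothetical clone URL, not part of the original file):
# for a Git repository cloned via https://code.example.com/group/repo1 and a
# request carrying ?go-get=1, the returned literal is:
#
#   <meta name="go-import" content="code.example.com/group/repo1 git https://code.example.com/group/repo1">
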
@@ -1,631 +1,644 @@
# -*- coding: utf-8 -*-

# Copyright (C) 2010-2017 RhodeCode GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License, version 3
# (only), as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This program is dual-licensed. If you wish to learn more about the
# RhodeCode Enterprise Edition, including its added features, Support services,
# and proprietary license terms, please see https://rhodecode.com/licenses/


"""
Package for testing various lib/helper functions in rhodecode
"""

import datetime
import string
import mock
import pytest

from rhodecode.tests import no_newline_id_generator
from rhodecode.tests.utils import run_test_concurrently
from rhodecode.lib.helpers import InitialsGravatar

from rhodecode.lib.utils2 import AttributeDict
from rhodecode.model.db import Repository


def _urls_for_proto(proto):
    return [
        ('%s://127.0.0.1' % proto, ['%s://' % proto, '127.0.0.1'],
         '%s://127.0.0.1' % proto),
        ('%s://marcink@127.0.0.1' % proto, ['%s://' % proto, '127.0.0.1'],
         '%s://127.0.0.1' % proto),
        ('%s://marcink:pass@127.0.0.1' % proto, ['%s://' % proto, '127.0.0.1'],
         '%s://127.0.0.1' % proto),
        ('%s://127.0.0.1:8080' % proto, ['%s://' % proto, '127.0.0.1', '8080'],
         '%s://127.0.0.1:8080' % proto),
        ('%s://domain.org' % proto, ['%s://' % proto, 'domain.org'],
         '%s://domain.org' % proto),
        ('%s://user:pass@domain.org:8080' % proto,
         ['%s://' % proto, 'domain.org', '8080'],
         '%s://domain.org:8080' % proto),
    ]

TEST_URLS = _urls_for_proto('http') + _urls_for_proto('https')


@pytest.mark.parametrize("test_url, expected, expected_creds", TEST_URLS)
def test_uri_filter(test_url, expected, expected_creds):
    from rhodecode.lib.utils2 import uri_filter
    assert uri_filter(test_url) == expected


@pytest.mark.parametrize("test_url, expected, expected_creds", TEST_URLS)
def test_credentials_filter(test_url, expected, expected_creds):
    from rhodecode.lib.utils2 import credentials_filter
    assert credentials_filter(test_url) == expected_creds


@pytest.mark.parametrize("str_bool, expected", [
    ('t', True),
    ('true', True),
    ('y', True),
    ('yes', True),
    ('on', True),
    ('1', True),
    ('Y', True),
    ('yeS', True),
    ('Y', True),
    ('TRUE', True),
    ('T', True),
    ('False', False),
    ('F', False),
    ('FALSE', False),
    ('0', False),
    ('-1', False),
    ('', False)
])
def test_str2bool(str_bool, expected):
    from rhodecode.lib.utils2 import str2bool
    assert str2bool(str_bool) == expected


@pytest.mark.parametrize("text, expected", reduce(lambda a1,a2:a1+a2, [
    [
        (pref+"", []),
        (pref+"Hi there @marcink", ['marcink']),
        (pref+"Hi there @marcink and @bob", ['bob', 'marcink']),
        (pref+"Hi there @marcink\n", ['marcink']),
        (pref+"Hi there @marcink and @bob\n", ['bob', 'marcink']),
        (pref+"Hi there marcin@rhodecode.com", []),
        (pref+"Hi there @john.malcovic and @bob\n", ['bob', 'john.malcovic']),
        (pref+"This needs to be reviewed: (@marcink,@john)", ["john", "marcink"]),
        (pref+"This needs to be reviewed: (@marcink, @john)", ["john", "marcink"]),
        (pref+"This needs to be reviewed: [@marcink,@john]", ["john", "marcink"]),
        (pref+"This needs to be reviewed: (@marcink @john)", ["john", "marcink"]),
        (pref+"@john @mary, please review", ["john", "mary"]),
        (pref+"@john,@mary, please review", ["john", "mary"]),
        (pref+"Hej @123, @22john,@mary, please review", ['123', '22john', 'mary']),
        (pref+"@first hi there @marcink here's my email marcin@email.com "
         "@lukaszb check @one_more22 it pls @ ttwelve @D[] @one@two@three ", ['first', 'lukaszb', 'marcink', 'one', 'one_more22']),
        (pref+"@MARCIN @maRCiN @2one_more22 @john please see this http://org.pl", ['2one_more22', 'john', 'MARCIN', 'maRCiN']),
        (pref+"@marian.user just do it @marco-polo and next extract @marco_polo", ['marco-polo', 'marco_polo', 'marian.user']),
        (pref+"user.dot hej ! not-needed maril@domain.org", []),
        (pref+"\n@marcin", ['marcin']),
    ]
    for pref in ['', '\n', 'hi !', '\t', '\n\n']]), ids=no_newline_id_generator)
def test_mention_extractor(text, expected):
    from rhodecode.lib.utils2 import extract_mentioned_users
    got = extract_mentioned_users(text)
    assert sorted(got, key=lambda x: x.lower()) == got
    assert set(expected) == set(got)

@pytest.mark.parametrize("age_args, expected, kw", [
    ({}, u'just now', {}),
    ({'seconds': -1}, u'1 second ago', {}),
    ({'seconds': -60 * 2}, u'2 minutes ago', {}),
    ({'hours': -1}, u'1 hour ago', {}),
    ({'hours': -24}, u'1 day ago', {}),
    ({'hours': -24 * 5}, u'5 days ago', {}),
    ({'months': -1}, u'1 month ago', {}),
    ({'months': -1, 'days': -2}, u'1 month and 2 days ago', {}),
    ({'years': -1, 'months': -1}, u'1 year and 1 month ago', {}),
    ({}, u'just now', {'short_format': True}),
    ({'seconds': -1}, u'1sec ago', {'short_format': True}),
    ({'seconds': -60 * 2}, u'2min ago', {'short_format': True}),
    ({'hours': -1}, u'1h ago', {'short_format': True}),
    ({'hours': -24}, u'1d ago', {'short_format': True}),
    ({'hours': -24 * 5}, u'5d ago', {'short_format': True}),
    ({'months': -1}, u'1m ago', {'short_format': True}),
    ({'months': -1, 'days': -2}, u'1m, 2d ago', {'short_format': True}),
    ({'years': -1, 'months': -1}, u'1y, 1m ago', {'short_format': True}),
])
def test_age(age_args, expected, kw, baseapp):
    from rhodecode.lib.utils2 import age
    from dateutil import relativedelta
    n = datetime.datetime(year=2012, month=5, day=17)
    delt = lambda *args, **kwargs: relativedelta.relativedelta(*args, **kwargs)

    def translate(elem):
        return elem.interpolate()

    assert translate(age(n + delt(**age_args), now=n, **kw)) == expected


@pytest.mark.parametrize("age_args, expected, kw", [
    ({}, u'just now', {}),
    ({'seconds': 1}, u'in 1 second', {}),
    ({'seconds': 60 * 2}, u'in 2 minutes', {}),
    ({'hours': 1}, u'in 1 hour', {}),
    ({'hours': 24}, u'in 1 day', {}),
    ({'hours': 24 * 5}, u'in 5 days', {}),
    ({'months': 1}, u'in 1 month', {}),
    ({'months': 1, 'days': 1}, u'in 1 month and 1 day', {}),
    ({'years': 1, 'months': 1}, u'in 1 year and 1 month', {}),
    ({}, u'just now', {'short_format': True}),
    ({'seconds': 1}, u'in 1sec', {'short_format': True}),
    ({'seconds': 60 * 2}, u'in 2min', {'short_format': True}),
    ({'hours': 1}, u'in 1h', {'short_format': True}),
    ({'hours': 24}, u'in 1d', {'short_format': True}),
    ({'hours': 24 * 5}, u'in 5d', {'short_format': True}),
    ({'months': 1}, u'in 1m', {'short_format': True}),
    ({'months': 1, 'days': 1}, u'in 1m, 1d', {'short_format': True}),
    ({'years': 1, 'months': 1}, u'in 1y, 1m', {'short_format': True}),
])
def test_age_in_future(age_args, expected, kw, baseapp):
    from rhodecode.lib.utils2 import age
    from dateutil import relativedelta
    n = datetime.datetime(year=2012, month=5, day=17)
    delt = lambda *args, **kwargs: relativedelta.relativedelta(*args, **kwargs)

    def translate(elem):
        return elem.interpolate()

    assert translate(age(n + delt(**age_args), now=n, **kw)) == expected


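# Quick sketch of the helper under test (illustrative, not part of the original
# file): age() renders a human-readable delta relative to `now`, and the tests
# call .interpolate() on the returned lazy translation string, e.g.
#
#   >>> age(datetime.datetime(2012, 5, 16), now=datetime.datetime(2012, 5, 17)).interpolate()
#   u'1 day ago'

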
@pytest.mark.parametrize("sample, expected_tags", [
    # entry
    ((
        ""
    ),
    [

    ]),
    # entry
    ((
        "hello world [stale]"
    ),
    [
        ('state', '[stale]'),
    ]),
    # entry
    ((
        "hello world [v2.0.0] [v1.0.0]"
    ),
    [
        ('generic', '[v2.0.0]'),
        ('generic', '[v1.0.0]'),
    ]),
    # entry
    ((
        "he[ll]o wo[rl]d"
    ),
    [
        ('label', '[ll]'),
        ('label', '[rl]'),
    ]),
    # entry
    ((
        "hello world [stale]\n[featured]\n[stale] [dead] [dev]"
    ),
    [
        ('state', '[stale]'),
        ('state', '[featured]'),
        ('state', '[stale]'),
        ('state', '[dead]'),
        ('state', '[dev]'),
    ]),
    # entry
    ((
        "hello world \n\n [stale] \n [url =&gt; [name](http://rc.com)]"
    ),
    [
        ('state', '[stale]'),
        ('url', '[url =&gt; [name](http://rc.com)]'),
    ]),
    # entry
    ((
        "[url =&gt; [linkNameJS](javascript:alert(document.domain))]\n"
        "[url =&gt; [linkNameHTTP](http://rhodecode.com)]\n"
        "[url =&gt; [linkNameHTTPS](https://rhodecode.com)]\n"
        "[url =&gt; [linkNamePath](/repo_group)]\n"
    ),
    [
        ('generic', '[linkNameJS]'),
        ('url', '[url =&gt; [linkNameHTTP](http://rhodecode.com)]'),
        ('url', '[url =&gt; [linkNameHTTPS](https://rhodecode.com)]'),
        ('url', '[url =&gt; [linkNamePath](/repo_group)]'),
    ]),
    # entry
    ((
        "hello pta[tag] gog [[]] [[] sda ero[or]d [me =&gt;>< sa]"
        "[requires] [stale] [see<>=&gt;] [see =&gt; http://url.com]"
        "[requires =&gt; url] [lang =&gt; python] [just a tag] "
        "<html_tag first='abc' attr=\"my.url?attr=&another=\"></html_tag>"
        "[,d] [ =&gt; ULR ] [obsolete] [desc]]"
    ),
    [
        ('label', '[desc]'),
        ('label', '[obsolete]'),
        ('label', '[or]'),
        ('label', '[requires]'),
        ('label', '[tag]'),
        ('state', '[stale]'),
        ('lang', '[lang =&gt; python]'),
        ('ref', '[requires =&gt; url]'),
        ('see', '[see =&gt; http://url.com]'),

    ]),

], ids=no_newline_id_generator)
def test_metatag_extraction(sample, expected_tags):
    from rhodecode.lib.helpers import extract_metatags
    tags, value = extract_metatags(sample)
    assert sorted(tags) == sorted(expected_tags)


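# Illustrative sketch (not part of the original file), mirroring the new case
# above: only http, https and "/" targets count as url metatags, so a
# javascript: link degrades to a plain generic tag:
#
#   >>> extract_metatags("[url =&gt; [linkNameJS](javascript:alert(document.domain))]")[0]
#   [('generic', '[linkNameJS]')]
#   >>> extract_metatags("[url =&gt; [linkNameHTTPS](https://rhodecode.com)]")[0]
#   [('url', '[url =&gt; [linkNameHTTPS](https://rhodecode.com)]')]

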
@pytest.mark.parametrize("tag_data, expected_html", [

    (('state', '[stable]'), '<div class="metatag" tag="state stable">stable</div>'),
    (('state', '[stale]'), '<div class="metatag" tag="state stale">stale</div>'),
    (('state', '[featured]'), '<div class="metatag" tag="state featured">featured</div>'),
    (('state', '[dev]'), '<div class="metatag" tag="state dev">dev</div>'),
    (('state', '[dead]'), '<div class="metatag" tag="state dead">dead</div>'),

    (('label', '[personal]'), '<div class="metatag" tag="label">personal</div>'),
    (('generic', '[v2.0.0]'), '<div class="metatag" tag="generic">v2.0.0</div>'),

    (('lang', '[lang =&gt; JavaScript]'), '<div class="metatag" tag="lang">JavaScript</div>'),
    (('lang', '[lang =&gt; C++]'), '<div class="metatag" tag="lang">C++</div>'),
    (('lang', '[lang =&gt; C#]'), '<div class="metatag" tag="lang">C#</div>'),
    (('lang', '[lang =&gt; Delphi/Object]'), '<div class="metatag" tag="lang">Delphi/Object</div>'),
    (('lang', '[lang =&gt; Objective-C]'), '<div class="metatag" tag="lang">Objective-C</div>'),
    (('lang', '[lang =&gt; .NET]'), '<div class="metatag" tag="lang">.NET</div>'),

    (('license', '[license =&gt; BSD 3-clause]'), '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/BSD 3-clause">BSD 3-clause</a></div>'),
    (('license', '[license =&gt; GPLv3]'), '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/GPLv3">GPLv3</a></div>'),
    (('license', '[license =&gt; MIT]'), '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/MIT">MIT</a></div>'),
    (('license', '[license =&gt; AGPLv3]'), '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/AGPLv3">AGPLv3</a></div>'),

    (('ref', '[requires =&gt; RepoName]'), '<div class="metatag" tag="ref requires">requires: <a href="/RepoName">RepoName</a></div>'),
    (('ref', '[recommends =&gt; GroupName]'), '<div class="metatag" tag="ref recommends">recommends: <a href="/GroupName">GroupName</a></div>'),
    (('ref', '[conflicts =&gt; SomeName]'), '<div class="metatag" tag="ref conflicts">conflicts: <a href="/SomeName">SomeName</a></div>'),
    (('ref', '[base =&gt; SomeName]'), '<div class="metatag" tag="ref base">base: <a href="/SomeName">SomeName</a></div>'),

    (('see', '[see =&gt; http://rhodecode.com]'), '<div class="metatag" tag="see">see: http://rhodecode.com </div>'),

    (('url', '[url =&gt; [linkName](https://rhodecode.com)]'), '<div class="metatag" tag="url"> <a href="https://rhodecode.com">linkName</a> </div>'),
    (('url', '[url =&gt; [example link](https://rhodecode.com)]'), '<div class="metatag" tag="url"> <a href="https://rhodecode.com">example link</a> </div>'),
    (('url', '[url =&gt; [v1.0.0](https://rhodecode.com)]'), '<div class="metatag" tag="url"> <a href="https://rhodecode.com">v1.0.0</a> </div>'),

])
def test_metatags_stylize(tag_data, expected_html):
    from rhodecode.lib.helpers import style_metatag
    tag_type,value = tag_data
    assert style_metatag(tag_type, value) == expected_html


@pytest.mark.parametrize("tmpl_url, email, expected", [
    ('http://test.com/{email}', 'test@foo.com', 'http://test.com/test@foo.com'),

    ('http://test.com/{md5email}', 'test@foo.com', 'http://test.com/3cb7232fcc48743000cb86d0d5022bd9'),
    ('http://test.com/{md5email}', 'testąć@foo.com', 'http://test.com/978debb907a3c55cd741872ab293ef30'),

    ('http://testX.com/{md5email}?s={size}', 'test@foo.com', 'http://testX.com/3cb7232fcc48743000cb86d0d5022bd9?s=24'),
    ('http://testX.com/{md5email}?s={size}', 'testąć@foo.com', 'http://testX.com/978debb907a3c55cd741872ab293ef30?s=24'),

    ('{scheme}://{netloc}/{md5email}/{size}', 'test@foo.com', 'https://server.com/3cb7232fcc48743000cb86d0d5022bd9/24'),
    ('{scheme}://{netloc}/{md5email}/{size}', 'testąć@foo.com', 'https://server.com/978debb907a3c55cd741872ab293ef30/24'),

    ('http://test.com/{email}', 'testąć@foo.com', 'http://test.com/testąć@foo.com'),
    ('http://test.com/{email}?size={size}', 'test@foo.com', 'http://test.com/test@foo.com?size=24'),
    ('http://test.com/{email}?size={size}', 'testąć@foo.com', 'http://test.com/testąć@foo.com?size=24'),
])
def test_gravatar_url_builder(tmpl_url, email, expected, request_stub):
    from rhodecode.lib.helpers import gravatar_url

    def fake_tmpl_context(_url):
        _c = AttributeDict()
        _c.visual = AttributeDict()
        _c.visual.use_gravatar = True
        _c.visual.gravatar_url = _url
        return _c

    # mock pyramid.threadlocals
    def fake_get_current_request():
        request_stub.scheme = 'https'
        request_stub.host = 'server.com'

        request_stub._call_context = fake_tmpl_context(tmpl_url)
        return request_stub

    with mock.patch('rhodecode.lib.helpers.get_current_request',
                    fake_get_current_request):

        grav = gravatar_url(email_address=email, size=24)
        assert grav == expected


@pytest.mark.parametrize(
    "email, first_name, last_name, expected_initials, expected_color", [

        ('test@rhodecode.com', '', '', 'TR', '#8a994d'),
        ('marcin.kuzminski@rhodecode.com', '', '', 'MK', '#6559b3'),
        # special cases of email
        ('john.van.dam@rhodecode.com', '', '', 'JD', '#526600'),
        ('Guido.van.Rossum@rhodecode.com', '', '', 'GR', '#990052'),
        ('Guido.van.Rossum@rhodecode.com', 'Guido', 'Van Rossum', 'GR', '#990052'),

        ('rhodecode+Guido.van.Rossum@rhodecode.com', '', '', 'RR', '#46598c'),
        ('pclouds@rhodecode.com', 'Nguyễn Thái', 'Tgọc Duy', 'ND', '#665200'),

        ('john-brown@foo.com', '', '', 'JF', '#73006b'),
        ('admin@rhodecode.com', 'Marcin', 'Kuzminski', 'MK', '#104036'),
        # partials
        ('admin@rhodecode.com', 'Marcin', '', 'MR', '#104036'),  # fn+email
        ('admin@rhodecode.com', '', 'Kuzminski', 'AK', '#104036'),  # em+ln
        # non-ascii
        ('admin@rhodecode.com', 'Marcin', 'Śuzminski', 'MS', '#104036'),
        ('marcin.śuzminski@rhodecode.com', '', '', 'MS', '#73000f'),

        # special cases, LDAP can provide those...
        ('admin@', 'Marcin', 'Śuzminski', 'MS', '#aa00ff'),
        ('marcin.śuzminski', '', '', 'MS', '#402020'),
        ('null', '', '', 'NL', '#8c4646'),
        ('some.@abc.com', 'some', '', 'SA', '#664e33')
    ])
def test_initials_gravatar_pick_of_initials_and_color_algo(
        email, first_name, last_name, expected_initials, expected_color):
    instance = InitialsGravatar(email, first_name, last_name)
    assert instance.get_initials() == expected_initials
    assert instance.str2color(email) == expected_color


def test_initials_gravatar_mapping_algo():
    pos = set()
    instance = InitialsGravatar('', '', '')
    iterations = 0

    variations = []
    for letter1 in string.ascii_letters:
        for letter2 in string.ascii_letters[::-1][:10]:
            for letter3 in string.ascii_letters[:10]:
                variations.append(
                    '%s@rhodecode.com' % (letter1+letter2+letter3))

    max_variations = 4096
    for email in variations[:max_variations]:
        iterations += 1
        pos.add(
            instance.pick_color_bank_index(email,
                                           instance.get_color_bank()))

    # we assume that we have matched all 256 possible positions
    # within a reasonable number of different email addresses
    assert len(pos) == 256
    assert iterations == max_variations


@pytest.mark.parametrize("tmpl, repo_name, overrides, prefix, expected", [
    (Repository.DEFAULT_CLONE_URI, 'group/repo1', {}, '', 'http://vps1:8000/group/repo1'),
    (Repository.DEFAULT_CLONE_URI, 'group/repo1', {'user': 'marcink'}, '', 'http://marcink@vps1:8000/group/repo1'),
    (Repository.DEFAULT_CLONE_URI, 'group/repo1', {}, '/rc', 'http://vps1:8000/rc/group/repo1'),
    (Repository.DEFAULT_CLONE_URI, 'group/repo1', {'user': 'user'}, '/rc', 'http://user@vps1:8000/rc/group/repo1'),
    (Repository.DEFAULT_CLONE_URI, 'group/repo1', {'user': 'marcink'}, '/rc', 'http://marcink@vps1:8000/rc/group/repo1'),
    (Repository.DEFAULT_CLONE_URI, 'group/repo1', {'user': 'user'}, '/rc/', 'http://user@vps1:8000/rc/group/repo1'),
    (Repository.DEFAULT_CLONE_URI, 'group/repo1', {'user': 'marcink'}, '/rc/', 'http://marcink@vps1:8000/rc/group/repo1'),
    ('{scheme}://{user}@{netloc}/_{repoid}', 'group/repo1', {}, '', 'http://vps1:8000/_23'),
    ('{scheme}://{user}@{netloc}/_{repoid}', 'group/repo1', {'user': 'marcink'}, '', 'http://marcink@vps1:8000/_23'),
    ('http://{user}@{netloc}/_{repoid}', 'group/repo1', {'user': 'marcink'}, '', 'http://marcink@vps1:8000/_23'),
    ('http://{netloc}/_{repoid}', 'group/repo1', {'user': 'marcink'}, '', 'http://vps1:8000/_23'),
    ('https://{user}@proxy1.server.com/{repo}', 'group/repo1', {'user': 'marcink'}, '', 'https://marcink@proxy1.server.com/group/repo1'),
    ('https://{user}@proxy1.server.com/{repo}', 'group/repo1', {}, '', 'https://proxy1.server.com/group/repo1'),
    ('https://proxy1.server.com/{user}/{repo}', 'group/repo1', {'user': 'marcink'}, '', 'https://proxy1.server.com/marcink/group/repo1'),
])
def test_clone_url_generator(tmpl, repo_name, overrides, prefix, expected):
    from rhodecode.lib.utils2 import get_clone_url

    class RequestStub(object):
        def request_url(self, name):
            return 'http://vps1:8000' + prefix

        def route_url(self, name):
            return self.request_url(name)

    clone_url = get_clone_url(
        request=RequestStub(),
        uri_tmpl=tmpl,
        repo_name=repo_name, repo_id=23, **overrides)
    assert clone_url == expected


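# Usage sketch (illustrative, not part of the original file): outside of tests
# the same helper fills the clone-uri template from the live request, e.g.
#
#   get_clone_url(request=request,
#                 uri_tmpl='{scheme}://{user}@{netloc}/{repo}',
#                 repo_name='group/repo1', repo_id=23, user='marcink')
#
# which, for a server reached at http://vps1:8000, yields
# 'http://marcink@vps1:8000/group/repo1' as asserted above.

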
def _quick_url(text, tmpl="""<a class="revision-link" href="%s">%s</a>""", url_=None):
    """
    Changes `some text url[foo]` => `some text <a href="/">foo</a>`

    :param text: text containing url[...] markers to expand
    """
    import re
    # quickly change expected url[] into a link
    URL_PAT = re.compile(r'(?:url\[)(.+?)(?:\])')

    def url_func(match_obj):
        _url = match_obj.groups()[0]
        return tmpl % (url_ or '/some-url', _url)
    return URL_PAT.sub(url_func, text)

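# Illustration only (hypothetical helper, never called by the tests): what
# _quick_url produces with its defaults -- the expectations below rely on
# exactly this anchor markup.
def _example_quick_url_usage():
    rendered = _quick_url('from rev url[deadbeef]')
    assert rendered == (
        'from rev <a class="revision-link" href="/some-url">deadbeef</a>')
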
@pytest.mark.parametrize("sample, expected", [
    ("",
     ""),
    ("git-svn-id: https://svn.apache.org/repos/asf/libcloud/trunk@1441655 13f79535-47bb-0310-9956-ffa450edef68",
     "git-svn-id: https://svn.apache.org/repos/asf/libcloud/trunk@1441655 13f79535-47bb-0310-9956-ffa450edef68"),
    ("from rev 000000000000",
     "from rev url[000000000000]"),
    ("from rev 000000000000123123 also rev 000000000000",
     "from rev url[000000000000123123] also rev url[000000000000]"),
    ("this should-000 00",
     "this should-000 00"),
    ("longtextffffffffff rev 123123123123",
     "longtextffffffffff rev url[123123123123]"),
    ("rev ffffffffffffffffffffffffffffffffffffffffffffffffff",
     "rev ffffffffffffffffffffffffffffffffffffffffffffffffff"),
    ("ffffffffffff some text traalaa",
     "url[ffffffffffff] some text traalaa"),
    ("""Multi line
123123123123
some text 123123123123
sometimes !
""",
     """Multi line
url[123123123123]
some text url[123123123123]
sometimes !
""")
], ids=no_newline_id_generator)
def test_urlify_commits(sample, expected):
    def fake_url(self, *args, **kwargs):
        return '/some-url'

    expected = _quick_url(expected)

    with mock.patch('rhodecode.lib.helpers.route_url', fake_url):
        from rhodecode.lib.helpers import urlify_commits
        assert urlify_commits(sample, 'repo_name') == expected

@pytest.mark.parametrize("sample, expected, url_", [
    ("",
     "",
     ""),
    ("https://svn.apache.org/repos",
     "url[https://svn.apache.org/repos]",
     "https://svn.apache.org/repos"),
    ("http://svn.apache.org/repos",
     "url[http://svn.apache.org/repos]",
     "http://svn.apache.org/repos"),
    ("from rev a also rev http://google.com",
     "from rev a also rev url[http://google.com]",
     "http://google.com"),
    ("""Multi line
https://foo.bar.com
some text lalala""",
     """Multi line
url[https://foo.bar.com]
some text lalala""",
     "https://foo.bar.com")
], ids=no_newline_id_generator)
def test_urlify_test(sample, expected, url_):
    from rhodecode.lib.helpers import urlify_text
    expected = _quick_url(expected, tmpl="""<a href="%s">%s</a>""", url_=url_)
    assert urlify_text(sample) == expected

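# Not the RhodeCode implementation -- just a minimal, hypothetical sketch of
# the behaviour the cases above describe: bare http(s) URLs found in free
# text are wrapped in anchor tags.
def _example_urlify(text):
    import re
    url_pat = re.compile(r'(https?://[^\s<]+)')
    return url_pat.sub(r'<a href="\1">\1</a>', text)

# e.g. _example_urlify('see http://google.com')
# returns 'see <a href="http://google.com">http://google.com</a>'
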
@pytest.mark.parametrize("test, expected", [
    ("", None),
    ("/_2", '2'),
    ("_2", '2'),
    ("/_2/", '2'),
    ("_2/", '2'),

    ("/_21", '21'),
    ("_21", '21'),
    ("/_21/", '21'),
    ("_21/", '21'),

    ("/_21/foobar", '21'),
    ("_21/121", '21'),
    ("/_21/_12", '21'),
    ("_21/rc/foo", '21'),

])
def test_get_repo_by_id(test, expected):
    from rhodecode.model.repo import RepoModel
    _test = RepoModel()._extract_id_from_repo_name(test)
    assert _test == expected

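# Hypothetical re-implementation for illustration only; the real logic lives
# in RepoModel._extract_id_from_repo_name. It reproduces the expectations
# above: an optional leading slash, an underscore, then the numeric id.
def _example_extract_repo_id(path):
    import re
    match = re.match(r'/?_(\d+)', path or '')
    return match.group(1) if match else None

# e.g. _example_extract_repo_id('/_21/foobar') returns '21'
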
@pytest.mark.parametrize("test_repo_name, repo_type", [
    ("test_repo_1", None),
    ("repo_group/foobar", None),
    ("test_non_asci_ąćę", None),
    (u"test_non_asci_unicode_ąćę", None),
])
def test_invalidation_context(baseapp, test_repo_name, repo_type):
    from beaker.cache import cache_region
    from rhodecode.lib import caches
    from rhodecode.model.db import CacheKey

    @cache_region('long_term')
    def _dummy_func(cache_key):
        return 'result'

    invalidator_context = CacheKey.repo_context_cache(
        _dummy_func, test_repo_name, 'repo')

    with invalidator_context as context:
        invalidated = context.invalidate()
        result = context.compute()

    assert invalidated == True
    assert 'result' == result
    assert isinstance(context, caches.FreshRegionCache)

    assert 'InvalidationContext' in repr(invalidator_context)

    with invalidator_context as context:
        context.invalidate()
        result = context.compute()

    assert 'result' == result
    assert isinstance(context, caches.ActiveRegionCache)

def test_invalidation_context_exception_in_compute(baseapp):
    from rhodecode.model.db import CacheKey
    from beaker.cache import cache_region

    @cache_region('long_term')
    def _dummy_func(cache_key):
        # simulate a failure inside the cached computation
        raise Exception('ups')

    invalidator_context = CacheKey.repo_context_cache(
        _dummy_func, 'test_repo_2', 'repo')

    with pytest.raises(Exception):
        with invalidator_context as context:
            context.invalidate()
            context.compute()

@pytest.mark.parametrize('execution_number', range(5))
def test_cache_invalidation_race_condition(execution_number, baseapp):
    import time
    from beaker.cache import cache_region
    from rhodecode.model.db import CacheKey

    if CacheKey.metadata.bind.url.get_backend_name() == "mysql":
        reason = (
            'Fails on MariaDB due to some locking issues. Investigation'
            ' needed')
        pytest.xfail(reason=reason)

    @run_test_concurrently(25)
    def test_create_and_delete_cache_keys():
        time.sleep(0.2)

        @cache_region('long_term')
        def _dummy_func(cache_key):
            return 'result'

        invalidator_context = CacheKey.repo_context_cache(
            _dummy_func, 'test_repo_1', 'repo')

        with invalidator_context as context:
            context.invalidate()
            context.compute()

        CacheKey.set_invalidate('test_repo_1', delete=True)

    test_create_and_delete_cache_keys()