jupyter-rendering: added rendering of notebook into MarkupRenderer class.
Author: marcink
Revision: r1491:4811d677 (default branch)
@@ -1,2038 +1,2019 @@
# -*- coding: utf-8 -*-

# Copyright (C) 2010-2017 RhodeCode GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License, version 3
# (only), as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This program is dual-licensed. If you wish to learn more about the
# RhodeCode Enterprise Edition, including its added features, Support services,
# and proprietary license terms, please see https://rhodecode.com/licenses/

"""
Helper functions

Consists of functions to typically be used within templates, but also
available to Controllers. This module is available to both as 'h'.
"""

import random
import hashlib
import StringIO
import urllib
import math
import logging
import re
import urlparse
import time
import string
import hashlib
import pygments
import itertools
-import fnmatch

from datetime import datetime
from functools import partial
from pygments.formatters.html import HtmlFormatter
from pygments import highlight as code_highlight
from pygments.lexers import (
    get_lexer_by_name, get_lexer_for_filename, get_lexer_for_mimetype)
from pylons import url as pylons_url
from pylons.i18n.translation import _, ungettext
from pyramid.threadlocal import get_current_request

from webhelpers.html import literal, HTML, escape
from webhelpers.html.tools import *
from webhelpers.html.builder import make_tag
from webhelpers.html.tags import auto_discovery_link, checkbox, css_classes, \
    end_form, file, form as wh_form, hidden, image, javascript_link, link_to, \
    link_to_if, link_to_unless, ol, required_legend, select, stylesheet_link, \
    submit, text, password, textarea, title, ul, xml_declaration, radio
from webhelpers.html.tools import auto_link, button_to, highlight, \
    js_obfuscate, mail_to, strip_links, strip_tags, tag_re
from webhelpers.pylonslib import Flash as _Flash
from webhelpers.text import chop_at, collapse, convert_accented_entities, \
    convert_misc_entities, lchop, plural, rchop, remove_formatting, \
    replace_whitespace, urlify, truncate, wrap_paragraphs
from webhelpers.date import time_ago_in_words
from webhelpers.paginate import Page as _Page
from webhelpers.html.tags import _set_input_attrs, _set_id_attr, \
    convert_boolean_attrs, NotGiven, _make_safe_id_component
from webhelpers2.number import format_byte_size

from rhodecode.lib.action_parser import action_parser
from rhodecode.lib.ext_json import json
from rhodecode.lib.utils import repo_name_slug, get_custom_lexer
from rhodecode.lib.utils2 import str2bool, safe_unicode, safe_str, \
    get_commit_safe, datetime_to_time, time_to_datetime, time_to_utcdatetime, \
    AttributeDict, safe_int, md5, md5_safe
from rhodecode.lib.markup_renderer import MarkupRenderer
from rhodecode.lib.vcs.exceptions import CommitDoesNotExistError
from rhodecode.lib.vcs.backends.base import BaseChangeset, EmptyCommit
from rhodecode.config.conf import DATE_FORMAT, DATETIME_FORMAT
from rhodecode.model.changeset_status import ChangesetStatusModel
from rhodecode.model.db import Permission, User, Repository
from rhodecode.model.repo_group import RepoGroupModel
from rhodecode.model.settings import IssueTrackerSettingsModel

log = logging.getLogger(__name__)


DEFAULT_USER = User.DEFAULT_USER
DEFAULT_USER_EMAIL = User.DEFAULT_USER_EMAIL


def url(*args, **kw):
    return pylons_url(*args, **kw)


def pylons_url_current(*args, **kw):
    """
    This function overrides pylons.url.current() which returns the current
    path so that it will also work from a pyramid only context. This
    should be removed once port to pyramid is complete.
    """
    if not args and not kw:
        request = get_current_request()
        return request.path
    return pylons_url.current(*args, **kw)

url.current = pylons_url_current


def url_replace(**qargs):
    """ Returns the current request url while replacing query string args """

    request = get_current_request()
    new_args = request.GET.mixed()
    new_args.update(qargs)
    return url('', **new_args)
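
# Illustrative usage (added example, not part of the original module): the
# helper re-generates the current url with the same query string arguments,
# replacing only the ones passed in. For a hypothetical request carrying
# ?page=1&branch=stable, a call like url_replace(page=2) would keep 'branch'
# and only swap the 'page' value.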


def asset(path, ver=None, **kwargs):
    """
    Helper to generate a static asset file path for rhodecode assets

    eg. h.asset('images/image.png', ver='3923')

    :param path: path of asset
    :param ver: optional version query param to append as ?ver=
    """
    request = get_current_request()
    query = {}
    query.update(kwargs)
    if ver:
        query = {'ver': ver}
    return request.static_path(
        'rhodecode:public/{}'.format(path), _query=query)


default_html_escape_table = {
    ord('&'): u'&amp;',
    ord('<'): u'&lt;',
    ord('>'): u'&gt;',
    ord('"'): u'&quot;',
    ord("'"): u'&#39;',
}


def html_escape(text, html_escape_table=default_html_escape_table):
    """Produce entities within text."""
    return text.translate(html_escape_table)
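
# Illustrative usage (added example, not part of the original module). The
# table maps the ordinals of &, <, >, " and ' to HTML entities, and
# unicode.translate applies it character by character:
#
#     >>> html_escape(u'<b>&"quotes"</b>')
#     u'&lt;b&gt;&amp;&quot;quotes&quot;&lt;/b&gt;'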


def chop_at_smart(s, sub, inclusive=False, suffix_if_chopped=None):
    """
    Truncate string ``s`` at the first occurrence of ``sub``.

    If ``inclusive`` is true, truncate just after ``sub`` rather than at it.
    """
    suffix_if_chopped = suffix_if_chopped or ''
    pos = s.find(sub)
    if pos == -1:
        return s

    if inclusive:
        pos += len(sub)

    chopped = s[:pos]
    left = s[pos:].strip()

    if left and suffix_if_chopped:
        chopped += suffix_if_chopped

    return chopped
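
# Illustrative behaviour (added example, not part of the original module):
#
#     >>> chop_at_smart('first line\nsecond line', '\n', suffix_if_chopped='...')
#     'first line...'
#     >>> chop_at_smart('no separator here', '\n')
#     'no separator here'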


def shorter(text, size=20):
    postfix = '...'
    if len(text) > size:
        return text[:size - len(postfix)] + postfix
    return text


def _reset(name, value=None, id=NotGiven, type="reset", **attrs):
    """
    Reset button
    """
    _set_input_attrs(attrs, type, name, value)
    _set_id_attr(attrs, id, name)
    convert_boolean_attrs(attrs, ["disabled"])
    return HTML.input(**attrs)

reset = _reset
safeid = _make_safe_id_component


def branding(name, length=40):
    return truncate(name, length, indicator="")


def FID(raw_id, path):
    """
    Creates a unique ID for a filenode based on the hash of its path and
    commit; it's safe to use in URLs.

    :param raw_id:
    :param path:
    """

    return 'c-%s-%s' % (short_id(raw_id), md5_safe(path)[:12])
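
# Illustrative output shape (added note, not part of the original module):
# FID combines the shortened commit hash with the first 12 hex characters of
# the md5 of the path, e.g. FID(raw_id, 'docs/index.rst') returns something
# like 'c-1e6e3f9f16b1-a39ad1f1c71d' (hash values here are made up).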


class _GetError(object):
    """Get error from form_errors, and represent it as span wrapped error
    message

    :param field_name: field to fetch errors for
    :param form_errors: form errors dict
    """

    def __call__(self, field_name, form_errors):
        tmpl = """<span class="error_msg">%s</span>"""
        if form_errors and field_name in form_errors:
            return literal(tmpl % form_errors.get(field_name))

get_error = _GetError()


class _ToolTip(object):

    def __call__(self, tooltip_title, trim_at=50):
        """
        Special function just to wrap our text into nice formatted
        autowrapped text

        :param tooltip_title:
        """
        tooltip_title = escape(tooltip_title)
        tooltip_title = tooltip_title.replace('<', '&lt;').replace('>', '&gt;')
        return tooltip_title
tooltip = _ToolTip()


def files_breadcrumbs(repo_name, commit_id, file_path):
    if isinstance(file_path, str):
        file_path = safe_unicode(file_path)

    # TODO: johbo: Is this always a url like path, or is this operating
    # system dependent?
    path_segments = file_path.split('/')

    repo_name_html = escape(repo_name)
    if len(path_segments) == 1 and path_segments[0] == '':
        url_segments = [repo_name_html]
    else:
        url_segments = [
            link_to(
                repo_name_html,
                url('files_home',
                    repo_name=repo_name,
                    revision=commit_id,
                    f_path=''),
                class_='pjax-link')]

    last_cnt = len(path_segments) - 1
    for cnt, segment in enumerate(path_segments):
        if not segment:
            continue
        segment_html = escape(segment)

        if cnt != last_cnt:
            url_segments.append(
                link_to(
                    segment_html,
                    url('files_home',
                        repo_name=repo_name,
                        revision=commit_id,
                        f_path='/'.join(path_segments[:cnt + 1])),
                    class_='pjax-link'))
        else:
            url_segments.append(segment_html)

    return literal('/'.join(url_segments))


class CodeHtmlFormatter(HtmlFormatter):
    """
    My code Html Formatter for source codes
    """

    def wrap(self, source, outfile):
        return self._wrap_div(self._wrap_pre(self._wrap_code(source)))

    def _wrap_code(self, source):
        for cnt, it in enumerate(source):
            i, t = it
            t = '<div id="L%s">%s</div>' % (cnt + 1, t)
            yield i, t

    def _wrap_tablelinenos(self, inner):
        dummyoutfile = StringIO.StringIO()
        lncount = 0
        for t, line in inner:
            if t:
                lncount += 1
            dummyoutfile.write(line)

        fl = self.linenostart
        mw = len(str(lncount + fl - 1))
        sp = self.linenospecial
        st = self.linenostep
        la = self.lineanchors
        aln = self.anchorlinenos
        nocls = self.noclasses
        if sp:
            lines = []

            for i in range(fl, fl + lncount):
                if i % st == 0:
                    if i % sp == 0:
                        if aln:
                            lines.append('<a href="#%s%d" class="special">%*d</a>' %
                                         (la, i, mw, i))
                        else:
                            lines.append('<span class="special">%*d</span>' % (mw, i))
                    else:
                        if aln:
                            lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
                        else:
                            lines.append('%*d' % (mw, i))
                else:
                    lines.append('')
            ls = '\n'.join(lines)
        else:
            lines = []
            for i in range(fl, fl + lncount):
                if i % st == 0:
                    if aln:
                        lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
                    else:
                        lines.append('%*d' % (mw, i))
                else:
                    lines.append('')
            ls = '\n'.join(lines)

        # in case you wonder about the seemingly redundant <div> here: since the
        # content in the other cell also is wrapped in a div, some browsers in
        # some configurations seem to mess up the formatting...
        if nocls:
            yield 0, ('<table class="%stable">' % self.cssclass +
                      '<tr><td><div class="linenodiv" '
                      'style="background-color: #f0f0f0; padding-right: 10px">'
                      '<pre style="line-height: 125%">' +
                      ls + '</pre></div></td><td id="hlcode" class="code">')
        else:
            yield 0, ('<table class="%stable">' % self.cssclass +
                      '<tr><td class="linenos"><div class="linenodiv"><pre>' +
                      ls + '</pre></div></td><td id="hlcode" class="code">')
        yield 0, dummyoutfile.getvalue()
        yield 0, '</td></tr></table>'


class SearchContentCodeHtmlFormatter(CodeHtmlFormatter):
    def __init__(self, **kw):
        # only show these line numbers if set
        self.only_lines = kw.pop('only_line_numbers', [])
        self.query_terms = kw.pop('query_terms', [])
        self.max_lines = kw.pop('max_lines', 5)
        self.line_context = kw.pop('line_context', 3)
        self.url = kw.pop('url', None)

        super(CodeHtmlFormatter, self).__init__(**kw)

    def _wrap_code(self, source):
        for cnt, it in enumerate(source):
            i, t = it
            t = '<pre>%s</pre>' % t
            yield i, t

    def _wrap_tablelinenos(self, inner):
        yield 0, '<table class="code-highlight %stable">' % self.cssclass

        last_shown_line_number = 0
        current_line_number = 1

        for t, line in inner:
            if not t:
                yield t, line
                continue

            if current_line_number in self.only_lines:
                if last_shown_line_number + 1 != current_line_number:
                    yield 0, '<tr>'
                    yield 0, '<td class="line">...</td>'
                    yield 0, '<td id="hlcode" class="code"></td>'
                    yield 0, '</tr>'

                yield 0, '<tr>'
                if self.url:
                    yield 0, '<td class="line"><a href="%s#L%i">%i</a></td>' % (
                        self.url, current_line_number, current_line_number)
                else:
                    yield 0, '<td class="line"><a href="">%i</a></td>' % (
                        current_line_number)
                yield 0, '<td id="hlcode" class="code">' + line + '</td>'
                yield 0, '</tr>'

                last_shown_line_number = current_line_number

            current_line_number += 1


        yield 0, '</table>'


def extract_phrases(text_query):
    """
    Extracts phrases from search term string making sure phrases
    contained in double quotes are kept together - and discarding empty values
    or fully whitespace values eg.

    'some text "a phrase" more' => ['some', 'text', 'a phrase', 'more']

    """

    in_phrase = False
    buf = ''
    phrases = []
    for char in text_query:
        if in_phrase:
            if char == '"':  # end phrase
                phrases.append(buf)
                buf = ''
                in_phrase = False
                continue
            else:
                buf += char
                continue
        else:
            if char == '"':  # start phrase
                in_phrase = True
                phrases.append(buf)
                buf = ''
                continue
            elif char == ' ':
                phrases.append(buf)
                buf = ''
                continue
            else:
                buf += char

    phrases.append(buf)
    phrases = [phrase.strip() for phrase in phrases if phrase.strip()]
    return phrases


def get_matching_offsets(text, phrases):
    """
    Returns a list of string offsets in `text` that the list of `terms` match

    >>> get_matching_offsets('some text here', ['some', 'here'])
    [(0, 4), (10, 14)]

    """
    offsets = []
    for phrase in phrases:
        for match in re.finditer(phrase, text):
            offsets.append((match.start(), match.end()))

    return offsets


def normalize_text_for_matching(x):
    """
    Replaces all non alnum characters to spaces and lower cases the string,
    useful for comparing two text strings without punctuation
    """
    return re.sub(r'[^\w]', ' ', x.lower())
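
# Illustrative behaviour (added example, not part of the original module):
# every non-word character (anything outside [a-zA-Z0-9_]) becomes a space
# and the result is lower-cased:
#
#     >>> normalize_text_for_matching('Foo-Bar_baz!')
#     'foo bar_baz '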


def get_matching_line_offsets(lines, terms):
    """ Return a set of `lines` indices (starting from 1) matching a
    text search query, along with `context` lines above/below matching lines

    :param lines: list of strings representing lines
    :param terms: search term string to match in lines eg. 'some text'
    :param context: number of lines above/below a matching line to add to result
    :param max_lines: cut off for lines of interest
    eg.

    text = '''
    words words words
    words words words
    some text some
    words words words
    words words words
    text here what
    '''
    get_matching_line_offsets(text, 'text', context=1)
    {3: [(5, 9)], 6: [(0, 4)]}

    """
    matching_lines = {}
    phrases = [normalize_text_for_matching(phrase)
               for phrase in extract_phrases(terms)]

    for line_index, line in enumerate(lines, start=1):
        match_offsets = get_matching_offsets(
            normalize_text_for_matching(line), phrases)
        if match_offsets:
            matching_lines[line_index] = match_offsets

    return matching_lines


def hsv_to_rgb(h, s, v):
    """ Convert hsv color values to rgb """

    if s == 0.0:
        return v, v, v
    i = int(h * 6.0)  # XXX assume int() truncates!
    f = (h * 6.0) - i
    p = v * (1.0 - s)
    q = v * (1.0 - s * f)
    t = v * (1.0 - s * (1.0 - f))
    i = i % 6
    if i == 0:
        return v, t, p
    if i == 1:
        return q, v, p
    if i == 2:
        return p, v, t
    if i == 3:
        return p, q, v
    if i == 4:
        return t, p, v
    if i == 5:
        return v, p, q
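
# Illustrative values (added example, not part of the original module); all
# components are floats in the 0.0-1.0 range:
#
#     >>> hsv_to_rgb(0.0, 1.0, 1.0)   # pure red
#     (1.0, 0.0, 0.0)
#     >>> hsv_to_rgb(0.5, 0.0, 0.75)  # zero saturation -> grey
#     (0.75, 0.75, 0.75)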


def unique_color_generator(n=10000, saturation=0.10, lightness=0.95):
    """
    Generator for getting n of evenly distributed colors using
    hsv color and golden ratio. It always returns the same order of colors

    :param n: number of colors to generate
    :param saturation: saturation of returned colors
    :param lightness: lightness of returned colors
    :returns: RGB tuple
    """

    golden_ratio = 0.618033988749895
    h = 0.22717784590367374

    for _ in xrange(n):
        h += golden_ratio
        h %= 1
        HSV_tuple = [h, saturation, lightness]
        RGB_tuple = hsv_to_rgb(*HSV_tuple)
        yield map(lambda x: str(int(x * 256)), RGB_tuple)


def color_hasher(n=10000, saturation=0.10, lightness=0.95):
    """
    Returns a function which when called with an argument returns a unique
    color for that argument, eg.

    :param n: number of colors to generate
    :param saturation: saturation of returned colors
    :param lightness: lightness of returned colors
    :returns: css RGB string

    >>> color_hash = color_hasher()
    >>> color_hash('hello')
    'rgb(34, 12, 59)'
    >>> color_hash('hello')
    'rgb(34, 12, 59)'
    >>> color_hash('other')
    'rgb(90, 224, 159)'
    """

    color_dict = {}
    cgenerator = unique_color_generator(
        saturation=saturation, lightness=lightness)

    def get_color_string(thing):
        if thing in color_dict:
            col = color_dict[thing]
        else:
            col = color_dict[thing] = cgenerator.next()
        return "rgb(%s)" % (', '.join(col))

    return get_color_string


def get_lexer_safe(mimetype=None, filepath=None):
    """
    Tries to return a relevant pygments lexer using mimetype/filepath name,
    defaulting to plain text if none could be found
    """
    lexer = None
    try:
        if mimetype:
            lexer = get_lexer_for_mimetype(mimetype)
        if not lexer:
            lexer = get_lexer_for_filename(filepath)
    except pygments.util.ClassNotFound:
        pass

    if not lexer:
        lexer = get_lexer_by_name('text')

    return lexer
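
# Illustrative usage (added example, not part of the original module):
# get_lexer_safe(filepath='setup.py') resolves to pygments' Python lexer via
# the filename, while an unknown mimetype such as
# get_lexer_safe(mimetype='text/x-no-such-type') falls back to the plain
# 'text' lexer instead of raising ClassNotFound.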


def get_lexer_for_filenode(filenode):
    lexer = get_custom_lexer(filenode.extension) or filenode.lexer
    return lexer


def pygmentize(filenode, **kwargs):
    """
    pygmentize function using pygments

    :param filenode:
    """
    lexer = get_lexer_for_filenode(filenode)
    return literal(code_highlight(filenode.content, lexer,
                                  CodeHtmlFormatter(**kwargs)))


def is_following_repo(repo_name, user_id):
    from rhodecode.model.scm import ScmModel
    return ScmModel().is_following_repo(repo_name, user_id)


class _Message(object):
    """A message returned by ``Flash.pop_messages()``.

    Converting the message to a string returns the message text. Instances
    also have the following attributes:

    * ``message``: the message text.
    * ``category``: the category specified when the message was created.
    """

    def __init__(self, category, message):
        self.category = category
        self.message = message

    def __str__(self):
        return self.message

    __unicode__ = __str__

    def __html__(self):
        return escape(safe_unicode(self.message))


class Flash(_Flash):

    def pop_messages(self):
        """Return all accumulated messages and delete them from the session.

        The return value is a list of ``Message`` objects.
        """
        from pylons import session

        messages = []

        # Pop the 'old' pylons flash messages. They are tuples of the form
        # (category, message)
        for cat, msg in session.pop(self.session_key, []):
            messages.append(_Message(cat, msg))

        # Pop the 'new' pyramid flash messages for each category as list
        # of strings.
        for cat in self.categories:
            for msg in session.pop_flash(queue=cat):
                messages.append(_Message(cat, msg))
        # Map messages from the default queue to the 'notice' category.
        for msg in session.pop_flash():
            messages.append(_Message('notice', msg))

        session.save()
        return messages

    def json_alerts(self):
        payloads = []
        messages = flash.pop_messages()
        if messages:
            for message in messages:
                subdata = {}
                if hasattr(message.message, 'rsplit'):
                    flash_data = message.message.rsplit('|DELIM|', 1)
                    org_message = flash_data[0]
                    if len(flash_data) > 1:
                        subdata = json.loads(flash_data[1])
                else:
                    org_message = message.message
                payloads.append({
                    'message': {
                        'message': u'{}'.format(org_message),
                        'level': message.category,
                        'force': True,
                        'subdata': subdata
                    }
                })
        return json.dumps(payloads)

flash = Flash()

#==============================================================================
# SCM FILTERS available via h.
#==============================================================================
from rhodecode.lib.vcs.utils import author_name, author_email
from rhodecode.lib.utils2 import credentials_filter, age as _age
from rhodecode.model.db import User, ChangesetStatus

age = _age
capitalize = lambda x: x.capitalize()
email = author_email
short_id = lambda x: x[:12]
hide_credentials = lambda x: ''.join(credentials_filter(x))


def age_component(datetime_iso, value=None, time_is_local=False):
    title = value or format_date(datetime_iso)
    tzinfo = '+00:00'

    # detect if we have a timezone info, otherwise, add it
    if isinstance(datetime_iso, datetime) and not datetime_iso.tzinfo:
        if time_is_local:
            tzinfo = time.strftime("+%H:%M",
                time.gmtime(
                    (datetime.now() - datetime.utcnow()).seconds + 1
                )
            )

    return literal(
        '<time class="timeago tooltip" '
        'title="{1}{2}" datetime="{0}{2}">{1}</time>'.format(
            datetime_iso, title, tzinfo))
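
# Illustrative output (added note, not part of the original module): for a
# naive UTC datetime the helper renders roughly
#
#     <time class="timeago tooltip" title="Sun, 01 Jan 2017 00:00:00+00:00"
#           datetime="2017-01-01 00:00:00+00:00">Sun, 01 Jan 2017 00:00:00</time>
#
# which client-side code can then turn into a relative "age" display.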


def _shorten_commit_id(commit_id):
    from rhodecode import CONFIG
    def_len = safe_int(CONFIG.get('rhodecode_show_sha_length', 12))
    return commit_id[:def_len]


def show_id(commit):
    """
    Configurable function that shows ID
    by default it's r123:fffeeefffeee

    :param commit: commit instance
    """
    from rhodecode import CONFIG
    show_idx = str2bool(CONFIG.get('rhodecode_show_revision_number', True))

    raw_id = _shorten_commit_id(commit.raw_id)
    if show_idx:
        return 'r%s:%s' % (commit.idx, raw_id)
    else:
        return '%s' % (raw_id, )


def format_date(date):
    """
    use a standardized formatting for dates used in RhodeCode

    :param date: date/datetime object
    :return: formatted date
    """

    if date:
        _fmt = "%a, %d %b %Y %H:%M:%S"
        return safe_unicode(date.strftime(_fmt))

    return u""


class _RepoChecker(object):

    def __init__(self, backend_alias):
        self._backend_alias = backend_alias

    def __call__(self, repository):
        if hasattr(repository, 'alias'):
            _type = repository.alias
        elif hasattr(repository, 'repo_type'):
            _type = repository.repo_type
        else:
            _type = repository
        return _type == self._backend_alias

is_git = _RepoChecker('git')
is_hg = _RepoChecker('hg')
is_svn = _RepoChecker('svn')


def get_repo_type_by_name(repo_name):
    repo = Repository.get_by_repo_name(repo_name)
    return repo.repo_type


def is_svn_without_proxy(repository):
    if is_svn(repository):
        from rhodecode.model.settings import VcsSettingsModel
        conf = VcsSettingsModel().get_ui_settings_as_config_obj()
        return not str2bool(conf.get('vcs_svn_proxy', 'http_requests_enabled'))
    return False


def discover_user(author):
    """
    Tries to discover RhodeCode User based on the author string. Author string
    is typically `FirstName LastName <email@address.com>`
    """

    # if author is already an instance use it for extraction
    if isinstance(author, User):
        return author

    # Valid email in the attribute passed, see if they're in the system
    _email = author_email(author)
    if _email != '':
        user = User.get_by_email(_email, case_insensitive=True, cache=True)
        if user is not None:
            return user

    # Maybe it's a username, we try to extract it and fetch by username ?
    _author = author_name(author)
    user = User.get_by_username(_author, case_insensitive=True, cache=True)
    if user is not None:
        return user

    return None


def email_or_none(author):
    # extract email from the commit string
    _email = author_email(author)

    # If we have an email, use it, otherwise
    # see if it contains a username we can get an email from
    if _email != '':
        return _email
    else:
        user = User.get_by_username(
            author_name(author), case_insensitive=True, cache=True)

        if user is not None:
            return user.email

    # No valid email, not a valid user in the system, none!
    return None


def link_to_user(author, length=0, **kwargs):
    user = discover_user(author)
    # user can be None, but if we have it already it means we can re-use it
    # in the person() function, so we save 1 intensive-query
    if user:
        author = user

    display_person = person(author, 'username_or_name_or_email')
    if length:
        display_person = shorter(display_person, length)

    if user:
        return link_to(
            escape(display_person),
            url('user_profile', username=user.username),
            **kwargs)
    else:
        return escape(display_person)


def person(author, show_attr="username_and_name"):
    user = discover_user(author)
    if user:
        return getattr(user, show_attr)
    else:
        _author = author_name(author)
        _email = email(author)
        return _author or _email


def author_string(email):
    if email:
        user = User.get_by_email(email, case_insensitive=True, cache=True)
        if user:
            if user.firstname or user.lastname:
                return '%s %s &lt;%s&gt;' % (user.firstname, user.lastname, email)
            else:
                return email
        else:
            return email
    else:
        return None


def person_by_id(id_, show_attr="username_and_name"):
    # attr to return from fetched user
    person_getter = lambda usr: getattr(usr, show_attr)

    # maybe it's an ID ?
    if str(id_).isdigit() or isinstance(id_, int):
        id_ = int(id_)
        user = User.get(id_)
        if user is not None:
            return person_getter(user)
    return id_


def gravatar_with_user(author, show_disabled=False):
    from rhodecode.lib.utils import PartialRenderer
    _render = PartialRenderer('base/base.mako')
    return _render('gravatar_with_user', author, show_disabled=show_disabled)


def desc_stylize(value):
    """
    converts tags from value into html equivalent

    :param value:
    """
    if not value:
        return ''

    value = re.sub(r'\[see\ \=\>\ *([a-zA-Z0-9\/\=\?\&\ \:\/\.\-]*)\]',
                   '<div class="metatag" tag="see">see =&gt; \\1 </div>', value)
    value = re.sub(r'\[license\ \=\>\ *([a-zA-Z0-9\/\=\?\&\ \:\/\.\-]*)\]',
                   '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/\\1">\\1</a></div>', value)
    value = re.sub(r'\[(requires|recommends|conflicts|base)\ \=\>\ *([a-zA-Z0-9\-\/]*)\]',
                   '<div class="metatag" tag="\\1">\\1 =&gt; <a href="/\\2">\\2</a></div>', value)
    value = re.sub(r'\[(lang|language)\ \=\>\ *([a-zA-Z\-\/\#\+]*)\]',
                   '<div class="metatag" tag="lang">\\2</div>', value)
    value = re.sub(r'\[([a-z]+)\]',
                   '<div class="metatag" tag="\\1">\\1</div>', value)

    return value
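
# Illustrative transformation (added example, not part of the original
# module): a repository description such as
#
#     '[lang => python] [stable] my repo'
#
# becomes
#
#     '<div class="metatag" tag="lang">python</div> '
#     '<div class="metatag" tag="stable">stable</div> my repo'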
943
944
944
945
945 def escaped_stylize(value):
946 def escaped_stylize(value):
946 """
947 """
947 converts tags from value into html equivalent, but escaping its value first
948 converts tags from value into html equivalent, but escaping its value first
948 """
949 """
949 if not value:
950 if not value:
950 return ''
951 return ''
951
952
952 # Using default webhelper escape method, but has to force it as a
953 # Using default webhelper escape method, but has to force it as a
953 # plain unicode instead of a markup tag to be used in regex expressions
954 # plain unicode instead of a markup tag to be used in regex expressions
954 value = unicode(escape(safe_unicode(value)))
955 value = unicode(escape(safe_unicode(value)))
955
956
956 value = re.sub(r'\[see\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]',
957 value = re.sub(r'\[see\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]',
957 '<div class="metatag" tag="see">see =&gt; \\1 </div>', value)
958 '<div class="metatag" tag="see">see =&gt; \\1 </div>', value)
958 value = re.sub(r'\[license\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]',
959 value = re.sub(r'\[license\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]',
959 '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/\\1">\\1</a></div>', value)
960 '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/\\1">\\1</a></div>', value)
960 value = re.sub(r'\[(requires|recommends|conflicts|base)\ \=\&gt;\ *([a-zA-Z0-9\-\/]*)\]',
961 value = re.sub(r'\[(requires|recommends|conflicts|base)\ \=\&gt;\ *([a-zA-Z0-9\-\/]*)\]',
961 '<div class="metatag" tag="\\1">\\1 =&gt; <a href="/\\2">\\2</a></div>', value)
962 '<div class="metatag" tag="\\1">\\1 =&gt; <a href="/\\2">\\2</a></div>', value)
962 value = re.sub(r'\[(lang|language)\ \=\&gt;\ *([a-zA-Z\-\/\#\+]*)\]',
963 value = re.sub(r'\[(lang|language)\ \=\&gt;\ *([a-zA-Z\-\/\#\+]*)\]',
963 '<div class="metatag" tag="lang">\\2</div>', value)
964 '<div class="metatag" tag="lang">\\2</div>', value)
964 value = re.sub(r'\[([a-z]+)\]',
965 value = re.sub(r'\[([a-z]+)\]',
965 '<div class="metatag" tag="\\1">\\1</div>', value)
966 '<div class="metatag" tag="\\1">\\1</div>', value)
966
967
967 return value
968 return value
968
969
969
970
def bool2icon(value):
    """
    Returns an HTML <i> element whose icon class reflects the boolean
    interpretation of the given value.

    :param value: value to convert to an html node
    """

    if value:  # does bool conversion
        return HTML.tag('i', class_="icon-true")
    else:  # not true as bool
        return HTML.tag('i', class_="icon-false")
982
983
983
984
984 #==============================================================================
985 #==============================================================================
985 # PERMS
986 # PERMS
986 #==============================================================================
987 #==============================================================================
987 from rhodecode.lib.auth import HasPermissionAny, HasPermissionAll, \
988 from rhodecode.lib.auth import HasPermissionAny, HasPermissionAll, \
988 HasRepoPermissionAny, HasRepoPermissionAll, HasRepoGroupPermissionAll, \
989 HasRepoPermissionAny, HasRepoPermissionAll, HasRepoGroupPermissionAll, \
989 HasRepoGroupPermissionAny, HasRepoPermissionAnyApi, get_csrf_token, \
990 HasRepoGroupPermissionAny, HasRepoPermissionAnyApi, get_csrf_token, \
990 csrf_token_key
991 csrf_token_key
991
992
992
993
993 #==============================================================================
994 #==============================================================================
994 # GRAVATAR URL
995 # GRAVATAR URL
995 #==============================================================================
996 #==============================================================================
996 class InitialsGravatar(object):
997 class InitialsGravatar(object):
997 def __init__(self, email_address, first_name, last_name, size=30,
998 def __init__(self, email_address, first_name, last_name, size=30,
998 background=None, text_color='#fff'):
999 background=None, text_color='#fff'):
999 self.size = size
1000 self.size = size
1000 self.first_name = first_name
1001 self.first_name = first_name
1001 self.last_name = last_name
1002 self.last_name = last_name
1002 self.email_address = email_address
1003 self.email_address = email_address
1003 self.background = background or self.str2color(email_address)
1004 self.background = background or self.str2color(email_address)
1004 self.text_color = text_color
1005 self.text_color = text_color
1005
1006
1006 def get_color_bank(self):
1007 def get_color_bank(self):
1007 """
1008 """
1008 returns a predefined list of colors that gravatars can use.
1009 returns a predefined list of colors that gravatars can use.
1009 Those are randomized distinct colors that guarantee readability and
1010 Those are randomized distinct colors that guarantee readability and
1010 uniqueness.
1011 uniqueness.
1011
1012
1012 generated with: http://phrogz.net/css/distinct-colors.html
1013 generated with: http://phrogz.net/css/distinct-colors.html
1013 """
1014 """
1014 return [
1015 return [
1015 '#bf3030', '#a67f53', '#00ff00', '#5989b3', '#392040', '#d90000',
1016 '#bf3030', '#a67f53', '#00ff00', '#5989b3', '#392040', '#d90000',
1016 '#402910', '#204020', '#79baf2', '#a700b3', '#bf6060', '#7f5320',
1017 '#402910', '#204020', '#79baf2', '#a700b3', '#bf6060', '#7f5320',
1017 '#008000', '#003059', '#ee00ff', '#ff0000', '#8c4b00', '#007300',
1018 '#008000', '#003059', '#ee00ff', '#ff0000', '#8c4b00', '#007300',
1018 '#005fb3', '#de73e6', '#ff4040', '#ffaa00', '#3df255', '#203140',
1019 '#005fb3', '#de73e6', '#ff4040', '#ffaa00', '#3df255', '#203140',
1019 '#47004d', '#591616', '#664400', '#59b365', '#0d2133', '#83008c',
1020 '#47004d', '#591616', '#664400', '#59b365', '#0d2133', '#83008c',
1020 '#592d2d', '#bf9f60', '#73e682', '#1d3f73', '#73006b', '#402020',
1021 '#592d2d', '#bf9f60', '#73e682', '#1d3f73', '#73006b', '#402020',
1021 '#b2862d', '#397341', '#597db3', '#e600d6', '#a60000', '#736039',
1022 '#b2862d', '#397341', '#597db3', '#e600d6', '#a60000', '#736039',
1022 '#00b318', '#79aaf2', '#330d30', '#ff8080', '#403010', '#16591f',
1023 '#00b318', '#79aaf2', '#330d30', '#ff8080', '#403010', '#16591f',
1023 '#002459', '#8c4688', '#e50000', '#ffbf40', '#00732e', '#102340',
1024 '#002459', '#8c4688', '#e50000', '#ffbf40', '#00732e', '#102340',
1024 '#bf60ac', '#8c4646', '#cc8800', '#00a642', '#1d3473', '#b32d98',
1025 '#bf60ac', '#8c4646', '#cc8800', '#00a642', '#1d3473', '#b32d98',
1025 '#660e00', '#ffd580', '#80ffb2', '#7391e6', '#733967', '#d97b6c',
1026 '#660e00', '#ffd580', '#80ffb2', '#7391e6', '#733967', '#d97b6c',
1026 '#8c5e00', '#59b389', '#3967e6', '#590047', '#73281d', '#665200',
1027 '#8c5e00', '#59b389', '#3967e6', '#590047', '#73281d', '#665200',
1027 '#00e67a', '#2d50b3', '#8c2377', '#734139', '#b2982d', '#16593a',
1028 '#00e67a', '#2d50b3', '#8c2377', '#734139', '#b2982d', '#16593a',
1028 '#001859', '#ff00aa', '#a65e53', '#ffcc00', '#0d3321', '#2d3959',
1029 '#001859', '#ff00aa', '#a65e53', '#ffcc00', '#0d3321', '#2d3959',
1029 '#731d56', '#401610', '#4c3d00', '#468c6c', '#002ca6', '#d936a3',
1030 '#731d56', '#401610', '#4c3d00', '#468c6c', '#002ca6', '#d936a3',
1030 '#d94c36', '#403920', '#36d9a3', '#0d1733', '#592d4a', '#993626',
1031 '#d94c36', '#403920', '#36d9a3', '#0d1733', '#592d4a', '#993626',
1031 '#cca300', '#00734d', '#46598c', '#8c005e', '#7f1100', '#8c7000',
1032 '#cca300', '#00734d', '#46598c', '#8c005e', '#7f1100', '#8c7000',
1032 '#00a66f', '#7382e6', '#b32d74', '#d9896c', '#ffe680', '#1d7362',
1033 '#00a66f', '#7382e6', '#b32d74', '#d9896c', '#ffe680', '#1d7362',
1033 '#364cd9', '#73003d', '#d93a00', '#998a4d', '#59b3a1', '#5965b3',
1034 '#364cd9', '#73003d', '#d93a00', '#998a4d', '#59b3a1', '#5965b3',
1034 '#e5007a', '#73341d', '#665f00', '#00b38f', '#0018b3', '#59163a',
1035 '#e5007a', '#73341d', '#665f00', '#00b38f', '#0018b3', '#59163a',
1035 '#b2502d', '#bfb960', '#00ffcc', '#23318c', '#a6537f', '#734939',
1036 '#b2502d', '#bfb960', '#00ffcc', '#23318c', '#a6537f', '#734939',
1036 '#b2a700', '#104036', '#3d3df2', '#402031', '#e56739', '#736f39',
1037 '#b2a700', '#104036', '#3d3df2', '#402031', '#e56739', '#736f39',
1037 '#79f2ea', '#000059', '#401029', '#4c1400', '#ffee00', '#005953',
1038 '#79f2ea', '#000059', '#401029', '#4c1400', '#ffee00', '#005953',
1038 '#101040', '#990052', '#402820', '#403d10', '#00ffee', '#0000d9',
1039 '#101040', '#990052', '#402820', '#403d10', '#00ffee', '#0000d9',
1039 '#ff80c4', '#a66953', '#eeff00', '#00ccbe', '#8080ff', '#e673a1',
1040 '#ff80c4', '#a66953', '#eeff00', '#00ccbe', '#8080ff', '#e673a1',
1040 '#a62c00', '#474d00', '#1a3331', '#46468c', '#733950', '#662900',
1041 '#a62c00', '#474d00', '#1a3331', '#46468c', '#733950', '#662900',
1041 '#858c23', '#238c85', '#0f0073', '#b20047', '#d9986c', '#becc00',
1042 '#858c23', '#238c85', '#0f0073', '#b20047', '#d9986c', '#becc00',
1042 '#396f73', '#281d73', '#ff0066', '#ff6600', '#dee673', '#59adb3',
1043 '#396f73', '#281d73', '#ff0066', '#ff6600', '#dee673', '#59adb3',
1043 '#6559b3', '#590024', '#b2622d', '#98b32d', '#36ced9', '#332d59',
1044 '#6559b3', '#590024', '#b2622d', '#98b32d', '#36ced9', '#332d59',
1044 '#40001a', '#733f1d', '#526600', '#005359', '#242040', '#bf6079',
1045 '#40001a', '#733f1d', '#526600', '#005359', '#242040', '#bf6079',
1045 '#735039', '#cef23d', '#007780', '#5630bf', '#66001b', '#b24700',
1046 '#735039', '#cef23d', '#007780', '#5630bf', '#66001b', '#b24700',
1046 '#acbf60', '#1d6273', '#25008c', '#731d34', '#a67453', '#50592d',
1047 '#acbf60', '#1d6273', '#25008c', '#731d34', '#a67453', '#50592d',
1047 '#00ccff', '#6600ff', '#ff0044', '#4c1f00', '#8a994d', '#79daf2',
1048 '#00ccff', '#6600ff', '#ff0044', '#4c1f00', '#8a994d', '#79daf2',
1048 '#a173e6', '#d93662', '#402310', '#aaff00', '#2d98b3', '#8c40ff',
1049 '#a173e6', '#d93662', '#402310', '#aaff00', '#2d98b3', '#8c40ff',
1049 '#592d39', '#ff8c40', '#354020', '#103640', '#1a0040', '#331a20',
1050 '#592d39', '#ff8c40', '#354020', '#103640', '#1a0040', '#331a20',
1050 '#331400', '#334d00', '#1d5673', '#583973', '#7f0022', '#4c3626',
1051 '#331400', '#334d00', '#1d5673', '#583973', '#7f0022', '#4c3626',
1051 '#88cc00', '#36a3d9', '#3d0073', '#d9364c', '#33241a', '#698c23',
1052 '#88cc00', '#36a3d9', '#3d0073', '#d9364c', '#33241a', '#698c23',
1052 '#5995b3', '#300059', '#e57382', '#7f3300', '#366600', '#00aaff',
1053 '#5995b3', '#300059', '#e57382', '#7f3300', '#366600', '#00aaff',
1053 '#3a1659', '#733941', '#663600', '#74b32d', '#003c59', '#7f53a6',
1054 '#3a1659', '#733941', '#663600', '#74b32d', '#003c59', '#7f53a6',
1054 '#73000f', '#ff8800', '#baf279', '#79caf2', '#291040', '#a6293a',
1055 '#73000f', '#ff8800', '#baf279', '#79caf2', '#291040', '#a6293a',
1055 '#b2742d', '#587339', '#0077b3', '#632699', '#400009', '#d9a66c',
1056 '#b2742d', '#587339', '#0077b3', '#632699', '#400009', '#d9a66c',
1056 '#294010', '#2d4a59', '#aa00ff', '#4c131b', '#b25f00', '#5ce600',
1057 '#294010', '#2d4a59', '#aa00ff', '#4c131b', '#b25f00', '#5ce600',
1057 '#267399', '#a336d9', '#990014', '#664e33', '#86bf60', '#0088ff',
1058 '#267399', '#a336d9', '#990014', '#664e33', '#86bf60', '#0088ff',
1058 '#7700b3', '#593a16', '#073300', '#1d4b73', '#ac60bf', '#e59539',
1059 '#7700b3', '#593a16', '#073300', '#1d4b73', '#ac60bf', '#e59539',
1059 '#4f8c46', '#368dd9', '#5c0073'
1060 '#4f8c46', '#368dd9', '#5c0073'
1060 ]
1061 ]
1061
1062
    def rgb_to_hex_color(self, rgb_tuple):
        """
        Converts the given rgb_tuple to a hex color string.

        :param rgb_tuple: tuple of 3 ints representing an rgb color
        """
        return '#' + ("".join(map(chr, rgb_tuple)).encode('hex'))
1069
1070
    def email_to_int_list(self, email_str):
        """
        Takes every byte of the md5 hex digest of the email and turns it
        into an integer. Each value is always between 0-255.
        """
        digest = md5_safe(email_str.lower())
        return [int(digest[i * 2:i * 2 + 2], 16) for i in range(16)]
1077
1078
1078 def pick_color_bank_index(self, email_str, color_bank):
1079 def pick_color_bank_index(self, email_str, color_bank):
1079 return self.email_to_int_list(email_str)[0] % len(color_bank)
1080 return self.email_to_int_list(email_str)[0] % len(color_bank)
1080
1081
    def str2color(self, email_str):
        """
        Maps an email to a color using a stable algorithm.

        :param email_str:
        """
        color_bank = self.get_color_bank()
        # pick a position (modulo the bank's length, so we always find a
        # color even if the bank holds fewer than 256 values)
        pos = self.pick_color_bank_index(email_str, color_bank)
        return color_bank[pos]
1092
1093
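    # Illustrative sketch of the lookup above: the first md5 byte of the
    # email, modulo the bank size, selects the color, e.g.
    #
    #   bank = self.get_color_bank()
    #   bank[self.email_to_int_list('john@example.com')[0] % len(bank)]
    #
    # which is exactly what str2color('john@example.com') returns.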
1093 def normalize_email(self, email_address):
1094 def normalize_email(self, email_address):
1094 import unicodedata
1095 import unicodedata
1095 # default host used to fill in the fake/missing email
1096 # default host used to fill in the fake/missing email
1096 default_host = u'localhost'
1097 default_host = u'localhost'
1097
1098
1098 if not email_address:
1099 if not email_address:
1099 email_address = u'%s@%s' % (User.DEFAULT_USER, default_host)
1100 email_address = u'%s@%s' % (User.DEFAULT_USER, default_host)
1100
1101
1101 email_address = safe_unicode(email_address)
1102 email_address = safe_unicode(email_address)
1102
1103
1103 if u'@' not in email_address:
1104 if u'@' not in email_address:
1104 email_address = u'%s@%s' % (email_address, default_host)
1105 email_address = u'%s@%s' % (email_address, default_host)
1105
1106
1106 if email_address.endswith(u'@'):
1107 if email_address.endswith(u'@'):
1107 email_address = u'%s%s' % (email_address, default_host)
1108 email_address = u'%s%s' % (email_address, default_host)
1108
1109
1109 email_address = unicodedata.normalize('NFKD', email_address)\
1110 email_address = unicodedata.normalize('NFKD', email_address)\
1110 .encode('ascii', 'ignore')
1111 .encode('ascii', 'ignore')
1111 return email_address
1112 return email_address
1112
1113
    def get_initials(self):
        """
        Returns 2 letter initials calculated based on the input.
        The algorithm picks the first given email address, and takes the
        first letter of the part before @, and then the first letter of the
        server name. In case the part before @ is in the format
        `somestring.somestring2`, it replaces the server letter with the
        first letter of somestring2.

        In case the object was initialized with both first and last name,
        this overrides the extraction from email, using the first letters of
        the first and last name instead. There is special logic for compound
        full names like Guido Von Rossum: we use the last part of the last
        name (Von Rossum), picking `R`.

        The function also normalizes non-ascii characters to their ascii
        representation, eg Ą => A
        """
1130 import unicodedata
1131 import unicodedata
1131 # replace non-ascii to ascii
1132 # replace non-ascii to ascii
1132 first_name = unicodedata.normalize(
1133 first_name = unicodedata.normalize(
1133 'NFKD', safe_unicode(self.first_name)).encode('ascii', 'ignore')
1134 'NFKD', safe_unicode(self.first_name)).encode('ascii', 'ignore')
1134 last_name = unicodedata.normalize(
1135 last_name = unicodedata.normalize(
1135 'NFKD', safe_unicode(self.last_name)).encode('ascii', 'ignore')
1136 'NFKD', safe_unicode(self.last_name)).encode('ascii', 'ignore')
1136
1137
1137 # do NFKD encoding, and also make sure email has proper format
1138 # do NFKD encoding, and also make sure email has proper format
1138 email_address = self.normalize_email(self.email_address)
1139 email_address = self.normalize_email(self.email_address)
1139
1140
1140 # first push the email initials
1141 # first push the email initials
1141 prefix, server = email_address.split('@', 1)
1142 prefix, server = email_address.split('@', 1)
1142
1143
1143 # check if prefix is maybe a 'firstname.lastname' syntax
1144 # check if prefix is maybe a 'firstname.lastname' syntax
1144 _dot_split = prefix.rsplit('.', 1)
1145 _dot_split = prefix.rsplit('.', 1)
1145 if len(_dot_split) == 2:
1146 if len(_dot_split) == 2:
1146 initials = [_dot_split[0][0], _dot_split[1][0]]
1147 initials = [_dot_split[0][0], _dot_split[1][0]]
1147 else:
1148 else:
1148 initials = [prefix[0], server[0]]
1149 initials = [prefix[0], server[0]]
1149
1150
        # then try to replace either firstname or lastname
1151 fn_letter = (first_name or " ")[0].strip()
1152 fn_letter = (first_name or " ")[0].strip()
1152 ln_letter = (last_name.split(' ', 1)[-1] or " ")[0].strip()
1153 ln_letter = (last_name.split(' ', 1)[-1] or " ")[0].strip()
1153
1154
1154 if fn_letter:
1155 if fn_letter:
1155 initials[0] = fn_letter
1156 initials[0] = fn_letter
1156
1157
1157 if ln_letter:
1158 if ln_letter:
1158 initials[1] = ln_letter
1159 initials[1] = ln_letter
1159
1160
1160 return ''.join(initials).upper()
1161 return ''.join(initials).upper()
1161
1162
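    # Illustrative examples of the initials logic above:
    #
    #   InitialsGravatar('john.doe@example.com', '', '').get_initials()  # 'JD'
    #   InitialsGravatar('john@example.com', '', '').get_initials()      # 'JE'
    #   InitialsGravatar('x@example.com', 'Guido', 'Von Rossum').get_initials()  # 'GR'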
1162 def get_img_data_by_type(self, font_family, img_type):
1163 def get_img_data_by_type(self, font_family, img_type):
1163 default_user = """
1164 default_user = """
1164 <svg xmlns="http://www.w3.org/2000/svg"
1165 <svg xmlns="http://www.w3.org/2000/svg"
1165 version="1.1" x="0px" y="0px" width="{size}" height="{size}"
1166 version="1.1" x="0px" y="0px" width="{size}" height="{size}"
1166 viewBox="-15 -10 439.165 429.164"
1167 viewBox="-15 -10 439.165 429.164"
1167
1168
1168 xml:space="preserve"
1169 xml:space="preserve"
1169 style="background:{background};" >
1170 style="background:{background};" >
1170
1171
1171 <path d="M204.583,216.671c50.664,0,91.74-48.075,
1172 <path d="M204.583,216.671c50.664,0,91.74-48.075,
1172 91.74-107.378c0-82.237-41.074-107.377-91.74-107.377
1173 91.74-107.378c0-82.237-41.074-107.377-91.74-107.377
1173 c-50.668,0-91.74,25.14-91.74,107.377C112.844,
1174 c-50.668,0-91.74,25.14-91.74,107.377C112.844,
1174 168.596,153.916,216.671,
1175 168.596,153.916,216.671,
1175 204.583,216.671z" fill="{text_color}"/>
1176 204.583,216.671z" fill="{text_color}"/>
1176 <path d="M407.164,374.717L360.88,
1177 <path d="M407.164,374.717L360.88,
1177 270.454c-2.117-4.771-5.836-8.728-10.465-11.138l-71.83-37.392
1178 270.454c-2.117-4.771-5.836-8.728-10.465-11.138l-71.83-37.392
1178 c-1.584-0.823-3.502-0.663-4.926,0.415c-20.316,
1179 c-1.584-0.823-3.502-0.663-4.926,0.415c-20.316,
1179 15.366-44.203,23.488-69.076,23.488c-24.877,
1180 15.366-44.203,23.488-69.076,23.488c-24.877,
1180 0-48.762-8.122-69.078-23.488
1181 0-48.762-8.122-69.078-23.488
1181 c-1.428-1.078-3.346-1.238-4.93-0.415L58.75,
1182 c-1.428-1.078-3.346-1.238-4.93-0.415L58.75,
1182 259.316c-4.631,2.41-8.346,6.365-10.465,11.138L2.001,374.717
1183 259.316c-4.631,2.41-8.346,6.365-10.465,11.138L2.001,374.717
1183 c-3.191,7.188-2.537,15.412,1.75,22.005c4.285,
1184 c-3.191,7.188-2.537,15.412,1.75,22.005c4.285,
1184 6.592,11.537,10.526,19.4,10.526h362.861c7.863,0,15.117-3.936,
1185 6.592,11.537,10.526,19.4,10.526h362.861c7.863,0,15.117-3.936,
1185 19.402-10.527 C409.699,390.129,
1186 19.402-10.527 C409.699,390.129,
1186 410.355,381.902,407.164,374.717z" fill="{text_color}"/>
1187 410.355,381.902,407.164,374.717z" fill="{text_color}"/>
1187 </svg>""".format(
1188 </svg>""".format(
1188 size=self.size,
1189 size=self.size,
1189 background='#979797', # @grey4
1190 background='#979797', # @grey4
1190 text_color=self.text_color,
1191 text_color=self.text_color,
1191 font_family=font_family)
1192 font_family=font_family)
1192
1193
1193 return {
1194 return {
1194 "default_user": default_user
1195 "default_user": default_user
1195 }[img_type]
1196 }[img_type]
1196
1197
1197 def get_img_data(self, svg_type=None):
1198 def get_img_data(self, svg_type=None):
1198 """
1199 """
1199 generates the svg metadata for image
1200 generates the svg metadata for image
1200 """
1201 """
1201
1202
1202 font_family = ','.join([
1203 font_family = ','.join([
1203 'proximanovaregular',
1204 'proximanovaregular',
1204 'Proxima Nova Regular',
1205 'Proxima Nova Regular',
1205 'Proxima Nova',
1206 'Proxima Nova',
1206 'Arial',
1207 'Arial',
1207 'Lucida Grande',
1208 'Lucida Grande',
1208 'sans-serif'
1209 'sans-serif'
1209 ])
1210 ])
1210 if svg_type:
1211 if svg_type:
1211 return self.get_img_data_by_type(font_family, svg_type)
1212 return self.get_img_data_by_type(font_family, svg_type)
1212
1213
1213 initials = self.get_initials()
1214 initials = self.get_initials()
1214 img_data = """
1215 img_data = """
1215 <svg xmlns="http://www.w3.org/2000/svg" pointer-events="none"
1216 <svg xmlns="http://www.w3.org/2000/svg" pointer-events="none"
1216 width="{size}" height="{size}"
1217 width="{size}" height="{size}"
1217 style="width: 100%; height: 100%; background-color: {background}"
1218 style="width: 100%; height: 100%; background-color: {background}"
1218 viewBox="0 0 {size} {size}">
1219 viewBox="0 0 {size} {size}">
1219 <text text-anchor="middle" y="50%" x="50%" dy="0.35em"
1220 <text text-anchor="middle" y="50%" x="50%" dy="0.35em"
1220 pointer-events="auto" fill="{text_color}"
1221 pointer-events="auto" fill="{text_color}"
1221 font-family="{font_family}"
1222 font-family="{font_family}"
1222 style="font-weight: 400; font-size: {f_size}px;">{text}
1223 style="font-weight: 400; font-size: {f_size}px;">{text}
1223 </text>
1224 </text>
1224 </svg>""".format(
1225 </svg>""".format(
1225 size=self.size,
1226 size=self.size,
1226 f_size=self.size/1.85, # scale the text inside the box nicely
1227 f_size=self.size/1.85, # scale the text inside the box nicely
1227 background=self.background,
1228 background=self.background,
1228 text_color=self.text_color,
1229 text_color=self.text_color,
1229 text=initials.upper(),
1230 text=initials.upper(),
1230 font_family=font_family)
1231 font_family=font_family)
1231
1232
1232 return img_data
1233 return img_data
1233
1234
1234 def generate_svg(self, svg_type=None):
1235 def generate_svg(self, svg_type=None):
1235 img_data = self.get_img_data(svg_type)
1236 img_data = self.get_img_data(svg_type)
1236 return "data:image/svg+xml;base64,%s" % img_data.encode('base64')
1237 return "data:image/svg+xml;base64,%s" % img_data.encode('base64')
1237
1238
1238
1239
1239 def initials_gravatar(email_address, first_name, last_name, size=30):
1240 def initials_gravatar(email_address, first_name, last_name, size=30):
1240 svg_type = None
1241 svg_type = None
1241 if email_address == User.DEFAULT_USER_EMAIL:
1242 if email_address == User.DEFAULT_USER_EMAIL:
1242 svg_type = 'default_user'
1243 svg_type = 'default_user'
1243 klass = InitialsGravatar(email_address, first_name, last_name, size)
1244 klass = InitialsGravatar(email_address, first_name, last_name, size)
1244 return klass.generate_svg(svg_type=svg_type)
1245 return klass.generate_svg(svg_type=svg_type)
1245
1246
1246
1247
1247 def gravatar_url(email_address, size=30):
1248 def gravatar_url(email_address, size=30):
1248 # doh, we need to re-import those to mock it later
1249 # doh, we need to re-import those to mock it later
1249 from pylons import tmpl_context as c
1250 from pylons import tmpl_context as c
1250
1251
1251 _use_gravatar = c.visual.use_gravatar
1252 _use_gravatar = c.visual.use_gravatar
1252 _gravatar_url = c.visual.gravatar_url or User.DEFAULT_GRAVATAR_URL
1253 _gravatar_url = c.visual.gravatar_url or User.DEFAULT_GRAVATAR_URL
1253
1254
1254 email_address = email_address or User.DEFAULT_USER_EMAIL
1255 email_address = email_address or User.DEFAULT_USER_EMAIL
1255 if isinstance(email_address, unicode):
1256 if isinstance(email_address, unicode):
1256 # hashlib crashes on unicode items
1257 # hashlib crashes on unicode items
1257 email_address = safe_str(email_address)
1258 email_address = safe_str(email_address)
1258
1259
1259 # empty email or default user
1260 # empty email or default user
1260 if not email_address or email_address == User.DEFAULT_USER_EMAIL:
1261 if not email_address or email_address == User.DEFAULT_USER_EMAIL:
1261 return initials_gravatar(User.DEFAULT_USER_EMAIL, '', '', size=size)
1262 return initials_gravatar(User.DEFAULT_USER_EMAIL, '', '', size=size)
1262
1263
1263 if _use_gravatar:
1264 if _use_gravatar:
1264 # TODO: Disuse pyramid thread locals. Think about another solution to
1265 # TODO: Disuse pyramid thread locals. Think about another solution to
1265 # get the host and schema here.
1266 # get the host and schema here.
1266 request = get_current_request()
1267 request = get_current_request()
1267 tmpl = safe_str(_gravatar_url)
1268 tmpl = safe_str(_gravatar_url)
1268 tmpl = tmpl.replace('{email}', email_address)\
1269 tmpl = tmpl.replace('{email}', email_address)\
1269 .replace('{md5email}', md5_safe(email_address.lower())) \
1270 .replace('{md5email}', md5_safe(email_address.lower())) \
1270 .replace('{netloc}', request.host)\
1271 .replace('{netloc}', request.host)\
1271 .replace('{scheme}', request.scheme)\
1272 .replace('{scheme}', request.scheme)\
1272 .replace('{size}', safe_str(size))
1273 .replace('{size}', safe_str(size))
1273 return tmpl
1274 return tmpl
1274 else:
1275 else:
1275 return initials_gravatar(email_address, '', '', size=size)
1276 return initials_gravatar(email_address, '', '', size=size)
1276
1277
1277
1278
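# Illustrative example: with use_gravatar enabled and a template such as
# 'https://secure.gravatar.com/avatar/{md5email}?d=identicon&s={size}'
# (an assumed setting, not necessarily the configured one), gravatar_url()
# simply fills in the placeholders:
#
#   gravatar_url('john@example.com', size=64)
#   # -> 'https://secure.gravatar.com/avatar/<md5 of the email>?d=identicon&s=64'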
1278 class Page(_Page):
1279 class Page(_Page):
1279 """
1280 """
1280 Custom pager to match rendering style with paginator
1281 Custom pager to match rendering style with paginator
1281 """
1282 """
1282
1283
1283 def _get_pos(self, cur_page, max_page, items):
1284 def _get_pos(self, cur_page, max_page, items):
1284 edge = (items / 2) + 1
1285 edge = (items / 2) + 1
1285 if (cur_page <= edge):
1286 if (cur_page <= edge):
1286 radius = max(items / 2, items - cur_page)
1287 radius = max(items / 2, items - cur_page)
1287 elif (max_page - cur_page) < edge:
1288 elif (max_page - cur_page) < edge:
1288 radius = (items - 1) - (max_page - cur_page)
1289 radius = (items - 1) - (max_page - cur_page)
1289 else:
1290 else:
1290 radius = items / 2
1291 radius = items / 2
1291
1292
1292 left = max(1, (cur_page - (radius)))
1293 left = max(1, (cur_page - (radius)))
1293 right = min(max_page, cur_page + (radius))
1294 right = min(max_page, cur_page + (radius))
1294 return left, cur_page, right
1295 return left, cur_page, right
1295
1296
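    # Illustrative example of the window computed above: with a 7-slot pager
    # on page 10 of 20, self._get_pos(10, 20, 7) yields (7, 10, 13), i.e.
    # pages 7..13 are rendered around the current page.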
1296 def _range(self, regexp_match):
1297 def _range(self, regexp_match):
1297 """
1298 """
1298 Return range of linked pages (e.g. '1 2 [3] 4 5 6 7 8').
1299 Return range of linked pages (e.g. '1 2 [3] 4 5 6 7 8').
1299
1300
1300 Arguments:
1301 Arguments:
1301
1302
1302 regexp_match
1303 regexp_match
1303 A "re" (regular expressions) match object containing the
1304 A "re" (regular expressions) match object containing the
1304 radius of linked pages around the current page in
1305 radius of linked pages around the current page in
1305 regexp_match.group(1) as a string
1306 regexp_match.group(1) as a string
1306
1307
1307 This function is supposed to be called as a callable in
1308 This function is supposed to be called as a callable in
1308 re.sub.
1309 re.sub.
1309
1310
1310 """
1311 """
1311 radius = int(regexp_match.group(1))
1312 radius = int(regexp_match.group(1))
1312
1313
1313 # Compute the first and last page number within the radius
1314 # Compute the first and last page number within the radius
1314 # e.g. '1 .. 5 6 [7] 8 9 .. 12'
1315 # e.g. '1 .. 5 6 [7] 8 9 .. 12'
1315 # -> leftmost_page = 5
1316 # -> leftmost_page = 5
1316 # -> rightmost_page = 9
1317 # -> rightmost_page = 9
1317 leftmost_page, _cur, rightmost_page = self._get_pos(self.page,
1318 leftmost_page, _cur, rightmost_page = self._get_pos(self.page,
1318 self.last_page,
1319 self.last_page,
1319 (radius * 2) + 1)
1320 (radius * 2) + 1)
1320 nav_items = []
1321 nav_items = []
1321
1322
1322 # Create a link to the first page (unless we are on the first page
1323 # Create a link to the first page (unless we are on the first page
1323 # or there would be no need to insert '..' spacers)
1324 # or there would be no need to insert '..' spacers)
1324 if self.page != self.first_page and self.first_page < leftmost_page:
1325 if self.page != self.first_page and self.first_page < leftmost_page:
1325 nav_items.append(self._pagerlink(self.first_page, self.first_page))
1326 nav_items.append(self._pagerlink(self.first_page, self.first_page))
1326
1327
1327 # Insert dots if there are pages between the first page
1328 # Insert dots if there are pages between the first page
1328 # and the currently displayed page range
1329 # and the currently displayed page range
1329 if leftmost_page - self.first_page > 1:
1330 if leftmost_page - self.first_page > 1:
1330 # Wrap in a SPAN tag if nolink_attr is set
1331 # Wrap in a SPAN tag if nolink_attr is set
1331 text = '..'
1332 text = '..'
1332 if self.dotdot_attr:
1333 if self.dotdot_attr:
1333 text = HTML.span(c=text, **self.dotdot_attr)
1334 text = HTML.span(c=text, **self.dotdot_attr)
1334 nav_items.append(text)
1335 nav_items.append(text)
1335
1336
1336 for thispage in xrange(leftmost_page, rightmost_page + 1):
1337 for thispage in xrange(leftmost_page, rightmost_page + 1):
            # Highlight the current page number and do not use a link
1338 if thispage == self.page:
1339 if thispage == self.page:
1339 text = '%s' % (thispage,)
1340 text = '%s' % (thispage,)
1340 # Wrap in a SPAN tag if nolink_attr is set
1341 # Wrap in a SPAN tag if nolink_attr is set
1341 if self.curpage_attr:
1342 if self.curpage_attr:
1342 text = HTML.span(c=text, **self.curpage_attr)
1343 text = HTML.span(c=text, **self.curpage_attr)
1343 nav_items.append(text)
1344 nav_items.append(text)
1344 # Otherwise create just a link to that page
1345 # Otherwise create just a link to that page
1345 else:
1346 else:
1346 text = '%s' % (thispage,)
1347 text = '%s' % (thispage,)
1347 nav_items.append(self._pagerlink(thispage, text))
1348 nav_items.append(self._pagerlink(thispage, text))
1348
1349
1349 # Insert dots if there are pages between the displayed
1350 # Insert dots if there are pages between the displayed
1350 # page numbers and the end of the page range
1351 # page numbers and the end of the page range
1351 if self.last_page - rightmost_page > 1:
1352 if self.last_page - rightmost_page > 1:
1352 text = '..'
1353 text = '..'
1353 # Wrap in a SPAN tag if nolink_attr is set
1354 # Wrap in a SPAN tag if nolink_attr is set
1354 if self.dotdot_attr:
1355 if self.dotdot_attr:
1355 text = HTML.span(c=text, **self.dotdot_attr)
1356 text = HTML.span(c=text, **self.dotdot_attr)
1356 nav_items.append(text)
1357 nav_items.append(text)
1357
1358
1358 # Create a link to the very last page (unless we are on the last
1359 # Create a link to the very last page (unless we are on the last
1359 # page or there would be no need to insert '..' spacers)
1360 # page or there would be no need to insert '..' spacers)
1360 if self.page != self.last_page and rightmost_page < self.last_page:
1361 if self.page != self.last_page and rightmost_page < self.last_page:
1361 nav_items.append(self._pagerlink(self.last_page, self.last_page))
1362 nav_items.append(self._pagerlink(self.last_page, self.last_page))
1362
1363
1363 ## prerender links
1364 ## prerender links
1364 #_page_link = url.current()
1365 #_page_link = url.current()
1365 #nav_items.append(literal('<link rel="prerender" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
1366 #nav_items.append(literal('<link rel="prerender" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
1366 #nav_items.append(literal('<link rel="prefetch" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
1367 #nav_items.append(literal('<link rel="prefetch" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
1367 return self.separator.join(nav_items)
1368 return self.separator.join(nav_items)
1368
1369
1369 def pager(self, format='~2~', page_param='page', partial_param='partial',
1370 def pager(self, format='~2~', page_param='page', partial_param='partial',
1370 show_if_single_page=False, separator=' ', onclick=None,
1371 show_if_single_page=False, separator=' ', onclick=None,
1371 symbol_first='<<', symbol_last='>>',
1372 symbol_first='<<', symbol_last='>>',
1372 symbol_previous='<', symbol_next='>',
1373 symbol_previous='<', symbol_next='>',
1373 link_attr={'class': 'pager_link', 'rel': 'prerender'},
1374 link_attr={'class': 'pager_link', 'rel': 'prerender'},
1374 curpage_attr={'class': 'pager_curpage'},
1375 curpage_attr={'class': 'pager_curpage'},
1375 dotdot_attr={'class': 'pager_dotdot'}, **kwargs):
1376 dotdot_attr={'class': 'pager_dotdot'}, **kwargs):
1376
1377
1377 self.curpage_attr = curpage_attr
1378 self.curpage_attr = curpage_attr
1378 self.separator = separator
1379 self.separator = separator
1379 self.pager_kwargs = kwargs
1380 self.pager_kwargs = kwargs
1380 self.page_param = page_param
1381 self.page_param = page_param
1381 self.partial_param = partial_param
1382 self.partial_param = partial_param
1382 self.onclick = onclick
1383 self.onclick = onclick
1383 self.link_attr = link_attr
1384 self.link_attr = link_attr
1384 self.dotdot_attr = dotdot_attr
1385 self.dotdot_attr = dotdot_attr
1385
1386
1386 # Don't show navigator if there is no more than one page
1387 # Don't show navigator if there is no more than one page
1387 if self.page_count == 0 or (self.page_count == 1 and not show_if_single_page):
1388 if self.page_count == 0 or (self.page_count == 1 and not show_if_single_page):
1388 return ''
1389 return ''
1389
1390
1390 from string import Template
1391 from string import Template
1391 # Replace ~...~ in token format by range of pages
1392 # Replace ~...~ in token format by range of pages
1392 result = re.sub(r'~(\d+)~', self._range, format)
1393 result = re.sub(r'~(\d+)~', self._range, format)
1393
1394
1394 # Interpolate '%' variables
1395 # Interpolate '%' variables
1395 result = Template(result).safe_substitute({
1396 result = Template(result).safe_substitute({
1396 'first_page': self.first_page,
1397 'first_page': self.first_page,
1397 'last_page': self.last_page,
1398 'last_page': self.last_page,
1398 'page': self.page,
1399 'page': self.page,
1399 'page_count': self.page_count,
1400 'page_count': self.page_count,
1400 'items_per_page': self.items_per_page,
1401 'items_per_page': self.items_per_page,
1401 'first_item': self.first_item,
1402 'first_item': self.first_item,
1402 'last_item': self.last_item,
1403 'last_item': self.last_item,
1403 'item_count': self.item_count,
1404 'item_count': self.item_count,
1404 'link_first': self.page > self.first_page and \
1405 'link_first': self.page > self.first_page and \
1405 self._pagerlink(self.first_page, symbol_first) or '',
1406 self._pagerlink(self.first_page, symbol_first) or '',
1406 'link_last': self.page < self.last_page and \
1407 'link_last': self.page < self.last_page and \
1407 self._pagerlink(self.last_page, symbol_last) or '',
1408 self._pagerlink(self.last_page, symbol_last) or '',
1408 'link_previous': self.previous_page and \
1409 'link_previous': self.previous_page and \
1409 self._pagerlink(self.previous_page, symbol_previous) \
1410 self._pagerlink(self.previous_page, symbol_previous) \
1410 or HTML.span(symbol_previous, class_="pg-previous disabled"),
1411 or HTML.span(symbol_previous, class_="pg-previous disabled"),
1411 'link_next': self.next_page and \
1412 'link_next': self.next_page and \
1412 self._pagerlink(self.next_page, symbol_next) \
1413 self._pagerlink(self.next_page, symbol_next) \
1413 or HTML.span(symbol_next, class_="pg-next disabled")
1414 or HTML.span(symbol_next, class_="pg-next disabled")
1414 })
1415 })
1415
1416
1416 return literal(result)
1417 return literal(result)
1417
1418
1418
1419
1419 #==============================================================================
1420 #==============================================================================
1420 # REPO PAGER, PAGER FOR REPOSITORY
1421 # REPO PAGER, PAGER FOR REPOSITORY
1421 #==============================================================================
1422 #==============================================================================
1422 class RepoPage(Page):
1423 class RepoPage(Page):
1423
1424
1424 def __init__(self, collection, page=1, items_per_page=20,
1425 def __init__(self, collection, page=1, items_per_page=20,
1425 item_count=None, url=None, **kwargs):
1426 item_count=None, url=None, **kwargs):
1426
1427
1427 """Create a "RepoPage" instance. special pager for paging
1428 """Create a "RepoPage" instance. special pager for paging
1428 repository
1429 repository
1429 """
1430 """
1430 self._url_generator = url
1431 self._url_generator = url
1431
1432
        # Save the kwargs class-wide so they can be used in the pager() method
1433 self.kwargs = kwargs
1434 self.kwargs = kwargs
1434
1435
1435 # Save a reference to the collection
1436 # Save a reference to the collection
1436 self.original_collection = collection
1437 self.original_collection = collection
1437
1438
1438 self.collection = collection
1439 self.collection = collection
1439
1440
1440 # The self.page is the number of the current page.
1441 # The self.page is the number of the current page.
1441 # The first page has the number 1!
1442 # The first page has the number 1!
1442 try:
1443 try:
1443 self.page = int(page) # make it int() if we get it as a string
1444 self.page = int(page) # make it int() if we get it as a string
1444 except (ValueError, TypeError):
1445 except (ValueError, TypeError):
1445 self.page = 1
1446 self.page = 1
1446
1447
1447 self.items_per_page = items_per_page
1448 self.items_per_page = items_per_page
1448
1449
        # Unless the user tells us how many items the collection has
        # we calculate that ourselves.
1451 if item_count is not None:
1452 if item_count is not None:
1452 self.item_count = item_count
1453 self.item_count = item_count
1453 else:
1454 else:
1454 self.item_count = len(self.collection)
1455 self.item_count = len(self.collection)
1455
1456
1456 # Compute the number of the first and last available page
1457 # Compute the number of the first and last available page
1457 if self.item_count > 0:
1458 if self.item_count > 0:
1458 self.first_page = 1
1459 self.first_page = 1
1459 self.page_count = int(math.ceil(float(self.item_count) /
1460 self.page_count = int(math.ceil(float(self.item_count) /
1460 self.items_per_page))
1461 self.items_per_page))
1461 self.last_page = self.first_page + self.page_count - 1
1462 self.last_page = self.first_page + self.page_count - 1
1462
1463
            # Make sure that the requested page number is in the range of
            # valid pages
1465 if self.page > self.last_page:
1466 if self.page > self.last_page:
1466 self.page = self.last_page
1467 self.page = self.last_page
1467 elif self.page < self.first_page:
1468 elif self.page < self.first_page:
1468 self.page = self.first_page
1469 self.page = self.first_page
1469
1470
1470 # Note: the number of items on this page can be less than
1471 # Note: the number of items on this page can be less than
1471 # items_per_page if the last page is not full
1472 # items_per_page if the last page is not full
1472 self.first_item = max(0, (self.item_count) - (self.page *
1473 self.first_item = max(0, (self.item_count) - (self.page *
1473 items_per_page))
1474 items_per_page))
1474 self.last_item = ((self.item_count - 1) - items_per_page *
1475 self.last_item = ((self.item_count - 1) - items_per_page *
1475 (self.page - 1))
1476 (self.page - 1))
1476
1477
1477 self.items = list(self.collection[self.first_item:self.last_item + 1])
1478 self.items = list(self.collection[self.first_item:self.last_item + 1])
1478
1479
1479 # Links to previous and next page
1480 # Links to previous and next page
1480 if self.page > self.first_page:
1481 if self.page > self.first_page:
1481 self.previous_page = self.page - 1
1482 self.previous_page = self.page - 1
1482 else:
1483 else:
1483 self.previous_page = None
1484 self.previous_page = None
1484
1485
1485 if self.page < self.last_page:
1486 if self.page < self.last_page:
1486 self.next_page = self.page + 1
1487 self.next_page = self.page + 1
1487 else:
1488 else:
1488 self.next_page = None
1489 self.next_page = None
1489
1490
1490 # No items available
1491 # No items available
1491 else:
1492 else:
1492 self.first_page = None
1493 self.first_page = None
1493 self.page_count = 0
1494 self.page_count = 0
1494 self.last_page = None
1495 self.last_page = None
1495 self.first_item = None
1496 self.first_item = None
1496 self.last_item = None
1497 self.last_item = None
1497 self.previous_page = None
1498 self.previous_page = None
1498 self.next_page = None
1499 self.next_page = None
1499 self.items = []
1500 self.items = []
1500
1501
1501 # This is a subclass of the 'list' type. Initialise the list now.
1502 # This is a subclass of the 'list' type. Initialise the list now.
1502 list.__init__(self, reversed(self.items))
1503 list.__init__(self, reversed(self.items))
1503
1504
1504
1505
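# Illustrative example: RepoPage(commits, page=1, items_per_page=20) over a
# collection of 45 commits computes first_item=25 and last_item=44, slicing
# out 20 entries which are then reversed, so (for a collection ordered oldest
# to newest) the most recent commit is listed first.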
1505 def changed_tooltip(nodes):
1506 def changed_tooltip(nodes):
1506 """
1507 """
    Generates an HTML string for the changed nodes on a commit page.
    It limits the output to 30 entries.
1509
1510
1510 :param nodes: LazyNodesGenerator
1511 :param nodes: LazyNodesGenerator
1511 """
1512 """
1512 if nodes:
1513 if nodes:
1513 pref = ': <br/> '
1514 pref = ': <br/> '
1514 suf = ''
1515 suf = ''
1515 if len(nodes) > 30:
1516 if len(nodes) > 30:
1516 suf = '<br/>' + _(' and %s more') % (len(nodes) - 30)
1517 suf = '<br/>' + _(' and %s more') % (len(nodes) - 30)
1517 return literal(pref + '<br/> '.join([safe_unicode(x.path)
1518 return literal(pref + '<br/> '.join([safe_unicode(x.path)
1518 for x in nodes[:30]]) + suf)
1519 for x in nodes[:30]]) + suf)
1519 else:
1520 else:
1520 return ': ' + _('No Files')
1521 return ': ' + _('No Files')
1521
1522
1522
1523
1523 def breadcrumb_repo_link(repo):
1524 def breadcrumb_repo_link(repo):
1524 """
1525 """
1525 Makes a breadcrumbs path link to repo
1526 Makes a breadcrumbs path link to repo
1526
1527
1527 ex::
1528 ex::
1528 group >> subgroup >> repo
1529 group >> subgroup >> repo
1529
1530
1530 :param repo: a Repository instance
1531 :param repo: a Repository instance
1531 """
1532 """
1532
1533
1533 path = [
1534 path = [
1534 link_to(group.name, url('repo_group_home', group_name=group.group_name))
1535 link_to(group.name, url('repo_group_home', group_name=group.group_name))
1535 for group in repo.groups_with_parents
1536 for group in repo.groups_with_parents
1536 ] + [
1537 ] + [
1537 link_to(repo.just_name, url('summary_home', repo_name=repo.repo_name))
1538 link_to(repo.just_name, url('summary_home', repo_name=repo.repo_name))
1538 ]
1539 ]
1539
1540
1540 return literal(' &raquo; '.join(path))
1541 return literal(' &raquo; '.join(path))
1541
1542
1542
1543
1543 def format_byte_size_binary(file_size):
1544 def format_byte_size_binary(file_size):
1544 """
1545 """
    Formats file/folder sizes into a human-readable string using binary
    (1024-based) units.
1546 """
1547 """
1547 formatted_size = format_byte_size(file_size, binary=True)
1548 formatted_size = format_byte_size(file_size, binary=True)
1548 return formatted_size
1549 return formatted_size
1549
1550
1550
1551
1551 def fancy_file_stats(stats):
1552 def fancy_file_stats(stats):
1552 """
1553 """
    Displays a fancy two-colored bar showing the number of added/deleted
    lines of code for a file
1555
1556
1556 :param stats: two element list of added/deleted lines of code
1557 :param stats: two element list of added/deleted lines of code
1557 """
1558 """
1558 from rhodecode.lib.diffs import NEW_FILENODE, DEL_FILENODE, \
1559 from rhodecode.lib.diffs import NEW_FILENODE, DEL_FILENODE, \
1559 MOD_FILENODE, RENAMED_FILENODE, CHMOD_FILENODE, BIN_FILENODE
1560 MOD_FILENODE, RENAMED_FILENODE, CHMOD_FILENODE, BIN_FILENODE
1560
1561
1561 def cgen(l_type, a_v, d_v):
1562 def cgen(l_type, a_v, d_v):
1562 mapping = {'tr': 'top-right-rounded-corner-mid',
1563 mapping = {'tr': 'top-right-rounded-corner-mid',
1563 'tl': 'top-left-rounded-corner-mid',
1564 'tl': 'top-left-rounded-corner-mid',
1564 'br': 'bottom-right-rounded-corner-mid',
1565 'br': 'bottom-right-rounded-corner-mid',
1565 'bl': 'bottom-left-rounded-corner-mid'}
1566 'bl': 'bottom-left-rounded-corner-mid'}
1566 map_getter = lambda x: mapping[x]
1567 map_getter = lambda x: mapping[x]
1567
1568
1568 if l_type == 'a' and d_v:
1569 if l_type == 'a' and d_v:
1569 #case when added and deleted are present
1570 #case when added and deleted are present
1570 return ' '.join(map(map_getter, ['tl', 'bl']))
1571 return ' '.join(map(map_getter, ['tl', 'bl']))
1571
1572
1572 if l_type == 'a' and not d_v:
1573 if l_type == 'a' and not d_v:
1573 return ' '.join(map(map_getter, ['tr', 'br', 'tl', 'bl']))
1574 return ' '.join(map(map_getter, ['tr', 'br', 'tl', 'bl']))
1574
1575
1575 if l_type == 'd' and a_v:
1576 if l_type == 'd' and a_v:
1576 return ' '.join(map(map_getter, ['tr', 'br']))
1577 return ' '.join(map(map_getter, ['tr', 'br']))
1577
1578
1578 if l_type == 'd' and not a_v:
1579 if l_type == 'd' and not a_v:
1579 return ' '.join(map(map_getter, ['tr', 'br', 'tl', 'bl']))
1580 return ' '.join(map(map_getter, ['tr', 'br', 'tl', 'bl']))
1580
1581
1581 a, d = stats['added'], stats['deleted']
1582 a, d = stats['added'], stats['deleted']
1582 width = 100
1583 width = 100
1583
1584
1584 if stats['binary']: # binary operations like chmod/rename etc
1585 if stats['binary']: # binary operations like chmod/rename etc
1585 lbl = []
1586 lbl = []
1586 bin_op = 0 # undefined
1587 bin_op = 0 # undefined
1587
1588
1588 # prefix with bin for binary files
1589 # prefix with bin for binary files
1589 if BIN_FILENODE in stats['ops']:
1590 if BIN_FILENODE in stats['ops']:
1590 lbl += ['bin']
1591 lbl += ['bin']
1591
1592
1592 if NEW_FILENODE in stats['ops']:
1593 if NEW_FILENODE in stats['ops']:
1593 lbl += [_('new file')]
1594 lbl += [_('new file')]
1594 bin_op = NEW_FILENODE
1595 bin_op = NEW_FILENODE
1595 elif MOD_FILENODE in stats['ops']:
1596 elif MOD_FILENODE in stats['ops']:
1596 lbl += [_('mod')]
1597 lbl += [_('mod')]
1597 bin_op = MOD_FILENODE
1598 bin_op = MOD_FILENODE
1598 elif DEL_FILENODE in stats['ops']:
1599 elif DEL_FILENODE in stats['ops']:
1599 lbl += [_('del')]
1600 lbl += [_('del')]
1600 bin_op = DEL_FILENODE
1601 bin_op = DEL_FILENODE
1601 elif RENAMED_FILENODE in stats['ops']:
1602 elif RENAMED_FILENODE in stats['ops']:
1602 lbl += [_('rename')]
1603 lbl += [_('rename')]
1603 bin_op = RENAMED_FILENODE
1604 bin_op = RENAMED_FILENODE
1604
1605
1605 # chmod can go with other operations, so we add a + to lbl if needed
1606 # chmod can go with other operations, so we add a + to lbl if needed
1606 if CHMOD_FILENODE in stats['ops']:
1607 if CHMOD_FILENODE in stats['ops']:
1607 lbl += [_('chmod')]
1608 lbl += [_('chmod')]
1608 if bin_op == 0:
1609 if bin_op == 0:
1609 bin_op = CHMOD_FILENODE
1610 bin_op = CHMOD_FILENODE
1610
1611
1611 lbl = '+'.join(lbl)
1612 lbl = '+'.join(lbl)
1612 b_a = '<div class="bin bin%s %s" style="width:100%%">%s</div>' \
1613 b_a = '<div class="bin bin%s %s" style="width:100%%">%s</div>' \
1613 % (bin_op, cgen('a', a_v='', d_v=0), lbl)
1614 % (bin_op, cgen('a', a_v='', d_v=0), lbl)
1614 b_d = '<div class="bin bin1" style="width:0%%"></div>'
1615 b_d = '<div class="bin bin1" style="width:0%%"></div>'
1615 return literal('<div style="width:%spx">%s%s</div>' % (width, b_a, b_d))
1616 return literal('<div style="width:%spx">%s%s</div>' % (width, b_a, b_d))
1616
1617
1617 t = stats['added'] + stats['deleted']
1618 t = stats['added'] + stats['deleted']
1618 unit = float(width) / (t or 1)
1619 unit = float(width) / (t or 1)
1619
1620
1620 # needs > 9% of width to be visible or 0 to be hidden
1621 # needs > 9% of width to be visible or 0 to be hidden
1621 a_p = max(9, unit * a) if a > 0 else 0
1622 a_p = max(9, unit * a) if a > 0 else 0
1622 d_p = max(9, unit * d) if d > 0 else 0
1623 d_p = max(9, unit * d) if d > 0 else 0
1623 p_sum = a_p + d_p
1624 p_sum = a_p + d_p
1624
1625
1625 if p_sum > width:
1626 if p_sum > width:
1626 #adjust the percentage to be == 100% since we adjusted to 9
1627 #adjust the percentage to be == 100% since we adjusted to 9
1627 if a_p > d_p:
1628 if a_p > d_p:
1628 a_p = a_p - (p_sum - width)
1629 a_p = a_p - (p_sum - width)
1629 else:
1630 else:
1630 d_p = d_p - (p_sum - width)
1631 d_p = d_p - (p_sum - width)
1631
1632
1632 a_v = a if a > 0 else ''
1633 a_v = a if a > 0 else ''
1633 d_v = d if d > 0 else ''
1634 d_v = d if d > 0 else ''
1634
1635
1635 d_a = '<div class="added %s" style="width:%s%%">%s</div>' % (
1636 d_a = '<div class="added %s" style="width:%s%%">%s</div>' % (
1636 cgen('a', a_v, d_v), a_p, a_v
1637 cgen('a', a_v, d_v), a_p, a_v
1637 )
1638 )
1638 d_d = '<div class="deleted %s" style="width:%s%%">%s</div>' % (
1639 d_d = '<div class="deleted %s" style="width:%s%%">%s</div>' % (
1639 cgen('d', a_v, d_v), d_p, d_v
1640 cgen('d', a_v, d_v), d_p, d_v
1640 )
1641 )
1641 return literal('<div style="width:%spx">%s%s</div>' % (width, d_a, d_d))
1642 return literal('<div style="width:%spx">%s%s</div>' % (width, d_a, d_d))
1642
1643
1643
1644
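# Illustrative example of the scaling above:
#
#   fancy_file_stats({'added': 10, 'deleted': 30, 'binary': False, 'ops': {}})
#   # renders an added-bar of ~25% and a deleted-bar of ~75% width
#   # (each clamped to a minimum of 9% so tiny changes stay visible)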
1644 def urlify_text(text_, safe=True):
1645 def urlify_text(text_, safe=True):
1645 """
1646 """
    Extract urls from text and make html links out of them
1647
1648
1648 :param text_:
1649 :param text_:
1649 """
1650 """
1650
1651
1651 url_pat = re.compile(r'''(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@#.&+]'''
1652 url_pat = re.compile(r'''(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@#.&+]'''
1652 '''|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)''')
1653 '''|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)''')
1653
1654
1654 def url_func(match_obj):
1655 def url_func(match_obj):
1655 url_full = match_obj.groups()[0]
1656 url_full = match_obj.groups()[0]
1656 return '<a href="%(url)s">%(url)s</a>' % ({'url': url_full})
1657 return '<a href="%(url)s">%(url)s</a>' % ({'url': url_full})
1657 _newtext = url_pat.sub(url_func, text_)
1658 _newtext = url_pat.sub(url_func, text_)
1658 if safe:
1659 if safe:
1659 return literal(_newtext)
1660 return literal(_newtext)
1660 return _newtext
1661 return _newtext
1661
1662
1662
1663
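# Illustrative example:
#
#   urlify_text('see http://example.com for details')
#   # -> literal('see <a href="http://example.com">http://example.com</a>'
#   #            ' for details')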
1663 def urlify_commits(text_, repository):
1664 def urlify_commits(text_, repository):
1664 """
1665 """
    Extract commit ids from text and make links from them
1666
1667
1667 :param text_:
1668 :param text_:
1668 :param repository: repo name to build the URL with
1669 :param repository: repo name to build the URL with
1669 """
1670 """
1670 from pylons import url # doh, we need to re-import url to mock it later
1671 from pylons import url # doh, we need to re-import url to mock it later
1671 URL_PAT = re.compile(r'(^|\s)([0-9a-fA-F]{12,40})($|\s)')
1672 URL_PAT = re.compile(r'(^|\s)([0-9a-fA-F]{12,40})($|\s)')
1672
1673
1673 def url_func(match_obj):
1674 def url_func(match_obj):
1674 commit_id = match_obj.groups()[1]
1675 commit_id = match_obj.groups()[1]
1675 pref = match_obj.groups()[0]
1676 pref = match_obj.groups()[0]
1676 suf = match_obj.groups()[2]
1677 suf = match_obj.groups()[2]
1677
1678
1678 tmpl = (
1679 tmpl = (
1679 '%(pref)s<a class="%(cls)s" href="%(url)s">'
1680 '%(pref)s<a class="%(cls)s" href="%(url)s">'
1680 '%(commit_id)s</a>%(suf)s'
1681 '%(commit_id)s</a>%(suf)s'
1681 )
1682 )
1682 return tmpl % {
1683 return tmpl % {
1683 'pref': pref,
1684 'pref': pref,
1684 'cls': 'revision-link',
1685 'cls': 'revision-link',
1685 'url': url('changeset_home', repo_name=repository,
1686 'url': url('changeset_home', repo_name=repository,
1686 revision=commit_id, qualified=True),
1687 revision=commit_id, qualified=True),
1687 'commit_id': commit_id,
1688 'commit_id': commit_id,
1688 'suf': suf
1689 'suf': suf
1689 }
1690 }
1690
1691
1691 newtext = URL_PAT.sub(url_func, text_)
1692 newtext = URL_PAT.sub(url_func, text_)
1692
1693
1693 return newtext
1694 return newtext
1694
1695
1695
1696
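# Illustrative example (assuming a repository named 'some-repo' exists):
#
#   urlify_commits('merged 1e0eab54a8f2 into stable', 'some-repo')
#   # wraps the 12-40 character hex id in an <a> pointing at the
#   # changeset_home url of 'some-repo', leaving the rest of the text as is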
1696 def _process_url_func(match_obj, repo_name, uid, entry,
1697 def _process_url_func(match_obj, repo_name, uid, entry,
1697 return_raw_data=False):
1698 return_raw_data=False):
1698 pref = ''
1699 pref = ''
1699 if match_obj.group().startswith(' '):
1700 if match_obj.group().startswith(' '):
1700 pref = ' '
1701 pref = ' '
1701
1702
1702 issue_id = ''.join(match_obj.groups())
1703 issue_id = ''.join(match_obj.groups())
1703 tmpl = (
1704 tmpl = (
1704 '%(pref)s<a class="%(cls)s" href="%(url)s">'
1705 '%(pref)s<a class="%(cls)s" href="%(url)s">'
1705 '%(issue-prefix)s%(id-repr)s'
1706 '%(issue-prefix)s%(id-repr)s'
1706 '</a>')
1707 '</a>')
1707
1708
1708 (repo_name_cleaned,
1709 (repo_name_cleaned,
1709 parent_group_name) = RepoGroupModel().\
1710 parent_group_name) = RepoGroupModel().\
1710 _get_group_name_and_parent(repo_name)
1711 _get_group_name_and_parent(repo_name)
1711
1712
1712 # variables replacement
1713 # variables replacement
1713 named_vars = {
1714 named_vars = {
1714 'id': issue_id,
1715 'id': issue_id,
1715 'repo': repo_name,
1716 'repo': repo_name,
1716 'repo_name': repo_name_cleaned,
1717 'repo_name': repo_name_cleaned,
1717 'group_name': parent_group_name
1718 'group_name': parent_group_name
1718 }
1719 }
1719 # named regex variables
1720 # named regex variables
1720 named_vars.update(match_obj.groupdict())
1721 named_vars.update(match_obj.groupdict())
1721 _url = string.Template(entry['url']).safe_substitute(**named_vars)
1722 _url = string.Template(entry['url']).safe_substitute(**named_vars)
1722
1723
1723 data = {
1724 data = {
1724 'pref': pref,
1725 'pref': pref,
1725 'cls': 'issue-tracker-link',
1726 'cls': 'issue-tracker-link',
1726 'url': _url,
1727 'url': _url,
1727 'id-repr': issue_id,
1728 'id-repr': issue_id,
1728 'issue-prefix': entry['pref'],
1729 'issue-prefix': entry['pref'],
1729 'serv': entry['url'],
1730 'serv': entry['url'],
1730 }
1731 }
1731 if return_raw_data:
1732 if return_raw_data:
1732 return {
1733 return {
1733 'id': issue_id,
1734 'id': issue_id,
1734 'url': _url
1735 'url': _url
1735 }
1736 }
1736 return tmpl % data
1737 return tmpl % data
1737
1738
1738
1739
1739 def process_patterns(text_string, repo_name, config=None):
1740 def process_patterns(text_string, repo_name, config=None):
1740 repo = None
1741 repo = None
1741 if repo_name:
1742 if repo_name:
        # Retrieve the repo object so that an invalid repo_name does not blow
        # up in IssueTrackerSettingsModel, while still passing the raw name
        # further down
1744 repo = Repository.get_by_repo_name(repo_name, cache=True)
1745 repo = Repository.get_by_repo_name(repo_name, cache=True)
1745
1746
1746 settings_model = IssueTrackerSettingsModel(repo=repo)
1747 settings_model = IssueTrackerSettingsModel(repo=repo)
1747 active_entries = settings_model.get_settings(cache=True)
1748 active_entries = settings_model.get_settings(cache=True)
1748
1749
1749 issues_data = []
1750 issues_data = []
1750 newtext = text_string
1751 newtext = text_string
1751 for uid, entry in active_entries.items():
1752 for uid, entry in active_entries.items():
1752 log.debug('found issue tracker entry with uid %s' % (uid,))
1753 log.debug('found issue tracker entry with uid %s' % (uid,))
1753
1754
1754 if not (entry['pat'] and entry['url']):
1755 if not (entry['pat'] and entry['url']):
1755 log.debug('skipping due to missing data')
1756 log.debug('skipping due to missing data')
1756 continue
1757 continue
1757
1758
1758 log.debug('issue tracker entry: uid: `%s` PAT:%s URL:%s PREFIX:%s'
1759 log.debug('issue tracker entry: uid: `%s` PAT:%s URL:%s PREFIX:%s'
1759 % (uid, entry['pat'], entry['url'], entry['pref']))
1760 % (uid, entry['pat'], entry['url'], entry['pref']))
1760
1761
1761 try:
1762 try:
1762 pattern = re.compile(r'%s' % entry['pat'])
1763 pattern = re.compile(r'%s' % entry['pat'])
1763 except re.error:
1764 except re.error:
1764 log.exception(
1765 log.exception(
1765 'issue tracker pattern: `%s` failed to compile',
1766 'issue tracker pattern: `%s` failed to compile',
1766 entry['pat'])
1767 entry['pat'])
1767 continue
1768 continue
1768
1769
1769 data_func = partial(
1770 data_func = partial(
1770 _process_url_func, repo_name=repo_name, entry=entry, uid=uid,
1771 _process_url_func, repo_name=repo_name, entry=entry, uid=uid,
1771 return_raw_data=True)
1772 return_raw_data=True)
1772
1773
1773 for match_obj in pattern.finditer(text_string):
1774 for match_obj in pattern.finditer(text_string):
1774 issues_data.append(data_func(match_obj))
1775 issues_data.append(data_func(match_obj))
1775
1776
1776 url_func = partial(
1777 url_func = partial(
1777 _process_url_func, repo_name=repo_name, entry=entry, uid=uid)
1778 _process_url_func, repo_name=repo_name, entry=entry, uid=uid)
1778
1779
1779 newtext = pattern.sub(url_func, newtext)
1780 newtext = pattern.sub(url_func, newtext)
1780 log.debug('processed prefix:uid `%s`', uid)
1781 log.debug('processed prefix:uid `%s`', uid)
1781
1782
1782 return newtext, issues_data
1783 return newtext, issues_data
1783
1784
1784
1785
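A hedged usage sketch of process_patterns as defined above (it assumes an active issue-tracker entry mapping '#<number>' to a tracker URL is configured for the repository; the values shown are illustrative):

    newtext, issues = process_patterns('fixes #42', 'my-repo')
    # newtext -> the input text with '#42' replaced by an
    #            <a class="issue-tracker-link" href="...">#42</a> link
    # issues  -> [{'id': '42', 'url': 'https://tracker.example.com/...'}]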
1785 def urlify_commit_message(commit_text, repository=None):
1786 def urlify_commit_message(commit_text, repository=None):
1786 """
1787 """
1787 Parses the given text message and turns references into proper links.
1788 Parses the given text message and turns references into proper links.
1788 Issues are linked to the configured issue tracker, and commit hashes become commit links.
1789 Issues are linked to the configured issue tracker, and commit hashes become commit links.
1789
1790
1790 :param commit_text:
1791 :param commit_text:
1791 :param repository:
1792 :param repository:
1792 """
1793 """
1793 from pylons import url # doh, we need to re-import url to mock it later
1794 from pylons import url # doh, we need to re-import url to mock it later
1794
1795
1795 def escaper(string):
1796 def escaper(string):
1796 return string.replace('<', '&lt;').replace('>', '&gt;')
1797 return string.replace('<', '&lt;').replace('>', '&gt;')
1797
1798
1798 newtext = escaper(commit_text)
1799 newtext = escaper(commit_text)
1799
1800
1800 # extract http/https links and make them real urls
1801 # extract http/https links and make them real urls
1801 newtext = urlify_text(newtext, safe=False)
1802 newtext = urlify_text(newtext, safe=False)
1802
1803
1803 # urlify commits - extract commit ids and make links out of them, if the
1804 # urlify commits - extract commit ids and make links out of them, if the
1804 # repository scope is present.
1805 # repository scope is present.
1805 if repository:
1806 if repository:
1806 newtext = urlify_commits(newtext, repository)
1807 newtext = urlify_commits(newtext, repository)
1807
1808
1808 # process issue tracker patterns
1809 # process issue tracker patterns
1809 newtext, issues = process_patterns(newtext, repository or '')
1810 newtext, issues = process_patterns(newtext, repository or '')
1810
1811
1811 return literal(newtext)
1812 return literal(newtext)
1812
1813
1813
1814
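Illustrative expectation for urlify_commit_message (output shape only; the exact markup depends on the configured issue trackers and the repository):

    html = urlify_commit_message(
        'Fix #12 in abc123de, see https://example.com', 'my-repo')
    # -> literal(...) where the URL becomes an <a> tag, 'abc123de' becomes a
    #    commit link (since the repository scope is known), and '#12' becomes
    #    an issue-tracker link if a matching pattern is configured.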
1814 def rst(source, mentions=False):
1815 return literal('<div class="rst-block">%s</div>' %
1816 MarkupRenderer.rst(source, mentions=mentions))
1817
1818
1819 def markdown(source, mentions=False):
1820 return literal('<div class="markdown-block">%s</div>' %
1821 MarkupRenderer.markdown(source, flavored=True,
1822 mentions=mentions))
1823
1824
1825 def renderer_from_filename(filename, exclude=None):
1815 def renderer_from_filename(filename, exclude=None):
1826 """
1816 """
1827 Choose a renderer based on the given filename.
1817 Choose a renderer based on the given filename.
1828 """
1818 """
1829
1819
1830 # images
1831
1832 # ipython
1820 # ipython
1833 if filename.endswith('.ipynb'):
1821 for ext in ['*.ipynb']:
1834 return 'ipython'
1822 if fnmatch.fnmatch(filename, pat=ext):
1823 return 'jupyter'
1835
1824
1836 is_markup = MarkupRenderer.renderer_from_filename(filename, exclude=exclude)
1825 is_markup = MarkupRenderer.renderer_from_filename(filename, exclude=exclude)
1837 if is_markup:
1826 if is_markup:
1838 return is_markup
1827 return is_markup
1839 return None
1828 return None
1840
1829
1841
1830
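Expected behaviour of the helper above, as a sketch (extensions follow the default MARKDOWN_EXTS/RST_EXTS maps; exact results depend on the exclude list passed in):

    renderer_from_filename('notes.ipynb')   # -> 'jupyter'
    renderer_from_filename('README.md')     # -> 'markdown'
    renderer_from_filename('spec.rst')      # -> 'rst'
    renderer_from_filename('data.csv')      # -> None, caller shows the raw source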
1842 def render(source, renderer='rst', mentions=False):
1831 def render(source, renderer='rst', mentions=False):
1843 if renderer == 'rst':
1832 if renderer == 'rst':
1844 return rst(source, mentions=mentions)
1833 return literal(
1834 '<div class="rst-block">%s</div>' %
1835 MarkupRenderer.rst(source, mentions=mentions))
1845 elif renderer == 'markdown':
1836 elif renderer == 'markdown':
1846 return markdown(source, mentions=mentions)
1837 return literal(
1847 elif renderer == 'ipython':
1838 '<div class="markdown-block">%s</div>' %
1848 def ipython_renderer(source):
1839 MarkupRenderer.markdown(source, flavored=True, mentions=mentions))
1849 import nbformat
1840 elif renderer == 'jupyter':
1850 from nbconvert import HTMLExporter
1841 return literal(
1851 notebook = nbformat.reads(source, as_version=4)
1842 '<div class="ipynb">%s</div>' %
1843 MarkupRenderer.jupyter(source))
1852
1844
1853 # 2. Instantiate the exporter. We use the `basic` template for now; we'll get into more details
1854 # later about how to customize the exporter further.
1855 html_exporter = HTMLExporter()
1856 html_exporter.template_file = 'basic'
1857
1858 # 3. Process the notebook we loaded earlier
1859 (body, resources) = html_exporter.from_notebook_node(notebook)
1860
1861 return body
1862
1863 return ipython_renderer(source)
1864 # None means just show the file-source
1845 # None means just show the file-source
1865 return None
1846 return None
1866
1847
1867
1848
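A short, hedged sketch of how the dispatcher above is typically called (the notebook source is a placeholder; the wrapping <div> classes are the ones used in this module):

    notebook_source = '...'  # raw .ipynb JSON read from the repository (placeholder)
    render(notebook_source, renderer='jupyter')
    # -> literal('<div class="ipynb">...rendered notebook html...</div>')
    render('**bold**', renderer='markdown')
    # -> literal('<div class="markdown-block">...</div>')
    render('anything', renderer=None)   # -> None, caller shows the raw source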
1868 def commit_status(repo, commit_id):
1849 def commit_status(repo, commit_id):
1869 return ChangesetStatusModel().get_status(repo, commit_id)
1850 return ChangesetStatusModel().get_status(repo, commit_id)
1870
1851
1871
1852
1872 def commit_status_lbl(commit_status):
1853 def commit_status_lbl(commit_status):
1873 return dict(ChangesetStatus.STATUSES).get(commit_status)
1854 return dict(ChangesetStatus.STATUSES).get(commit_status)
1874
1855
1875
1856
1876 def commit_time(repo_name, commit_id):
1857 def commit_time(repo_name, commit_id):
1877 repo = Repository.get_by_repo_name(repo_name)
1858 repo = Repository.get_by_repo_name(repo_name)
1878 commit = repo.get_commit(commit_id=commit_id)
1859 commit = repo.get_commit(commit_id=commit_id)
1879 return commit.date
1860 return commit.date
1880
1861
1881
1862
1882 def get_permission_name(key):
1863 def get_permission_name(key):
1883 return dict(Permission.PERMS).get(key)
1864 return dict(Permission.PERMS).get(key)
1884
1865
1885
1866
1886 def journal_filter_help():
1867 def journal_filter_help():
1887 return _(
1868 return _(
1888 'Example filter terms:\n' +
1869 'Example filter terms:\n' +
1889 ' repository:vcs\n' +
1870 ' repository:vcs\n' +
1890 ' username:marcin\n' +
1871 ' username:marcin\n' +
1891 ' action:*push*\n' +
1872 ' action:*push*\n' +
1892 ' ip:127.0.0.1\n' +
1873 ' ip:127.0.0.1\n' +
1893 ' date:20120101\n' +
1874 ' date:20120101\n' +
1894 ' date:[20120101100000 TO 20120102]\n' +
1875 ' date:[20120101100000 TO 20120102]\n' +
1895 '\n' +
1876 '\n' +
1896 'Generate wildcards using \'*\' character:\n' +
1877 'Generate wildcards using \'*\' character:\n' +
1897 ' "repository:vcs*" - search everything starting with \'vcs\'\n' +
1878 ' "repository:vcs*" - search everything starting with \'vcs\'\n' +
1898 ' "repository:*vcs*" - search for repository containing \'vcs\'\n' +
1879 ' "repository:*vcs*" - search for repository containing \'vcs\'\n' +
1899 '\n' +
1880 '\n' +
1900 'Optional AND / OR operators in queries\n' +
1881 'Optional AND / OR operators in queries\n' +
1901 ' "repository:vcs OR repository:test"\n' +
1882 ' "repository:vcs OR repository:test"\n' +
1902 ' "username:test AND repository:test*"\n'
1883 ' "username:test AND repository:test*"\n'
1903 )
1884 )
1904
1885
1905
1886
1906 def not_mapped_error(repo_name):
1887 def not_mapped_error(repo_name):
1907 flash(_('%s repository is not mapped to the database; perhaps'
1888 flash(_('%s repository is not mapped to the database; perhaps'
1908 ' it was created or renamed on the filesystem.'
1889 ' it was created or renamed on the filesystem.'
1909 ' Please run the application again'
1890 ' Please run the application again'
1910 ' in order to rescan repositories.') % repo_name, category='error')
1891 ' in order to rescan repositories.') % repo_name, category='error')
1911
1892
1912
1893
1913 def ip_range(ip_addr):
1894 def ip_range(ip_addr):
1914 from rhodecode.model.db import UserIpMap
1895 from rhodecode.model.db import UserIpMap
1915 s, e = UserIpMap._get_ip_range(ip_addr)
1896 s, e = UserIpMap._get_ip_range(ip_addr)
1916 return '%s - %s' % (s, e)
1897 return '%s - %s' % (s, e)
1917
1898
1918
1899
1919 def form(url, method='post', needs_csrf_token=True, **attrs):
1900 def form(url, method='post', needs_csrf_token=True, **attrs):
1920 """Wrapper around webhelpers.tags.form to prevent CSRF attacks."""
1901 """Wrapper around webhelpers.tags.form to prevent CSRF attacks."""
1921 if method.lower() != 'get' and needs_csrf_token:
1902 if method.lower() != 'get' and needs_csrf_token:
1922 raise Exception(
1903 raise Exception(
1923 'Forms to POST/PUT/DELETE endpoints should have (in general) a ' +
1904 'Forms to POST/PUT/DELETE endpoints should have (in general) a ' +
1924 'CSRF token. If the endpoint does not require such token you can ' +
1905 'CSRF token. If the endpoint does not require such token you can ' +
1925 'explicitly set the parameter needs_csrf_token to false.')
1906 'explicitly set the parameter needs_csrf_token to false.')
1926
1907
1927 return wh_form(url, method=method, **attrs)
1908 return wh_form(url, method=method, **attrs)
1928
1909
1929
1910
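Illustrative behaviour of the form() guard above (URLs and route names are placeholders):

    form(route_path('login'), method='get')       # OK: GET needs no CSRF token
    form('/some/endpoint', method='post')         # raises: use secure_form() instead
    form('/webhook', method='post',
         needs_csrf_token=False)                  # explicit, deliberate opt-out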
1930 def secure_form(url, method="POST", multipart=False, **attrs):
1911 def secure_form(url, method="POST", multipart=False, **attrs):
1931 """Start a form tag that points the action to an url. This
1912 """Start a form tag that points the action to an url. This
1932 form tag will also include the hidden field containing
1913 form tag will also include the hidden field containing
1933 the auth token.
1914 the auth token.
1934
1915
1935 The url option should be given either as a string or as a
1916 The url option should be given either as a string or as a
1936 ``url()`` call. The form method defaults to POST.
1917 ``url()`` call. The form method defaults to POST.
1937
1918
1938 Options:
1919 Options:
1939
1920
1940 ``multipart``
1921 ``multipart``
1941 If set to True, the enctype is set to "multipart/form-data".
1922 If set to True, the enctype is set to "multipart/form-data".
1942 ``method``
1923 ``method``
1943 The method to use when submitting the form, usually either
1924 The method to use when submitting the form, usually either
1944 "GET" or "POST". If "PUT", "DELETE", or another verb is used, a
1925 "GET" or "POST". If "PUT", "DELETE", or another verb is used, a
1945 hidden input with name _method is added to simulate the verb
1926 hidden input with name _method is added to simulate the verb
1946 over POST.
1927 over POST.
1947
1928
1948 """
1929 """
1949 from webhelpers.pylonslib.secure_form import insecure_form
1930 from webhelpers.pylonslib.secure_form import insecure_form
1950 form = insecure_form(url, method, multipart, **attrs)
1931 form = insecure_form(url, method, multipart, **attrs)
1951 token = csrf_input()
1932 token = csrf_input()
1952 return literal("%s\n%s" % (form, token))
1933 return literal("%s\n%s" % (form, token))
1953
1934
1954 def csrf_input():
1935 def csrf_input():
1955 return literal(
1936 return literal(
1956 '<input type="hidden" id="{}" name="{}" value="{}">'.format(
1937 '<input type="hidden" id="{}" name="{}" value="{}">'.format(
1957 csrf_token_key, csrf_token_key, get_csrf_token()))
1938 csrf_token_key, csrf_token_key, get_csrf_token()))
1958
1939
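A hedged sketch of what the two helpers above produce when combined (the route name is a placeholder; the closing tag would come from webhelpers' end_form):

    form_open = secure_form(route_path('edit_repo', repo_name='my-repo'), method='POST')
    # -> literal('<form action="..." method="POST">' followed by the hidden
    #    CSRF <input> produced by csrf_input(), keyed by csrf_token_key)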
1959 def dropdownmenu(name, selected, options, enable_filter=False, **attrs):
1940 def dropdownmenu(name, selected, options, enable_filter=False, **attrs):
1960 select_html = select(name, selected, options, **attrs)
1941 select_html = select(name, selected, options, **attrs)
1961 select2 = """
1942 select2 = """
1962 <script>
1943 <script>
1963 $(document).ready(function() {
1944 $(document).ready(function() {
1964 $('#%s').select2({
1945 $('#%s').select2({
1965 containerCssClass: 'drop-menu',
1946 containerCssClass: 'drop-menu',
1966 dropdownCssClass: 'drop-menu-dropdown',
1947 dropdownCssClass: 'drop-menu-dropdown',
1967 dropdownAutoWidth: true%s
1948 dropdownAutoWidth: true%s
1968 });
1949 });
1969 });
1950 });
1970 </script>
1951 </script>
1971 """
1952 """
1972 filter_option = """,
1953 filter_option = """,
1973 minimumResultsForSearch: -1
1954 minimumResultsForSearch: -1
1974 """
1955 """
1975 input_id = attrs.get('id') or name
1956 input_id = attrs.get('id') or name
1976 filter_enabled = "" if enable_filter else filter_option
1957 filter_enabled = "" if enable_filter else filter_option
1977 select_script = literal(select2 % (input_id, filter_enabled))
1958 select_script = literal(select2 % (input_id, filter_enabled))
1978
1959
1979 return literal(select_html+select_script)
1960 return literal(select_html+select_script)
1980
1961
1981
1962
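Illustrative call of dropdownmenu() (the option pairs follow webhelpers' select() convention of (value, label); the names are made up):

    dropdownmenu(
        'landing_rev', 'rev:tip',
        [('rev:tip', 'latest tip'), ('branch:default', 'default branch')],
        enable_filter=True)
    # -> literal(<select id="landing_rev" ...> plus the select2 init <script>);
    #    with enable_filter=False the script also sets minimumResultsForSearch: -1,
    #    which hides the search box.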
1982 def get_visual_attr(tmpl_context_var, attr_name):
1963 def get_visual_attr(tmpl_context_var, attr_name):
1983 """
1964 """
1984 A safe way to get an attribute from the `visual` variable of the template context
1965 A safe way to get an attribute from the `visual` variable of the template context
1985
1966
1986 :param tmpl_context_var: instance of tmpl_context, usually present as `c`
1967 :param tmpl_context_var: instance of tmpl_context, usually present as `c`
1987 :param attr_name: name of the attribute we fetch from the c.visual
1968 :param attr_name: name of the attribute we fetch from the c.visual
1988 """
1969 """
1989 visual = getattr(tmpl_context_var, 'visual', None)
1970 visual = getattr(tmpl_context_var, 'visual', None)
1990 if not visual:
1971 if not visual:
1991 return
1972 return
1992 else:
1973 else:
1993 return getattr(visual, attr_name, None)
1974 return getattr(visual, attr_name, None)
1994
1975
1995
1976
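A one-line hedged example of the helper above ('show_public_icon' is just an illustrative attribute name):

    show_icon = get_visual_attr(c, 'show_public_icon')  # None when c.visual is absent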
1996 def get_last_path_part(file_node):
1977 def get_last_path_part(file_node):
1997 if not file_node.path:
1978 if not file_node.path:
1998 return u''
1979 return u''
1999
1980
2000 path = safe_unicode(file_node.path.split('/')[-1])
1981 path = safe_unicode(file_node.path.split('/')[-1])
2001 return u'../' + path
1982 return u'../' + path
2002
1983
2003
1984
2004 def route_path(*args, **kwds):
1985 def route_path(*args, **kwds):
2005 """
1986 """
2006 Wrapper around Pyramid's `route_path` function. It is used to generate
1987 Wrapper around Pyramid's `route_path` function. It is used to generate
2007 URLs from within pylons views or templates. This will be removed when
1988 URLs from within pylons views or templates. This will be removed when
2008 the pyramid migration is finished.
1989 the pyramid migration is finished.
2009 """
1990 """
2010 req = get_current_request()
1991 req = get_current_request()
2011 return req.route_path(*args, **kwds)
1992 return req.route_path(*args, **kwds)
2012
1993
2013
1994
2014 def route_path_or_none(*args, **kwargs):
1995 def route_path_or_none(*args, **kwargs):
2015 try:
1996 try:
2016 return route_path(*args, **kwargs)
1997 return route_path(*args, **kwargs)
2017 except KeyError:
1998 except KeyError:
2018 return None
1999 return None
2019
2000
2020
2001
2021 def static_url(*args, **kwds):
2002 def static_url(*args, **kwds):
2022 """
2003 """
2023 Wrapper around Pyramid's `static_url` function. It is used to generate
2004 Wrapper around Pyramid's `static_url` function. It is used to generate
2024 URLs from within pylons views or templates. This will be removed when
2005 URLs from within pylons views or templates. This will be removed when
2025 the pyramid migration is finished.
2006 the pyramid migration is finished.
2026 """
2007 """
2027 req = get_current_request()
2008 req = get_current_request()
2028 return req.static_url(*args, **kwds)
2009 return req.static_url(*args, **kwds)
2029
2010
2030
2011
2031 def resource_path(*args, **kwds):
2012 def resource_path(*args, **kwds):
2032 """
2013 """
2033 Wrapper around Pyramid's `resource_path` function. It is used to generate
2014 Wrapper around Pyramid's `resource_path` function. It is used to generate
2034 URLs from within pylons views or templates. This will be removed when
2015 URLs from within pylons views or templates. This will be removed when
2035 the pyramid migration is finished.
2016 the pyramid migration is finished.
2036 """
2017 """
2037 req = get_current_request()
2018 req = get_current_request()
2038 return req.resource_path(*args, **kwds)
2019 return req.resource_path(*args, **kwds)
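Hedged examples for the pyramid wrappers above (route names, asset specs and resulting paths are purely illustrative):

    route_path('repo_summary', repo_name='my-repo')   # -> '/my-repo' if such a route exists
    route_path_or_none('route_that_does_not_exist')   # -> None instead of raising KeyError
    static_url('rhodecode:public/images/logo.png')    # -> absolute URL to the static asset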
@@ -1,282 +1,358 b''
1 # -*- coding: utf-8 -*-
1 # -*- coding: utf-8 -*-
2
2
3 # Copyright (C) 2011-2017 RhodeCode GmbH
3 # Copyright (C) 2011-2017 RhodeCode GmbH
4 #
4 #
5 # This program is free software: you can redistribute it and/or modify
5 # This program is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU Affero General Public License, version 3
6 # it under the terms of the GNU Affero General Public License, version 3
7 # (only), as published by the Free Software Foundation.
7 # (only), as published by the Free Software Foundation.
8 #
8 #
9 # This program is distributed in the hope that it will be useful,
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU General Public License for more details.
12 # GNU General Public License for more details.
13 #
13 #
14 # You should have received a copy of the GNU Affero General Public License
14 # You should have received a copy of the GNU Affero General Public License
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 #
16 #
17 # This program is dual-licensed. If you wish to learn more about the
17 # This program is dual-licensed. If you wish to learn more about the
18 # RhodeCode Enterprise Edition, including its added features, Support services,
18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 # and proprietary license terms, please see https://rhodecode.com/licenses/
19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20
20
21
21
22 """
22 """
23 Renderer for markup languages with ability to parse using rst or markdown
23 Renderer for markup languages with ability to parse using rst or markdown
24 """
24 """
25
25
26 import re
26 import re
27 import os
27 import os
28 import logging
28 import logging
29 import itertools
29 import itertools
30
30
31 from mako.lookup import TemplateLookup
31 from mako.lookup import TemplateLookup
32 from mako.template import Template as MakoTemplate
32
33
33 from docutils.core import publish_parts
34 from docutils.core import publish_parts
34 from docutils.parsers.rst import directives
35 from docutils.parsers.rst import directives
35 import markdown
36 import markdown
36
37
37 from rhodecode.lib.markdown_ext import (
38 from rhodecode.lib.markdown_ext import (
38 UrlizeExtension, GithubFlavoredMarkdownExtension)
39 UrlizeExtension, GithubFlavoredMarkdownExtension)
39 from rhodecode.lib.utils2 import safe_unicode, md5_safe, MENTIONS_REGEX
40 from rhodecode.lib.utils2 import safe_unicode, md5_safe, MENTIONS_REGEX
40
41
41 log = logging.getLogger(__name__)
42 log = logging.getLogger(__name__)
42
43
43 # default renderer used to generate automated comments
44 # default renderer used to generate automated comments
44 DEFAULT_COMMENTS_RENDERER = 'rst'
45 DEFAULT_COMMENTS_RENDERER = 'rst'
45
46
46
47
47 class MarkupRenderer(object):
48 class MarkupRenderer(object):
48 RESTRUCTUREDTEXT_DISALLOWED_DIRECTIVES = ['include', 'meta', 'raw']
49 RESTRUCTUREDTEXT_DISALLOWED_DIRECTIVES = ['include', 'meta', 'raw']
49
50
50 MARKDOWN_PAT = re.compile(r'\.(md|mkdn?|mdown|markdown)$', re.IGNORECASE)
51 MARKDOWN_PAT = re.compile(r'\.(md|mkdn?|mdown|markdown)$', re.IGNORECASE)
51 RST_PAT = re.compile(r'\.re?st$', re.IGNORECASE)
52 RST_PAT = re.compile(r'\.re?st$', re.IGNORECASE)
53 JUPYTER_PAT = re.compile(r'\.(ipynb)$', re.IGNORECASE)
52 PLAIN_PAT = re.compile(r'^readme$', re.IGNORECASE)
54 PLAIN_PAT = re.compile(r'^readme$', re.IGNORECASE)
53
55
54 extensions = ['codehilite', 'extra', 'def_list', 'sane_lists']
56 extensions = ['codehilite', 'extra', 'def_list', 'sane_lists']
55 markdown_renderer = markdown.Markdown(
57 markdown_renderer = markdown.Markdown(
56 extensions, safe_mode=True, enable_attributes=False)
58 extensions, safe_mode=True, enable_attributes=False)
57
59
58 markdown_renderer_flavored = markdown.Markdown(
60 markdown_renderer_flavored = markdown.Markdown(
59 extensions + [GithubFlavoredMarkdownExtension()], safe_mode=True,
61 extensions + [GithubFlavoredMarkdownExtension()], safe_mode=True,
60 enable_attributes=False)
62 enable_attributes=False)
61
63
62 # extension together with weights. Lower is first means we control how
64 # extension together with weights. Lower is first means we control how
63 # extensions are attached to readme names with those.
65 # extensions are attached to readme names with those.
64 PLAIN_EXTS = [
66 PLAIN_EXTS = [
65 # prefer no extension
67 # prefer no extension
66 ('', 0), # special case that renders READMES names without extension
68 ('', 0), # special case that renders READMES names without extension
67 ('.text', 2), ('.TEXT', 2),
69 ('.text', 2), ('.TEXT', 2),
68 ('.txt', 3), ('.TXT', 3)
70 ('.txt', 3), ('.TXT', 3)
69 ]
71 ]
70
72
71 RST_EXTS = [
73 RST_EXTS = [
72 ('.rst', 1), ('.rest', 1),
74 ('.rst', 1), ('.rest', 1),
73 ('.RST', 2), ('.REST', 2)
75 ('.RST', 2), ('.REST', 2)
74 ]
76 ]
75
77
76 MARKDOWN_EXTS = [
78 MARKDOWN_EXTS = [
77 ('.md', 1), ('.MD', 1),
79 ('.md', 1), ('.MD', 1),
78 ('.mkdn', 2), ('.MKDN', 2),
80 ('.mkdn', 2), ('.MKDN', 2),
79 ('.mdown', 3), ('.MDOWN', 3),
81 ('.mdown', 3), ('.MDOWN', 3),
80 ('.markdown', 4), ('.MARKDOWN', 4)
82 ('.markdown', 4), ('.MARKDOWN', 4)
81 ]
83 ]
82
84
83 def _detect_renderer(self, source, filename=None):
85 def _detect_renderer(self, source, filename=None):
84 """
86 """
85 Detects which renderer should be used for generating HTML
87 Detects which renderer should be used for generating HTML
86 from a markup language.
88 from a markup language.
87
89
88 The filename can also explicitly be a renderer name.
90 The filename can also explicitly be a renderer name.
89
91
90 :param source:
92 :param source:
91 :param filename:
93 :param filename:
92 """
94 """
93
95
94 if MarkupRenderer.MARKDOWN_PAT.findall(filename):
96 if MarkupRenderer.MARKDOWN_PAT.findall(filename):
95 detected_renderer = 'markdown'
97 detected_renderer = 'markdown'
96 elif MarkupRenderer.RST_PAT.findall(filename):
98 elif MarkupRenderer.RST_PAT.findall(filename):
97 detected_renderer = 'rst'
99 detected_renderer = 'rst'
100 elif MarkupRenderer.JUPYTER_PAT.findall(filename):
101 detected_renderer = 'jupyter'
98 elif MarkupRenderer.PLAIN_PAT.findall(filename):
102 elif MarkupRenderer.PLAIN_PAT.findall(filename):
99 detected_renderer = 'plain'
103 detected_renderer = 'plain'
100 else:
104 else:
101 detected_renderer = 'plain'
105 detected_renderer = 'plain'
102
106
103 return getattr(MarkupRenderer, detected_renderer)
107 return getattr(MarkupRenderer, detected_renderer)
104
108
105 @classmethod
109 @classmethod
106 def renderer_from_filename(cls, filename, exclude):
110 def renderer_from_filename(cls, filename, exclude):
107 """
111 """
108 Detect renderer markdown/rst from filename and optionally use exclude
112 Detect renderer markdown/rst from filename and optionally use exclude
109 list to remove some options. This is mostly used in helpers.
113 list to remove some options. This is mostly used in helpers.
110 Returns None when no renderer can be detected.
114 Returns None when no renderer can be detected.
111 """
115 """
112 def _filter(elements):
116 def _filter(elements):
113 if isinstance(exclude, (list, tuple)):
117 if isinstance(exclude, (list, tuple)):
114 return [x for x in elements if x not in exclude]
118 return [x for x in elements if x not in exclude]
115 return elements
119 return elements
116
120
117 if filename.endswith(
121 if filename.endswith(
118 tuple(_filter([x[0] for x in cls.MARKDOWN_EXTS if x[0]]))):
122 tuple(_filter([x[0] for x in cls.MARKDOWN_EXTS if x[0]]))):
119 return 'markdown'
123 return 'markdown'
120 if filename.endswith(tuple(_filter([x[0] for x in cls.RST_EXTS if x[0]]))):
124 if filename.endswith(tuple(_filter([x[0] for x in cls.RST_EXTS if x[0]]))):
121 return 'rst'
125 return 'rst'
122
126
123 return None
127 return None
124
128
125 def render(self, source, filename=None):
129 def render(self, source, filename=None):
126 """
130 """
127 Renders the given source using the detected renderer;
131 Renders the given source using the detected renderer;
128 renderers are detected based on file extension or mimetype.
132 renderers are detected based on file extension or mimetype.
129 As a last resort it falls back to plain HTML, replacing newlines with <br/>.
133 As a last resort it falls back to plain HTML, replacing newlines with <br/>.
130
134
131 :param filename:
135 :param filename:
132 :param source:
136 :param source:
133 """
137 """
134
138
135 renderer = self._detect_renderer(source, filename)
139 renderer = self._detect_renderer(source, filename)
136 readme_data = renderer(source)
140 readme_data = renderer(source)
137 return readme_data
141 return readme_data
138
142
139 @classmethod
143 @classmethod
140 def _flavored_markdown(cls, text):
144 def _flavored_markdown(cls, text):
141 """
145 """
142 Github style flavored markdown
146 Github style flavored markdown
143
147
144 :param text:
148 :param text:
145 """
149 """
146
150
147 # Extract pre blocks.
151 # Extract pre blocks.
148 extractions = {}
152 extractions = {}
149
153
150 def pre_extraction_callback(matchobj):
154 def pre_extraction_callback(matchobj):
151 digest = md5_safe(matchobj.group(0))
155 digest = md5_safe(matchobj.group(0))
152 extractions[digest] = matchobj.group(0)
156 extractions[digest] = matchobj.group(0)
153 return "{gfm-extraction-%s}" % digest
157 return "{gfm-extraction-%s}" % digest
154 pattern = re.compile(r'<pre>.*?</pre>', re.MULTILINE | re.DOTALL)
158 pattern = re.compile(r'<pre>.*?</pre>', re.MULTILINE | re.DOTALL)
155 text = re.sub(pattern, pre_extraction_callback, text)
159 text = re.sub(pattern, pre_extraction_callback, text)
156
160
157 # Prevent foo_bar_baz from ending up with an italic word in the middle.
161 # Prevent foo_bar_baz from ending up with an italic word in the middle.
158 def italic_callback(matchobj):
162 def italic_callback(matchobj):
159 s = matchobj.group(0)
163 s = matchobj.group(0)
160 if list(s).count('_') >= 2:
164 if list(s).count('_') >= 2:
161 return s.replace('_', r'\_')
165 return s.replace('_', r'\_')
162 return s
166 return s
163 text = re.sub(r'^(?! {4}|\t)\w+_\w+_\w[\w_]*', italic_callback, text)
167 text = re.sub(r'^(?! {4}|\t)\w+_\w+_\w[\w_]*', italic_callback, text)
164
168
165 # Insert pre block extractions.
169 # Insert pre block extractions.
166 def pre_insert_callback(matchobj):
170 def pre_insert_callback(matchobj):
167 return '\n\n' + extractions[matchobj.group(1)]
171 return '\n\n' + extractions[matchobj.group(1)]
168 text = re.sub(r'\{gfm-extraction-([0-9a-f]{32})\}',
172 text = re.sub(r'\{gfm-extraction-([0-9a-f]{32})\}',
169 pre_insert_callback, text)
173 pre_insert_callback, text)
170
174
171 return text
175 return text
172
176
173 @classmethod
177 @classmethod
174 def urlify_text(cls, text):
178 def urlify_text(cls, text):
175 url_pat = re.compile(r'(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]'
179 url_pat = re.compile(r'(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]'
176 r'|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)')
180 r'|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)')
177
181
178 def url_func(match_obj):
182 def url_func(match_obj):
179 url_full = match_obj.groups()[0]
183 url_full = match_obj.groups()[0]
180 return '<a href="%(url)s">%(url)s</a>' % ({'url': url_full})
184 return '<a href="%(url)s">%(url)s</a>' % ({'url': url_full})
181
185
182 return url_pat.sub(url_func, text)
186 return url_pat.sub(url_func, text)
183
187
184 @classmethod
188 @classmethod
185 def plain(cls, source, universal_newline=True):
189 def plain(cls, source, universal_newline=True):
186 source = safe_unicode(source)
190 source = safe_unicode(source)
187 if universal_newline:
191 if universal_newline:
188 newline = '\n'
192 newline = '\n'
189 source = newline.join(source.splitlines())
193 source = newline.join(source.splitlines())
190
194
191 source = cls.urlify_text(source)
195 source = cls.urlify_text(source)
192 return '<br />' + source.replace("\n", '<br />')
196 return '<br />' + source.replace("\n", '<br />')
193
197
194 @classmethod
198 @classmethod
195 def markdown(cls, source, safe=True, flavored=True, mentions=False):
199 def markdown(cls, source, safe=True, flavored=True, mentions=False):
196 # Inline HTML is not allowed: any HTML tags found in the source are
200 # Inline HTML is not allowed: any HTML tags found in the source are
197 # replaced with [HTML_REMOVED]. This is controlled by
201 # replaced with [HTML_REMOVED]. This is controlled by
198 # the safe_mode=True parameter of the markdown renderer.
202 # the safe_mode=True parameter of the markdown renderer.
199
203
200 if flavored:
204 if flavored:
201 markdown_renderer = cls.markdown_renderer_flavored
205 markdown_renderer = cls.markdown_renderer_flavored
202 else:
206 else:
203 markdown_renderer = cls.markdown_renderer
207 markdown_renderer = cls.markdown_renderer
204
208
205 if mentions:
209 if mentions:
206 mention_pat = re.compile(MENTIONS_REGEX)
210 mention_pat = re.compile(MENTIONS_REGEX)
207
211
208 def wrapp(match_obj):
212 def wrapp(match_obj):
209 uname = match_obj.groups()[0]
213 uname = match_obj.groups()[0]
210 return ' **@%(uname)s** ' % {'uname': uname}
214 return ' **@%(uname)s** ' % {'uname': uname}
211 mention_hl = mention_pat.sub(wrapp, source).strip()
215 mention_hl = mention_pat.sub(wrapp, source).strip()
212 # mentions are already highlighted above; render again with mentions=False
216 # mentions are already highlighted above; render again with mentions=False
213 return cls.markdown(mention_hl, safe=safe, flavored=flavored,
217 return cls.markdown(mention_hl, safe=safe, flavored=flavored,
214 mentions=False)
218 mentions=False)
215
219
216 source = safe_unicode(source)
220 source = safe_unicode(source)
217 try:
221 try:
218 if flavored:
222 if flavored:
219 source = cls._flavored_markdown(source)
223 source = cls._flavored_markdown(source)
220 return markdown_renderer.convert(source)
224 return markdown_renderer.convert(source)
221 except Exception:
225 except Exception:
222 log.exception('Error when rendering Markdown')
226 log.exception('Error when rendering Markdown')
223 if safe:
227 if safe:
224 log.debug('Falling back to plain mode rendering')
228 log.debug('Falling back to plain mode rendering')
225 return cls.plain(source)
229 return cls.plain(source)
226 else:
230 else:
227 raise
231 raise
228
232
229 @classmethod
233 @classmethod
230 def rst(cls, source, safe=True, mentions=False):
234 def rst(cls, source, safe=True, mentions=False):
231 if mentions:
235 if mentions:
232 mention_pat = re.compile(MENTIONS_REGEX)
236 mention_pat = re.compile(MENTIONS_REGEX)
233
237
234 def wrapp(match_obj):
238 def wrapp(match_obj):
235 uname = match_obj.groups()[0]
239 uname = match_obj.groups()[0]
236 return ' **@%(uname)s** ' % {'uname': uname}
240 return ' **@%(uname)s** ' % {'uname': uname}
237 mention_hl = mention_pat.sub(wrapp, source).strip()
241 mention_hl = mention_pat.sub(wrapp, source).strip()
238 # mentions are already highlighted above; render again with mentions=False
242 # mentions are already highlighted above; render again with mentions=False
239 return cls.rst(mention_hl, safe=safe, mentions=False)
243 return cls.rst(mention_hl, safe=safe, mentions=False)
240
244
241 source = safe_unicode(source)
245 source = safe_unicode(source)
242 try:
246 try:
243 docutils_settings = dict(
247 docutils_settings = dict(
244 [(alias, None) for alias in
248 [(alias, None) for alias in
245 cls.RESTRUCTUREDTEXT_DISALLOWED_DIRECTIVES])
249 cls.RESTRUCTUREDTEXT_DISALLOWED_DIRECTIVES])
246
250
247 docutils_settings.update({'input_encoding': 'unicode',
251 docutils_settings.update({'input_encoding': 'unicode',
248 'report_level': 4})
252 'report_level': 4})
249
253
250 for k, v in docutils_settings.iteritems():
254 for k, v in docutils_settings.iteritems():
251 directives.register_directive(k, v)
255 directives.register_directive(k, v)
252
256
253 parts = publish_parts(source=source,
257 parts = publish_parts(source=source,
254 writer_name="html4css1",
258 writer_name="html4css1",
255 settings_overrides=docutils_settings)
259 settings_overrides=docutils_settings)
256
260
257 return parts['html_title'] + parts["fragment"]
261 return parts['html_title'] + parts["fragment"]
258 except Exception:
262 except Exception:
259 log.exception('Error when rendering RST')
263 log.exception('Error when rendering RST')
260 if safe:
264 if safe:
261 log.debug('Falling back to plain mode rendering')
265 log.debug('Falling back to plain mode rendering')
262 return cls.plain(source)
266 return cls.plain(source)
263 else:
267 else:
264 raise
268 raise
265
269
270 @classmethod
271 def jupyter(cls, source):
272 from rhodecode.lib import helpers
273 import nbformat
274 from nbconvert import HTMLExporter
275 from traitlets.config import Config
276
277 class CustomHTMLExporter(HTMLExporter):
278 def _template_file_default(self):
279 return 'basic'
280
281 def _sanitize_resources(resources):
282 """
283 Skip/sanitize some of the CSS generated and included by jupyter
284 so it doesn't mess up the UI so much.
285 """
286
287 # TODO(marcink): we should probably replace this with a whole custom
288 # CSS set that doesn't break the UI, but jupyter-generated html has some
289 # special markers, so achieving that requires a custom HTML exporter
290 # template with _default_template_path_default overridden
291
292 # strip the reset CSS
293 resources[0] = resources[0][resources[0].find('/*! Source'):]
294 return resources
295
296 def as_html(notebook):
297 conf = Config()
298 html_exporter = CustomHTMLExporter(config=conf)
299
300 (body, resources) = html_exporter.from_notebook_node(notebook)
301 header = '<!-- ## IPYTHON NOTEBOOK RENDERING ## -->'
302 js = MakoTemplate(r'''
303 <!-- Load mathjax -->
304 <!-- MathJax configuration -->
305 <script type="text/x-mathjax-config">
306 MathJax.Hub.Config({
307 jax: ["input/TeX","output/HTML-CSS", "output/PreviewHTML"],
308 extensions: ["tex2jax.js","MathMenu.js","MathZoom.js", "fast-preview.js", "AssistiveMML.js", "[Contrib]/a11y/accessibility-menu.js"],
309 TeX: {
310 extensions: ["AMSmath.js","AMSsymbols.js","noErrors.js","noUndefined.js"]
311 },
312 tex2jax: {
313 inlineMath: [ ['$','$'], ["\\(","\\)"] ],
314 displayMath: [ ['$$','$$'], ["\\[","\\]"] ],
315 processEscapes: true,
316 processEnvironments: true
317 },
318 // Center justify equations in code and markdown cells. Elsewhere
319 // we use CSS to left justify single line equations in code cells.
320 displayAlign: 'center',
321 "HTML-CSS": {
322 styles: {'.MathJax_Display': {"margin": 0}},
323 linebreaks: { automatic: true }
324 },
325 showMathMenu: false
326 });
327 </script>
328 <!-- End of mathjax configuration -->
329 <script src="${h.asset('js/src/math_jax/MathJax.js')}"></script>
330 ''').render(h=helpers)
331
332 css = '<style>{}</style>'.format(
333 ''.join(_sanitize_resources(resources['inlining']['css'])))
334
335 body = '\n'.join([header, css, js, body])
336 return body, resources
337
338 notebook = nbformat.reads(source, as_version=4)
339 (body, resources) = as_html(notebook)
340 return body
341
266
342
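For context, a minimal standalone sketch of the nbconvert flow the jupyter() classmethod builds on (no RhodeCode specifics; 'notebook.ipynb' is a placeholder path, and the nbconvert API shown is the pre-6.0 one this code targets):

    import nbformat
    from nbconvert import HTMLExporter

    with open('notebook.ipynb') as f:
        notebook = nbformat.reads(f.read(), as_version=4)

    exporter = HTMLExporter()
    exporter.template_file = 'basic'   # body-only template, no full-page chrome
    body, resources = exporter.from_notebook_node(notebook)
    # body: the HTML fragment; resources['inlining']['css'] carries the inlined
    # CSS that jupyter() above sanitizes before embedding it into the page.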
267 class RstTemplateRenderer(object):
343 class RstTemplateRenderer(object):
268
344
269 def __init__(self):
345 def __init__(self):
270 base = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
346 base = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
271 rst_template_dirs = [os.path.join(base, 'templates', 'rst_templates')]
347 rst_template_dirs = [os.path.join(base, 'templates', 'rst_templates')]
272 self.template_store = TemplateLookup(
348 self.template_store = TemplateLookup(
273 directories=rst_template_dirs,
349 directories=rst_template_dirs,
274 input_encoding='utf-8',
350 input_encoding='utf-8',
275 imports=['from rhodecode.lib import helpers as h'])
351 imports=['from rhodecode.lib import helpers as h'])
276
352
277 def _get_template(self, templatename):
353 def _get_template(self, templatename):
278 return self.template_store.get_template(templatename)
354 return self.template_store.get_template(templatename)
279
355
280 def render(self, template_name, **kwargs):
356 def render(self, template_name, **kwargs):
281 template = self._get_template(template_name)
357 template = self._get_template(template_name)
282 return template.render(**kwargs)
358 return template.render(**kwargs)
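Illustrative use of RstTemplateRenderer (the template name and keyword arguments are placeholders; actual templates live under templates/rst_templates):

    renderer = RstTemplateRenderer()
    rst_text = renderer.render('example_template.mako', name='value')
    # renders the Mako template from the rst_templates lookup with the given kwargs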