jupyter-rendering: added rendering of notebook into MarkupRenderer class.
marcink
r1491:4811d677 default
@@ -1,2038 +1,2019 @@
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2010-2017 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21 """
22 22 Helper functions
23 23
24 24 Consists of functions typically used within templates, but also
25 25 available to Controllers. This module is available to both as 'h'.
26 26 """
27 27
28 28 import random
29 29 import hashlib
30 30 import StringIO
31 31 import urllib
32 32 import math
33 33 import logging
34 34 import re
35 35 import urlparse
36 36 import time
37 37 import string
38 38 import hashlib
39 39 import pygments
40 40 import itertools
41 import fnmatch
41 42
42 43 from datetime import datetime
43 44 from functools import partial
44 45 from pygments.formatters.html import HtmlFormatter
45 46 from pygments import highlight as code_highlight
46 47 from pygments.lexers import (
47 48 get_lexer_by_name, get_lexer_for_filename, get_lexer_for_mimetype)
48 49 from pylons import url as pylons_url
49 50 from pylons.i18n.translation import _, ungettext
50 51 from pyramid.threadlocal import get_current_request
51 52
52 53 from webhelpers.html import literal, HTML, escape
53 54 from webhelpers.html.tools import *
54 55 from webhelpers.html.builder import make_tag
55 56 from webhelpers.html.tags import auto_discovery_link, checkbox, css_classes, \
56 57 end_form, file, form as wh_form, hidden, image, javascript_link, link_to, \
57 58 link_to_if, link_to_unless, ol, required_legend, select, stylesheet_link, \
58 59 submit, text, password, textarea, title, ul, xml_declaration, radio
59 60 from webhelpers.html.tools import auto_link, button_to, highlight, \
60 61 js_obfuscate, mail_to, strip_links, strip_tags, tag_re
61 62 from webhelpers.pylonslib import Flash as _Flash
62 63 from webhelpers.text import chop_at, collapse, convert_accented_entities, \
63 64 convert_misc_entities, lchop, plural, rchop, remove_formatting, \
64 65 replace_whitespace, urlify, truncate, wrap_paragraphs
65 66 from webhelpers.date import time_ago_in_words
66 67 from webhelpers.paginate import Page as _Page
67 68 from webhelpers.html.tags import _set_input_attrs, _set_id_attr, \
68 69 convert_boolean_attrs, NotGiven, _make_safe_id_component
69 70 from webhelpers2.number import format_byte_size
70 71
71 72 from rhodecode.lib.action_parser import action_parser
72 73 from rhodecode.lib.ext_json import json
73 74 from rhodecode.lib.utils import repo_name_slug, get_custom_lexer
74 75 from rhodecode.lib.utils2 import str2bool, safe_unicode, safe_str, \
75 76 get_commit_safe, datetime_to_time, time_to_datetime, time_to_utcdatetime, \
76 77 AttributeDict, safe_int, md5, md5_safe
77 78 from rhodecode.lib.markup_renderer import MarkupRenderer
78 79 from rhodecode.lib.vcs.exceptions import CommitDoesNotExistError
79 80 from rhodecode.lib.vcs.backends.base import BaseChangeset, EmptyCommit
80 81 from rhodecode.config.conf import DATE_FORMAT, DATETIME_FORMAT
81 82 from rhodecode.model.changeset_status import ChangesetStatusModel
82 83 from rhodecode.model.db import Permission, User, Repository
83 84 from rhodecode.model.repo_group import RepoGroupModel
84 85 from rhodecode.model.settings import IssueTrackerSettingsModel
85 86
86 87 log = logging.getLogger(__name__)
87 88
88 89
89 90 DEFAULT_USER = User.DEFAULT_USER
90 91 DEFAULT_USER_EMAIL = User.DEFAULT_USER_EMAIL
91 92
92 93
93 94 def url(*args, **kw):
94 95 return pylons_url(*args, **kw)
95 96
96 97
97 98 def pylons_url_current(*args, **kw):
98 99 """
99 100 This function overrides pylons.url.current() which returns the current
100 101 path so that it will also work from a pyramid only context. This
101 102 should be removed once port to pyramid is complete.
102 103 """
103 104 if not args and not kw:
104 105 request = get_current_request()
105 106 return request.path
106 107 return pylons_url.current(*args, **kw)
107 108
108 109 url.current = pylons_url_current
109 110
110 111
111 112 def url_replace(**qargs):
112 113 """ Returns the current request url while replacing query string args """
113 114
114 115 request = get_current_request()
115 116 new_args = request.GET.mixed()
116 117 new_args.update(qargs)
117 118 return url('', **new_args)
118 119
119 120
120 121 def asset(path, ver=None, **kwargs):
121 122 """
122 123 Helper to generate a static asset file path for rhodecode assets
123 124
124 125 eg. h.asset('images/image.png', ver='3923')
125 126
126 127 :param path: path of asset
127 128 :param ver: optional version query param to append as ?ver=
128 129 """
129 130 request = get_current_request()
130 131 query = {}
131 132 query.update(kwargs)
132 133 if ver:
133 134 query = {'ver': ver}
134 135 return request.static_path(
135 136 'rhodecode:public/{}'.format(path), _query=query)
136 137
137 138
138 139 default_html_escape_table = {
139 140 ord('&'): u'&amp;',
140 141 ord('<'): u'&lt;',
141 142 ord('>'): u'&gt;',
142 143 ord('"'): u'&quot;',
143 144 ord("'"): u'&#39;',
144 145 }
145 146
146 147
147 148 def html_escape(text, html_escape_table=default_html_escape_table):
148 149 """Produce entities within text."""
149 150 return text.translate(html_escape_table)
150 151
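# Illustrative usage sketch for html_escape (not part of the original module);
# it assumes a unicode input so the dict-based str.translate table above applies:
#
#     >>> html_escape(u'<b>"quoted" & \'single\'</b>')
#     u'&lt;b&gt;&quot;quoted&quot; &amp; &#39;single&#39;&lt;/b&gt;'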
151 152
152 153 def chop_at_smart(s, sub, inclusive=False, suffix_if_chopped=None):
153 154 """
154 155 Truncate string ``s`` at the first occurrence of ``sub``.
155 156
156 157 If ``inclusive`` is true, truncate just after ``sub`` rather than at it.
157 158 """
158 159 suffix_if_chopped = suffix_if_chopped or ''
159 160 pos = s.find(sub)
160 161 if pos == -1:
161 162 return s
162 163
163 164 if inclusive:
164 165 pos += len(sub)
165 166
166 167 chopped = s[:pos]
167 168 left = s[pos:].strip()
168 169
169 170 if left and suffix_if_chopped:
170 171 chopped += suffix_if_chopped
171 172
172 173 return chopped
173 174
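# Illustrative sketch for chop_at_smart (not from the original module): the
# string is cut at the first occurrence of ``sub``, and the suffix is only
# appended when something was actually chopped off.
#
#     >>> chop_at_smart('2016-07-10 this is a test', ' ', suffix_if_chopped='...')
#     '2016-07-10...'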
174 175
175 176 def shorter(text, size=20):
176 177 postfix = '...'
177 178 if len(text) > size:
178 179 return text[:size - len(postfix)] + postfix
179 180 return text
180 181
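# Illustrative sketch for shorter() (not from the original module): the postfix
# counts towards the size budget, so a 10-character limit keeps 7 characters of
# text plus '...'.
#
#     >>> shorter('abcdefghijklmnop', size=10)
#     'abcdefg...'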
181 182
182 183 def _reset(name, value=None, id=NotGiven, type="reset", **attrs):
183 184 """
184 185 Reset button
185 186 """
186 187 _set_input_attrs(attrs, type, name, value)
187 188 _set_id_attr(attrs, id, name)
188 189 convert_boolean_attrs(attrs, ["disabled"])
189 190 return HTML.input(**attrs)
190 191
191 192 reset = _reset
192 193 safeid = _make_safe_id_component
193 194
194 195
195 196 def branding(name, length=40):
196 197 return truncate(name, length, indicator="")
197 198
198 199
199 200 def FID(raw_id, path):
200 201 """
201 202 Creates a unique ID for a filenode based on the hash of its path and commit;
202 203 it's safe to use in urls
203 204
204 205 :param raw_id:
205 206 :param path:
206 207 """
207 208
208 209 return 'c-%s-%s' % (short_id(raw_id), md5_safe(path)[:12])
209 210
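# Shape of the id produced by FID (editor's sketch, values below are illustrative
# and made up): 'c-' + first 12 chars of the commit id + '-' + first 12 chars of
# md5(path), e.g. something like 'c-9a1b2c3d4e5f-0123456789ab'.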
210 211
211 212 class _GetError(object):
212 213 """Get error from form_errors, and represent it as span wrapped error
213 214 message
214 215
215 216 :param field_name: field to fetch errors for
216 217 :param form_errors: form errors dict
217 218 """
218 219
219 220 def __call__(self, field_name, form_errors):
220 221 tmpl = """<span class="error_msg">%s</span>"""
221 222 if form_errors and field_name in form_errors:
222 223 return literal(tmpl % form_errors.get(field_name))
223 224
224 225 get_error = _GetError()
225 226
226 227
227 228 class _ToolTip(object):
228 229
229 230 def __call__(self, tooltip_title, trim_at=50):
230 231 """
231 232 Special function just to wrap our text into nicely formatted,
232 233 auto-wrapped text
233 234
234 235 :param tooltip_title:
235 236 """
236 237 tooltip_title = escape(tooltip_title)
237 238 tooltip_title = tooltip_title.replace('<', '&lt;').replace('>', '&gt;')
238 239 return tooltip_title
239 240 tooltip = _ToolTip()
240 241
241 242
242 243 def files_breadcrumbs(repo_name, commit_id, file_path):
243 244 if isinstance(file_path, str):
244 245 file_path = safe_unicode(file_path)
245 246
246 247 # TODO: johbo: Is this always a url like path, or is this operating
247 248 # system dependent?
248 249 path_segments = file_path.split('/')
249 250
250 251 repo_name_html = escape(repo_name)
251 252 if len(path_segments) == 1 and path_segments[0] == '':
252 253 url_segments = [repo_name_html]
253 254 else:
254 255 url_segments = [
255 256 link_to(
256 257 repo_name_html,
257 258 url('files_home',
258 259 repo_name=repo_name,
259 260 revision=commit_id,
260 261 f_path=''),
261 262 class_='pjax-link')]
262 263
263 264 last_cnt = len(path_segments) - 1
264 265 for cnt, segment in enumerate(path_segments):
265 266 if not segment:
266 267 continue
267 268 segment_html = escape(segment)
268 269
269 270 if cnt != last_cnt:
270 271 url_segments.append(
271 272 link_to(
272 273 segment_html,
273 274 url('files_home',
274 275 repo_name=repo_name,
275 276 revision=commit_id,
276 277 f_path='/'.join(path_segments[:cnt + 1])),
277 278 class_='pjax-link'))
278 279 else:
279 280 url_segments.append(segment_html)
280 281
281 282 return literal('/'.join(url_segments))
282 283
283 284
284 285 class CodeHtmlFormatter(HtmlFormatter):
285 286 """
286 287 Custom HTML formatter for source code
287 288 """
288 289
289 290 def wrap(self, source, outfile):
290 291 return self._wrap_div(self._wrap_pre(self._wrap_code(source)))
291 292
292 293 def _wrap_code(self, source):
293 294 for cnt, it in enumerate(source):
294 295 i, t = it
295 296 t = '<div id="L%s">%s</div>' % (cnt + 1, t)
296 297 yield i, t
297 298
298 299 def _wrap_tablelinenos(self, inner):
299 300 dummyoutfile = StringIO.StringIO()
300 301 lncount = 0
301 302 for t, line in inner:
302 303 if t:
303 304 lncount += 1
304 305 dummyoutfile.write(line)
305 306
306 307 fl = self.linenostart
307 308 mw = len(str(lncount + fl - 1))
308 309 sp = self.linenospecial
309 310 st = self.linenostep
310 311 la = self.lineanchors
311 312 aln = self.anchorlinenos
312 313 nocls = self.noclasses
313 314 if sp:
314 315 lines = []
315 316
316 317 for i in range(fl, fl + lncount):
317 318 if i % st == 0:
318 319 if i % sp == 0:
319 320 if aln:
320 321 lines.append('<a href="#%s%d" class="special">%*d</a>' %
321 322 (la, i, mw, i))
322 323 else:
323 324 lines.append('<span class="special">%*d</span>' % (mw, i))
324 325 else:
325 326 if aln:
326 327 lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
327 328 else:
328 329 lines.append('%*d' % (mw, i))
329 330 else:
330 331 lines.append('')
331 332 ls = '\n'.join(lines)
332 333 else:
333 334 lines = []
334 335 for i in range(fl, fl + lncount):
335 336 if i % st == 0:
336 337 if aln:
337 338 lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
338 339 else:
339 340 lines.append('%*d' % (mw, i))
340 341 else:
341 342 lines.append('')
342 343 ls = '\n'.join(lines)
343 344
344 345 # in case you wonder about the seemingly redundant <div> here: since the
345 346 # content in the other cell also is wrapped in a div, some browsers in
346 347 # some configurations seem to mess up the formatting...
347 348 if nocls:
348 349 yield 0, ('<table class="%stable">' % self.cssclass +
349 350 '<tr><td><div class="linenodiv" '
350 351 'style="background-color: #f0f0f0; padding-right: 10px">'
351 352 '<pre style="line-height: 125%">' +
352 353 ls + '</pre></div></td><td id="hlcode" class="code">')
353 354 else:
354 355 yield 0, ('<table class="%stable">' % self.cssclass +
355 356 '<tr><td class="linenos"><div class="linenodiv"><pre>' +
356 357 ls + '</pre></div></td><td id="hlcode" class="code">')
357 358 yield 0, dummyoutfile.getvalue()
358 359 yield 0, '</td></tr></table>'
359 360
360 361
361 362 class SearchContentCodeHtmlFormatter(CodeHtmlFormatter):
362 363 def __init__(self, **kw):
363 364 # only show these line numbers if set
364 365 self.only_lines = kw.pop('only_line_numbers', [])
365 366 self.query_terms = kw.pop('query_terms', [])
366 367 self.max_lines = kw.pop('max_lines', 5)
367 368 self.line_context = kw.pop('line_context', 3)
368 369 self.url = kw.pop('url', None)
369 370
370 371 super(CodeHtmlFormatter, self).__init__(**kw)
371 372
372 373 def _wrap_code(self, source):
373 374 for cnt, it in enumerate(source):
374 375 i, t = it
375 376 t = '<pre>%s</pre>' % t
376 377 yield i, t
377 378
378 379 def _wrap_tablelinenos(self, inner):
379 380 yield 0, '<table class="code-highlight %stable">' % self.cssclass
380 381
381 382 last_shown_line_number = 0
382 383 current_line_number = 1
383 384
384 385 for t, line in inner:
385 386 if not t:
386 387 yield t, line
387 388 continue
388 389
389 390 if current_line_number in self.only_lines:
390 391 if last_shown_line_number + 1 != current_line_number:
391 392 yield 0, '<tr>'
392 393 yield 0, '<td class="line">...</td>'
393 394 yield 0, '<td id="hlcode" class="code"></td>'
394 395 yield 0, '</tr>'
395 396
396 397 yield 0, '<tr>'
397 398 if self.url:
398 399 yield 0, '<td class="line"><a href="%s#L%i">%i</a></td>' % (
399 400 self.url, current_line_number, current_line_number)
400 401 else:
401 402 yield 0, '<td class="line"><a href="">%i</a></td>' % (
402 403 current_line_number)
403 404 yield 0, '<td id="hlcode" class="code">' + line + '</td>'
404 405 yield 0, '</tr>'
405 406
406 407 last_shown_line_number = current_line_number
407 408
408 409 current_line_number += 1
409 410
410 411
411 412 yield 0, '</table>'
412 413
413 414
414 415 def extract_phrases(text_query):
415 416 """
416 417 Extracts phrases from a search term string, making sure phrases
417 418 contained in double quotes are kept together, and discarding empty
418 419 or whitespace-only values, eg.
419 420
420 421 'some text "a phrase" more' => ['some', 'text', 'a phrase', 'more']
421 422
422 423 """
423 424
424 425 in_phrase = False
425 426 buf = ''
426 427 phrases = []
427 428 for char in text_query:
428 429 if in_phrase:
429 430 if char == '"': # end phrase
430 431 phrases.append(buf)
431 432 buf = ''
432 433 in_phrase = False
433 434 continue
434 435 else:
435 436 buf += char
436 437 continue
437 438 else:
438 439 if char == '"': # start phrase
439 440 in_phrase = True
440 441 phrases.append(buf)
441 442 buf = ''
442 443 continue
443 444 elif char == ' ':
444 445 phrases.append(buf)
445 446 buf = ''
446 447 continue
447 448 else:
448 449 buf += char
449 450
450 451 phrases.append(buf)
451 452 phrases = [phrase.strip() for phrase in phrases if phrase.strip()]
452 453 return phrases
453 454
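# Doctest-style illustration of extract_phrases (mirrors the example in the
# docstring above; not part of the original module):
#
#     >>> extract_phrases('some text "a phrase" more')
#     ['some', 'text', 'a phrase', 'more']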
454 455
455 456 def get_matching_offsets(text, phrases):
456 457 """
457 458 Returns a list of string offsets in `text` where the given `phrases` match
458 459
459 460 >>> get_matching_offsets('some text here', ['some', 'here'])
460 461 [(0, 4), (10, 14)]
461 462
462 463 """
463 464 offsets = []
464 465 for phrase in phrases:
465 466 for match in re.finditer(phrase, text):
466 467 offsets.append((match.start(), match.end()))
467 468
468 469 return offsets
469 470
470 471
471 472 def normalize_text_for_matching(x):
472 473 """
473 474 Replaces all non-alphanumeric characters with spaces and lowercases the string,
474 475 useful for comparing two text strings without punctuation
475 476 """
476 477 return re.sub(r'[^\w]', ' ', x.lower())
477 478
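# Illustrative behaviour of normalize_text_for_matching (editor's sketch):
# punctuation becomes whitespace and case is folded, so offsets still line up
# with the original text.
#
#     >>> normalize_text_for_matching('Some-Text, here!')
#     'some text  here '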
478 479
479 480 def get_matching_line_offsets(lines, terms):
480 481 """ Return a set of `lines` indices (starting from 1) matching a
481 482 text search query, along with `context` lines above/below matching lines
482 483
483 484 :param lines: list of strings representing lines
484 485 :param terms: search term string to match in lines eg. 'some text'
485 486 :param context: number of lines above/below a matching line to add to result
486 487 :param max_lines: cut off for lines of interest
487 488 eg.
488 489
489 490 text = '''
490 491 words words words
491 492 words words words
492 493 some text some
493 494 words words words
494 495 words words words
495 496 text here what
496 497 '''
497 498 get_matching_line_offsets(text, 'text', context=1)
498 499 {3: [(5, 9)], 6: [(0, 4)]}
499 500
500 501 """
501 502 matching_lines = {}
502 503 phrases = [normalize_text_for_matching(phrase)
503 504 for phrase in extract_phrases(terms)]
504 505
505 506 for line_index, line in enumerate(lines, start=1):
506 507 match_offsets = get_matching_offsets(
507 508 normalize_text_for_matching(line), phrases)
508 509 if match_offsets:
509 510 matching_lines[line_index] = match_offsets
510 511
511 512 return matching_lines
512 513
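# A minimal sketch of the search-highlight pipeline defined above
# (extract_phrases -> normalize_text_for_matching -> get_matching_offsets),
# assuming `lines` is a list of strings as described in the docstring
# (not part of the original module):
#
#     >>> get_matching_line_offsets(['words words', 'some text here'], 'text')
#     {2: [(5, 9)]}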
513 514
514 515 def hsv_to_rgb(h, s, v):
515 516 """ Convert hsv color values to rgb """
516 517
517 518 if s == 0.0:
518 519 return v, v, v
519 520 i = int(h * 6.0) # XXX assume int() truncates!
520 521 f = (h * 6.0) - i
521 522 p = v * (1.0 - s)
522 523 q = v * (1.0 - s * f)
523 524 t = v * (1.0 - s * (1.0 - f))
524 525 i = i % 6
525 526 if i == 0:
526 527 return v, t, p
527 528 if i == 1:
528 529 return q, v, p
529 530 if i == 2:
530 531 return p, v, t
531 532 if i == 3:
532 533 return p, q, v
533 534 if i == 4:
534 535 return t, p, v
535 536 if i == 5:
536 537 return v, p, q
537 538
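# Worked example for hsv_to_rgb (illustrative, not from the original module):
# pure red is hue 0.0 at full saturation and value, which falls into the
# i == 0 branch above.
#
#     >>> hsv_to_rgb(0.0, 1.0, 1.0)
#     (1.0, 0.0, 0.0)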
538 539
539 540 def unique_color_generator(n=10000, saturation=0.10, lightness=0.95):
540 541 """
541 542 Generator for getting n evenly distributed colors using
542 543 hsv color and the golden ratio. It always returns the same order of colors
543 544
544 545 :param n: number of colors to generate
545 546 :param saturation: saturation of returned colors
546 547 :param lightness: lightness of returned colors
547 548 :returns: RGB tuple
548 549 """
549 550
550 551 golden_ratio = 0.618033988749895
551 552 h = 0.22717784590367374
552 553
553 554 for _ in xrange(n):
554 555 h += golden_ratio
555 556 h %= 1
556 557 HSV_tuple = [h, saturation, lightness]
557 558 RGB_tuple = hsv_to_rgb(*HSV_tuple)
558 559 yield map(lambda x: str(int(x * 256)), RGB_tuple)
559 560
560 561
561 562 def color_hasher(n=10000, saturation=0.10, lightness=0.95):
562 563 """
563 564 Returns a function which, when called with an argument, returns a unique
564 565 color for that argument, eg.
565 566
566 567 :param n: number of colors to generate
567 568 :param saturation: saturation of returned colors
568 569 :param lightness: lightness of returned colors
569 570 :returns: css RGB string
570 571
571 572 >>> color_hash = color_hasher()
572 573 >>> color_hash('hello')
573 574 'rgb(34, 12, 59)'
574 575 >>> color_hash('hello')
575 576 'rgb(34, 12, 59)'
576 577 >>> color_hash('other')
577 578 'rgb(90, 224, 159)'
578 579 """
579 580
580 581 color_dict = {}
581 582 cgenerator = unique_color_generator(
582 583 saturation=saturation, lightness=lightness)
583 584
584 585 def get_color_string(thing):
585 586 if thing in color_dict:
586 587 col = color_dict[thing]
587 588 else:
588 589 col = color_dict[thing] = cgenerator.next()
589 590 return "rgb(%s)" % (', '.join(col))
590 591
591 592 return get_color_string
592 593
593 594
594 595 def get_lexer_safe(mimetype=None, filepath=None):
595 596 """
596 597 Tries to return a relevant pygments lexer using mimetype/filepath name,
597 598 defaulting to plain text if none could be found
598 599 """
599 600 lexer = None
600 601 try:
601 602 if mimetype:
602 603 lexer = get_lexer_for_mimetype(mimetype)
603 604 if not lexer:
604 605 lexer = get_lexer_for_filename(filepath)
605 606 except pygments.util.ClassNotFound:
606 607 pass
607 608
608 609 if not lexer:
609 610 lexer = get_lexer_by_name('text')
610 611
611 612 return lexer
612 613
613 614
614 615 def get_lexer_for_filenode(filenode):
615 616 lexer = get_custom_lexer(filenode.extension) or filenode.lexer
616 617 return lexer
617 618
618 619
619 620 def pygmentize(filenode, **kwargs):
620 621 """
621 622 pygmentize function using pygments
622 623
623 624 :param filenode:
624 625 """
625 626 lexer = get_lexer_for_filenode(filenode)
626 627 return literal(code_highlight(filenode.content, lexer,
627 628 CodeHtmlFormatter(**kwargs)))
628 629
629 630
630 631 def is_following_repo(repo_name, user_id):
631 632 from rhodecode.model.scm import ScmModel
632 633 return ScmModel().is_following_repo(repo_name, user_id)
633 634
634 635
635 636 class _Message(object):
636 637 """A message returned by ``Flash.pop_messages()``.
637 638
638 639 Converting the message to a string returns the message text. Instances
639 640 also have the following attributes:
640 641
641 642 * ``message``: the message text.
642 643 * ``category``: the category specified when the message was created.
643 644 """
644 645
645 646 def __init__(self, category, message):
646 647 self.category = category
647 648 self.message = message
648 649
649 650 def __str__(self):
650 651 return self.message
651 652
652 653 __unicode__ = __str__
653 654
654 655 def __html__(self):
655 656 return escape(safe_unicode(self.message))
656 657
657 658
658 659 class Flash(_Flash):
659 660
660 661 def pop_messages(self):
661 662 """Return all accumulated messages and delete them from the session.
662 663
663 664 The return value is a list of ``Message`` objects.
664 665 """
665 666 from pylons import session
666 667
667 668 messages = []
668 669
669 670 # Pop the 'old' pylons flash messages. They are tuples of the form
670 671 # (category, message)
671 672 for cat, msg in session.pop(self.session_key, []):
672 673 messages.append(_Message(cat, msg))
673 674
674 675 # Pop the 'new' pyramid flash messages for each category as list
675 676 # of strings.
676 677 for cat in self.categories:
677 678 for msg in session.pop_flash(queue=cat):
678 679 messages.append(_Message(cat, msg))
679 680 # Map messages from the default queue to the 'notice' category.
680 681 for msg in session.pop_flash():
681 682 messages.append(_Message('notice', msg))
682 683
683 684 session.save()
684 685 return messages
685 686
686 687 def json_alerts(self):
687 688 payloads = []
688 689 messages = flash.pop_messages()
689 690 if messages:
690 691 for message in messages:
691 692 subdata = {}
692 693 if hasattr(message.message, 'rsplit'):
693 694 flash_data = message.message.rsplit('|DELIM|', 1)
694 695 org_message = flash_data[0]
695 696 if len(flash_data) > 1:
696 697 subdata = json.loads(flash_data[1])
697 698 else:
698 699 org_message = message.message
699 700 payloads.append({
700 701 'message': {
701 702 'message': u'{}'.format(org_message),
702 703 'level': message.category,
703 704 'force': True,
704 705 'subdata': subdata
705 706 }
706 707 })
707 708 return json.dumps(payloads)
708 709
709 710 flash = Flash()
710 711
711 712 #==============================================================================
712 713 # SCM FILTERS available via h.
713 714 #==============================================================================
714 715 from rhodecode.lib.vcs.utils import author_name, author_email
715 716 from rhodecode.lib.utils2 import credentials_filter, age as _age
716 717 from rhodecode.model.db import User, ChangesetStatus
717 718
718 719 age = _age
719 720 capitalize = lambda x: x.capitalize()
720 721 email = author_email
721 722 short_id = lambda x: x[:12]
722 723 hide_credentials = lambda x: ''.join(credentials_filter(x))
723 724
724 725
725 726 def age_component(datetime_iso, value=None, time_is_local=False):
726 727 title = value or format_date(datetime_iso)
727 728 tzinfo = '+00:00'
728 729
729 730 # detect if we have a timezone info, otherwise, add it
730 731 if isinstance(datetime_iso, datetime) and not datetime_iso.tzinfo:
731 732 if time_is_local:
732 733 tzinfo = time.strftime("+%H:%M",
733 734 time.gmtime(
734 735 (datetime.now() - datetime.utcnow()).seconds + 1
735 736 )
736 737 )
737 738
738 739 return literal(
739 740 '<time class="timeago tooltip" '
740 741 'title="{1}{2}" datetime="{0}{2}">{1}</time>'.format(
741 742 datetime_iso, title, tzinfo))
742 743
743 744
744 745 def _shorten_commit_id(commit_id):
745 746 from rhodecode import CONFIG
746 747 def_len = safe_int(CONFIG.get('rhodecode_show_sha_length', 12))
747 748 return commit_id[:def_len]
748 749
749 750
750 751 def show_id(commit):
751 752 """
752 753 Configurable function that shows ID
753 754 by default it's r123:fffeeefffeee
754 755
755 756 :param commit: commit instance
756 757 """
757 758 from rhodecode import CONFIG
758 759 show_idx = str2bool(CONFIG.get('rhodecode_show_revision_number', True))
759 760
760 761 raw_id = _shorten_commit_id(commit.raw_id)
761 762 if show_idx:
762 763 return 'r%s:%s' % (commit.idx, raw_id)
763 764 else:
764 765 return '%s' % (raw_id, )
765 766
766 767
767 768 def format_date(date):
768 769 """
769 770 use a standardized formatting for dates used in RhodeCode
770 771
771 772 :param date: date/datetime object
772 773 :return: formatted date
773 774 """
774 775
775 776 if date:
776 777 _fmt = "%a, %d %b %Y %H:%M:%S"
777 778 return safe_unicode(date.strftime(_fmt))
778 779
779 780 return u""
780 781
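# Illustrative output of format_date for a fixed datetime (editor's sketch);
# the weekday and month names follow the "%a, %d %b %Y %H:%M:%S" pattern used above:
#
#     >>> format_date(datetime(2017, 1, 5, 14, 30, 0))
#     u'Thu, 05 Jan 2017 14:30:00'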
781 782
782 783 class _RepoChecker(object):
783 784
784 785 def __init__(self, backend_alias):
785 786 self._backend_alias = backend_alias
786 787
787 788 def __call__(self, repository):
788 789 if hasattr(repository, 'alias'):
789 790 _type = repository.alias
790 791 elif hasattr(repository, 'repo_type'):
791 792 _type = repository.repo_type
792 793 else:
793 794 _type = repository
794 795 return _type == self._backend_alias
795 796
796 797 is_git = _RepoChecker('git')
797 798 is_hg = _RepoChecker('hg')
798 799 is_svn = _RepoChecker('svn')
799 800
800 801
801 802 def get_repo_type_by_name(repo_name):
802 803 repo = Repository.get_by_repo_name(repo_name)
803 804 return repo.repo_type
804 805
805 806
806 807 def is_svn_without_proxy(repository):
807 808 if is_svn(repository):
808 809 from rhodecode.model.settings import VcsSettingsModel
809 810 conf = VcsSettingsModel().get_ui_settings_as_config_obj()
810 811 return not str2bool(conf.get('vcs_svn_proxy', 'http_requests_enabled'))
811 812 return False
812 813
813 814
814 815 def discover_user(author):
815 816 """
816 817 Tries to discover a RhodeCode User based on the author string. The author string
817 818 is typically `FirstName LastName <email@address.com>`
818 819 """
819 820
820 821 # if author is already an instance use it for extraction
821 822 if isinstance(author, User):
822 823 return author
823 824
824 825 # Valid email in the attribute passed, see if they're in the system
825 826 _email = author_email(author)
826 827 if _email != '':
827 828 user = User.get_by_email(_email, case_insensitive=True, cache=True)
828 829 if user is not None:
829 830 return user
830 831
831 832 # Maybe it's a username, we try to extract it and fetch by username ?
832 833 _author = author_name(author)
833 834 user = User.get_by_username(_author, case_insensitive=True, cache=True)
834 835 if user is not None:
835 836 return user
836 837
837 838 return None
838 839
839 840
840 841 def email_or_none(author):
841 842 # extract email from the commit string
842 843 _email = author_email(author)
843 844
844 845 # If we have an email, use it, otherwise
845 846 # see if it contains a username we can get an email from
846 847 if _email != '':
847 848 return _email
848 849 else:
849 850 user = User.get_by_username(
850 851 author_name(author), case_insensitive=True, cache=True)
851 852
852 853 if user is not None:
853 854 return user.email
854 855
855 856 # No valid email, not a valid user in the system, none!
856 857 return None
857 858
858 859
859 860 def link_to_user(author, length=0, **kwargs):
860 861 user = discover_user(author)
861 862 # user can be None, but if we have it already it means we can re-use it
862 863 # in the person() function, so we save 1 intensive-query
863 864 if user:
864 865 author = user
865 866
866 867 display_person = person(author, 'username_or_name_or_email')
867 868 if length:
868 869 display_person = shorter(display_person, length)
869 870
870 871 if user:
871 872 return link_to(
872 873 escape(display_person),
873 874 url('user_profile', username=user.username),
874 875 **kwargs)
875 876 else:
876 877 return escape(display_person)
877 878
878 879
879 880 def person(author, show_attr="username_and_name"):
880 881 user = discover_user(author)
881 882 if user:
882 883 return getattr(user, show_attr)
883 884 else:
884 885 _author = author_name(author)
885 886 _email = email(author)
886 887 return _author or _email
887 888
888 889
889 890 def author_string(email):
890 891 if email:
891 892 user = User.get_by_email(email, case_insensitive=True, cache=True)
892 893 if user:
893 894 if user.firstname or user.lastname:
894 895 return '%s %s &lt;%s&gt;' % (user.firstname, user.lastname, email)
895 896 else:
896 897 return email
897 898 else:
898 899 return email
899 900 else:
900 901 return None
901 902
902 903
903 904 def person_by_id(id_, show_attr="username_and_name"):
904 905 # attr to return from fetched user
905 906 person_getter = lambda usr: getattr(usr, show_attr)
906 907
907 908 #maybe it's an ID ?
908 909 if str(id_).isdigit() or isinstance(id_, int):
909 910 id_ = int(id_)
910 911 user = User.get(id_)
911 912 if user is not None:
912 913 return person_getter(user)
913 914 return id_
914 915
915 916
916 917 def gravatar_with_user(author, show_disabled=False):
917 918 from rhodecode.lib.utils import PartialRenderer
918 919 _render = PartialRenderer('base/base.mako')
919 920 return _render('gravatar_with_user', author, show_disabled=show_disabled)
920 921
921 922
922 923 def desc_stylize(value):
923 924 """
924 925 converts tags from value into html equivalent
925 926
926 927 :param value:
927 928 """
928 929 if not value:
929 930 return ''
930 931
931 932 value = re.sub(r'\[see\ \=\>\ *([a-zA-Z0-9\/\=\?\&\ \:\/\.\-]*)\]',
932 933 '<div class="metatag" tag="see">see =&gt; \\1 </div>', value)
933 934 value = re.sub(r'\[license\ \=\>\ *([a-zA-Z0-9\/\=\?\&\ \:\/\.\-]*)\]',
934 935 '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/\\1">\\1</a></div>', value)
935 936 value = re.sub(r'\[(requires|recommends|conflicts|base)\ \=\>\ *([a-zA-Z0-9\-\/]*)\]',
936 937 '<div class="metatag" tag="\\1">\\1 =&gt; <a href="/\\2">\\2</a></div>', value)
937 938 value = re.sub(r'\[(lang|language)\ \=\>\ *([a-zA-Z\-\/\#\+]*)\]',
938 939 '<div class="metatag" tag="lang">\\2</div>', value)
939 940 value = re.sub(r'\[([a-z]+)\]',
940 941 '<div class="metatag" tag="\\1">\\1</div>', value)
941 942
942 943 return value
943 944
944 945
945 946 def escaped_stylize(value):
946 947 """
947 948 converts tags from value into html equivalent, but escaping its value first
948 949 """
949 950 if not value:
950 951 return ''
951 952
952 953 # Using the default webhelpers escape method, but we have to force it to
953 954 # plain unicode instead of a markup tag so it can be used in regex expressions
954 955 value = unicode(escape(safe_unicode(value)))
955 956
956 957 value = re.sub(r'\[see\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]',
957 958 '<div class="metatag" tag="see">see =&gt; \\1 </div>', value)
958 959 value = re.sub(r'\[license\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]',
959 960 '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/\\1">\\1</a></div>', value)
960 961 value = re.sub(r'\[(requires|recommends|conflicts|base)\ \=\&gt;\ *([a-zA-Z0-9\-\/]*)\]',
961 962 '<div class="metatag" tag="\\1">\\1 =&gt; <a href="/\\2">\\2</a></div>', value)
962 963 value = re.sub(r'\[(lang|language)\ \=\&gt;\ *([a-zA-Z\-\/\#\+]*)\]',
963 964 '<div class="metatag" tag="lang">\\2</div>', value)
964 965 value = re.sub(r'\[([a-z]+)\]',
965 966 '<div class="metatag" tag="\\1">\\1</div>', value)
966 967
967 968 return value
968 969
969 970
970 971 def bool2icon(value):
971 972 """
972 973 Returns the boolean value of a given value, represented as an html element
973 974 with classes that represent icons
974 975
975 976 :param value: given value to convert to html node
976 977 """
977 978
978 979 if value: # does bool conversion
979 980 return HTML.tag('i', class_="icon-true")
980 981 else: # not true as bool
981 982 return HTML.tag('i', class_="icon-false")
982 983
983 984
984 985 #==============================================================================
985 986 # PERMS
986 987 #==============================================================================
987 988 from rhodecode.lib.auth import HasPermissionAny, HasPermissionAll, \
988 989 HasRepoPermissionAny, HasRepoPermissionAll, HasRepoGroupPermissionAll, \
989 990 HasRepoGroupPermissionAny, HasRepoPermissionAnyApi, get_csrf_token, \
990 991 csrf_token_key
991 992
992 993
993 994 #==============================================================================
994 995 # GRAVATAR URL
995 996 #==============================================================================
996 997 class InitialsGravatar(object):
997 998 def __init__(self, email_address, first_name, last_name, size=30,
998 999 background=None, text_color='#fff'):
999 1000 self.size = size
1000 1001 self.first_name = first_name
1001 1002 self.last_name = last_name
1002 1003 self.email_address = email_address
1003 1004 self.background = background or self.str2color(email_address)
1004 1005 self.text_color = text_color
1005 1006
1006 1007 def get_color_bank(self):
1007 1008 """
1008 1009 returns a predefined list of colors that gravatars can use.
1009 1010 Those are randomized distinct colors that guarantee readability and
1010 1011 uniqueness.
1011 1012
1012 1013 generated with: http://phrogz.net/css/distinct-colors.html
1013 1014 """
1014 1015 return [
1015 1016 '#bf3030', '#a67f53', '#00ff00', '#5989b3', '#392040', '#d90000',
1016 1017 '#402910', '#204020', '#79baf2', '#a700b3', '#bf6060', '#7f5320',
1017 1018 '#008000', '#003059', '#ee00ff', '#ff0000', '#8c4b00', '#007300',
1018 1019 '#005fb3', '#de73e6', '#ff4040', '#ffaa00', '#3df255', '#203140',
1019 1020 '#47004d', '#591616', '#664400', '#59b365', '#0d2133', '#83008c',
1020 1021 '#592d2d', '#bf9f60', '#73e682', '#1d3f73', '#73006b', '#402020',
1021 1022 '#b2862d', '#397341', '#597db3', '#e600d6', '#a60000', '#736039',
1022 1023 '#00b318', '#79aaf2', '#330d30', '#ff8080', '#403010', '#16591f',
1023 1024 '#002459', '#8c4688', '#e50000', '#ffbf40', '#00732e', '#102340',
1024 1025 '#bf60ac', '#8c4646', '#cc8800', '#00a642', '#1d3473', '#b32d98',
1025 1026 '#660e00', '#ffd580', '#80ffb2', '#7391e6', '#733967', '#d97b6c',
1026 1027 '#8c5e00', '#59b389', '#3967e6', '#590047', '#73281d', '#665200',
1027 1028 '#00e67a', '#2d50b3', '#8c2377', '#734139', '#b2982d', '#16593a',
1028 1029 '#001859', '#ff00aa', '#a65e53', '#ffcc00', '#0d3321', '#2d3959',
1029 1030 '#731d56', '#401610', '#4c3d00', '#468c6c', '#002ca6', '#d936a3',
1030 1031 '#d94c36', '#403920', '#36d9a3', '#0d1733', '#592d4a', '#993626',
1031 1032 '#cca300', '#00734d', '#46598c', '#8c005e', '#7f1100', '#8c7000',
1032 1033 '#00a66f', '#7382e6', '#b32d74', '#d9896c', '#ffe680', '#1d7362',
1033 1034 '#364cd9', '#73003d', '#d93a00', '#998a4d', '#59b3a1', '#5965b3',
1034 1035 '#e5007a', '#73341d', '#665f00', '#00b38f', '#0018b3', '#59163a',
1035 1036 '#b2502d', '#bfb960', '#00ffcc', '#23318c', '#a6537f', '#734939',
1036 1037 '#b2a700', '#104036', '#3d3df2', '#402031', '#e56739', '#736f39',
1037 1038 '#79f2ea', '#000059', '#401029', '#4c1400', '#ffee00', '#005953',
1038 1039 '#101040', '#990052', '#402820', '#403d10', '#00ffee', '#0000d9',
1039 1040 '#ff80c4', '#a66953', '#eeff00', '#00ccbe', '#8080ff', '#e673a1',
1040 1041 '#a62c00', '#474d00', '#1a3331', '#46468c', '#733950', '#662900',
1041 1042 '#858c23', '#238c85', '#0f0073', '#b20047', '#d9986c', '#becc00',
1042 1043 '#396f73', '#281d73', '#ff0066', '#ff6600', '#dee673', '#59adb3',
1043 1044 '#6559b3', '#590024', '#b2622d', '#98b32d', '#36ced9', '#332d59',
1044 1045 '#40001a', '#733f1d', '#526600', '#005359', '#242040', '#bf6079',
1045 1046 '#735039', '#cef23d', '#007780', '#5630bf', '#66001b', '#b24700',
1046 1047 '#acbf60', '#1d6273', '#25008c', '#731d34', '#a67453', '#50592d',
1047 1048 '#00ccff', '#6600ff', '#ff0044', '#4c1f00', '#8a994d', '#79daf2',
1048 1049 '#a173e6', '#d93662', '#402310', '#aaff00', '#2d98b3', '#8c40ff',
1049 1050 '#592d39', '#ff8c40', '#354020', '#103640', '#1a0040', '#331a20',
1050 1051 '#331400', '#334d00', '#1d5673', '#583973', '#7f0022', '#4c3626',
1051 1052 '#88cc00', '#36a3d9', '#3d0073', '#d9364c', '#33241a', '#698c23',
1052 1053 '#5995b3', '#300059', '#e57382', '#7f3300', '#366600', '#00aaff',
1053 1054 '#3a1659', '#733941', '#663600', '#74b32d', '#003c59', '#7f53a6',
1054 1055 '#73000f', '#ff8800', '#baf279', '#79caf2', '#291040', '#a6293a',
1055 1056 '#b2742d', '#587339', '#0077b3', '#632699', '#400009', '#d9a66c',
1056 1057 '#294010', '#2d4a59', '#aa00ff', '#4c131b', '#b25f00', '#5ce600',
1057 1058 '#267399', '#a336d9', '#990014', '#664e33', '#86bf60', '#0088ff',
1058 1059 '#7700b3', '#593a16', '#073300', '#1d4b73', '#ac60bf', '#e59539',
1059 1060 '#4f8c46', '#368dd9', '#5c0073'
1060 1061 ]
1061 1062
1062 1063 def rgb_to_hex_color(self, rgb_tuple):
1063 1064 """
1064 1065 Converts a passed rgb_tuple to a hex color.
1065 1066
1066 1067 :param rgb_tuple: tuple with 3 ints representing the rgb color space
1067 1068 """
1068 1069 return '#' + ("".join(map(chr, rgb_tuple)).encode('hex'))
1069 1070
1070 1071 def email_to_int_list(self, email_str):
1071 1072 """
1072 1073 Get every byte of the hex digest value of the email and turn it into an integer.
1073 1074 It will always be between 0 and 255
1074 1075 """
1075 1076 digest = md5_safe(email_str.lower())
1076 1077 return [int(digest[i * 2:i * 2 + 2], 16) for i in range(16)]
1077 1078
1078 1079 def pick_color_bank_index(self, email_str, color_bank):
1079 1080 return self.email_to_int_list(email_str)[0] % len(color_bank)
1080 1081
1081 1082 def str2color(self, email_str):
1082 1083 """
1083 1084 Tries to map an email to a color using a stable algorithm
1084 1085
1085 1086 :param email_str:
1086 1087 """
1087 1088 color_bank = self.get_color_bank()
1088 1089 # pick position (modulo its length so we always find it in the
1089 1090 # bank even if it's smaller than 256 values)
1090 1091 pos = self.pick_color_bank_index(email_str, color_bank)
1091 1092 return color_bank[pos]
1092 1093
1093 1094 def normalize_email(self, email_address):
1094 1095 import unicodedata
1095 1096 # default host used to fill in the fake/missing email
1096 1097 default_host = u'localhost'
1097 1098
1098 1099 if not email_address:
1099 1100 email_address = u'%s@%s' % (User.DEFAULT_USER, default_host)
1100 1101
1101 1102 email_address = safe_unicode(email_address)
1102 1103
1103 1104 if u'@' not in email_address:
1104 1105 email_address = u'%s@%s' % (email_address, default_host)
1105 1106
1106 1107 if email_address.endswith(u'@'):
1107 1108 email_address = u'%s%s' % (email_address, default_host)
1108 1109
1109 1110 email_address = unicodedata.normalize('NFKD', email_address)\
1110 1111 .encode('ascii', 'ignore')
1111 1112 return email_address
1112 1113
1113 1114 def get_initials(self):
1114 1115 """
1115 1116 Returns 2-letter initials calculated based on the input.
1116 1117 The algorithm picks the first given email address, takes the first letter
1117 1118 of the part before @, and then the first letter of the server name. In case
1118 1119 the part before @ is in the format `somestring.somestring2`, it replaces
1119 1120 the server letter with the first letter of somestring2
1120 1121
1121 1122 In case the function was initialized with both first and last name, this
1122 1123 overrides the extraction from email with the first letter of the first and
1123 1124 last name. We add special logic to that functionality: in case the full name
1124 1125 is compound, like Guido Von Rossum, we use the last part of the last name
1125 1126 (Von Rossum), picking `R`.
1126 1127
1127 1128 The function also normalizes non-ascii characters to their ascii
1128 1129 representation, eg. Ą => A
1129 1130 """
1130 1131 import unicodedata
1131 1132 # replace non-ascii to ascii
1132 1133 first_name = unicodedata.normalize(
1133 1134 'NFKD', safe_unicode(self.first_name)).encode('ascii', 'ignore')
1134 1135 last_name = unicodedata.normalize(
1135 1136 'NFKD', safe_unicode(self.last_name)).encode('ascii', 'ignore')
1136 1137
1137 1138 # do NFKD encoding, and also make sure email has proper format
1138 1139 email_address = self.normalize_email(self.email_address)
1139 1140
1140 1141 # first push the email initials
1141 1142 prefix, server = email_address.split('@', 1)
1142 1143
1143 1144 # check if prefix is maybe a 'firstname.lastname' syntax
1144 1145 _dot_split = prefix.rsplit('.', 1)
1145 1146 if len(_dot_split) == 2:
1146 1147 initials = [_dot_split[0][0], _dot_split[1][0]]
1147 1148 else:
1148 1149 initials = [prefix[0], server[0]]
1149 1150
1150 1151 # then try to replace either firstname or lastname
1151 1152 fn_letter = (first_name or " ")[0].strip()
1152 1153 ln_letter = (last_name.split(' ', 1)[-1] or " ")[0].strip()
1153 1154
1154 1155 if fn_letter:
1155 1156 initials[0] = fn_letter
1156 1157
1157 1158 if ln_letter:
1158 1159 initials[1] = ln_letter
1159 1160
1160 1161 return ''.join(initials).upper()
1161 1162
1162 1163 def get_img_data_by_type(self, font_family, img_type):
1163 1164 default_user = """
1164 1165 <svg xmlns="http://www.w3.org/2000/svg"
1165 1166 version="1.1" x="0px" y="0px" width="{size}" height="{size}"
1166 1167 viewBox="-15 -10 439.165 429.164"
1167 1168
1168 1169 xml:space="preserve"
1169 1170 style="background:{background};" >
1170 1171
1171 1172 <path d="M204.583,216.671c50.664,0,91.74-48.075,
1172 1173 91.74-107.378c0-82.237-41.074-107.377-91.74-107.377
1173 1174 c-50.668,0-91.74,25.14-91.74,107.377C112.844,
1174 1175 168.596,153.916,216.671,
1175 1176 204.583,216.671z" fill="{text_color}"/>
1176 1177 <path d="M407.164,374.717L360.88,
1177 1178 270.454c-2.117-4.771-5.836-8.728-10.465-11.138l-71.83-37.392
1178 1179 c-1.584-0.823-3.502-0.663-4.926,0.415c-20.316,
1179 1180 15.366-44.203,23.488-69.076,23.488c-24.877,
1180 1181 0-48.762-8.122-69.078-23.488
1181 1182 c-1.428-1.078-3.346-1.238-4.93-0.415L58.75,
1182 1183 259.316c-4.631,2.41-8.346,6.365-10.465,11.138L2.001,374.717
1183 1184 c-3.191,7.188-2.537,15.412,1.75,22.005c4.285,
1184 1185 6.592,11.537,10.526,19.4,10.526h362.861c7.863,0,15.117-3.936,
1185 1186 19.402-10.527 C409.699,390.129,
1186 1187 410.355,381.902,407.164,374.717z" fill="{text_color}"/>
1187 1188 </svg>""".format(
1188 1189 size=self.size,
1189 1190 background='#979797', # @grey4
1190 1191 text_color=self.text_color,
1191 1192 font_family=font_family)
1192 1193
1193 1194 return {
1194 1195 "default_user": default_user
1195 1196 }[img_type]
1196 1197
1197 1198 def get_img_data(self, svg_type=None):
1198 1199 """
1199 1200 generates the svg metadata for image
1200 1201 """
1201 1202
1202 1203 font_family = ','.join([
1203 1204 'proximanovaregular',
1204 1205 'Proxima Nova Regular',
1205 1206 'Proxima Nova',
1206 1207 'Arial',
1207 1208 'Lucida Grande',
1208 1209 'sans-serif'
1209 1210 ])
1210 1211 if svg_type:
1211 1212 return self.get_img_data_by_type(font_family, svg_type)
1212 1213
1213 1214 initials = self.get_initials()
1214 1215 img_data = """
1215 1216 <svg xmlns="http://www.w3.org/2000/svg" pointer-events="none"
1216 1217 width="{size}" height="{size}"
1217 1218 style="width: 100%; height: 100%; background-color: {background}"
1218 1219 viewBox="0 0 {size} {size}">
1219 1220 <text text-anchor="middle" y="50%" x="50%" dy="0.35em"
1220 1221 pointer-events="auto" fill="{text_color}"
1221 1222 font-family="{font_family}"
1222 1223 style="font-weight: 400; font-size: {f_size}px;">{text}
1223 1224 </text>
1224 1225 </svg>""".format(
1225 1226 size=self.size,
1226 1227 f_size=self.size/1.85, # scale the text inside the box nicely
1227 1228 background=self.background,
1228 1229 text_color=self.text_color,
1229 1230 text=initials.upper(),
1230 1231 font_family=font_family)
1231 1232
1232 1233 return img_data
1233 1234
1234 1235 def generate_svg(self, svg_type=None):
1235 1236 img_data = self.get_img_data(svg_type)
1236 1237 return "data:image/svg+xml;base64,%s" % img_data.encode('base64')
1237 1238
1238 1239
1239 1240 def initials_gravatar(email_address, first_name, last_name, size=30):
1240 1241 svg_type = None
1241 1242 if email_address == User.DEFAULT_USER_EMAIL:
1242 1243 svg_type = 'default_user'
1243 1244 klass = InitialsGravatar(email_address, first_name, last_name, size)
1244 1245 return klass.generate_svg(svg_type=svg_type)
1245 1246
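# Illustrative use of initials_gravatar with a made-up user (editor's sketch):
# per the get_initials() docstring, a compound last name like "Von Rossum"
# contributes its last part, so the generated SVG data URI renders "GR".
#
#     >>> uri = initials_gravatar('guido@example.com', 'Guido', 'Von Rossum')
#     >>> uri.startswith('data:image/svg+xml;base64,')
#     True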
1246 1247
1247 1248 def gravatar_url(email_address, size=30):
1248 1249 # doh, we need to re-import those to mock it later
1249 1250 from pylons import tmpl_context as c
1250 1251
1251 1252 _use_gravatar = c.visual.use_gravatar
1252 1253 _gravatar_url = c.visual.gravatar_url or User.DEFAULT_GRAVATAR_URL
1253 1254
1254 1255 email_address = email_address or User.DEFAULT_USER_EMAIL
1255 1256 if isinstance(email_address, unicode):
1256 1257 # hashlib crashes on unicode items
1257 1258 email_address = safe_str(email_address)
1258 1259
1259 1260 # empty email or default user
1260 1261 if not email_address or email_address == User.DEFAULT_USER_EMAIL:
1261 1262 return initials_gravatar(User.DEFAULT_USER_EMAIL, '', '', size=size)
1262 1263
1263 1264 if _use_gravatar:
1264 1265 # TODO: Disuse pyramid thread locals. Think about another solution to
1265 1266 # get the host and schema here.
1266 1267 request = get_current_request()
1267 1268 tmpl = safe_str(_gravatar_url)
1268 1269 tmpl = tmpl.replace('{email}', email_address)\
1269 1270 .replace('{md5email}', md5_safe(email_address.lower())) \
1270 1271 .replace('{netloc}', request.host)\
1271 1272 .replace('{scheme}', request.scheme)\
1272 1273 .replace('{size}', safe_str(size))
1273 1274 return tmpl
1274 1275 else:
1275 1276 return initials_gravatar(email_address, '', '', size=size)
1276 1277
1277 1278
1278 1279 class Page(_Page):
1279 1280 """
1280 1281 Custom pager to match rendering style with paginator
1281 1282 """
1282 1283
1283 1284 def _get_pos(self, cur_page, max_page, items):
1284 1285 edge = (items / 2) + 1
1285 1286 if (cur_page <= edge):
1286 1287 radius = max(items / 2, items - cur_page)
1287 1288 elif (max_page - cur_page) < edge:
1288 1289 radius = (items - 1) - (max_page - cur_page)
1289 1290 else:
1290 1291 radius = items / 2
1291 1292
1292 1293 left = max(1, (cur_page - (radius)))
1293 1294 right = min(max_page, cur_page + (radius))
1294 1295 return left, cur_page, right
1295 1296
1296 1297 def _range(self, regexp_match):
1297 1298 """
1298 1299 Return range of linked pages (e.g. '1 2 [3] 4 5 6 7 8').
1299 1300
1300 1301 Arguments:
1301 1302
1302 1303 regexp_match
1303 1304 A "re" (regular expressions) match object containing the
1304 1305 radius of linked pages around the current page in
1305 1306 regexp_match.group(1) as a string
1306 1307
1307 1308 This function is supposed to be called as a callable in
1308 1309 re.sub.
1309 1310
1310 1311 """
1311 1312 radius = int(regexp_match.group(1))
1312 1313
1313 1314 # Compute the first and last page number within the radius
1314 1315 # e.g. '1 .. 5 6 [7] 8 9 .. 12'
1315 1316 # -> leftmost_page = 5
1316 1317 # -> rightmost_page = 9
1317 1318 leftmost_page, _cur, rightmost_page = self._get_pos(self.page,
1318 1319 self.last_page,
1319 1320 (radius * 2) + 1)
1320 1321 nav_items = []
1321 1322
1322 1323 # Create a link to the first page (unless we are on the first page
1323 1324 # or there would be no need to insert '..' spacers)
1324 1325 if self.page != self.first_page and self.first_page < leftmost_page:
1325 1326 nav_items.append(self._pagerlink(self.first_page, self.first_page))
1326 1327
1327 1328 # Insert dots if there are pages between the first page
1328 1329 # and the currently displayed page range
1329 1330 if leftmost_page - self.first_page > 1:
1330 1331 # Wrap in a SPAN tag if dotdot_attr is set
1331 1332 text = '..'
1332 1333 if self.dotdot_attr:
1333 1334 text = HTML.span(c=text, **self.dotdot_attr)
1334 1335 nav_items.append(text)
1335 1336
1336 1337 for thispage in xrange(leftmost_page, rightmost_page + 1):
1337 1338 # Highlight the current page number and do not use a link
1338 1339 if thispage == self.page:
1339 1340 text = '%s' % (thispage,)
1340 1341 # Wrap in a SPAN tag if curpage_attr is set
1341 1342 if self.curpage_attr:
1342 1343 text = HTML.span(c=text, **self.curpage_attr)
1343 1344 nav_items.append(text)
1344 1345 # Otherwise create just a link to that page
1345 1346 else:
1346 1347 text = '%s' % (thispage,)
1347 1348 nav_items.append(self._pagerlink(thispage, text))
1348 1349
1349 1350 # Insert dots if there are pages between the displayed
1350 1351 # page numbers and the end of the page range
1351 1352 if self.last_page - rightmost_page > 1:
1352 1353 text = '..'
1353 1354 # Wrap in a SPAN tag if dotdot_attr is set
1354 1355 if self.dotdot_attr:
1355 1356 text = HTML.span(c=text, **self.dotdot_attr)
1356 1357 nav_items.append(text)
1357 1358
1358 1359 # Create a link to the very last page (unless we are on the last
1359 1360 # page or there would be no need to insert '..' spacers)
1360 1361 if self.page != self.last_page and rightmost_page < self.last_page:
1361 1362 nav_items.append(self._pagerlink(self.last_page, self.last_page))
1362 1363
1363 1364 ## prerender links
1364 1365 #_page_link = url.current()
1365 1366 #nav_items.append(literal('<link rel="prerender" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
1366 1367 #nav_items.append(literal('<link rel="prefetch" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
1367 1368 return self.separator.join(nav_items)
1368 1369
1369 1370 def pager(self, format='~2~', page_param='page', partial_param='partial',
1370 1371 show_if_single_page=False, separator=' ', onclick=None,
1371 1372 symbol_first='<<', symbol_last='>>',
1372 1373 symbol_previous='<', symbol_next='>',
1373 1374 link_attr={'class': 'pager_link', 'rel': 'prerender'},
1374 1375 curpage_attr={'class': 'pager_curpage'},
1375 1376 dotdot_attr={'class': 'pager_dotdot'}, **kwargs):
1376 1377
1377 1378 self.curpage_attr = curpage_attr
1378 1379 self.separator = separator
1379 1380 self.pager_kwargs = kwargs
1380 1381 self.page_param = page_param
1381 1382 self.partial_param = partial_param
1382 1383 self.onclick = onclick
1383 1384 self.link_attr = link_attr
1384 1385 self.dotdot_attr = dotdot_attr
1385 1386
1386 1387 # Don't show navigator if there is no more than one page
1387 1388 if self.page_count == 0 or (self.page_count == 1 and not show_if_single_page):
1388 1389 return ''
1389 1390
1390 1391 from string import Template
1391 1392 # Replace ~...~ in token format by range of pages
1392 1393 result = re.sub(r'~(\d+)~', self._range, format)
1393 1394
1394 1395 # Interpolate '%' variables
1395 1396 result = Template(result).safe_substitute({
1396 1397 'first_page': self.first_page,
1397 1398 'last_page': self.last_page,
1398 1399 'page': self.page,
1399 1400 'page_count': self.page_count,
1400 1401 'items_per_page': self.items_per_page,
1401 1402 'first_item': self.first_item,
1402 1403 'last_item': self.last_item,
1403 1404 'item_count': self.item_count,
1404 1405 'link_first': self.page > self.first_page and \
1405 1406 self._pagerlink(self.first_page, symbol_first) or '',
1406 1407 'link_last': self.page < self.last_page and \
1407 1408 self._pagerlink(self.last_page, symbol_last) or '',
1408 1409 'link_previous': self.previous_page and \
1409 1410 self._pagerlink(self.previous_page, symbol_previous) \
1410 1411 or HTML.span(symbol_previous, class_="pg-previous disabled"),
1411 1412 'link_next': self.next_page and \
1412 1413 self._pagerlink(self.next_page, symbol_next) \
1413 1414 or HTML.span(symbol_next, class_="pg-next disabled")
1414 1415 })
1415 1416
1416 1417 return literal(result)
1417 1418
1418 1419
1419 1420 #==============================================================================
1420 1421 # REPO PAGER, PAGER FOR REPOSITORY
1421 1422 #==============================================================================
1422 1423 class RepoPage(Page):
1423 1424
1424 1425 def __init__(self, collection, page=1, items_per_page=20,
1425 1426 item_count=None, url=None, **kwargs):
1426 1427
1427 1428 """Create a "RepoPage" instance. special pager for paging
1428 1429 repository
1429 1430 """
1430 1431 self._url_generator = url
1431 1432
1432 1433 # Save the kwargs class-wide so they can be used in the pager() method
1433 1434 self.kwargs = kwargs
1434 1435
1435 1436 # Save a reference to the collection
1436 1437 self.original_collection = collection
1437 1438
1438 1439 self.collection = collection
1439 1440
1440 1441 # The self.page is the number of the current page.
1441 1442 # The first page has the number 1!
1442 1443 try:
1443 1444 self.page = int(page) # make it int() if we get it as a string
1444 1445 except (ValueError, TypeError):
1445 1446 self.page = 1
1446 1447
1447 1448 self.items_per_page = items_per_page
1448 1449
1449 1450 # Unless the user tells us how many items the collection has,
1450 1451 # we calculate that ourselves.
1451 1452 if item_count is not None:
1452 1453 self.item_count = item_count
1453 1454 else:
1454 1455 self.item_count = len(self.collection)
1455 1456
1456 1457 # Compute the number of the first and last available page
1457 1458 if self.item_count > 0:
1458 1459 self.first_page = 1
1459 1460 self.page_count = int(math.ceil(float(self.item_count) /
1460 1461 self.items_per_page))
1461 1462 self.last_page = self.first_page + self.page_count - 1
1462 1463
1463 1464 # Make sure that the requested page number is in the range of
1464 1465 # valid pages
1465 1466 if self.page > self.last_page:
1466 1467 self.page = self.last_page
1467 1468 elif self.page < self.first_page:
1468 1469 self.page = self.first_page
1469 1470
1470 1471 # Note: the number of items on this page can be less than
1471 1472 # items_per_page if the last page is not full
1472 1473 self.first_item = max(0, (self.item_count) - (self.page *
1473 1474 items_per_page))
1474 1475 self.last_item = ((self.item_count - 1) - items_per_page *
1475 1476 (self.page - 1))
1476 1477
1477 1478 self.items = list(self.collection[self.first_item:self.last_item + 1])
1478 1479
1479 1480 # Links to previous and next page
1480 1481 if self.page > self.first_page:
1481 1482 self.previous_page = self.page - 1
1482 1483 else:
1483 1484 self.previous_page = None
1484 1485
1485 1486 if self.page < self.last_page:
1486 1487 self.next_page = self.page + 1
1487 1488 else:
1488 1489 self.next_page = None
1489 1490
1490 1491 # No items available
1491 1492 else:
1492 1493 self.first_page = None
1493 1494 self.page_count = 0
1494 1495 self.last_page = None
1495 1496 self.first_item = None
1496 1497 self.last_item = None
1497 1498 self.previous_page = None
1498 1499 self.next_page = None
1499 1500 self.items = []
1500 1501
1501 1502 # This is a subclass of the 'list' type. Initialise the list now.
1502 1503 list.__init__(self, reversed(self.items))
1503 1504
1504 1505
1505 1506 def changed_tooltip(nodes):
1506 1507 """
1507 1508 Generates a html string for changed nodes in commit page.
1508 1509 It limits the output to 30 entries
1509 1510
1510 1511 :param nodes: LazyNodesGenerator
1511 1512 """
1512 1513 if nodes:
1513 1514 pref = ': <br/> '
1514 1515 suf = ''
1515 1516 if len(nodes) > 30:
1516 1517 suf = '<br/>' + _(' and %s more') % (len(nodes) - 30)
1517 1518 return literal(pref + '<br/> '.join([safe_unicode(x.path)
1518 1519 for x in nodes[:30]]) + suf)
1519 1520 else:
1520 1521 return ': ' + _('No Files')
1521 1522
1522 1523
1523 1524 def breadcrumb_repo_link(repo):
1524 1525 """
1525 1526 Makes a breadcrumbs path link to repo
1526 1527
1527 1528 ex::
1528 1529 group >> subgroup >> repo
1529 1530
1530 1531 :param repo: a Repository instance
1531 1532 """
1532 1533
1533 1534 path = [
1534 1535 link_to(group.name, url('repo_group_home', group_name=group.group_name))
1535 1536 for group in repo.groups_with_parents
1536 1537 ] + [
1537 1538 link_to(repo.just_name, url('summary_home', repo_name=repo.repo_name))
1538 1539 ]
1539 1540
1540 1541 return literal(' &raquo; '.join(path))
1541 1542
1542 1543
1543 1544 def format_byte_size_binary(file_size):
1544 1545 """
1545 1546 Formats file/folder sizes using standard binary (IEC) units.
1546 1547 """
1547 1548 formatted_size = format_byte_size(file_size, binary=True)
1548 1549 return formatted_size
1549 1550
1550 1551
1551 1552 def fancy_file_stats(stats):
1552 1553 """
1553 1554 Displays a fancy two-colored bar showing the number of added/deleted
1554 1555 lines of code in a file
1555 1556
1556 1557 :param stats: two element list of added/deleted lines of code
1557 1558 """
1558 1559 from rhodecode.lib.diffs import NEW_FILENODE, DEL_FILENODE, \
1559 1560 MOD_FILENODE, RENAMED_FILENODE, CHMOD_FILENODE, BIN_FILENODE
1560 1561
1561 1562 def cgen(l_type, a_v, d_v):
1562 1563 mapping = {'tr': 'top-right-rounded-corner-mid',
1563 1564 'tl': 'top-left-rounded-corner-mid',
1564 1565 'br': 'bottom-right-rounded-corner-mid',
1565 1566 'bl': 'bottom-left-rounded-corner-mid'}
1566 1567 map_getter = lambda x: mapping[x]
1567 1568
1568 1569 if l_type == 'a' and d_v:
1569 1570 #case when added and deleted are present
1570 1571 return ' '.join(map(map_getter, ['tl', 'bl']))
1571 1572
1572 1573 if l_type == 'a' and not d_v:
1573 1574 return ' '.join(map(map_getter, ['tr', 'br', 'tl', 'bl']))
1574 1575
1575 1576 if l_type == 'd' and a_v:
1576 1577 return ' '.join(map(map_getter, ['tr', 'br']))
1577 1578
1578 1579 if l_type == 'd' and not a_v:
1579 1580 return ' '.join(map(map_getter, ['tr', 'br', 'tl', 'bl']))
1580 1581
1581 1582 a, d = stats['added'], stats['deleted']
1582 1583 width = 100
1583 1584
1584 1585 if stats['binary']: # binary operations like chmod/rename etc
1585 1586 lbl = []
1586 1587 bin_op = 0 # undefined
1587 1588
1588 1589 # prefix with bin for binary files
1589 1590 if BIN_FILENODE in stats['ops']:
1590 1591 lbl += ['bin']
1591 1592
1592 1593 if NEW_FILENODE in stats['ops']:
1593 1594 lbl += [_('new file')]
1594 1595 bin_op = NEW_FILENODE
1595 1596 elif MOD_FILENODE in stats['ops']:
1596 1597 lbl += [_('mod')]
1597 1598 bin_op = MOD_FILENODE
1598 1599 elif DEL_FILENODE in stats['ops']:
1599 1600 lbl += [_('del')]
1600 1601 bin_op = DEL_FILENODE
1601 1602 elif RENAMED_FILENODE in stats['ops']:
1602 1603 lbl += [_('rename')]
1603 1604 bin_op = RENAMED_FILENODE
1604 1605
1605 1606 # chmod can go with other operations, so we add a + to lbl if needed
1606 1607 if CHMOD_FILENODE in stats['ops']:
1607 1608 lbl += [_('chmod')]
1608 1609 if bin_op == 0:
1609 1610 bin_op = CHMOD_FILENODE
1610 1611
1611 1612 lbl = '+'.join(lbl)
1612 1613 b_a = '<div class="bin bin%s %s" style="width:100%%">%s</div>' \
1613 1614 % (bin_op, cgen('a', a_v='', d_v=0), lbl)
1614 1615 b_d = '<div class="bin bin1" style="width:0%%"></div>'
1615 1616 return literal('<div style="width:%spx">%s%s</div>' % (width, b_a, b_d))
1616 1617
1617 1618 t = stats['added'] + stats['deleted']
1618 1619 unit = float(width) / (t or 1)
1619 1620
1620 1621 # needs > 9% of width to be visible or 0 to be hidden
1621 1622 a_p = max(9, unit * a) if a > 0 else 0
1622 1623 d_p = max(9, unit * d) if d > 0 else 0
1623 1624 p_sum = a_p + d_p
1624 1625
1625 1626 if p_sum > width:
1626 1627 #adjust the percentage to be == 100% since we adjusted to 9
1627 1628 if a_p > d_p:
1628 1629 a_p = a_p - (p_sum - width)
1629 1630 else:
1630 1631 d_p = d_p - (p_sum - width)
1631 1632
1632 1633 a_v = a if a > 0 else ''
1633 1634 d_v = d if d > 0 else ''
1634 1635
1635 1636 d_a = '<div class="added %s" style="width:%s%%">%s</div>' % (
1636 1637 cgen('a', a_v, d_v), a_p, a_v
1637 1638 )
1638 1639 d_d = '<div class="deleted %s" style="width:%s%%">%s</div>' % (
1639 1640 cgen('d', a_v, d_v), d_p, d_v
1640 1641 )
1641 1642 return literal('<div style="width:%spx">%s%s</div>' % (width, d_a, d_d))
1642 1643
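To illustrate the width math above with hypothetical stats (assumed values, for explanation only):

    width, a, d = 100, 30, 10
    unit = float(width) / (a + d)             # 2.5 px per changed line
    a_p = max(9, unit * a) if a > 0 else 0    # 75.0
    d_p = max(9, unit * d) if d > 0 else 0    # 25.0
    # a_p + d_p == 100 here, so no correction is applied; added lines get 75% of the bar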
1643 1644
1644 1645 def urlify_text(text_, safe=True):
1645 1646 """
1646 1647 Extract urls from text and make html links out of them
1647 1648
1648 1649 :param text_:
1649 1650 """
1650 1651
1651 1652 url_pat = re.compile(r'''(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@#.&+]'''
1652 1653 '''|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)''')
1653 1654
1654 1655 def url_func(match_obj):
1655 1656 url_full = match_obj.groups()[0]
1656 1657 return '<a href="%(url)s">%(url)s</a>' % ({'url': url_full})
1657 1658 _newtext = url_pat.sub(url_func, text_)
1658 1659 if safe:
1659 1660 return literal(_newtext)
1660 1661 return _newtext
1661 1662
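A quick usage sketch of urlify_text; the URL is hypothetical and the markup shown is indicative:

    urlify_text('docs live at https://docs.example.com now')
    # -> literal('docs live at <a href="https://docs.example.com">https://docs.example.com</a> now')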
1662 1663
1663 1664 def urlify_commits(text_, repository):
1664 1665 """
1665 1666 Extract commit ids from text and make links out of them
1666 1667
1667 1668 :param text_:
1668 1669 :param repository: repo name to build the URL with
1669 1670 """
1670 1671 from pylons import url # doh, we need to re-import url to mock it later
1671 1672 URL_PAT = re.compile(r'(^|\s)([0-9a-fA-F]{12,40})($|\s)')
1672 1673
1673 1674 def url_func(match_obj):
1674 1675 commit_id = match_obj.groups()[1]
1675 1676 pref = match_obj.groups()[0]
1676 1677 suf = match_obj.groups()[2]
1677 1678
1678 1679 tmpl = (
1679 1680 '%(pref)s<a class="%(cls)s" href="%(url)s">'
1680 1681 '%(commit_id)s</a>%(suf)s'
1681 1682 )
1682 1683 return tmpl % {
1683 1684 'pref': pref,
1684 1685 'cls': 'revision-link',
1685 1686 'url': url('changeset_home', repo_name=repository,
1686 1687 revision=commit_id, qualified=True),
1687 1688 'commit_id': commit_id,
1688 1689 'suf': suf
1689 1690 }
1690 1691
1691 1692 newtext = URL_PAT.sub(url_func, text_)
1692 1693
1693 1694 return newtext
1694 1695
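A rough usage sketch of urlify_commits; the repository name and commit id are made up, and the exact href depends on the configured routes:

    urlify_commits('fixes regression in deadbeefcafe', 'my-repo')
    # 'deadbeefcafe' (12+ hex chars) is wrapped in
    # <a class="revision-link" href="...changeset_home URL...">deadbeefcafe</a>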
1695 1696
1696 1697 def _process_url_func(match_obj, repo_name, uid, entry,
1697 1698 return_raw_data=False):
1698 1699 pref = ''
1699 1700 if match_obj.group().startswith(' '):
1700 1701 pref = ' '
1701 1702
1702 1703 issue_id = ''.join(match_obj.groups())
1703 1704 tmpl = (
1704 1705 '%(pref)s<a class="%(cls)s" href="%(url)s">'
1705 1706 '%(issue-prefix)s%(id-repr)s'
1706 1707 '</a>')
1707 1708
1708 1709 (repo_name_cleaned,
1709 1710 parent_group_name) = RepoGroupModel().\
1710 1711 _get_group_name_and_parent(repo_name)
1711 1712
1712 1713 # variables replacement
1713 1714 named_vars = {
1714 1715 'id': issue_id,
1715 1716 'repo': repo_name,
1716 1717 'repo_name': repo_name_cleaned,
1717 1718 'group_name': parent_group_name
1718 1719 }
1719 1720 # named regex variables
1720 1721 named_vars.update(match_obj.groupdict())
1721 1722 _url = string.Template(entry['url']).safe_substitute(**named_vars)
1722 1723
1723 1724 data = {
1724 1725 'pref': pref,
1725 1726 'cls': 'issue-tracker-link',
1726 1727 'url': _url,
1727 1728 'id-repr': issue_id,
1728 1729 'issue-prefix': entry['pref'],
1729 1730 'serv': entry['url'],
1730 1731 }
1731 1732 if return_raw_data:
1732 1733 return {
1733 1734 'id': issue_id,
1734 1735 'url': _url
1735 1736 }
1736 1737 return tmpl % data
1737 1738
1738 1739
1739 1740 def process_patterns(text_string, repo_name, config=None):
1740 1741 repo = None
1741 1742 if repo_name:
1742 1743 # Retrieve the repo so an invalid repo_name does not explode on
1743 1744 # IssueTrackerSettingsModel; the (possibly invalid) name is still passed further down
1744 1745 repo = Repository.get_by_repo_name(repo_name, cache=True)
1745 1746
1746 1747 settings_model = IssueTrackerSettingsModel(repo=repo)
1747 1748 active_entries = settings_model.get_settings(cache=True)
1748 1749
1749 1750 issues_data = []
1750 1751 newtext = text_string
1751 1752 for uid, entry in active_entries.items():
1752 1753 log.debug('found issue tracker entry with uid %s' % (uid,))
1753 1754
1754 1755 if not (entry['pat'] and entry['url']):
1755 1756 log.debug('skipping due to missing data')
1756 1757 continue
1757 1758
1758 1759 log.debug('issue tracker entry: uid: `%s` PAT:%s URL:%s PREFIX:%s'
1759 1760 % (uid, entry['pat'], entry['url'], entry['pref']))
1760 1761
1761 1762 try:
1762 1763 pattern = re.compile(r'%s' % entry['pat'])
1763 1764 except re.error:
1764 1765 log.exception(
1765 1766 'issue tracker pattern: `%s` failed to compile',
1766 1767 entry['pat'])
1767 1768 continue
1768 1769
1769 1770 data_func = partial(
1770 1771 _process_url_func, repo_name=repo_name, entry=entry, uid=uid,
1771 1772 return_raw_data=True)
1772 1773
1773 1774 for match_obj in pattern.finditer(text_string):
1774 1775 issues_data.append(data_func(match_obj))
1775 1776
1776 1777 url_func = partial(
1777 1778 _process_url_func, repo_name=repo_name, entry=entry, uid=uid)
1778 1779
1779 1780 newtext = pattern.sub(url_func, newtext)
1780 1781 log.debug('processed prefix:uid `%s`' % (uid,))
1781 1782
1782 1783 return newtext, issues_data
1783 1784
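For illustration, the per-match variable substitution done in _process_url_func boils down to the following; the entry URL and values here are hypothetical:

    import string

    entry_url = 'https://bugs.example.com/${repo_name}/issue/${id}'
    named_vars = {'id': '42', 'repo': 'group/my-repo',
                  'repo_name': 'my-repo', 'group_name': 'group'}
    string.Template(entry_url).safe_substitute(**named_vars)
    # -> 'https://bugs.example.com/my-repo/issue/42'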
1784 1785
1785 1786 def urlify_commit_message(commit_text, repository=None):
1786 1787 """
1787 1788 Parses given text message and makes proper links.
1788 1789 Issues are linked to the configured issue tracker, and commit ids become commit links.
1789 1790
1790 1791 :param commit_text:
1791 1792 :param repository:
1792 1793 """
1793 1794 from pylons import url # doh, we need to re-import url to mock it later
1794 1795
1795 1796 def escaper(string):
1796 1797 return string.replace('<', '&lt;').replace('>', '&gt;')
1797 1798
1798 1799 newtext = escaper(commit_text)
1799 1800
1800 1801 # extract http/https links and make them real urls
1801 1802 newtext = urlify_text(newtext, safe=False)
1802 1803
1803 1804 # urlify commits - extract commit ids and make link out of them, if we have
1804 1805 # the scope of repository present.
1805 1806 if repository:
1806 1807 newtext = urlify_commits(newtext, repository)
1807 1808
1808 1809 # process issue tracker patterns
1809 1810 newtext, issues = process_patterns(newtext, repository or '')
1810 1811
1811 1812 return literal(newtext)
1812 1813
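A sketch of the whole pipeline above (input and output shapes are indicative only):

    msg = 'fix <crash>, see deadbeefcafe and https://ci.example.com/build/1'
    html = urlify_commit_message(msg, repository='my-repo')
    # '<' and '>' are escaped, the URL and the 12+ char hex id become anchors,
    # configured issue-tracker patterns are applied last, and a literal() is returned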
1813 1814
1814 def rst(source, mentions=False):
1815 return literal('<div class="rst-block">%s</div>' %
1816 MarkupRenderer.rst(source, mentions=mentions))
1817
1818
1819 def markdown(source, mentions=False):
1820 return literal('<div class="markdown-block">%s</div>' %
1821 MarkupRenderer.markdown(source, flavored=True,
1822 mentions=mentions))
1823
1824
1825 1815 def renderer_from_filename(filename, exclude=None):
1826 1816 """
1827 1817 choose a renderer based on filename
1828 1818 """
1829 1819
1830 # images
1831
1832 1820 # ipython
1833 if filename.endswith('.ipynb'):
1834 return 'ipython'
1821 for ext in ['*.ipynb']:
1822 if fnmatch.fnmatch(filename, pat=ext):
1823 return 'jupyter'
1835 1824
1836 1825 is_markup = MarkupRenderer.renderer_from_filename(filename, exclude=exclude)
1837 1826 if is_markup:
1838 1827 return is_markup
1839 1828 return None
1840 1829
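Assuming the detection above, behaviour would be roughly (file names are examples):

    renderer_from_filename('analysis.ipynb')               # 'jupyter'
    renderer_from_filename('README.md')                    # 'markdown'
    renderer_from_filename('README.md', exclude=['.md'])   # None ('.md' excluded)
    renderer_from_filename('main.c')                        # None -> show raw source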
1841 1830
1842 1831 def render(source, renderer='rst', mentions=False):
1843 1832 if renderer == 'rst':
1844 return rst(source, mentions=mentions)
1833 return literal(
1834 '<div class="rst-block">%s</div>' %
1835 MarkupRenderer.rst(source, mentions=mentions))
1845 1836 elif renderer == 'markdown':
1846 return markdown(source, mentions=mentions)
1847 elif renderer == 'ipython':
1848 def ipython_renderer(source):
1849 import nbformat
1850 from nbconvert import HTMLExporter
1851 notebook = nbformat.reads(source, as_version=4)
1837 return literal(
1838 '<div class="markdown-block">%s</div>' %
1839 MarkupRenderer.markdown(source, flavored=True, mentions=mentions))
1840 elif renderer == 'jupyter':
1841 return literal(
1842 '<div class="ipynb">%s</div>' %
1843 MarkupRenderer.jupyter(source))
1852 1844
1853 # 2. Instantiate the exporter. We use the `basic` template for now; we'll get into more details
1854 # later about how to customize the exporter further.
1855 html_exporter = HTMLExporter()
1856 html_exporter.template_file = 'basic'
1857
1858 # 3. Process the notebook we loaded earlier
1859 (body, resources) = html_exporter.from_notebook_node(notebook)
1860
1861 return body
1862
1863 return ipython_renderer(source)
1864 1845 # None means just show the file-source
1865 1846 return None
1866 1847
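Tying the two helpers together, a typical call site might look like this; file_node and its content attribute are assumed here and are not part of this diff:

    renderer = renderer_from_filename(file_node.path)  # 'rst', 'markdown', 'jupyter' or None
    if renderer:
        html = render(file_node.content, renderer=renderer)
    else:
        html = None  # no renderer detected: show the raw file source instead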
1867 1848
1868 1849 def commit_status(repo, commit_id):
1869 1850 return ChangesetStatusModel().get_status(repo, commit_id)
1870 1851
1871 1852
1872 1853 def commit_status_lbl(commit_status):
1873 1854 return dict(ChangesetStatus.STATUSES).get(commit_status)
1874 1855
1875 1856
1876 1857 def commit_time(repo_name, commit_id):
1877 1858 repo = Repository.get_by_repo_name(repo_name)
1878 1859 commit = repo.get_commit(commit_id=commit_id)
1879 1860 return commit.date
1880 1861
1881 1862
1882 1863 def get_permission_name(key):
1883 1864 return dict(Permission.PERMS).get(key)
1884 1865
1885 1866
1886 1867 def journal_filter_help():
1887 1868 return _(
1888 1869 'Example filter terms:\n' +
1889 1870 ' repository:vcs\n' +
1890 1871 ' username:marcin\n' +
1891 1872 ' action:*push*\n' +
1892 1873 ' ip:127.0.0.1\n' +
1893 1874 ' date:20120101\n' +
1894 1875 ' date:[20120101100000 TO 20120102]\n' +
1895 1876 '\n' +
1896 1877 'Generate wildcards using \'*\' character:\n' +
1897 1878 ' "repository:vcs*" - search everything starting with \'vcs\'\n' +
1898 1879 ' "repository:*vcs*" - search for repository containing \'vcs\'\n' +
1899 1880 '\n' +
1900 1881 'Optional AND / OR operators in queries\n' +
1901 1882 ' "repository:vcs OR repository:test"\n' +
1902 1883 ' "username:test AND repository:test*"\n'
1903 1884 )
1904 1885
1905 1886
1906 1887 def not_mapped_error(repo_name):
1907 1888 flash(_('%s repository is not mapped to db, perhaps'
1908 1889 ' it was created or renamed from the filesystem;'
1909 1890 ' please run the application again'
1910 1891 ' in order to rescan repositories') % repo_name, category='error')
1911 1892
1912 1893
1913 1894 def ip_range(ip_addr):
1914 1895 from rhodecode.model.db import UserIpMap
1915 1896 s, e = UserIpMap._get_ip_range(ip_addr)
1916 1897 return '%s - %s' % (s, e)
1917 1898
1918 1899
1919 1900 def form(url, method='post', needs_csrf_token=True, **attrs):
1920 1901 """Wrapper around webhelpers.tags.form to prevent CSRF attacks."""
1921 1902 if method.lower() != 'get' and needs_csrf_token:
1922 1903 raise Exception(
1923 1904 'Forms to POST/PUT/DELETE endpoints should have (in general) a ' +
1924 1905 'CSRF token. If the endpoint does not require such token you can ' +
1925 1906 'explicitly set the parameter needs_csrf_token to false.')
1926 1907
1927 1908 return wh_form(url, method=method, **attrs)
1928 1909
1929 1910
1930 1911 def secure_form(url, method="POST", multipart=False, **attrs):
1931 1912 """Start a form tag that points the action to an url. This
1932 1913 form tag will also include the hidden field containing
1933 1914 the auth token.
1934 1915
1935 1916 The url options should be given either as a string, or as a
1936 1917 ``url()`` function. The method for the form defaults to POST.
1937 1918
1938 1919 Options:
1939 1920
1940 1921 ``multipart``
1941 1922 If set to True, the enctype is set to "multipart/form-data".
1942 1923 ``method``
1943 1924 The method to use when submitting the form, usually either
1944 1925 "GET" or "POST". If "PUT", "DELETE", or another verb is used, a
1945 1926 hidden input with name _method is added to simulate the verb
1946 1927 over POST.
1947 1928
1948 1929 """
1949 1930 from webhelpers.pylonslib.secure_form import insecure_form
1950 1931 form = insecure_form(url, method, multipart, **attrs)
1951 1932 token = csrf_input()
1952 1933 return literal("%s\n%s" % (form, token))
1953 1934
1954 1935 def csrf_input():
1955 1936 return literal(
1956 1937 '<input type="hidden" id="{}" name="{}" value="{}">'.format(
1957 1938 csrf_token_key, csrf_token_key, get_csrf_token()))
1958 1939
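A minimal sketch of using secure_form; the route name below is illustrative only:

    form_open = secure_form(url('edit_repo', repo_name='my-repo'))
    # -> a literal() with the opening <form> tag plus the hidden CSRF input
    #    produced by csrf_input(); close it with end_form() in the template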
1959 1940 def dropdownmenu(name, selected, options, enable_filter=False, **attrs):
1960 1941 select_html = select(name, selected, options, **attrs)
1961 1942 select2 = """
1962 1943 <script>
1963 1944 $(document).ready(function() {
1964 1945 $('#%s').select2({
1965 1946 containerCssClass: 'drop-menu',
1966 1947 dropdownCssClass: 'drop-menu-dropdown',
1967 1948 dropdownAutoWidth: true%s
1968 1949 });
1969 1950 });
1970 1951 </script>
1971 1952 """
1972 1953 filter_option = """,
1973 1954 minimumResultsForSearch: -1
1974 1955 """
1975 1956 input_id = attrs.get('id') or name
1976 1957 filter_enabled = "" if enable_filter else filter_option
1977 1958 select_script = literal(select2 % (input_id, filter_enabled))
1978 1959
1979 1960 return literal(select_html+select_script)
1980 1961
1981 1962
1982 1963 def get_visual_attr(tmpl_context_var, attr_name):
1983 1964 """
1984 1965 A safe way to get an attribute from the visual variable of the template context
1985 1966
1986 1967 :param tmpl_context_var: instance of tmpl_context, usually present as `c`
1987 1968 :param attr_name: name of the attribute we fetch from the c.visual
1988 1969 """
1989 1970 visual = getattr(tmpl_context_var, 'visual', None)
1990 1971 if not visual:
1991 1972 return
1992 1973 else:
1993 1974 return getattr(visual, attr_name, None)
1994 1975
1995 1976
1996 1977 def get_last_path_part(file_node):
1997 1978 if not file_node.path:
1998 1979 return u''
1999 1980
2000 1981 path = safe_unicode(file_node.path.split('/')[-1])
2001 1982 return u'../' + path
2002 1983
2003 1984
2004 1985 def route_path(*args, **kwds):
2005 1986 """
2006 1987 Wrapper around pyramids `route_path` function. It is used to generate
2007 1988 URLs from within pylons views or templates. This will be removed when
2008 1989 the pyramid migration is finished.
2009 1990 """
2010 1991 req = get_current_request()
2011 1992 return req.route_path(*args, **kwds)
2012 1993
2013 1994
2014 1995 def route_path_or_none(*args, **kwargs):
2015 1996 try:
2016 1997 return route_path(*args, **kwargs)
2017 1998 except KeyError:
2018 1999 return None
2019 2000
2020 2001
2021 2002 def static_url(*args, **kwds):
2022 2003 """
2023 2004 Wrapper around pyramids `static_url` function. It is used to generate
2024 2005 URLs from within pylons views or templates. This will be removed when
2025 2006 the pyramid migration is finished.
2026 2007 """
2027 2008 req = get_current_request()
2028 2009 return req.static_url(*args, **kwds)
2029 2010
2030 2011
2031 2012 def resource_path(*args, **kwds):
2032 2013 """
2033 2014 Wrapper around pyramids `resource_path` function. It is used to generate
2034 2015 URLs from within pylons views or templates. This will be removed when
2035 2016 the pyramid migration is finished.
2036 2017 """
2037 2018 req = get_current_request()
2038 2019 return req.resource_path(*args, **kwds)
@@ -1,282 +1,358 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2011-2017 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21
22 22 """
23 23 Renderer for markup languages with the ability to parse using rst or markdown
24 24 """
25 25
26 26 import re
27 27 import os
28 28 import logging
29 29 import itertools
30 30
31 31 from mako.lookup import TemplateLookup
32 from mako.template import Template as MakoTemplate
32 33
33 34 from docutils.core import publish_parts
34 35 from docutils.parsers.rst import directives
35 36 import markdown
36 37
37 38 from rhodecode.lib.markdown_ext import (
38 39 UrlizeExtension, GithubFlavoredMarkdownExtension)
39 40 from rhodecode.lib.utils2 import safe_unicode, md5_safe, MENTIONS_REGEX
40 41
41 42 log = logging.getLogger(__name__)
42 43
43 44 # default renderer used to generate automated comments
44 45 DEFAULT_COMMENTS_RENDERER = 'rst'
45 46
46 47
47 48 class MarkupRenderer(object):
48 49 RESTRUCTUREDTEXT_DISALLOWED_DIRECTIVES = ['include', 'meta', 'raw']
49 50
50 51 MARKDOWN_PAT = re.compile(r'\.(md|mkdn?|mdown|markdown)$', re.IGNORECASE)
51 52 RST_PAT = re.compile(r'\.re?st$', re.IGNORECASE)
53 JUPYTER_PAT = re.compile(r'\.(ipynb)$', re.IGNORECASE)
52 54 PLAIN_PAT = re.compile(r'^readme$', re.IGNORECASE)
53 55
54 56 extensions = ['codehilite', 'extra', 'def_list', 'sane_lists']
55 57 markdown_renderer = markdown.Markdown(
56 58 extensions, safe_mode=True, enable_attributes=False)
57 59
58 60 markdown_renderer_flavored = markdown.Markdown(
59 61 extensions + [GithubFlavoredMarkdownExtension()], safe_mode=True,
60 62 enable_attributes=False)
61 63
62 64 # extensions together with weights. Lower sorts first, which lets us control
63 65 # how extensions are matched against readme names.
64 66 PLAIN_EXTS = [
65 67 # prefer no extension
66 68 ('', 0), # special case that renders README names without an extension
67 69 ('.text', 2), ('.TEXT', 2),
68 70 ('.txt', 3), ('.TXT', 3)
69 71 ]
70 72
71 73 RST_EXTS = [
72 74 ('.rst', 1), ('.rest', 1),
73 75 ('.RST', 2), ('.REST', 2)
74 76 ]
75 77
76 78 MARKDOWN_EXTS = [
77 79 ('.md', 1), ('.MD', 1),
78 80 ('.mkdn', 2), ('.MKDN', 2),
79 81 ('.mdown', 3), ('.MDOWN', 3),
80 82 ('.markdown', 4), ('.MARKDOWN', 4)
81 83 ]
82 84
83 85 def _detect_renderer(self, source, filename=None):
84 86 """
85 87 runs detection of what renderer should be used for generating html
86 88 from a markup language
87 89
88 90 filename can also explicitly be a renderer name
89 91
90 92 :param source:
91 93 :param filename:
92 94 """
93 95
94 96 if MarkupRenderer.MARKDOWN_PAT.findall(filename):
95 97 detected_renderer = 'markdown'
96 98 elif MarkupRenderer.RST_PAT.findall(filename):
97 99 detected_renderer = 'rst'
100 elif MarkupRenderer.JUPYTER_PAT.findall(filename):
101 detected_renderer = 'jupyter'
98 102 elif MarkupRenderer.PLAIN_PAT.findall(filename):
99 103 detected_renderer = 'plain'
100 104 else:
101 105 detected_renderer = 'plain'
102 106
103 107 return getattr(MarkupRenderer, detected_renderer)
104 108
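With the new pattern, detection for a notebook filename goes like this (the filename is an example):

    MarkupRenderer.JUPYTER_PAT.findall('analysis.ipynb')   # ['ipynb']
    # so _detect_renderer(source, filename='analysis.ipynb')
    # resolves to the MarkupRenderer.jupyter classmethod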
105 109 @classmethod
106 110 def renderer_from_filename(cls, filename, exclude):
107 111 """
108 112 Detect renderer markdown/rst from filename and optionally use exclude
109 113 list to remove some options. This is mostly used in helpers.
110 114 Returns None when no renderer can be detected.
111 115 """
112 116 def _filter(elements):
113 117 if isinstance(exclude, (list, tuple)):
114 118 return [x for x in elements if x not in exclude]
115 119 return elements
116 120
117 121 if filename.endswith(
118 122 tuple(_filter([x[0] for x in cls.MARKDOWN_EXTS if x[0]]))):
119 123 return 'markdown'
120 124 if filename.endswith(tuple(_filter([x[0] for x in cls.RST_EXTS if x[0]]))):
121 125 return 'rst'
122 126
123 127 return None
124 128
125 129 def render(self, source, filename=None):
126 130 """
127 131 Renders a given filename using detected renderer
128 132 it detects renderers based on file extension or mimetype.
129 133 At last it will just do a simple html replacing new lines with <br/>
130 134
131 135 :param file_name:
132 136 :param source:
133 137 """
134 138
135 139 renderer = self._detect_renderer(source, filename)
136 140 readme_data = renderer(source)
137 141 return readme_data
138 142
139 143 @classmethod
140 144 def _flavored_markdown(cls, text):
141 145 """
142 146 Github style flavored markdown
143 147
144 148 :param text:
145 149 """
146 150
147 151 # Extract pre blocks.
148 152 extractions = {}
149 153
150 154 def pre_extraction_callback(matchobj):
151 155 digest = md5_safe(matchobj.group(0))
152 156 extractions[digest] = matchobj.group(0)
153 157 return "{gfm-extraction-%s}" % digest
154 158 pattern = re.compile(r'<pre>.*?</pre>', re.MULTILINE | re.DOTALL)
155 159 text = re.sub(pattern, pre_extraction_callback, text)
156 160
157 161 # Prevent foo_bar_baz from ending up with an italic word in the middle.
158 162 def italic_callback(matchobj):
159 163 s = matchobj.group(0)
160 164 if list(s).count('_') >= 2:
161 165 return s.replace('_', r'\_')
162 166 return s
163 167 text = re.sub(r'^(?! {4}|\t)\w+_\w+_\w[\w_]*', italic_callback, text)
164 168
165 169 # Insert pre block extractions.
166 170 def pre_insert_callback(matchobj):
167 171 return '\n\n' + extractions[matchobj.group(1)]
168 172 text = re.sub(r'\{gfm-extraction-([0-9a-f]{32})\}',
169 173 pre_insert_callback, text)
170 174
171 175 return text
172 176
173 177 @classmethod
174 178 def urlify_text(cls, text):
175 179 url_pat = re.compile(r'(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]'
176 180 r'|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)')
177 181
178 182 def url_func(match_obj):
179 183 url_full = match_obj.groups()[0]
180 184 return '<a href="%(url)s">%(url)s</a>' % ({'url': url_full})
181 185
182 186 return url_pat.sub(url_func, text)
183 187
184 188 @classmethod
185 189 def plain(cls, source, universal_newline=True):
186 190 source = safe_unicode(source)
187 191 if universal_newline:
188 192 newline = '\n'
189 193 source = newline.join(source.splitlines())
190 194
191 195 source = cls.urlify_text(source)
192 196 return '<br />' + source.replace("\n", '<br />')
193 197
194 198 @classmethod
195 199 def markdown(cls, source, safe=True, flavored=True, mentions=False):
196 200 # Inline HTML is not allowed; any HTML tags present are replaced
197 201 # with [HTML_REMOVED] instead. This is controlled by
198 202 # the safe_mode=True parameter of the markdown method.
199 203
200 204 if flavored:
201 205 markdown_renderer = cls.markdown_renderer_flavored
202 206 else:
203 207 markdown_renderer = cls.markdown_renderer
204 208
205 209 if mentions:
206 210 mention_pat = re.compile(MENTIONS_REGEX)
207 211
208 212 def wrapp(match_obj):
209 213 uname = match_obj.groups()[0]
210 214 return ' **@%(uname)s** ' % {'uname': uname}
211 215 mention_hl = mention_pat.sub(wrapp, source).strip()
212 216 # mentions were extracted above; render again with mentions=False
213 217 return cls.markdown(mention_hl, safe=safe, flavored=flavored,
214 218 mentions=False)
215 219
216 220 source = safe_unicode(source)
217 221 try:
218 222 if flavored:
219 223 source = cls._flavored_markdown(source)
220 224 return markdown_renderer.convert(source)
221 225 except Exception:
222 226 log.exception('Error when rendering Markdown')
223 227 if safe:
224 228 log.debug('Fallback to render in plain mode')
225 229 return cls.plain(source)
226 230 else:
227 231 raise
228 232
229 233 @classmethod
230 234 def rst(cls, source, safe=True, mentions=False):
231 235 if mentions:
232 236 mention_pat = re.compile(MENTIONS_REGEX)
233 237
234 238 def wrapp(match_obj):
235 239 uname = match_obj.groups()[0]
236 240 return ' **@%(uname)s** ' % {'uname': uname}
237 241 mention_hl = mention_pat.sub(wrapp, source).strip()
238 242 # mentions were extracted above; render again with mentions=False
239 243 return cls.rst(mention_hl, safe=safe, mentions=False)
240 244
241 245 source = safe_unicode(source)
242 246 try:
243 247 docutils_settings = dict(
244 248 [(alias, None) for alias in
245 249 cls.RESTRUCTUREDTEXT_DISALLOWED_DIRECTIVES])
246 250
247 251 docutils_settings.update({'input_encoding': 'unicode',
248 252 'report_level': 4})
249 253
250 254 for k, v in docutils_settings.iteritems():
251 255 directives.register_directive(k, v)
252 256
253 257 parts = publish_parts(source=source,
254 258 writer_name="html4css1",
255 259 settings_overrides=docutils_settings)
256 260
257 261 return parts['html_title'] + parts["fragment"]
258 262 except Exception:
259 263 log.exception('Error when rendering RST')
260 264 if safe:
261 265 log.debug('Falling back to render in plain mode')
262 266 return cls.plain(source)
263 267 else:
264 268 raise
265 269
270 @classmethod
271 def jupyter(cls, source):
272 from rhodecode.lib import helpers
273 import nbformat
274 from nbconvert import HTMLExporter
275 from traitlets.config import Config
276
277 class CustomHTMLExporter(HTMLExporter):
278 def _template_file_default(self):
279 return 'basic'
280
281 def _sanitize_resources(resources):
282 """
283 Skip/sanitize some of the CSS generated and included by jupyter
284 so it doesn't mess up the UI so much
285 """
286
287 # TODO(marcink): we should probably replace this with a whole custom
288 # CSS set that doesn't break things, but jupyter-generated html has some
289 # special markers, so achieving that requires a custom HTML exporter
290 # template with _default_template_path_default
291
292 # strip the reset CSS
293 resources[0] = resources[0][resources[0].find('/*! Source'):]
294 return resources
295
296 def as_html(notebook):
297 conf = Config()
298 html_exporter = CustomHTMLExporter(config=conf)
299
300 (body, resources) = html_exporter.from_notebook_node(notebook)
301 header = '<!-- ## IPYTHON NOTEBOOK RENDERING ## -->'
302 js = MakoTemplate(r'''
303 <!-- Load mathjax -->
304 <!-- MathJax configuration -->
305 <script type="text/x-mathjax-config">
306 MathJax.Hub.Config({
307 jax: ["input/TeX","output/HTML-CSS", "output/PreviewHTML"],
308 extensions: ["tex2jax.js","MathMenu.js","MathZoom.js", "fast-preview.js", "AssistiveMML.js", "[Contrib]/a11y/accessibility-menu.js"],
309 TeX: {
310 extensions: ["AMSmath.js","AMSsymbols.js","noErrors.js","noUndefined.js"]
311 },
312 tex2jax: {
313 inlineMath: [ ['$','$'], ["\\(","\\)"] ],
314 displayMath: [ ['$$','$$'], ["\\[","\\]"] ],
315 processEscapes: true,
316 processEnvironments: true
317 },
318 // Center justify equations in code and markdown cells. Elsewhere
319 // we use CSS to left justify single line equations in code cells.
320 displayAlign: 'center',
321 "HTML-CSS": {
322 styles: {'.MathJax_Display': {"margin": 0}},
323 linebreaks: { automatic: true }
324 },
325 showMathMenu: false
326 });
327 </script>
328 <!-- End of mathjax configuration -->
329 <script src="${h.asset('js/src/math_jax/MathJax.js')}"></script>
330 ''').render(h=helpers)
331
332 css = '<style>{}</style>'.format(
333 ''.join(_sanitize_resources(resources['inlining']['css'])))
334
335 body = '\n'.join([header, css, js, body])
336 return body, resources
337
338 notebook = nbformat.reads(source, as_version=4)
339 (body, resources) = as_html(notebook)
340 return body
341
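A minimal sketch of feeding the renderer a notebook; the notebook below is a hypothetical nbformat v4 document, not taken from this change:

    import json

    notebook_source = json.dumps({
        'nbformat': 4, 'nbformat_minor': 2, 'metadata': {},
        'cells': [
            {'cell_type': 'markdown', 'metadata': {}, 'source': '# A title'},
        ],
    })
    html = MarkupRenderer.jupyter(notebook_source)
    # html = header comment + sanitized inline CSS + MathJax loader + nbconvert body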
266 342
267 343 class RstTemplateRenderer(object):
268 344
269 345 def __init__(self):
270 346 base = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
271 347 rst_template_dirs = [os.path.join(base, 'templates', 'rst_templates')]
272 348 self.template_store = TemplateLookup(
273 349 directories=rst_template_dirs,
274 350 input_encoding='utf-8',
275 351 imports=['from rhodecode.lib import helpers as h'])
276 352
277 353 def _get_template(self, templatename):
278 354 return self.template_store.get_template(templatename)
279 355
280 356 def render(self, template_name, **kwargs):
281 357 template = self._get_template(template_name)
282 358 return template.render(**kwargs)