markup: make relative links point to raw files for images and to standard files as links....
marcink -
r2003:f0eeceed default
@@ -1,2045 +1,2045 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2010-2017 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21 """
22 22 Helper functions
23 23
24 24 Consists of functions typically used within templates, but also
25 25 available to Controllers. This module is available to both as 'h'.
26 26 """
27 27
28 28 import random
29 29 import hashlib
30 30 import StringIO
31 31 import urllib
32 32 import math
33 33 import logging
34 34 import re
35 35 import urlparse
36 36 import time
37 37 import string
38 38 import hashlib
39 39 from collections import OrderedDict
40 40
41 41 import pygments
42 42 import itertools
43 43 import fnmatch
44 44
45 45 from datetime import datetime
46 46 from functools import partial
47 47 from pygments.formatters.html import HtmlFormatter
48 48 from pygments import highlight as code_highlight
49 49 from pygments.lexers import (
50 50 get_lexer_by_name, get_lexer_for_filename, get_lexer_for_mimetype)
51 51
52 52 from pyramid.threadlocal import get_current_request
53 53
54 54 from webhelpers.html import literal, HTML, escape
55 55 from webhelpers.html.tools import *
56 56 from webhelpers.html.builder import make_tag
57 57 from webhelpers.html.tags import auto_discovery_link, checkbox, css_classes, \
58 58 end_form, file, form as wh_form, hidden, image, javascript_link, link_to, \
59 59 link_to_if, link_to_unless, ol, required_legend, select, stylesheet_link, \
60 60 submit, text, password, textarea, title, ul, xml_declaration, radio
61 61 from webhelpers.html.tools import auto_link, button_to, highlight, \
62 62 js_obfuscate, mail_to, strip_links, strip_tags, tag_re
63 63 from webhelpers.pylonslib import Flash as _Flash
64 64 from webhelpers.text import chop_at, collapse, convert_accented_entities, \
65 65 convert_misc_entities, lchop, plural, rchop, remove_formatting, \
66 66 replace_whitespace, urlify, truncate, wrap_paragraphs
67 67 from webhelpers.date import time_ago_in_words
68 68 from webhelpers.paginate import Page as _Page
69 69 from webhelpers.html.tags import _set_input_attrs, _set_id_attr, \
70 70 convert_boolean_attrs, NotGiven, _make_safe_id_component
71 71 from webhelpers2.number import format_byte_size
72 72
73 73 from rhodecode.lib.action_parser import action_parser
74 74 from rhodecode.lib.ext_json import json
75 75 from rhodecode.lib.utils import repo_name_slug, get_custom_lexer
76 76 from rhodecode.lib.utils2 import str2bool, safe_unicode, safe_str, \
77 77 get_commit_safe, datetime_to_time, time_to_datetime, time_to_utcdatetime, \
78 78 AttributeDict, safe_int, md5, md5_safe
79 79 from rhodecode.lib.markup_renderer import MarkupRenderer, relative_links
80 80 from rhodecode.lib.vcs.exceptions import CommitDoesNotExistError
81 81 from rhodecode.lib.vcs.backends.base import BaseChangeset, EmptyCommit
82 82 from rhodecode.config.conf import DATE_FORMAT, DATETIME_FORMAT
83 83 from rhodecode.model.changeset_status import ChangesetStatusModel
84 84 from rhodecode.model.db import Permission, User, Repository
85 85 from rhodecode.model.repo_group import RepoGroupModel
86 86 from rhodecode.model.settings import IssueTrackerSettingsModel
87 87
88 88 log = logging.getLogger(__name__)
89 89
90 90
91 91 DEFAULT_USER = User.DEFAULT_USER
92 92 DEFAULT_USER_EMAIL = User.DEFAULT_USER_EMAIL
93 93
94 94
95 95 def url(*args, **kw):
96 96 from pylons import url as pylons_url
97 97 return pylons_url(*args, **kw)
98 98
99 99
100 100 def pylons_url_current(*args, **kw):
101 101 """
102 102 This function overrides pylons.url.current() which returns the current
103 103 path so that it will also work from a pyramid-only context. This
104 104 should be removed once the port to pyramid is complete.
105 105 """
106 106 from pylons import url as pylons_url
107 107 if not args and not kw:
108 108 request = get_current_request()
109 109 return request.path
110 110 return pylons_url.current(*args, **kw)
111 111
112 112 url.current = pylons_url_current
113 113
114 114
115 115 def url_replace(**qargs):
116 116 """ Returns the current request url while replacing query string args """
117 117
118 118 request = get_current_request()
119 119 new_args = request.GET.mixed()
120 120 new_args.update(qargs)
121 121 return url('', **new_args)
122 122
123 123
124 124 def asset(path, ver=None, **kwargs):
125 125 """
126 126 Helper to generate a static asset file path for rhodecode assets
127 127
128 128 eg. h.asset('images/image.png', ver='3923')
129 129
130 130 :param path: path of asset
131 131 :param ver: optional version query param to append as ?ver=
132 132 """
133 133 request = get_current_request()
134 134 query = {}
135 135 query.update(kwargs)
136 136 if ver:
137 137 query = {'ver': ver}
138 138 return request.static_path(
139 139 'rhodecode:public/{}'.format(path), _query=query)
140 140
141 141
142 142 default_html_escape_table = {
143 143 ord('&'): u'&amp;',
144 144 ord('<'): u'&lt;',
145 145 ord('>'): u'&gt;',
146 146 ord('"'): u'&quot;',
147 147 ord("'"): u'&#39;',
148 148 }
149 149
150 150
151 151 def html_escape(text, html_escape_table=default_html_escape_table):
152 152 """Produce entities within text."""
153 153 return text.translate(html_escape_table)
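# Illustrative usage sketch (not part of the original module): html_escape()
# relies on unicode.translate() with the ordinal-keyed table above, so it
# expects a unicode string and escapes the five common HTML metacharacters.
#
#   >>> html_escape(u'<a href="x">')
#   u'&lt;a href=&quot;x&quot;&gt;'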
154 154
155 155
156 156 def chop_at_smart(s, sub, inclusive=False, suffix_if_chopped=None):
157 157 """
158 158 Truncate string ``s`` at the first occurrence of ``sub``.
159 159
160 160 If ``inclusive`` is true, truncate just after ``sub`` rather than at it.
161 161 """
162 162 suffix_if_chopped = suffix_if_chopped or ''
163 163 pos = s.find(sub)
164 164 if pos == -1:
165 165 return s
166 166
167 167 if inclusive:
168 168 pos += len(sub)
169 169
170 170 chopped = s[:pos]
171 171 left = s[pos:].strip()
172 172
173 173 if left and suffix_if_chopped:
174 174 chopped += suffix_if_chopped
175 175
176 176 return chopped
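# Illustrative examples (not part of the original module), showing how
# chop_at_smart() truncates at the first occurrence of ``sub`` and only
# appends ``suffix_if_chopped`` when something was actually cut off:
#
#   >>> chop_at_smart('first line\nsecond line', '\n')
#   'first line'
#   >>> chop_at_smart('first line\nsecond line', '\n', suffix_if_chopped='...')
#   'first line...'
#   >>> chop_at_smart('no separator here', '\n')
#   'no separator here'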
177 177
178 178
179 179 def shorter(text, size=20):
180 180 postfix = '...'
181 181 if len(text) > size:
182 182 return text[:size - len(postfix)] + postfix
183 183 return text
184 184
185 185
186 186 def _reset(name, value=None, id=NotGiven, type="reset", **attrs):
187 187 """
188 188 Reset button
189 189 """
190 190 _set_input_attrs(attrs, type, name, value)
191 191 _set_id_attr(attrs, id, name)
192 192 convert_boolean_attrs(attrs, ["disabled"])
193 193 return HTML.input(**attrs)
194 194
195 195 reset = _reset
196 196 safeid = _make_safe_id_component
197 197
198 198
199 199 def branding(name, length=40):
200 200 return truncate(name, length, indicator="")
201 201
202 202
203 203 def FID(raw_id, path):
204 204 """
205 205 Creates a unique ID for a filenode based on the hash of its path and commit;
206 206 it's safe to use in urls
207 207
208 208 :param raw_id:
209 209 :param path:
210 210 """
211 211
212 212 return 'c-%s-%s' % (short_id(raw_id), md5_safe(path)[:12])
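# Illustrative note (not part of the original module): for a hypothetical
# commit id and path, FID() yields an id of the form
#   'c-<first 12 chars of commit id>-<first 12 chars of md5(path)>'
# e.g. FID('a91f0dcbb81e37f...', 'docs/index.rst') -> 'c-a91f0dcbb81e-<12 hex chars>'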
213 213
214 214
215 215 class _GetError(object):
216 216 """Get error from form_errors, and represent it as span wrapped error
217 217 message
218 218
219 219 :param field_name: field to fetch errors for
220 220 :param form_errors: form errors dict
221 221 """
222 222
223 223 def __call__(self, field_name, form_errors):
224 224 tmpl = """<span class="error_msg">%s</span>"""
225 225 if form_errors and field_name in form_errors:
226 226 return literal(tmpl % form_errors.get(field_name))
227 227
228 228 get_error = _GetError()
229 229
230 230
231 231 class _ToolTip(object):
232 232
233 233 def __call__(self, tooltip_title, trim_at=50):
234 234 """
235 235 Special function just to wrap our text into nicely formatted,
236 236 auto-wrapped text
237 237
238 238 :param tooltip_title:
239 239 """
240 240 tooltip_title = escape(tooltip_title)
241 241 tooltip_title = tooltip_title.replace('<', '&lt;').replace('>', '&gt;')
242 242 return tooltip_title
243 243 tooltip = _ToolTip()
244 244
245 245
246 246 def files_breadcrumbs(repo_name, commit_id, file_path):
247 247 if isinstance(file_path, str):
248 248 file_path = safe_unicode(file_path)
249 249
250 250 # TODO: johbo: Is this always a url like path, or is this operating
251 251 # system dependent?
252 252 path_segments = file_path.split('/')
253 253
254 254 repo_name_html = escape(repo_name)
255 255 if len(path_segments) == 1 and path_segments[0] == '':
256 256 url_segments = [repo_name_html]
257 257 else:
258 258 url_segments = [
259 259 link_to(
260 260 repo_name_html,
261 261 route_path(
262 262 'repo_files',
263 263 repo_name=repo_name,
264 264 commit_id=commit_id,
265 265 f_path=''),
266 266 class_='pjax-link')]
267 267
268 268 last_cnt = len(path_segments) - 1
269 269 for cnt, segment in enumerate(path_segments):
270 270 if not segment:
271 271 continue
272 272 segment_html = escape(segment)
273 273
274 274 if cnt != last_cnt:
275 275 url_segments.append(
276 276 link_to(
277 277 segment_html,
278 278 route_path(
279 279 'repo_files',
280 280 repo_name=repo_name,
281 281 commit_id=commit_id,
282 282 f_path='/'.join(path_segments[:cnt + 1])),
283 283 class_='pjax-link'))
284 284 else:
285 285 url_segments.append(segment_html)
286 286
287 287 return literal('/'.join(url_segments))
288 288
289 289
290 290 class CodeHtmlFormatter(HtmlFormatter):
291 291 """
292 292 Custom HTML formatter for source code
293 293 """
294 294
295 295 def wrap(self, source, outfile):
296 296 return self._wrap_div(self._wrap_pre(self._wrap_code(source)))
297 297
298 298 def _wrap_code(self, source):
299 299 for cnt, it in enumerate(source):
300 300 i, t = it
301 301 t = '<div id="L%s">%s</div>' % (cnt + 1, t)
302 302 yield i, t
303 303
304 304 def _wrap_tablelinenos(self, inner):
305 305 dummyoutfile = StringIO.StringIO()
306 306 lncount = 0
307 307 for t, line in inner:
308 308 if t:
309 309 lncount += 1
310 310 dummyoutfile.write(line)
311 311
312 312 fl = self.linenostart
313 313 mw = len(str(lncount + fl - 1))
314 314 sp = self.linenospecial
315 315 st = self.linenostep
316 316 la = self.lineanchors
317 317 aln = self.anchorlinenos
318 318 nocls = self.noclasses
319 319 if sp:
320 320 lines = []
321 321
322 322 for i in range(fl, fl + lncount):
323 323 if i % st == 0:
324 324 if i % sp == 0:
325 325 if aln:
326 326 lines.append('<a href="#%s%d" class="special">%*d</a>' %
327 327 (la, i, mw, i))
328 328 else:
329 329 lines.append('<span class="special">%*d</span>' % (mw, i))
330 330 else:
331 331 if aln:
332 332 lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
333 333 else:
334 334 lines.append('%*d' % (mw, i))
335 335 else:
336 336 lines.append('')
337 337 ls = '\n'.join(lines)
338 338 else:
339 339 lines = []
340 340 for i in range(fl, fl + lncount):
341 341 if i % st == 0:
342 342 if aln:
343 343 lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
344 344 else:
345 345 lines.append('%*d' % (mw, i))
346 346 else:
347 347 lines.append('')
348 348 ls = '\n'.join(lines)
349 349
350 350 # in case you wonder about the seemingly redundant <div> here: since the
351 351 # content in the other cell also is wrapped in a div, some browsers in
352 352 # some configurations seem to mess up the formatting...
353 353 if nocls:
354 354 yield 0, ('<table class="%stable">' % self.cssclass +
355 355 '<tr><td><div class="linenodiv" '
356 356 'style="background-color: #f0f0f0; padding-right: 10px">'
357 357 '<pre style="line-height: 125%">' +
358 358 ls + '</pre></div></td><td id="hlcode" class="code">')
359 359 else:
360 360 yield 0, ('<table class="%stable">' % self.cssclass +
361 361 '<tr><td class="linenos"><div class="linenodiv"><pre>' +
362 362 ls + '</pre></div></td><td id="hlcode" class="code">')
363 363 yield 0, dummyoutfile.getvalue()
364 364 yield 0, '</td></tr></table>'
365 365
366 366
367 367 class SearchContentCodeHtmlFormatter(CodeHtmlFormatter):
368 368 def __init__(self, **kw):
369 369 # only show these line numbers if set
370 370 self.only_lines = kw.pop('only_line_numbers', [])
371 371 self.query_terms = kw.pop('query_terms', [])
372 372 self.max_lines = kw.pop('max_lines', 5)
373 373 self.line_context = kw.pop('line_context', 3)
374 374 self.url = kw.pop('url', None)
375 375
376 376 super(CodeHtmlFormatter, self).__init__(**kw)
377 377
378 378 def _wrap_code(self, source):
379 379 for cnt, it in enumerate(source):
380 380 i, t = it
381 381 t = '<pre>%s</pre>' % t
382 382 yield i, t
383 383
384 384 def _wrap_tablelinenos(self, inner):
385 385 yield 0, '<table class="code-highlight %stable">' % self.cssclass
386 386
387 387 last_shown_line_number = 0
388 388 current_line_number = 1
389 389
390 390 for t, line in inner:
391 391 if not t:
392 392 yield t, line
393 393 continue
394 394
395 395 if current_line_number in self.only_lines:
396 396 if last_shown_line_number + 1 != current_line_number:
397 397 yield 0, '<tr>'
398 398 yield 0, '<td class="line">...</td>'
399 399 yield 0, '<td id="hlcode" class="code"></td>'
400 400 yield 0, '</tr>'
401 401
402 402 yield 0, '<tr>'
403 403 if self.url:
404 404 yield 0, '<td class="line"><a href="%s#L%i">%i</a></td>' % (
405 405 self.url, current_line_number, current_line_number)
406 406 else:
407 407 yield 0, '<td class="line"><a href="">%i</a></td>' % (
408 408 current_line_number)
409 409 yield 0, '<td id="hlcode" class="code">' + line + '</td>'
410 410 yield 0, '</tr>'
411 411
412 412 last_shown_line_number = current_line_number
413 413
414 414 current_line_number += 1
415 415
416 416
417 417 yield 0, '</table>'
418 418
419 419
420 420 def extract_phrases(text_query):
421 421 """
422 422 Extracts phrases from a search term string, making sure phrases
423 423 contained in double quotes are kept together, and discarding empty
424 424 or whitespace-only values, eg.
425 425
426 426 'some text "a phrase" more' => ['some', 'text', 'a phrase', 'more']
427 427
428 428 """
429 429
430 430 in_phrase = False
431 431 buf = ''
432 432 phrases = []
433 433 for char in text_query:
434 434 if in_phrase:
435 435 if char == '"': # end phrase
436 436 phrases.append(buf)
437 437 buf = ''
438 438 in_phrase = False
439 439 continue
440 440 else:
441 441 buf += char
442 442 continue
443 443 else:
444 444 if char == '"': # start phrase
445 445 in_phrase = True
446 446 phrases.append(buf)
447 447 buf = ''
448 448 continue
449 449 elif char == ' ':
450 450 phrases.append(buf)
451 451 buf = ''
452 452 continue
453 453 else:
454 454 buf += char
455 455
456 456 phrases.append(buf)
457 457 phrases = [phrase.strip() for phrase in phrases if phrase.strip()]
458 458 return phrases
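# Illustrative examples (not part of the original module), showing how quoted
# phrases are kept together and empty/whitespace-only buffers are discarded:
#
#   >>> extract_phrases('author:"john doe" fix')
#   ['author:', 'john doe', 'fix']
#   >>> extract_phrases('  "   "  a ')
#   ['a']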
459 459
460 460
461 461 def get_matching_offsets(text, phrases):
462 462 """
463 463 Returns a list of string offsets in `text` that the list of `phrases` match
464 464
465 465 >>> get_matching_offsets('some text here', ['some', 'here'])
466 466 [(0, 4), (10, 14)]
467 467
468 468 """
469 469 offsets = []
470 470 for phrase in phrases:
471 471 for match in re.finditer(phrase, text):
472 472 offsets.append((match.start(), match.end()))
473 473
474 474 return offsets
475 475
476 476
477 477 def normalize_text_for_matching(x):
478 478 """
479 479 Replaces all non-alphanumeric characters with spaces and lower-cases the
480 480 string, useful for comparing two text strings without punctuation
481 481 """
482 482 return re.sub(r'[^\w]', ' ', x.lower())
483 483
484 484
485 485 def get_matching_line_offsets(lines, terms):
486 486 """ Return a dict of `lines` indices (starting from 1) that match a
487 487 text search query, along with `context` lines above/below matching lines
488 488
489 489 :param lines: list of strings representing lines
490 490 :param terms: search term string to match in lines eg. 'some text'
491 491 :param context: number of lines above/below a matching line to add to result
492 492 :param max_lines: cut off for lines of interest
493 493 eg.
494 494
495 495 text = '''
496 496 words words words
497 497 words words words
498 498 some text some
499 499 words words words
500 500 words words words
501 501 text here what
502 502 '''
503 503 get_matching_line_offsets(text, 'text', context=1)
504 504 {3: [(5, 9)], 6: [(0, 4)]}
505 505
506 506 """
507 507 matching_lines = {}
508 508 phrases = [normalize_text_for_matching(phrase)
509 509 for phrase in extract_phrases(terms)]
510 510
511 511 for line_index, line in enumerate(lines, start=1):
512 512 match_offsets = get_matching_offsets(
513 513 normalize_text_for_matching(line), phrases)
514 514 if match_offsets:
515 515 matching_lines[line_index] = match_offsets
516 516
517 517 return matching_lines
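# Illustrative example (not part of the original module): line indices start
# at 1 and map to (start, end) offsets within the normalized line text.
#
#   >>> lines = ['words words', 'some text some', 'words']
#   >>> get_matching_line_offsets(lines, 'text')
#   {2: [(5, 9)]}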
518 518
519 519
520 520 def hsv_to_rgb(h, s, v):
521 521 """ Convert hsv color values to rgb """
522 522
523 523 if s == 0.0:
524 524 return v, v, v
525 525 i = int(h * 6.0) # XXX assume int() truncates!
526 526 f = (h * 6.0) - i
527 527 p = v * (1.0 - s)
528 528 q = v * (1.0 - s * f)
529 529 t = v * (1.0 - s * (1.0 - f))
530 530 i = i % 6
531 531 if i == 0:
532 532 return v, t, p
533 533 if i == 1:
534 534 return q, v, p
535 535 if i == 2:
536 536 return p, v, t
537 537 if i == 3:
538 538 return p, q, v
539 539 if i == 4:
540 540 return t, p, v
541 541 if i == 5:
542 542 return v, p, q
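# Worked examples (not part of the original module) for the HSV -> RGB
# conversion above, with all channels in the 0.0-1.0 range:
#
#   >>> hsv_to_rgb(0.0, 1.0, 1.0)   # hue 0, full saturation -> pure red
#   (1.0, 0.0, 0.0)
#   >>> hsv_to_rgb(0.5, 0.0, 0.8)   # zero saturation -> grey (v, v, v)
#   (0.8, 0.8, 0.8)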
543 543
544 544
545 545 def unique_color_generator(n=10000, saturation=0.10, lightness=0.95):
546 546 """
547 547 Generator for getting n evenly distributed colors using
548 548 hsv color and the golden ratio. It always returns the same order of colors
549 549
550 550 :param n: number of colors to generate
551 551 :param saturation: saturation of returned colors
552 552 :param lightness: lightness of returned colors
553 553 :returns: RGB tuple
554 554 """
555 555
556 556 golden_ratio = 0.618033988749895
557 557 h = 0.22717784590367374
558 558
559 559 for _ in xrange(n):
560 560 h += golden_ratio
561 561 h %= 1
562 562 HSV_tuple = [h, saturation, lightness]
563 563 RGB_tuple = hsv_to_rgb(*HSV_tuple)
564 564 yield map(lambda x: str(int(x * 256)), RGB_tuple)
565 565
566 566
567 567 def color_hasher(n=10000, saturation=0.10, lightness=0.95):
568 568 """
569 569 Returns a function which, when called with an argument, returns a unique
570 570 color for that argument, eg.
571 571
572 572 :param n: number of colors to generate
573 573 :param saturation: saturation of returned colors
574 574 :param lightness: lightness of returned colors
575 575 :returns: css RGB string
576 576
577 577 >>> color_hash = color_hasher()
578 578 >>> color_hash('hello')
579 579 'rgb(34, 12, 59)'
580 580 >>> color_hash('hello')
581 581 'rgb(34, 12, 59)'
582 582 >>> color_hash('other')
583 583 'rgb(90, 224, 159)'
584 584 """
585 585
586 586 color_dict = {}
587 587 cgenerator = unique_color_generator(
588 588 saturation=saturation, lightness=lightness)
589 589
590 590 def get_color_string(thing):
591 591 if thing in color_dict:
592 592 col = color_dict[thing]
593 593 else:
594 594 col = color_dict[thing] = cgenerator.next()
595 595 return "rgb(%s)" % (', '.join(col))
596 596
597 597 return get_color_string
598 598
599 599
600 600 def get_lexer_safe(mimetype=None, filepath=None):
601 601 """
602 602 Tries to return a relevant pygments lexer using mimetype/filepath name,
603 603 defaulting to plain text if none could be found
604 604 """
605 605 lexer = None
606 606 try:
607 607 if mimetype:
608 608 lexer = get_lexer_for_mimetype(mimetype)
609 609 if not lexer:
610 610 lexer = get_lexer_for_filename(filepath)
611 611 except pygments.util.ClassNotFound:
612 612 pass
613 613
614 614 if not lexer:
615 615 lexer = get_lexer_by_name('text')
616 616
617 617 return lexer
618 618
619 619
620 620 def get_lexer_for_filenode(filenode):
621 621 lexer = get_custom_lexer(filenode.extension) or filenode.lexer
622 622 return lexer
623 623
624 624
625 625 def pygmentize(filenode, **kwargs):
626 626 """
627 627 pygmentize function using pygments
628 628
629 629 :param filenode:
630 630 """
631 631 lexer = get_lexer_for_filenode(filenode)
632 632 return literal(code_highlight(filenode.content, lexer,
633 633 CodeHtmlFormatter(**kwargs)))
634 634
635 635
636 636 def is_following_repo(repo_name, user_id):
637 637 from rhodecode.model.scm import ScmModel
638 638 return ScmModel().is_following_repo(repo_name, user_id)
639 639
640 640
641 641 class _Message(object):
642 642 """A message returned by ``Flash.pop_messages()``.
643 643
644 644 Converting the message to a string returns the message text. Instances
645 645 also have the following attributes:
646 646
647 647 * ``message``: the message text.
648 648 * ``category``: the category specified when the message was created.
649 649 """
650 650
651 651 def __init__(self, category, message):
652 652 self.category = category
653 653 self.message = message
654 654
655 655 def __str__(self):
656 656 return self.message
657 657
658 658 __unicode__ = __str__
659 659
660 660 def __html__(self):
661 661 return escape(safe_unicode(self.message))
662 662
663 663
664 664 class Flash(_Flash):
665 665
666 666 def pop_messages(self, request=None):
667 667 """Return all accumulated messages and delete them from the session.
668 668
669 669 The return value is a list of ``Message`` objects.
670 670 """
671 671 messages = []
672 672
673 673 if request:
674 674 session = request.session
675 675 else:
676 676 from pylons import session
677 677
678 678 # Pop the 'old' pylons flash messages. They are tuples of the form
679 679 # (category, message)
680 680 for cat, msg in session.pop(self.session_key, []):
681 681 messages.append(_Message(cat, msg))
682 682
683 683 # Pop the 'new' pyramid flash messages for each category as list
684 684 # of strings.
685 685 for cat in self.categories:
686 686 for msg in session.pop_flash(queue=cat):
687 687 messages.append(_Message(cat, msg))
688 688 # Map messages from the default queue to the 'notice' category.
689 689 for msg in session.pop_flash():
690 690 messages.append(_Message('notice', msg))
691 691
692 692 session.save()
693 693 return messages
694 694
695 695 def json_alerts(self, request=None):
696 696 payloads = []
697 697 messages = flash.pop_messages(request=request)
698 698 if messages:
699 699 for message in messages:
700 700 subdata = {}
701 701 if hasattr(message.message, 'rsplit'):
702 702 flash_data = message.message.rsplit('|DELIM|', 1)
703 703 org_message = flash_data[0]
704 704 if len(flash_data) > 1:
705 705 subdata = json.loads(flash_data[1])
706 706 else:
707 707 org_message = message.message
708 708 payloads.append({
709 709 'message': {
710 710 'message': u'{}'.format(org_message),
711 711 'level': message.category,
712 712 'force': True,
713 713 'subdata': subdata
714 714 }
715 715 })
716 716 return json.dumps(payloads)
717 717
718 718 flash = Flash()
719 719
720 720 #==============================================================================
721 721 # SCM FILTERS available via h.
722 722 #==============================================================================
723 723 from rhodecode.lib.vcs.utils import author_name, author_email
724 724 from rhodecode.lib.utils2 import credentials_filter, age as _age
725 725 from rhodecode.model.db import User, ChangesetStatus
726 726
727 727 age = _age
728 728 capitalize = lambda x: x.capitalize()
729 729 email = author_email
730 730 short_id = lambda x: x[:12]
731 731 hide_credentials = lambda x: ''.join(credentials_filter(x))
732 732
733 733
734 734 def age_component(datetime_iso, value=None, time_is_local=False):
735 735 title = value or format_date(datetime_iso)
736 736 tzinfo = '+00:00'
737 737
738 738 # detect if we have a timezone info, otherwise, add it
739 739 if isinstance(datetime_iso, datetime) and not datetime_iso.tzinfo:
740 740 if time_is_local:
741 741 tzinfo = time.strftime("+%H:%M",
742 742 time.gmtime(
743 743 (datetime.now() - datetime.utcnow()).seconds + 1
744 744 )
745 745 )
746 746
747 747 return literal(
748 748 '<time class="timeago tooltip" '
749 749 'title="{1}{2}" datetime="{0}{2}">{1}</time>'.format(
750 750 datetime_iso, title, tzinfo))
751 751
752 752
753 753 def _shorten_commit_id(commit_id):
754 754 from rhodecode import CONFIG
755 755 def_len = safe_int(CONFIG.get('rhodecode_show_sha_length', 12))
756 756 return commit_id[:def_len]
757 757
758 758
759 759 def show_id(commit):
760 760 """
761 761 Configurable function that shows the commit ID;
762 762 by default it's r123:fffeeefffeee
763 763
764 764 :param commit: commit instance
765 765 """
766 766 from rhodecode import CONFIG
767 767 show_idx = str2bool(CONFIG.get('rhodecode_show_revision_number', True))
768 768
769 769 raw_id = _shorten_commit_id(commit.raw_id)
770 770 if show_idx:
771 771 return 'r%s:%s' % (commit.idx, raw_id)
772 772 else:
773 773 return '%s' % (raw_id, )
774 774
775 775
776 776 def format_date(date):
777 777 """
778 778 use a standardized formatting for dates used in RhodeCode
779 779
780 780 :param date: date/datetime object
781 781 :return: formatted date
782 782 """
783 783
784 784 if date:
785 785 _fmt = "%a, %d %b %Y %H:%M:%S"
786 786 return safe_unicode(date.strftime(_fmt))
787 787
788 788 return u""
789 789
790 790
791 791 class _RepoChecker(object):
792 792
793 793 def __init__(self, backend_alias):
794 794 self._backend_alias = backend_alias
795 795
796 796 def __call__(self, repository):
797 797 if hasattr(repository, 'alias'):
798 798 _type = repository.alias
799 799 elif hasattr(repository, 'repo_type'):
800 800 _type = repository.repo_type
801 801 else:
802 802 _type = repository
803 803 return _type == self._backend_alias
804 804
805 805 is_git = _RepoChecker('git')
806 806 is_hg = _RepoChecker('hg')
807 807 is_svn = _RepoChecker('svn')
808 808
809 809
810 810 def get_repo_type_by_name(repo_name):
811 811 repo = Repository.get_by_repo_name(repo_name)
812 812 return repo.repo_type
813 813
814 814
815 815 def is_svn_without_proxy(repository):
816 816 if is_svn(repository):
817 817 from rhodecode.model.settings import VcsSettingsModel
818 818 conf = VcsSettingsModel().get_ui_settings_as_config_obj()
819 819 return not str2bool(conf.get('vcs_svn_proxy', 'http_requests_enabled'))
820 820 return False
821 821
822 822
823 823 def discover_user(author):
824 824 """
825 825 Tries to discover a RhodeCode User based on the author string. The author
826 826 string is typically `FirstName LastName <email@address.com>`
827 827 """
828 828
829 829 # if author is already an instance use it for extraction
830 830 if isinstance(author, User):
831 831 return author
832 832
833 833 # Valid email in the attribute passed, see if they're in the system
834 834 _email = author_email(author)
835 835 if _email != '':
836 836 user = User.get_by_email(_email, case_insensitive=True, cache=True)
837 837 if user is not None:
838 838 return user
839 839
840 840 # Maybe it's a username, we try to extract it and fetch by username ?
841 841 _author = author_name(author)
842 842 user = User.get_by_username(_author, case_insensitive=True, cache=True)
843 843 if user is not None:
844 844 return user
845 845
846 846 return None
847 847
848 848
849 849 def email_or_none(author):
850 850 # extract email from the commit string
851 851 _email = author_email(author)
852 852
853 853 # If we have an email, use it, otherwise
854 854 # see if it contains a username we can get an email from
855 855 if _email != '':
856 856 return _email
857 857 else:
858 858 user = User.get_by_username(
859 859 author_name(author), case_insensitive=True, cache=True)
860 860
861 861 if user is not None:
862 862 return user.email
863 863
864 864 # No valid email, not a valid user in the system, none!
865 865 return None
866 866
867 867
868 868 def link_to_user(author, length=0, **kwargs):
869 869 user = discover_user(author)
870 870 # user can be None, but if we have it already it means we can re-use it
871 871 # in the person() function, so we save one expensive query
872 872 if user:
873 873 author = user
874 874
875 875 display_person = person(author, 'username_or_name_or_email')
876 876 if length:
877 877 display_person = shorter(display_person, length)
878 878
879 879 if user:
880 880 return link_to(
881 881 escape(display_person),
882 882 route_path('user_profile', username=user.username),
883 883 **kwargs)
884 884 else:
885 885 return escape(display_person)
886 886
887 887
888 888 def person(author, show_attr="username_and_name"):
889 889 user = discover_user(author)
890 890 if user:
891 891 return getattr(user, show_attr)
892 892 else:
893 893 _author = author_name(author)
894 894 _email = email(author)
895 895 return _author or _email
896 896
897 897
898 898 def author_string(email):
899 899 if email:
900 900 user = User.get_by_email(email, case_insensitive=True, cache=True)
901 901 if user:
902 902 if user.first_name or user.last_name:
903 903 return '%s %s &lt;%s&gt;' % (
904 904 user.first_name, user.last_name, email)
905 905 else:
906 906 return email
907 907 else:
908 908 return email
909 909 else:
910 910 return None
911 911
912 912
913 913 def person_by_id(id_, show_attr="username_and_name"):
914 914 # attr to return from fetched user
915 915 person_getter = lambda usr: getattr(usr, show_attr)
916 916
917 917 # maybe it's an ID?
918 918 if str(id_).isdigit() or isinstance(id_, int):
919 919 id_ = int(id_)
920 920 user = User.get(id_)
921 921 if user is not None:
922 922 return person_getter(user)
923 923 return id_
924 924
925 925
926 926 def gravatar_with_user(request, author, show_disabled=False):
927 927 _render = request.get_partial_renderer('base/base.mako')
928 928 return _render('gravatar_with_user', author, show_disabled=show_disabled)
929 929
930 930
931 931 def desc_stylize(value):
932 932 """
933 933 converts tags from value into html equivalent
934 934
935 935 :param value:
936 936 """
937 937 if not value:
938 938 return ''
939 939
940 940 value = re.sub(r'\[see\ \=\>\ *([a-zA-Z0-9\/\=\?\&\ \:\/\.\-]*)\]',
941 941 '<div class="metatag" tag="see">see =&gt; \\1 </div>', value)
942 942 value = re.sub(r'\[license\ \=\>\ *([a-zA-Z0-9\/\=\?\&\ \:\/\.\-]*)\]',
943 943 '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/\\1">\\1</a></div>', value)
944 944 value = re.sub(r'\[(requires|recommends|conflicts|base)\ \=\>\ *([a-zA-Z0-9\-\/]*)\]',
945 945 '<div class="metatag" tag="\\1">\\1 =&gt; <a href="/\\2">\\2</a></div>', value)
946 946 value = re.sub(r'\[(lang|language)\ \=\>\ *([a-zA-Z\-\/\#\+]*)\]',
947 947 '<div class="metatag" tag="lang">\\2</div>', value)
948 948 value = re.sub(r'\[([a-z]+)\]',
949 949 '<div class="metatag" tag="\\1">\\1</div>', value)
950 950
951 951 return value
952 952
953 953
954 954 def escaped_stylize(value):
955 955 """
956 956 converts tags from value into html equivalent, but escaping its value first
957 957 """
958 958 if not value:
959 959 return ''
960 960
961 961 # Using the default webhelper escape method, but we have to force it to a
962 962 # plain unicode string instead of a markup tag so it can be used in regex expressions
963 963 value = unicode(escape(safe_unicode(value)))
964 964
965 965 value = re.sub(r'\[see\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]',
966 966 '<div class="metatag" tag="see">see =&gt; \\1 </div>', value)
967 967 value = re.sub(r'\[license\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]',
968 968 '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/\\1">\\1</a></div>', value)
969 969 value = re.sub(r'\[(requires|recommends|conflicts|base)\ \=\&gt;\ *([a-zA-Z0-9\-\/]*)\]',
970 970 '<div class="metatag" tag="\\1">\\1 =&gt; <a href="/\\2">\\2</a></div>', value)
971 971 value = re.sub(r'\[(lang|language)\ \=\&gt;\ *([a-zA-Z\-\/\#\+]*)\]',
972 972 '<div class="metatag" tag="lang">\\2</div>', value)
973 973 value = re.sub(r'\[([a-z]+)\]',
974 974 '<div class="metatag" tag="\\1">\\1</div>', value)
975 975
976 976 return value
977 977
978 978
979 979 def bool2icon(value):
980 980 """
981 981 Returns the boolean value of a given value, represented as an html element
982 982 with classes that will represent icons
983 983
984 984 :param value: given value to convert to html node
985 985 """
986 986
987 987 if value: # does bool conversion
988 988 return HTML.tag('i', class_="icon-true")
989 989 else: # not true as bool
990 990 return HTML.tag('i', class_="icon-false")
991 991
992 992
993 993 #==============================================================================
994 994 # PERMS
995 995 #==============================================================================
996 996 from rhodecode.lib.auth import HasPermissionAny, HasPermissionAll, \
997 997 HasRepoPermissionAny, HasRepoPermissionAll, HasRepoGroupPermissionAll, \
998 998 HasRepoGroupPermissionAny, HasRepoPermissionAnyApi, get_csrf_token, \
999 999 csrf_token_key
1000 1000
1001 1001
1002 1002 #==============================================================================
1003 1003 # GRAVATAR URL
1004 1004 #==============================================================================
1005 1005 class InitialsGravatar(object):
1006 1006 def __init__(self, email_address, first_name, last_name, size=30,
1007 1007 background=None, text_color='#fff'):
1008 1008 self.size = size
1009 1009 self.first_name = first_name
1010 1010 self.last_name = last_name
1011 1011 self.email_address = email_address
1012 1012 self.background = background or self.str2color(email_address)
1013 1013 self.text_color = text_color
1014 1014
1015 1015 def get_color_bank(self):
1016 1016 """
1017 1017 returns a predefined list of colors that gravatars can use.
1018 1018 Those are randomized distinct colors that guarantee readability and
1019 1019 uniqueness.
1020 1020
1021 1021 generated with: http://phrogz.net/css/distinct-colors.html
1022 1022 """
1023 1023 return [
1024 1024 '#bf3030', '#a67f53', '#00ff00', '#5989b3', '#392040', '#d90000',
1025 1025 '#402910', '#204020', '#79baf2', '#a700b3', '#bf6060', '#7f5320',
1026 1026 '#008000', '#003059', '#ee00ff', '#ff0000', '#8c4b00', '#007300',
1027 1027 '#005fb3', '#de73e6', '#ff4040', '#ffaa00', '#3df255', '#203140',
1028 1028 '#47004d', '#591616', '#664400', '#59b365', '#0d2133', '#83008c',
1029 1029 '#592d2d', '#bf9f60', '#73e682', '#1d3f73', '#73006b', '#402020',
1030 1030 '#b2862d', '#397341', '#597db3', '#e600d6', '#a60000', '#736039',
1031 1031 '#00b318', '#79aaf2', '#330d30', '#ff8080', '#403010', '#16591f',
1032 1032 '#002459', '#8c4688', '#e50000', '#ffbf40', '#00732e', '#102340',
1033 1033 '#bf60ac', '#8c4646', '#cc8800', '#00a642', '#1d3473', '#b32d98',
1034 1034 '#660e00', '#ffd580', '#80ffb2', '#7391e6', '#733967', '#d97b6c',
1035 1035 '#8c5e00', '#59b389', '#3967e6', '#590047', '#73281d', '#665200',
1036 1036 '#00e67a', '#2d50b3', '#8c2377', '#734139', '#b2982d', '#16593a',
1037 1037 '#001859', '#ff00aa', '#a65e53', '#ffcc00', '#0d3321', '#2d3959',
1038 1038 '#731d56', '#401610', '#4c3d00', '#468c6c', '#002ca6', '#d936a3',
1039 1039 '#d94c36', '#403920', '#36d9a3', '#0d1733', '#592d4a', '#993626',
1040 1040 '#cca300', '#00734d', '#46598c', '#8c005e', '#7f1100', '#8c7000',
1041 1041 '#00a66f', '#7382e6', '#b32d74', '#d9896c', '#ffe680', '#1d7362',
1042 1042 '#364cd9', '#73003d', '#d93a00', '#998a4d', '#59b3a1', '#5965b3',
1043 1043 '#e5007a', '#73341d', '#665f00', '#00b38f', '#0018b3', '#59163a',
1044 1044 '#b2502d', '#bfb960', '#00ffcc', '#23318c', '#a6537f', '#734939',
1045 1045 '#b2a700', '#104036', '#3d3df2', '#402031', '#e56739', '#736f39',
1046 1046 '#79f2ea', '#000059', '#401029', '#4c1400', '#ffee00', '#005953',
1047 1047 '#101040', '#990052', '#402820', '#403d10', '#00ffee', '#0000d9',
1048 1048 '#ff80c4', '#a66953', '#eeff00', '#00ccbe', '#8080ff', '#e673a1',
1049 1049 '#a62c00', '#474d00', '#1a3331', '#46468c', '#733950', '#662900',
1050 1050 '#858c23', '#238c85', '#0f0073', '#b20047', '#d9986c', '#becc00',
1051 1051 '#396f73', '#281d73', '#ff0066', '#ff6600', '#dee673', '#59adb3',
1052 1052 '#6559b3', '#590024', '#b2622d', '#98b32d', '#36ced9', '#332d59',
1053 1053 '#40001a', '#733f1d', '#526600', '#005359', '#242040', '#bf6079',
1054 1054 '#735039', '#cef23d', '#007780', '#5630bf', '#66001b', '#b24700',
1055 1055 '#acbf60', '#1d6273', '#25008c', '#731d34', '#a67453', '#50592d',
1056 1056 '#00ccff', '#6600ff', '#ff0044', '#4c1f00', '#8a994d', '#79daf2',
1057 1057 '#a173e6', '#d93662', '#402310', '#aaff00', '#2d98b3', '#8c40ff',
1058 1058 '#592d39', '#ff8c40', '#354020', '#103640', '#1a0040', '#331a20',
1059 1059 '#331400', '#334d00', '#1d5673', '#583973', '#7f0022', '#4c3626',
1060 1060 '#88cc00', '#36a3d9', '#3d0073', '#d9364c', '#33241a', '#698c23',
1061 1061 '#5995b3', '#300059', '#e57382', '#7f3300', '#366600', '#00aaff',
1062 1062 '#3a1659', '#733941', '#663600', '#74b32d', '#003c59', '#7f53a6',
1063 1063 '#73000f', '#ff8800', '#baf279', '#79caf2', '#291040', '#a6293a',
1064 1064 '#b2742d', '#587339', '#0077b3', '#632699', '#400009', '#d9a66c',
1065 1065 '#294010', '#2d4a59', '#aa00ff', '#4c131b', '#b25f00', '#5ce600',
1066 1066 '#267399', '#a336d9', '#990014', '#664e33', '#86bf60', '#0088ff',
1067 1067 '#7700b3', '#593a16', '#073300', '#1d4b73', '#ac60bf', '#e59539',
1068 1068 '#4f8c46', '#368dd9', '#5c0073'
1069 1069 ]
1070 1070
1071 1071 def rgb_to_hex_color(self, rgb_tuple):
1072 1072 """
1073 1073 Converts the passed rgb_tuple to a hex color.
1074 1074
1075 1075 :param rgb_tuple: tuple with 3 ints represents rgb color space
1076 1076 """
1077 1077 return '#' + ("".join(map(chr, rgb_tuple)).encode('hex'))
1078 1078
1079 1079 def email_to_int_list(self, email_str):
1080 1080 """
1081 1081 Get every byte of the hex digest value of the email and turn it into an integer.
1082 1082 It's always going to be between 0-255
1083 1083 """
1084 1084 digest = md5_safe(email_str.lower())
1085 1085 return [int(digest[i * 2:i * 2 + 2], 16) for i in range(16)]
1086 1086
1087 1087 def pick_color_bank_index(self, email_str, color_bank):
1088 1088 return self.email_to_int_list(email_str)[0] % len(color_bank)
1089 1089
1090 1090 def str2color(self, email_str):
1091 1091 """
1092 1092 Tries to map an email to a color using a stable algorithm
1093 1093
1094 1094 :param email_str:
1095 1095 """
1096 1096 color_bank = self.get_color_bank()
1097 1097 # pick position (modulo its length so we always find it in the
1098 1098 # bank even if it's smaller than 256 values)
1099 1099 pos = self.pick_color_bank_index(email_str, color_bank)
1100 1100 return color_bank[pos]
1101 1101
1102 1102 def normalize_email(self, email_address):
1103 1103 import unicodedata
1104 1104 # default host used to fill in the fake/missing email
1105 1105 default_host = u'localhost'
1106 1106
1107 1107 if not email_address:
1108 1108 email_address = u'%s@%s' % (User.DEFAULT_USER, default_host)
1109 1109
1110 1110 email_address = safe_unicode(email_address)
1111 1111
1112 1112 if u'@' not in email_address:
1113 1113 email_address = u'%s@%s' % (email_address, default_host)
1114 1114
1115 1115 if email_address.endswith(u'@'):
1116 1116 email_address = u'%s%s' % (email_address, default_host)
1117 1117
1118 1118 email_address = unicodedata.normalize('NFKD', email_address)\
1119 1119 .encode('ascii', 'ignore')
1120 1120 return email_address
1121 1121
1122 1122 def get_initials(self):
1123 1123 """
1124 1124 Returns 2-letter initials calculated based on the input.
1125 1125 The algorithm picks the first given email address, takes the first letter
1126 1126 of the part before the @, and then the first letter of the server name. In case
1127 1127 the part before the @ is in a `somestring.somestring2` format, it replaces
1128 1128 the server letter with the first letter of somestring2
1129 1129
1130 1130 In case the function was initialized with both first and last name, this
1131 1131 overrides the extraction from email with the first letter of the first and
1132 1132 last name. We add special logic to that functionality: in case the full name
1133 1133 is compound, like Guido Von Rossum, we use the last part of the last name
1134 1134 (Von Rossum), picking `R`.
1135 1135
1136 1136 The function also normalizes non-ascii characters to their ascii
1137 1137 representation, eg Ą => A
1138 1138 """
1139 1139 import unicodedata
1140 1140 # replace non-ascii to ascii
1141 1141 first_name = unicodedata.normalize(
1142 1142 'NFKD', safe_unicode(self.first_name)).encode('ascii', 'ignore')
1143 1143 last_name = unicodedata.normalize(
1144 1144 'NFKD', safe_unicode(self.last_name)).encode('ascii', 'ignore')
1145 1145
1146 1146 # do NFKD encoding, and also make sure email has proper format
1147 1147 email_address = self.normalize_email(self.email_address)
1148 1148
1149 1149 # first push the email initials
1150 1150 prefix, server = email_address.split('@', 1)
1151 1151
1152 1152 # check if prefix is maybe a 'first_name.last_name' syntax
1153 1153 _dot_split = prefix.rsplit('.', 1)
1154 1154 if len(_dot_split) == 2:
1155 1155 initials = [_dot_split[0][0], _dot_split[1][0]]
1156 1156 else:
1157 1157 initials = [prefix[0], server[0]]
1158 1158
1159 1159 # then try to replace either first_name or last_name
1160 1160 fn_letter = (first_name or " ")[0].strip()
1161 1161 ln_letter = (last_name.split(' ', 1)[-1] or " ")[0].strip()
1162 1162
1163 1163 if fn_letter:
1164 1164 initials[0] = fn_letter
1165 1165
1166 1166 if ln_letter:
1167 1167 initials[1] = ln_letter
1168 1168
1169 1169 return ''.join(initials).upper()
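# Illustrative examples (not part of the original module):
#
#   >>> InitialsGravatar('john.doe@example.com', '', '').get_initials()
#   'JD'    # taken from the 'first_name.last_name' email prefix
#   >>> InitialsGravatar('guido@python.org', 'Guido', 'Von Rossum').get_initials()
#   'GR'    # first/last name override the email; the compound last name picks 'R'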
1170 1170
1171 1171 def get_img_data_by_type(self, font_family, img_type):
1172 1172 default_user = """
1173 1173 <svg xmlns="http://www.w3.org/2000/svg"
1174 1174 version="1.1" x="0px" y="0px" width="{size}" height="{size}"
1175 1175 viewBox="-15 -10 439.165 429.164"
1176 1176
1177 1177 xml:space="preserve"
1178 1178 style="background:{background};" >
1179 1179
1180 1180 <path d="M204.583,216.671c50.664,0,91.74-48.075,
1181 1181 91.74-107.378c0-82.237-41.074-107.377-91.74-107.377
1182 1182 c-50.668,0-91.74,25.14-91.74,107.377C112.844,
1183 1183 168.596,153.916,216.671,
1184 1184 204.583,216.671z" fill="{text_color}"/>
1185 1185 <path d="M407.164,374.717L360.88,
1186 1186 270.454c-2.117-4.771-5.836-8.728-10.465-11.138l-71.83-37.392
1187 1187 c-1.584-0.823-3.502-0.663-4.926,0.415c-20.316,
1188 1188 15.366-44.203,23.488-69.076,23.488c-24.877,
1189 1189 0-48.762-8.122-69.078-23.488
1190 1190 c-1.428-1.078-3.346-1.238-4.93-0.415L58.75,
1191 1191 259.316c-4.631,2.41-8.346,6.365-10.465,11.138L2.001,374.717
1192 1192 c-3.191,7.188-2.537,15.412,1.75,22.005c4.285,
1193 1193 6.592,11.537,10.526,19.4,10.526h362.861c7.863,0,15.117-3.936,
1194 1194 19.402-10.527 C409.699,390.129,
1195 1195 410.355,381.902,407.164,374.717z" fill="{text_color}"/>
1196 1196 </svg>""".format(
1197 1197 size=self.size,
1198 1198 background='#979797', # @grey4
1199 1199 text_color=self.text_color,
1200 1200 font_family=font_family)
1201 1201
1202 1202 return {
1203 1203 "default_user": default_user
1204 1204 }[img_type]
1205 1205
1206 1206 def get_img_data(self, svg_type=None):
1207 1207 """
1208 1208 generates the svg metadata for image
1209 1209 """
1210 1210
1211 1211 font_family = ','.join([
1212 1212 'proximanovaregular',
1213 1213 'Proxima Nova Regular',
1214 1214 'Proxima Nova',
1215 1215 'Arial',
1216 1216 'Lucida Grande',
1217 1217 'sans-serif'
1218 1218 ])
1219 1219 if svg_type:
1220 1220 return self.get_img_data_by_type(font_family, svg_type)
1221 1221
1222 1222 initials = self.get_initials()
1223 1223 img_data = """
1224 1224 <svg xmlns="http://www.w3.org/2000/svg" pointer-events="none"
1225 1225 width="{size}" height="{size}"
1226 1226 style="width: 100%; height: 100%; background-color: {background}"
1227 1227 viewBox="0 0 {size} {size}">
1228 1228 <text text-anchor="middle" y="50%" x="50%" dy="0.35em"
1229 1229 pointer-events="auto" fill="{text_color}"
1230 1230 font-family="{font_family}"
1231 1231 style="font-weight: 400; font-size: {f_size}px;">{text}
1232 1232 </text>
1233 1233 </svg>""".format(
1234 1234 size=self.size,
1235 1235 f_size=self.size/1.85, # scale the text inside the box nicely
1236 1236 background=self.background,
1237 1237 text_color=self.text_color,
1238 1238 text=initials.upper(),
1239 1239 font_family=font_family)
1240 1240
1241 1241 return img_data
1242 1242
1243 1243 def generate_svg(self, svg_type=None):
1244 1244 img_data = self.get_img_data(svg_type)
1245 1245 return "data:image/svg+xml;base64,%s" % img_data.encode('base64')
1246 1246
1247 1247
1248 1248 def initials_gravatar(email_address, first_name, last_name, size=30):
1249 1249 svg_type = None
1250 1250 if email_address == User.DEFAULT_USER_EMAIL:
1251 1251 svg_type = 'default_user'
1252 1252 klass = InitialsGravatar(email_address, first_name, last_name, size)
1253 1253 return klass.generate_svg(svg_type=svg_type)
1254 1254
1255 1255
1256 1256 def gravatar_url(email_address, size=30, request=None):
1257 1257 request = get_current_request()
1258 1258 if request and hasattr(request, 'call_context'):
1259 1259 _use_gravatar = request.call_context.visual.use_gravatar
1260 1260 _gravatar_url = request.call_context.visual.gravatar_url
1261 1261 else:
1262 1262 # doh, we need to re-import those to mock it later
1263 1263 from pylons import tmpl_context as c
1264 1264
1265 1265 _use_gravatar = c.visual.use_gravatar
1266 1266 _gravatar_url = c.visual.gravatar_url
1267 1267
1268 1268 _gravatar_url = _gravatar_url or User.DEFAULT_GRAVATAR_URL
1269 1269
1270 1270 email_address = email_address or User.DEFAULT_USER_EMAIL
1271 1271 if isinstance(email_address, unicode):
1272 1272 # hashlib crashes on unicode items
1273 1273 email_address = safe_str(email_address)
1274 1274
1275 1275 # empty email or default user
1276 1276 if not email_address or email_address == User.DEFAULT_USER_EMAIL:
1277 1277 return initials_gravatar(User.DEFAULT_USER_EMAIL, '', '', size=size)
1278 1278
1279 1279 if _use_gravatar:
1280 1280 # TODO: Disuse pyramid thread locals. Think about another solution to
1281 1281 # get the host and schema here.
1282 1282 request = get_current_request()
1283 1283 tmpl = safe_str(_gravatar_url)
1284 1284 tmpl = tmpl.replace('{email}', email_address)\
1285 1285 .replace('{md5email}', md5_safe(email_address.lower())) \
1286 1286 .replace('{netloc}', request.host)\
1287 1287 .replace('{scheme}', request.scheme)\
1288 1288 .replace('{size}', safe_str(size))
1289 1289 return tmpl
1290 1290 else:
1291 1291 return initials_gravatar(email_address, '', '', size=size)
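# Illustrative note (not part of the original module): with gravatar enabled,
# the configured URL template is filled in by plain string replacement, e.g. a
# hypothetical template
#   'https://secure.gravatar.com/avatar/{md5email}?d=identicon&s={size}'
# has {md5email} replaced with the md5 of the lower-cased address, {size} with
# the requested size, and {email}/{netloc}/{scheme} with the literal address
# and the current request's host and scheme.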
1292 1292
1293 1293
1294 1294 class Page(_Page):
1295 1295 """
1296 1296 Custom pager to match rendering style with paginator
1297 1297 """
1298 1298
1299 1299 def _get_pos(self, cur_page, max_page, items):
1300 1300 edge = (items / 2) + 1
1301 1301 if (cur_page <= edge):
1302 1302 radius = max(items / 2, items - cur_page)
1303 1303 elif (max_page - cur_page) < edge:
1304 1304 radius = (items - 1) - (max_page - cur_page)
1305 1305 else:
1306 1306 radius = items / 2
1307 1307
1308 1308 left = max(1, (cur_page - (radius)))
1309 1309 right = min(max_page, cur_page + (radius))
1310 1310 return left, cur_page, right
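# Worked example (not part of the original module): for the '1 .. 5 6 [7] 8 9 .. 12'
# case described in _range() below, _get_pos(cur_page=7, max_page=12, items=5)
# computes edge=3 and radius=2 (integer division), giving the window
# (left=5, cur_page=7, right=9).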
1311 1311
1312 1312 def _range(self, regexp_match):
1313 1313 """
1314 1314 Return range of linked pages (e.g. '1 2 [3] 4 5 6 7 8').
1315 1315
1316 1316 Arguments:
1317 1317
1318 1318 regexp_match
1319 1319 A "re" (regular expressions) match object containing the
1320 1320 radius of linked pages around the current page in
1321 1321 regexp_match.group(1) as a string
1322 1322
1323 1323 This function is supposed to be called as a callable in
1324 1324 re.sub.
1325 1325
1326 1326 """
1327 1327 radius = int(regexp_match.group(1))
1328 1328
1329 1329 # Compute the first and last page number within the radius
1330 1330 # e.g. '1 .. 5 6 [7] 8 9 .. 12'
1331 1331 # -> leftmost_page = 5
1332 1332 # -> rightmost_page = 9
1333 1333 leftmost_page, _cur, rightmost_page = self._get_pos(self.page,
1334 1334 self.last_page,
1335 1335 (radius * 2) + 1)
1336 1336 nav_items = []
1337 1337
1338 1338 # Create a link to the first page (unless we are on the first page
1339 1339 # or there would be no need to insert '..' spacers)
1340 1340 if self.page != self.first_page and self.first_page < leftmost_page:
1341 1341 nav_items.append(self._pagerlink(self.first_page, self.first_page))
1342 1342
1343 1343 # Insert dots if there are pages between the first page
1344 1344 # and the currently displayed page range
1345 1345 if leftmost_page - self.first_page > 1:
1346 1346 # Wrap in a SPAN tag if nolink_attr is set
1347 1347 text = '..'
1348 1348 if self.dotdot_attr:
1349 1349 text = HTML.span(c=text, **self.dotdot_attr)
1350 1350 nav_items.append(text)
1351 1351
1352 1352 for thispage in xrange(leftmost_page, rightmost_page + 1):
1353 1353 # Highlight the current page number and do not use a link
1354 1354 if thispage == self.page:
1355 1355 text = '%s' % (thispage,)
1356 1356 # Wrap in a SPAN tag if nolink_attr is set
1357 1357 if self.curpage_attr:
1358 1358 text = HTML.span(c=text, **self.curpage_attr)
1359 1359 nav_items.append(text)
1360 1360 # Otherwise create just a link to that page
1361 1361 else:
1362 1362 text = '%s' % (thispage,)
1363 1363 nav_items.append(self._pagerlink(thispage, text))
1364 1364
1365 1365 # Insert dots if there are pages between the displayed
1366 1366 # page numbers and the end of the page range
1367 1367 if self.last_page - rightmost_page > 1:
1368 1368 text = '..'
1369 1369 # Wrap in a SPAN tag if nolink_attr is set
1370 1370 if self.dotdot_attr:
1371 1371 text = HTML.span(c=text, **self.dotdot_attr)
1372 1372 nav_items.append(text)
1373 1373
1374 1374 # Create a link to the very last page (unless we are on the last
1375 1375 # page or there would be no need to insert '..' spacers)
1376 1376 if self.page != self.last_page and rightmost_page < self.last_page:
1377 1377 nav_items.append(self._pagerlink(self.last_page, self.last_page))
1378 1378
1379 1379 ## prerender links
1380 1380 #_page_link = url.current()
1381 1381 #nav_items.append(literal('<link rel="prerender" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
1382 1382 #nav_items.append(literal('<link rel="prefetch" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
1383 1383 return self.separator.join(nav_items)
1384 1384
1385 1385 def pager(self, format='~2~', page_param='page', partial_param='partial',
1386 1386 show_if_single_page=False, separator=' ', onclick=None,
1387 1387 symbol_first='<<', symbol_last='>>',
1388 1388 symbol_previous='<', symbol_next='>',
1389 1389 link_attr={'class': 'pager_link', 'rel': 'prerender'},
1390 1390 curpage_attr={'class': 'pager_curpage'},
1391 1391 dotdot_attr={'class': 'pager_dotdot'}, **kwargs):
1392 1392
1393 1393 self.curpage_attr = curpage_attr
1394 1394 self.separator = separator
1395 1395 self.pager_kwargs = kwargs
1396 1396 self.page_param = page_param
1397 1397 self.partial_param = partial_param
1398 1398 self.onclick = onclick
1399 1399 self.link_attr = link_attr
1400 1400 self.dotdot_attr = dotdot_attr
1401 1401
1402 1402 # Don't show navigator if there is no more than one page
1403 1403 if self.page_count == 0 or (self.page_count == 1 and not show_if_single_page):
1404 1404 return ''
1405 1405
1406 1406 from string import Template
1407 1407 # Replace ~...~ in token format by range of pages
1408 1408 result = re.sub(r'~(\d+)~', self._range, format)
1409 1409
1410 1410 # Interpolate '%' variables
1411 1411 result = Template(result).safe_substitute({
1412 1412 'first_page': self.first_page,
1413 1413 'last_page': self.last_page,
1414 1414 'page': self.page,
1415 1415 'page_count': self.page_count,
1416 1416 'items_per_page': self.items_per_page,
1417 1417 'first_item': self.first_item,
1418 1418 'last_item': self.last_item,
1419 1419 'item_count': self.item_count,
1420 1420 'link_first': self.page > self.first_page and \
1421 1421 self._pagerlink(self.first_page, symbol_first) or '',
1422 1422 'link_last': self.page < self.last_page and \
1423 1423 self._pagerlink(self.last_page, symbol_last) or '',
1424 1424 'link_previous': self.previous_page and \
1425 1425 self._pagerlink(self.previous_page, symbol_previous) \
1426 1426 or HTML.span(symbol_previous, class_="pg-previous disabled"),
1427 1427 'link_next': self.next_page and \
1428 1428 self._pagerlink(self.next_page, symbol_next) \
1429 1429 or HTML.span(symbol_next, class_="pg-next disabled")
1430 1430 })
1431 1431
1432 1432 return literal(result)
1433 1433
1434 1434
1435 1435 #==============================================================================
1436 1436 # REPO PAGER, PAGER FOR REPOSITORY
1437 1437 #==============================================================================
1438 1438 class RepoPage(Page):
1439 1439
1440 1440 def __init__(self, collection, page=1, items_per_page=20,
1441 1441 item_count=None, url=None, **kwargs):
1442 1442
1443 1443 """Create a "RepoPage" instance, a special pager for paging a
1444 1444 repository
1445 1445 """
1446 1446 self._url_generator = url
1447 1447
1448 1448 # Save the kwargs class-wide so they can be used in the pager() method
1449 1449 self.kwargs = kwargs
1450 1450
1451 1451 # Save a reference to the collection
1452 1452 self.original_collection = collection
1453 1453
1454 1454 self.collection = collection
1455 1455
1456 1456 # The self.page is the number of the current page.
1457 1457 # The first page has the number 1!
1458 1458 try:
1459 1459 self.page = int(page) # make it int() if we get it as a string
1460 1460 except (ValueError, TypeError):
1461 1461 self.page = 1
1462 1462
1463 1463 self.items_per_page = items_per_page
1464 1464
1465 1465 # Unless the user tells us how many items the collection has,
1466 1466 # we calculate that ourselves.
1467 1467 if item_count is not None:
1468 1468 self.item_count = item_count
1469 1469 else:
1470 1470 self.item_count = len(self.collection)
1471 1471
1472 1472 # Compute the number of the first and last available page
1473 1473 if self.item_count > 0:
1474 1474 self.first_page = 1
1475 1475 self.page_count = int(math.ceil(float(self.item_count) /
1476 1476 self.items_per_page))
1477 1477 self.last_page = self.first_page + self.page_count - 1
1478 1478
1479 1479 # Make sure that the requested page number is in the range of
1480 1480 # valid pages
1481 1481 if self.page > self.last_page:
1482 1482 self.page = self.last_page
1483 1483 elif self.page < self.first_page:
1484 1484 self.page = self.first_page
1485 1485
1486 1486 # Note: the number of items on this page can be less than
1487 1487 # items_per_page if the last page is not full
1488 1488 self.first_item = max(0, (self.item_count) - (self.page *
1489 1489 items_per_page))
1490 1490 self.last_item = ((self.item_count - 1) - items_per_page *
1491 1491 (self.page - 1))
1492 1492
1493 1493 self.items = list(self.collection[self.first_item:self.last_item + 1])
1494 1494
1495 1495 # Links to previous and next page
1496 1496 if self.page > self.first_page:
1497 1497 self.previous_page = self.page - 1
1498 1498 else:
1499 1499 self.previous_page = None
1500 1500
1501 1501 if self.page < self.last_page:
1502 1502 self.next_page = self.page + 1
1503 1503 else:
1504 1504 self.next_page = None
1505 1505
1506 1506 # No items available
1507 1507 else:
1508 1508 self.first_page = None
1509 1509 self.page_count = 0
1510 1510 self.last_page = None
1511 1511 self.first_item = None
1512 1512 self.last_item = None
1513 1513 self.previous_page = None
1514 1514 self.next_page = None
1515 1515 self.items = []
1516 1516
1517 1517 # This is a subclass of the 'list' type. Initialise the list now.
1518 1518 list.__init__(self, reversed(self.items))
1519 1519
1520 1520
1521 1521 def breadcrumb_repo_link(repo):
1522 1522 """
1523 1523 Makes a breadcrumbs path link to repo
1524 1524
1525 1525 ex::
1526 1526 group >> subgroup >> repo
1527 1527
1528 1528 :param repo: a Repository instance
1529 1529 """
1530 1530
1531 1531 path = [
1532 1532 link_to(group.name, route_path('repo_group_home', repo_group_name=group.group_name))
1533 1533 for group in repo.groups_with_parents
1534 1534 ] + [
1535 1535 link_to(repo.just_name, route_path('repo_summary', repo_name=repo.repo_name))
1536 1536 ]
1537 1537
1538 1538 return literal(' &raquo; '.join(path))
1539 1539
1540 1540
1541 1541 def format_byte_size_binary(file_size):
1542 1542 """
1543 1543 Formats file/folder sizes in a standard way.
1544 1544 """
1545 1545 if file_size is None:
1546 1546 file_size = 0
1547 1547
1548 1548 formatted_size = format_byte_size(file_size, binary=True)
1549 1549 return formatted_size
1550 1550
1551 1551
1552 1552 def urlify_text(text_, safe=True):
1553 1553 """
1554 1554 Extract URLs from text and make HTML links out of them
1555 1555
1556 1556 :param text_:
1557 1557 """
1558 1558
1559 1559 url_pat = re.compile(r'''(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@#.&+]'''
1560 1560 '''|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)''')
1561 1561
1562 1562 def url_func(match_obj):
1563 1563 url_full = match_obj.groups()[0]
1564 1564 return '<a href="%(url)s">%(url)s</a>' % ({'url': url_full})
1565 1565 _newtext = url_pat.sub(url_func, text_)
1566 1566 if safe:
1567 1567 return literal(_newtext)
1568 1568 return _newtext
1569 1569
1570 1570
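A hedged illustration of the substitution above (the result is wrapped in literal() unless safe=False):

urlify_text('docs live at https://example.com/guide')
# -> 'docs live at <a href="https://example.com/guide">https://example.com/guide</a>'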
1571 1571 def urlify_commits(text_, repository):
1572 1572 """
1573 1573 Extract commit ids from text and make links out of them
1574 1574
1575 1575 :param text_:
1576 1576 :param repository: repo name to build the URL with
1577 1577 """
1578 1578
1579 1579 URL_PAT = re.compile(r'(^|\s)([0-9a-fA-F]{12,40})($|\s)')
1580 1580
1581 1581 def url_func(match_obj):
1582 1582 commit_id = match_obj.groups()[1]
1583 1583 pref = match_obj.groups()[0]
1584 1584 suf = match_obj.groups()[2]
1585 1585
1586 1586 tmpl = (
1587 1587 '%(pref)s<a class="%(cls)s" href="%(url)s">'
1588 1588 '%(commit_id)s</a>%(suf)s'
1589 1589 )
1590 1590 return tmpl % {
1591 1591 'pref': pref,
1592 1592 'cls': 'revision-link',
1593 1593 'url': route_url('repo_commit', repo_name=repository,
1594 1594 commit_id=commit_id),
1595 1595 'commit_id': commit_id,
1596 1596 'suf': suf
1597 1597 }
1598 1598
1599 1599 newtext = URL_PAT.sub(url_func, text_)
1600 1600
1601 1601 return newtext
1602 1602
1603 1603
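For illustration: only 12-40 character hex tokens bounded by whitespace (or the string edges) are rewritten, and the href comes from the repo_commit route (the exact URL shape depends on routing):

urlify_commits('fixes 1234567890ab in docs', 'my-repo')
# -> 'fixes <a class="revision-link" href="<repo_commit url for 1234567890ab>">1234567890ab</a> in docs'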
1604 1604 def _process_url_func(match_obj, repo_name, uid, entry,
1605 1605 return_raw_data=False, link_format='html'):
1606 1606 pref = ''
1607 1607 if match_obj.group().startswith(' '):
1608 1608 pref = ' '
1609 1609
1610 1610 issue_id = ''.join(match_obj.groups())
1611 1611
1612 1612 if link_format == 'html':
1613 1613 tmpl = (
1614 1614 '%(pref)s<a class="%(cls)s" href="%(url)s">'
1615 1615 '%(issue-prefix)s%(id-repr)s'
1616 1616 '</a>')
1617 1617 elif link_format == 'rst':
1618 1618 tmpl = '`%(issue-prefix)s%(id-repr)s <%(url)s>`_'
1619 1619 elif link_format == 'markdown':
1620 1620 tmpl = '[%(issue-prefix)s%(id-repr)s](%(url)s)'
1621 1621 else:
1622 1622 raise ValueError('Bad link_format:{}'.format(link_format))
1623 1623
1624 1624 (repo_name_cleaned,
1625 1625 parent_group_name) = RepoGroupModel().\
1626 1626 _get_group_name_and_parent(repo_name)
1627 1627
1628 1628 # variables replacement
1629 1629 named_vars = {
1630 1630 'id': issue_id,
1631 1631 'repo': repo_name,
1632 1632 'repo_name': repo_name_cleaned,
1633 1633 'group_name': parent_group_name
1634 1634 }
1635 1635 # named regex variables
1636 1636 named_vars.update(match_obj.groupdict())
1637 1637 _url = string.Template(entry['url']).safe_substitute(**named_vars)
1638 1638
1639 1639 data = {
1640 1640 'pref': pref,
1641 1641 'cls': 'issue-tracker-link',
1642 1642 'url': _url,
1643 1643 'id-repr': issue_id,
1644 1644 'issue-prefix': entry['pref'],
1645 1645 'serv': entry['url'],
1646 1646 }
1647 1647 if return_raw_data:
1648 1648 return {
1649 1649 'id': issue_id,
1650 1650 'url': _url
1651 1651 }
1652 1652 return tmpl % data
1653 1653
1654 1654
1655 1655 def process_patterns(text_string, repo_name, link_format='html'):
1656 1656 allowed_formats = ['html', 'rst', 'markdown']
1657 1657 if link_format not in allowed_formats:
1658 1658 raise ValueError('Link format can be only one of:{} got {}'.format(
1659 1659 allowed_formats, link_format))
1660 1660
1661 1661 repo = None
1662 1662 if repo_name:
1663 1663 # Retrieve the repo here to avoid an invalid repo_name blowing up in
1664 1664 # IssueTrackerSettingsModel, while still passing the raw name further down
1665 1665 repo = Repository.get_by_repo_name(repo_name, cache=True)
1666 1666
1667 1667 settings_model = IssueTrackerSettingsModel(repo=repo)
1668 1668 active_entries = settings_model.get_settings(cache=True)
1669 1669
1670 1670 issues_data = []
1671 1671 newtext = text_string
1672 1672
1673 1673 for uid, entry in active_entries.items():
1674 1674 log.debug('found issue tracker entry with uid %s' % (uid,))
1675 1675
1676 1676 if not (entry['pat'] and entry['url']):
1677 1677 log.debug('skipping due to missing data')
1678 1678 continue
1679 1679
1680 1680 log.debug('issue tracker entry: uid: `%s` PAT:%s URL:%s PREFIX:%s'
1681 1681 % (uid, entry['pat'], entry['url'], entry['pref']))
1682 1682
1683 1683 try:
1684 1684 pattern = re.compile(r'%s' % entry['pat'])
1685 1685 except re.error:
1686 1686 log.exception(
1687 1687 'issue tracker pattern: `%s` failed to compile',
1688 1688 entry['pat'])
1689 1689 continue
1690 1690
1691 1691 data_func = partial(
1692 1692 _process_url_func, repo_name=repo_name, entry=entry, uid=uid,
1693 1693 return_raw_data=True)
1694 1694
1695 1695 for match_obj in pattern.finditer(text_string):
1696 1696 issues_data.append(data_func(match_obj))
1697 1697
1698 1698 url_func = partial(
1699 1699 _process_url_func, repo_name=repo_name, entry=entry, uid=uid,
1700 1700 link_format=link_format)
1701 1701
1702 1702 newtext = pattern.sub(url_func, newtext)
1703 1703 log.debug('processed prefix:uid `%s`' % (uid,))
1704 1704
1705 1705 return newtext, issues_data
1706 1706
1707 1707
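A minimal sketch of the substitution above, assuming a single hypothetical issue-tracker entry; real entries come from IssueTrackerSettingsModel:

entry = {'pat': r'#(?P<issue_id>\d+)', 'url': 'https://tracker.example.com/${issue_id}', 'pref': '#'}
# with that entry active, process_patterns('fixes #42', 'my-repo') returns roughly:
# ('fixes <a class="issue-tracker-link" href="https://tracker.example.com/42">#42</a>',
#  [{'id': '42', 'url': 'https://tracker.example.com/42'}])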
1708 1708 def urlify_commit_message(commit_text, repository=None):
1709 1709 """
1710 1710 Parses the given text message and makes proper links.
1711 1711 Issues are linked to the given issue server, and commit ids become commit links.
1712 1712
1713 1713 :param commit_text:
1714 1714 :param repository:
1715 1715 """
1716 1716 from pylons import url # doh, we need to re-import url to mock it later
1717 1717
1718 1718 def escaper(string):
1719 1719 return string.replace('<', '&lt;').replace('>', '&gt;')
1720 1720
1721 1721 newtext = escaper(commit_text)
1722 1722
1723 1723 # extract http/https links and make them real urls
1724 1724 newtext = urlify_text(newtext, safe=False)
1725 1725
1726 1726 # urlify commits - extract commit ids and make link out of them, if we have
1727 1727 # the scope of repository present.
1728 1728 if repository:
1729 1729 newtext = urlify_commits(newtext, repository)
1730 1730
1731 1731 # process issue tracker patterns
1732 1732 newtext, issues = process_patterns(newtext, repository or '')
1733 1733
1734 1734 return literal(newtext)
1735 1735
1736 1736
1737 1737 def render_binary(repo_name, file_obj):
1738 1738 """
1739 1739 Choose how to render a binary file
1740 1740 """
1741 1741 filename = file_obj.name
1742 1742
1743 1743 # images
1744 1744 for ext in ['*.png', '*.jpg', '*.ico', '*.gif']:
1745 1745 if fnmatch.fnmatch(filename, pat=ext):
1746 1746 alt = filename
1747 1747 src = route_path(
1748 1748 'repo_file_raw', repo_name=repo_name,
1749 1749 commit_id=file_obj.commit.raw_id, f_path=file_obj.path)
1750 1750 return literal('<img class="rendered-binary" alt="{}" src="{}">'.format(alt, src))
1751 1751
1752 1752
1753 1753 def renderer_from_filename(filename, exclude=None):
1754 1754 """
1755 1755 Choose a renderer based on filename; this works only for text-based files.
1756 1756 """
1757 1757
1758 1758 # ipython
1759 1759 for ext in ['*.ipynb']:
1760 1760 if fnmatch.fnmatch(filename, pat=ext):
1761 1761 return 'jupyter'
1762 1762
1763 1763 is_markup = MarkupRenderer.renderer_from_filename(filename, exclude=exclude)
1764 1764 if is_markup:
1765 1765 return is_markup
1766 1766 return None
1767 1767
1768 1768
1769 def render(source, renderer='rst', mentions=False, relative_url=None,
1769 def render(source, renderer='rst', mentions=False, relative_urls=None,
1770 1770 repo_name=None):
1771 1771
1772 1772 def maybe_convert_relative_links(html_source):
1773 if relative_url:
1774 return relative_links(html_source, relative_url)
1773 if relative_urls:
1774 return relative_links(html_source, relative_urls)
1775 1775 return html_source
1776 1776
1777 1777 if renderer == 'rst':
1778 1778 if repo_name:
1779 1779 # process patterns on comments if we pass in repo name
1780 1780 source, issues = process_patterns(
1781 1781 source, repo_name, link_format='rst')
1782 1782
1783 1783 return literal(
1784 1784 '<div class="rst-block">%s</div>' %
1785 1785 maybe_convert_relative_links(
1786 1786 MarkupRenderer.rst(source, mentions=mentions)))
1787 1787 elif renderer == 'markdown':
1788 1788 if repo_name:
1789 1789 # process patterns on comments if we pass in repo name
1790 1790 source, issues = process_patterns(
1791 1791 source, repo_name, link_format='markdown')
1792 1792
1793 1793 return literal(
1794 1794 '<div class="markdown-block">%s</div>' %
1795 1795 maybe_convert_relative_links(
1796 1796 MarkupRenderer.markdown(source, flavored=True,
1797 1797 mentions=mentions)))
1798 1798 elif renderer == 'jupyter':
1799 1799 return literal(
1800 1800 '<div class="ipynb">%s</div>' %
1801 1801 maybe_convert_relative_links(
1802 1802 MarkupRenderer.jupyter(source)))
1803 1803
1804 1804 # None means just show the file-source
1805 1805 return None
1806 1806
1807 1807
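The new relative_urls argument expects a mapping with 'raw' and 'standard' server paths; a hedged sketch of a call site, mirroring the files.mako change further down (repo_name, commit_id, f_path and readme_source are placeholders):

relative_urls = {
    'raw': route_path('repo_file_raw', repo_name=repo_name, commit_id=commit_id, f_path=f_path),
    'standard': route_path('repo_files', repo_name=repo_name, commit_id=commit_id, f_path=f_path),
}
html = render(readme_source, renderer='markdown', relative_urls=relative_urls, repo_name=repo_name)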
1808 1808 def commit_status(repo, commit_id):
1809 1809 return ChangesetStatusModel().get_status(repo, commit_id)
1810 1810
1811 1811
1812 1812 def commit_status_lbl(commit_status):
1813 1813 return dict(ChangesetStatus.STATUSES).get(commit_status)
1814 1814
1815 1815
1816 1816 def commit_time(repo_name, commit_id):
1817 1817 repo = Repository.get_by_repo_name(repo_name)
1818 1818 commit = repo.get_commit(commit_id=commit_id)
1819 1819 return commit.date
1820 1820
1821 1821
1822 1822 def get_permission_name(key):
1823 1823 return dict(Permission.PERMS).get(key)
1824 1824
1825 1825
1826 1826 def journal_filter_help(request):
1827 1827 _ = request.translate
1828 1828
1829 1829 return _(
1830 1830 'Example filter terms:\n' +
1831 1831 ' repository:vcs\n' +
1832 1832 ' username:marcin\n' +
1833 1833 ' username:(NOT marcin)\n' +
1834 1834 ' action:*push*\n' +
1835 1835 ' ip:127.0.0.1\n' +
1836 1836 ' date:20120101\n' +
1837 1837 ' date:[20120101100000 TO 20120102]\n' +
1838 1838 '\n' +
1839 1839 'Generate wildcards using \'*\' character:\n' +
1840 1840 ' "repository:vcs*" - search everything starting with \'vcs\'\n' +
1841 1841 ' "repository:*vcs*" - search for repository containing \'vcs\'\n' +
1842 1842 '\n' +
1843 1843 'Optional AND / OR operators in queries\n' +
1844 1844 ' "repository:vcs OR repository:test"\n' +
1845 1845 ' "username:test AND repository:test*"\n'
1846 1846 )
1847 1847
1848 1848
1849 1849 def search_filter_help(searcher, request):
1850 1850 _ = request.translate
1851 1851
1852 1852 terms = ''
1853 1853 return _(
1854 1854 'Example filter terms for `{searcher}` search:\n' +
1855 1855 '{terms}\n' +
1856 1856 'Generate wildcards using \'*\' character:\n' +
1857 1857 ' "repo_name:vcs*" - search everything starting with \'vcs\'\n' +
1858 1858 ' "repo_name:*vcs*" - search for repository containing \'vcs\'\n' +
1859 1859 '\n' +
1860 1860 'Optional AND / OR operators in queries\n' +
1861 1861 ' "repo_name:vcs OR repo_name:test"\n' +
1862 1862 ' "owner:test AND repo_name:test*"\n' +
1863 1863 'More: {search_doc}'
1864 1864 ).format(searcher=searcher.name,
1865 1865 terms=terms, search_doc=searcher.query_lang_doc)
1866 1866
1867 1867
1868 1868 def not_mapped_error(repo_name):
1869 1869 from rhodecode.translation import _
1870 1870 flash(_('%s repository is not mapped to db; perhaps'
1871 1871 ' it was created or renamed from the filesystem.'
1872 1872 ' Please run the application again'
1873 1873 ' in order to rescan repositories') % repo_name, category='error')
1874 1874
1875 1875
1876 1876 def ip_range(ip_addr):
1877 1877 from rhodecode.model.db import UserIpMap
1878 1878 s, e = UserIpMap._get_ip_range(ip_addr)
1879 1879 return '%s - %s' % (s, e)
1880 1880
1881 1881
1882 1882 def form(url, method='post', needs_csrf_token=True, **attrs):
1883 1883 """Wrapper around webhelpers.tags.form to prevent CSRF attacks."""
1884 1884 if method.lower() != 'get' and needs_csrf_token:
1885 1885 raise Exception(
1886 1886 'Forms to POST/PUT/DELETE endpoints should have (in general) a ' +
1887 1887 'CSRF token. If the endpoint does not require such token you can ' +
1888 1888 'explicitly set the parameter needs_csrf_token to false.')
1889 1889
1890 1890 return wh_form(url, method=method, **attrs)
1891 1891
1892 1892
1893 1893 def secure_form(url, method="POST", multipart=False, **attrs):
1894 1894 """Start a form tag that points the action to an url. This
1895 1895 form tag will also include the hidden field containing
1896 1896 the auth token.
1897 1897
1898 1898 The url options should be given either as a string, or as a
1899 1899 ``url()`` function. The method for the form defaults to POST.
1900 1900
1901 1901 Options:
1902 1902
1903 1903 ``multipart``
1904 1904 If set to True, the enctype is set to "multipart/form-data".
1905 1905 ``method``
1906 1906 The method to use when submitting the form, usually either
1907 1907 "GET" or "POST". If "PUT", "DELETE", or another verb is used, a
1908 1908 hidden input with name _method is added to simulate the verb
1909 1909 over POST.
1910 1910
1911 1911 """
1912 1912 from webhelpers.pylonslib.secure_form import insecure_form
1913 1913 form = insecure_form(url, method, multipart, **attrs)
1914 1914
1915 1915 session = None
1916 1916 # TODO(marcink): after pyramid migration require request variable ALWAYS
1917 1917 if 'request' in attrs:
1918 1918 session = attrs['request'].session
1919 1919
1920 1920 token = literal(
1921 1921 '<input type="hidden" id="{}" name="{}" value="{}">'.format(
1922 1922 csrf_token_key, csrf_token_key, get_csrf_token(session)))
1923 1923
1924 1924 return literal("%s\n%s" % (form, token))
1925 1925
1926 1926
1927 1927 def dropdownmenu(name, selected, options, enable_filter=False, **attrs):
1928 1928 select_html = select(name, selected, options, **attrs)
1929 1929 select2 = """
1930 1930 <script>
1931 1931 $(document).ready(function() {
1932 1932 $('#%s').select2({
1933 1933 containerCssClass: 'drop-menu',
1934 1934 dropdownCssClass: 'drop-menu-dropdown',
1935 1935 dropdownAutoWidth: true%s
1936 1936 });
1937 1937 });
1938 1938 </script>
1939 1939 """
1940 1940 filter_option = """,
1941 1941 minimumResultsForSearch: -1
1942 1942 """
1943 1943 input_id = attrs.get('id') or name
1944 1944 filter_enabled = "" if enable_filter else filter_option
1945 1945 select_script = literal(select2 % (input_id, filter_enabled))
1946 1946
1947 1947 return literal(select_html+select_script)
1948 1948
1949 1949
1950 1950 def get_visual_attr(tmpl_context_var, attr_name):
1951 1951 """
1952 1952 A safe way to get an attribute from the `visual` variable of the template context
1953 1953
1954 1954 :param tmpl_context_var: instance of tmpl_context, usually present as `c`
1955 1955 :param attr_name: name of the attribute we fetch from the c.visual
1956 1956 """
1957 1957 visual = getattr(tmpl_context_var, 'visual', None)
1958 1958 if not visual:
1959 1959 return
1960 1960 else:
1961 1961 return getattr(visual, attr_name, None)
1962 1962
1963 1963
1964 1964 def get_last_path_part(file_node):
1965 1965 if not file_node.path:
1966 1966 return u''
1967 1967
1968 1968 path = safe_unicode(file_node.path.split('/')[-1])
1969 1969 return u'../' + path
1970 1970
1971 1971
1972 1972 def route_url(*args, **kwargs):
1973 1973 """
1974 1974 Wrapper around pyramids `route_url` (fully qualified url) function.
1975 1975 It is used to generate URLs from within pylons views or templates.
1976 1976 This will be removed when the pyramid migration is finished.
1977 1977 """
1978 1978 req = get_current_request()
1979 1979 return req.route_url(*args, **kwargs)
1980 1980
1981 1981
1982 1982 def route_path(*args, **kwargs):
1983 1983 """
1984 1984 Wrapper around pyramids `route_path` function. It is used to generate
1985 1985 URLs from within pylons views or templates. This will be removed when
1986 1986 the pyramid migration is finished.
1987 1987 """
1988 1988 req = get_current_request()
1989 1989 return req.route_path(*args, **kwargs)
1990 1990
1991 1991
1992 1992 def route_path_or_none(*args, **kwargs):
1993 1993 try:
1994 1994 return route_path(*args, **kwargs)
1995 1995 except KeyError:
1996 1996 return None
1997 1997
1998 1998
1999 1999 def static_url(*args, **kwds):
2000 2000 """
2001 2001 Wrapper around pyramids `static_url` function. It is used to generate
2002 2002 URLs from within pylons views or templates. This will be removed when
2003 2003 the pyramid migration is finished.
2004 2004 """
2005 2005 req = get_current_request()
2006 2006 return req.static_url(*args, **kwds)
2007 2007
2008 2008
2009 2009 def resource_path(*args, **kwds):
2010 2010 """
2011 2011 Wrapper around pyramids `resource_path` function. It is used to generate
2012 2012 URLs from within pylons views or templates. This will be removed when
2013 2013 the pyramid migration is finished.
2014 2014 """
2015 2015 req = get_current_request()
2016 2016 return req.resource_path(*args, **kwds)
2017 2017
2018 2018
2019 2019 def api_call_example(method, args):
2020 2020 """
2021 2021 Generates an API call example using curl
2022 2022 """
2023 2023 args_json = json.dumps(OrderedDict([
2024 2024 ('id', 1),
2025 2025 ('auth_token', 'SECRET'),
2026 2026 ('method', method),
2027 2027 ('args', args)
2028 2028 ]))
2029 2029 return literal(
2030 2030 "curl {api_url} -X POST -H 'content-type:text/plain' --data-binary '{data}'"
2031 2031 "<br/><br/>SECRET can be found in <a href=\"{token_url}\">auth-tokens</a> page, "
2032 2032 "and needs to be of `api calls` role."
2033 2033 .format(
2034 2034 api_url=route_url('apiv2'),
2035 2035 token_url=route_url('my_account_auth_tokens'),
2036 2036 data=args_json))
2037 2037
2038 2038
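With a hypothetical method/args pair the helper renders markup along these lines (the exact apiv2 URL depends on the deployment):

# api_call_example('get_repo', {'repoid': 'my-repo'}) contains, roughly:
# curl <apiv2 url> -X POST -H 'content-type:text/plain' \
#     --data-binary '{"id": 1, "auth_token": "SECRET", "method": "get_repo", "args": {"repoid": "my-repo"}}'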
2039 2039 def notification_description(notification, request):
2040 2040 """
2041 2041 Generate a human-readable notification description based on the notification type
2042 2042 """
2043 2043 from rhodecode.model.notification import NotificationModel
2044 2044 return NotificationModel().make_description(
2045 2045 notification, translate=request.translate)
@@ -1,495 +1,499 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2011-2017 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21
22 22 """
23 23 Renderer for markup languages with ability to parse using rst or markdown
24 24 """
25 25
26 26 import re
27 27 import os
28 28 import lxml
29 29 import logging
30 30 import urlparse
31 31
32 32 from mako.lookup import TemplateLookup
33 33 from mako.template import Template as MakoTemplate
34 34
35 35 from docutils.core import publish_parts
36 36 from docutils.parsers.rst import directives
37 37 from docutils import writers
38 38 from docutils.writers import html4css1
39 39 import markdown
40 40
41 41 from rhodecode.lib.markdown_ext import GithubFlavoredMarkdownExtension
42 42 from rhodecode.lib.utils2 import (
43 43 safe_str, safe_unicode, md5_safe, MENTIONS_REGEX)
44 44
45 45 log = logging.getLogger(__name__)
46 46
47 47 # default renderer used to generate automated comments
48 48 DEFAULT_COMMENTS_RENDERER = 'rst'
49 49
50 50
51 51 class CustomHTMLTranslator(writers.html4css1.HTMLTranslator):
52 52 """
53 53 Custom HTML Translator used for sandboxing potential
54 54 JS injections in ref links
55 55 """
56 56
57 57 def visit_reference(self, node):
58 58 if 'refuri' in node.attributes:
59 59 refuri = node['refuri']
60 60 if ':' in refuri:
61 61 prefix, link = refuri.lstrip().split(':', 1)
62 62 if prefix == 'javascript':
63 63 # we don't allow javascript type of refs...
64 64 node['refuri'] = 'javascript:alert("SandBoxedJavascript")'
65 65
66 66 # old style class requires this...
67 67 return html4css1.HTMLTranslator.visit_reference(self, node)
68 68
69 69
70 70 class RhodeCodeWriter(writers.html4css1.Writer):
71 71 def __init__(self):
72 72 writers.Writer.__init__(self)
73 73 self.translator_class = CustomHTMLTranslator
74 74
75 75
76 def relative_links(html_source, server_path):
76 def relative_links(html_source, server_paths):
77 77 if not html_source:
78 78 return html_source
79 79
80 80 try:
81 81 from lxml.html import fromstring
82 82 from lxml.html import tostring
83 83 except ImportError:
84 84 log.exception('Failed to import lxml')
85 85 return html_source
86 86
87 87 try:
88 88 doc = lxml.html.fromstring(html_source)
89 89 except Exception:
90 90 return html_source
91 91
92 92 for el in doc.cssselect('img, video'):
93 93 src = el.attrib.get('src')
94 94 if src:
95 el.attrib['src'] = relative_path(src, server_path)
95 el.attrib['src'] = relative_path(src, server_paths['raw'])
96 96
97 97 for el in doc.cssselect('a:not(.gfm)'):
98 98 src = el.attrib.get('href')
99 99 if src:
100 el.attrib['href'] = relative_path(src, server_path)
100 raw_mode = el.attrib['href'].endswith('?raw=1')
101 if raw_mode:
102 el.attrib['href'] = relative_path(src, server_paths['raw'])
103 else:
104 el.attrib['href'] = relative_path(src, server_paths['standard'])
101 105
102 106 return lxml.html.tostring(doc)
103 107
104 108
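Restating the unit-test expectations at the end of this changeset as a quick sketch of the new server_paths contract:

server_paths = {'raw': '/path/raw/file.md', 'standard': '/path/file.md'}
relative_links('<img src="/file.png"/>', server_paths)          # -> '<img src="/path/raw/file.png">'
relative_links('<a href="/file.png"></a>', server_paths)        # -> '<a href="/path/file.png"></a>'
relative_links('<a href="/file.png?raw=1"></a>', server_paths)  # -> '<a href="/path/raw/file.png?raw=1"></a>'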
105 109 def relative_path(path, request_path, is_repo_file=None):
106 110 """
107 111 relative link support: `path` is a relative path and `request_path` is the
108 112 current server path (not absolute)
109 113
110 114 e.g.
111 115
112 116 path = '../logo.png'
113 117 request_path= '/repo/files/path/file.md'
114 118 produces: '/repo/files/logo.png'
115 119 """
116 120 # TODO(marcink): unicode/str support ?
117 121 # maybe=> safe_unicode(urllib.quote(safe_str(final_path), '/:'))
118 122
119 123 def dummy_check(p):
120 124 return True # assume default is a valid file path
121 125
122 126 is_repo_file = is_repo_file or dummy_check
123 127 if not path:
124 128 return request_path
125 129
126 130 path = safe_unicode(path)
127 131 request_path = safe_unicode(request_path)
128 132
129 133 if path.startswith((u'data:', u'javascript:', u'#', u':')):
130 134 # skip data, anchor, invalid links
131 135 return path
132 136
133 137 is_absolute = bool(urlparse.urlparse(path).netloc)
134 138 if is_absolute:
135 139 return path
136 140
137 141 if not request_path:
138 142 return path
139 143
140 144 if path.startswith(u'/'):
141 145 path = path[1:]
142 146
143 147 if path.startswith(u'./'):
144 148 path = path[2:]
145 149
146 150 parts = request_path.split('/')
147 151 # compute how deep we need to traverse the request_path
148 152 depth = 0
149 153
150 154 if is_repo_file(request_path):
151 155 # if request path is a VALID file, we use a relative path with
152 156 # one level up
153 157 depth += 1
154 158
155 159 while path.startswith(u'../'):
156 160 depth += 1
157 161 path = path[3:]
158 162
159 163 if depth > 0:
160 164 parts = parts[:-depth]
161 165
162 166 parts.append(path)
163 167 final_path = u'/'.join(parts).lstrip(u'/')
164 168
165 169 return u'/' + final_path
166 170
167 171
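A few of the cases exercised by the tests below, restated for quick reference:

relative_path('../logo.png', '/repo/files/path/file.md')             # -> '/repo/files/logo.png'
relative_path('./source.png', '/repo/files/path/file.md')            # -> '/repo/files/path/source.png'
relative_path('https://google.com/image.png', 'files/path/file.md')  # absolute URLs pass through unchanged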
168 172 class MarkupRenderer(object):
169 173 RESTRUCTUREDTEXT_DISALLOWED_DIRECTIVES = ['include', 'meta', 'raw']
170 174
171 175 MARKDOWN_PAT = re.compile(r'\.(md|mkdn?|mdown|markdown)$', re.IGNORECASE)
172 176 RST_PAT = re.compile(r'\.re?st$', re.IGNORECASE)
173 177 JUPYTER_PAT = re.compile(r'\.(ipynb)$', re.IGNORECASE)
174 178 PLAIN_PAT = re.compile(r'^readme$', re.IGNORECASE)
175 179
176 180 extensions = ['codehilite', 'extra', 'def_list', 'sane_lists']
177 181 markdown_renderer = markdown.Markdown(
178 182 extensions, safe_mode=True, enable_attributes=False)
179 183
180 184 markdown_renderer_flavored = markdown.Markdown(
181 185 extensions + [GithubFlavoredMarkdownExtension()], safe_mode=True,
182 186 enable_attributes=False)
183 187
184 188 # extension together with weights. Lower is first means we control how
185 189 # extensions are attached to readme names with those.
186 190 PLAIN_EXTS = [
187 191 # prefer no extension
188 192 ('', 0), # special case that renders READMES names without extension
189 193 ('.text', 2), ('.TEXT', 2),
190 194 ('.txt', 3), ('.TXT', 3)
191 195 ]
192 196
193 197 RST_EXTS = [
194 198 ('.rst', 1), ('.rest', 1),
195 199 ('.RST', 2), ('.REST', 2)
196 200 ]
197 201
198 202 MARKDOWN_EXTS = [
199 203 ('.md', 1), ('.MD', 1),
200 204 ('.mkdn', 2), ('.MKDN', 2),
201 205 ('.mdown', 3), ('.MDOWN', 3),
202 206 ('.markdown', 4), ('.MARKDOWN', 4)
203 207 ]
204 208
205 209 def _detect_renderer(self, source, filename=None):
206 210 """
207 211 Runs detection of which renderer should be used for generating html
208 212 from a markup language.
209 213 
210 214 The filename can also explicitly be a renderer name.
211 215
212 216 :param source:
213 217 :param filename:
214 218 """
215 219
216 220 if MarkupRenderer.MARKDOWN_PAT.findall(filename):
217 221 detected_renderer = 'markdown'
218 222 elif MarkupRenderer.RST_PAT.findall(filename):
219 223 detected_renderer = 'rst'
220 224 elif MarkupRenderer.JUPYTER_PAT.findall(filename):
221 225 detected_renderer = 'jupyter'
222 226 elif MarkupRenderer.PLAIN_PAT.findall(filename):
223 227 detected_renderer = 'plain'
224 228 else:
225 229 detected_renderer = 'plain'
226 230
227 231 return getattr(MarkupRenderer, detected_renderer)
228 232
229 233 @classmethod
230 234 def renderer_from_filename(cls, filename, exclude):
231 235 """
232 236 Detect renderer markdown/rst from filename and optionally use exclude
233 237 list to remove some options. This is mostly used in helpers.
234 238 Returns None when no renderer can be detected.
235 239 """
236 240 def _filter(elements):
237 241 if isinstance(exclude, (list, tuple)):
238 242 return [x for x in elements if x not in exclude]
239 243 return elements
240 244
241 245 if filename.endswith(
242 246 tuple(_filter([x[0] for x in cls.MARKDOWN_EXTS if x[0]]))):
243 247 return 'markdown'
244 248 if filename.endswith(tuple(_filter([x[0] for x in cls.RST_EXTS if x[0]]))):
245 249 return 'rst'
246 250
247 251 return None
248 252
249 253 def render(self, source, filename=None):
250 254 """
251 255 Renders the given source using a detected renderer;
252 256 renderers are detected based on file extension or mimetype.
253 257 As a last resort it falls back to simple html, replacing new lines with <br/>
254 258 
255 259 :param filename:
256 260 :param source:
257 261 """
258 262
259 263 renderer = self._detect_renderer(source, filename)
260 264 readme_data = renderer(source)
261 265 return readme_data
262 266
263 267 @classmethod
264 268 def _flavored_markdown(cls, text):
265 269 """
266 270 Github style flavored markdown
267 271
268 272 :param text:
269 273 """
270 274
271 275 # Extract pre blocks.
272 276 extractions = {}
273 277
274 278 def pre_extraction_callback(matchobj):
275 279 digest = md5_safe(matchobj.group(0))
276 280 extractions[digest] = matchobj.group(0)
277 281 return "{gfm-extraction-%s}" % digest
278 282 pattern = re.compile(r'<pre>.*?</pre>', re.MULTILINE | re.DOTALL)
279 283 text = re.sub(pattern, pre_extraction_callback, text)
280 284
281 285 # Prevent foo_bar_baz from ending up with an italic word in the middle.
282 286 def italic_callback(matchobj):
283 287 s = matchobj.group(0)
284 288 if list(s).count('_') >= 2:
285 289 return s.replace('_', r'\_')
286 290 return s
287 291 text = re.sub(r'^(?! {4}|\t)\w+_\w+_\w[\w_]*', italic_callback, text)
288 292
289 293 # Insert pre block extractions.
290 294 def pre_insert_callback(matchobj):
291 295 return '\n\n' + extractions[matchobj.group(1)]
292 296 text = re.sub(r'\{gfm-extraction-([0-9a-f]{32})\}',
293 297 pre_insert_callback, text)
294 298
295 299 return text
296 300
297 301 @classmethod
298 302 def urlify_text(cls, text):
299 303 url_pat = re.compile(r'(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]'
300 304 r'|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)')
301 305
302 306 def url_func(match_obj):
303 307 url_full = match_obj.groups()[0]
304 308 return '<a href="%(url)s">%(url)s</a>' % ({'url': url_full})
305 309
306 310 return url_pat.sub(url_func, text)
307 311
308 312 @classmethod
309 313 def plain(cls, source, universal_newline=True):
310 314 source = safe_unicode(source)
311 315 if universal_newline:
312 316 newline = '\n'
313 317 source = newline.join(source.splitlines())
314 318
315 319 source = cls.urlify_text(source)
316 320 return '<br />' + source.replace("\n", '<br />')
317 321
318 322 @classmethod
319 323 def markdown(cls, source, safe=True, flavored=True, mentions=False):
320 324 # It does not allow inserting inline HTML. In the presence of HTML tags, it
321 325 # will replace them with [HTML_REMOVED] instead. This is controlled by
322 326 # the safe_mode=True parameter of the markdown method.
323 327
324 328 if flavored:
325 329 markdown_renderer = cls.markdown_renderer_flavored
326 330 else:
327 331 markdown_renderer = cls.markdown_renderer
328 332
329 333 if mentions:
330 334 mention_pat = re.compile(MENTIONS_REGEX)
331 335
332 336 def wrapp(match_obj):
333 337 uname = match_obj.groups()[0]
334 338 return ' **@%(uname)s** ' % {'uname': uname}
335 339 mention_hl = mention_pat.sub(wrapp, source).strip()
336 340 # we extracted mentions; render again with mentions=False
337 341 return cls.markdown(mention_hl, safe=safe, flavored=flavored,
338 342 mentions=False)
339 343
340 344 source = safe_unicode(source)
341 345 try:
342 346 if flavored:
343 347 source = cls._flavored_markdown(source)
344 348 return markdown_renderer.convert(source)
345 349 except Exception:
346 350 log.exception('Error when rendering Markdown')
347 351 if safe:
348 352 log.debug('Fallback to render in plain mode')
349 353 return cls.plain(source)
350 354 else:
351 355 raise
352 356
353 357 @classmethod
354 358 def rst(cls, source, safe=True, mentions=False):
355 359 if mentions:
356 360 mention_pat = re.compile(MENTIONS_REGEX)
357 361
358 362 def wrapp(match_obj):
359 363 uname = match_obj.groups()[0]
360 364 return ' **@%(uname)s** ' % {'uname': uname}
361 365 mention_hl = mention_pat.sub(wrapp, source).strip()
362 366 # we extracted mentions; render again with mentions=False
363 367 return cls.rst(mention_hl, safe=safe, mentions=False)
364 368
365 369 source = safe_unicode(source)
366 370 try:
367 371 docutils_settings = dict(
368 372 [(alias, None) for alias in
369 373 cls.RESTRUCTUREDTEXT_DISALLOWED_DIRECTIVES])
370 374
371 375 docutils_settings.update({'input_encoding': 'unicode',
372 376 'report_level': 4})
373 377
374 378 for k, v in docutils_settings.iteritems():
375 379 directives.register_directive(k, v)
376 380
377 381 parts = publish_parts(source=source,
378 382 writer=RhodeCodeWriter(),
379 383 settings_overrides=docutils_settings)
380 384
381 385 return parts['html_title'] + parts["fragment"]
382 386 except Exception:
383 387 log.exception('Error when rendering RST')
384 388 if safe:
385 389 log.debug('Falling back to render in plain mode')
386 390 return cls.plain(source)
387 391 else:
388 392 raise
389 393
390 394 @classmethod
391 395 def jupyter(cls, source, safe=True):
392 396 from rhodecode.lib import helpers
393 397
394 398 from traitlets.config import Config
395 399 import nbformat
396 400 from nbconvert import HTMLExporter
397 401 from nbconvert.preprocessors import Preprocessor
398 402
399 403 class CustomHTMLExporter(HTMLExporter):
400 404 def _template_file_default(self):
401 405 return 'basic'
402 406
403 407 class Sandbox(Preprocessor):
404 408
405 409 def preprocess(self, nb, resources):
406 410 sandbox_text = 'SandBoxed(IPython.core.display.Javascript object)'
407 411 for cell in nb['cells']:
408 412 if safe and 'outputs' in cell:
409 413 for cell_output in cell['outputs']:
410 414 if 'data' in cell_output:
411 415 if 'application/javascript' in cell_output['data']:
412 416 cell_output['data']['text/plain'] = sandbox_text
413 417 cell_output['data'].pop('application/javascript', None)
414 418 return nb, resources
415 419
416 420 def _sanitize_resources(resources):
417 421 """
418 422 Skip/sanitize some of the CSS generated and included in jupyter
419 423 so it doesn't mess up the UI so much
420 424 """
421 425
422 426 # TODO(marcink): probably we should replace this with a whole custom
423 427 # CSS set that doesn't screw up, but jupyter generated html has some
424 428 # special markers, so it requires Custom HTML exporter template with
425 429 # _default_template_path_default, to achieve that
426 430
427 431 # strip the reset CSS
428 432 resources[0] = resources[0][resources[0].find('/*! Source'):]
429 433 return resources
430 434
431 435 def as_html(notebook):
432 436 conf = Config()
433 437 conf.CustomHTMLExporter.preprocessors = [Sandbox]
434 438 html_exporter = CustomHTMLExporter(config=conf)
435 439
436 440 (body, resources) = html_exporter.from_notebook_node(notebook)
437 441 header = '<!-- ## IPYTHON NOTEBOOK RENDERING ## -->'
438 442 js = MakoTemplate(r'''
439 443 <!-- Load mathjax -->
440 444 <!-- MathJax configuration -->
441 445 <script type="text/x-mathjax-config">
442 446 MathJax.Hub.Config({
443 447 jax: ["input/TeX","output/HTML-CSS", "output/PreviewHTML"],
444 448 extensions: ["tex2jax.js","MathMenu.js","MathZoom.js", "fast-preview.js", "AssistiveMML.js", "[Contrib]/a11y/accessibility-menu.js"],
445 449 TeX: {
446 450 extensions: ["AMSmath.js","AMSsymbols.js","noErrors.js","noUndefined.js"]
447 451 },
448 452 tex2jax: {
449 453 inlineMath: [ ['$','$'], ["\\(","\\)"] ],
450 454 displayMath: [ ['$$','$$'], ["\\[","\\]"] ],
451 455 processEscapes: true,
452 456 processEnvironments: true
453 457 },
454 458 // Center justify equations in code and markdown cells. Elsewhere
455 459 // we use CSS to left justify single line equations in code cells.
456 460 displayAlign: 'center',
457 461 "HTML-CSS": {
458 462 styles: {'.MathJax_Display': {"margin": 0}},
459 463 linebreaks: { automatic: true },
460 464 availableFonts: ["STIX", "TeX"]
461 465 },
462 466 showMathMenu: false
463 467 });
464 468 </script>
465 469 <!-- End of mathjax configuration -->
466 470 <script src="${h.asset('js/src/math_jax/MathJax.js')}"></script>
467 471 ''').render(h=helpers)
468 472
469 473 css = '<style>{}</style>'.format(
470 474 ''.join(_sanitize_resources(resources['inlining']['css'])))
471 475
472 476 body = '\n'.join([header, css, js, body])
473 477 return body, resources
474 478
475 479 notebook = nbformat.reads(source, as_version=4)
476 480 (body, resources) = as_html(notebook)
477 481 return body
478 482
479 483
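A hedged sketch of how MarkupRenderer is typically driven: class methods for an explicit renderer, the instance render() for extension-based detection:

MarkupRenderer.markdown(u'# Title\n\n*hello*')              # flavored + safe_mode by default
MarkupRenderer.rst(u'Title\n=====\n\nbody text')            # falls back to plain() on errors when safe=True
MarkupRenderer().render(u'# Title', filename='README.md')   # picks markdown from the .md extension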
480 484 class RstTemplateRenderer(object):
481 485
482 486 def __init__(self):
483 487 base = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
484 488 rst_template_dirs = [os.path.join(base, 'templates', 'rst_templates')]
485 489 self.template_store = TemplateLookup(
486 490 directories=rst_template_dirs,
487 491 input_encoding='utf-8',
488 492 imports=['from rhodecode.lib import helpers as h'])
489 493
490 494 def _get_template(self, templatename):
491 495 return self.template_store.get_template(templatename)
492 496
493 497 def render(self, template_name, **kwargs):
494 498 template = self._get_template(template_name)
495 499 return template.render(**kwargs)
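And a usage sketch of the template renderer used for automated comments; the template name and parameters mirror the tests further down:

renderer = RstTemplateRenderer()
text = renderer.render(
    'auto_status_change.mako',
    new_status_label='Approved', pull_request=None, commit_id=None)
# -> roughly: 'Auto status change to |new_status| ... .. |new_status| replace:: *"Approved"*'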
@@ -1,98 +1,104 b''
1 1 <%namespace name="sourceblock" file="/codeblocks/source.mako"/>
2 2
3 3 <div id="codeblock" class="codeblock">
4 4 <div class="codeblock-header">
5 5 <div class="stats">
6 6 <span>
7 7 <strong>
8 8 <i class="icon-file-text"></i>
9 9 ${c.file}
10 10 </strong>
11 11 </span>
12 12 % if c.lf_node:
13 13 <span title="${_('This file is a pointer to large binary file')}"> | ${_('LargeFile')} ${h.format_byte_size_binary(c.lf_node.size)} </span>
14 14 % endif
15 15 <span> | ${c.file.lines()[0]} ${_ungettext('line', 'lines', c.file.lines()[0])}</span>
16 16 <span> | ${h.format_byte_size_binary(c.file.size)}</span>
17 17 <span> | ${c.file.mimetype} </span>
18 18 <span> | ${h.get_lexer_for_filenode(c.file).__class__.__name__}</span>
19 19 <span class="item last"> | <i class="tooltip icon-clipboard clipboard-action" data-clipboard-text="${c.file.path}" title="${_('Copy the full path')}"></i></span>
20 20 </div>
21 21 <div class="buttons">
22 22 <a id="file_history_overview" href="#">
23 23 ${_('History')}
24 24 </a>
25 25 <a id="file_history_overview_full" style="display: none" href="${h.route_path('repo_changelog_file',repo_name=c.repo_name, commit_id=c.commit.raw_id, f_path=c.f_path)}">
26 26 ${_('Show Full History')}
27 27 </a> |
28 28 %if c.annotate:
29 29 ${h.link_to(_('Source'), h.route_path('repo_files', repo_name=c.repo_name,commit_id=c.commit.raw_id,f_path=c.f_path))}
30 30 %else:
31 31 ${h.link_to(_('Annotation'), h.route_path('repo_files:annotated',repo_name=c.repo_name,commit_id=c.commit.raw_id,f_path=c.f_path))}
32 32 %endif
33 33 | ${h.link_to(_('Raw'), h.route_path('repo_file_raw',repo_name=c.repo_name,commit_id=c.commit.raw_id,f_path=c.f_path))}
34 34 |
35 35 % if c.lf_node:
36 36 <a href="${h.route_path('repo_file_download',repo_name=c.repo_name,commit_id=c.commit.raw_id,f_path=c.f_path, _query=dict(lf=1))}">
37 37 ${_('Download largefile')}
38 38 </a>
39 39 % else:
40 40 <a href="${h.route_path('repo_file_download',repo_name=c.repo_name,commit_id=c.commit.raw_id,f_path=c.f_path)}">
41 41 ${_('Download')}
42 42 </a>
43 43 % endif
44 44
45 45 %if h.HasRepoPermissionAny('repository.write','repository.admin')(c.repo_name):
46 46 |
47 47 %if c.on_branch_head and c.branch_or_raw_id and not c.file.is_binary:
48 48 <a href="${h.route_path('repo_files_edit_file',repo_name=c.repo_name,commit_id=c.branch_or_raw_id,f_path=c.f_path, _anchor='edit')}">
49 49 ${_('Edit on Branch:{}').format(c.branch_name)}
50 50 </a>
51 51 | <a class="btn-danger btn-link" href="${h.route_path('repo_files_remove_file',repo_name=c.repo_name,commit_id=c.branch_or_raw_id,f_path=c.f_path, _anchor='edit')}">${_('Delete')}
52 52 </a>
53 53 %elif c.on_branch_head and c.branch_or_raw_id and c.file.is_binary:
54 54 ${h.link_to(_('Edit'), '#', class_="btn btn-link disabled tooltip", title=_('Editing binary files not allowed'))}
55 55 | ${h.link_to(_('Delete'), h.route_path('repo_files_remove_file',repo_name=c.repo_name,commit_id=c.branch_or_raw_id,f_path=c.f_path, _anchor='edit'),class_="btn-danger btn-link")}
56 56 %else:
57 57 ${h.link_to(_('Edit'), '#', class_="btn btn-link disabled tooltip", title=_('Editing files allowed only when on branch head commit'))}
58 58 | ${h.link_to(_('Delete'), '#', class_="btn btn-danger btn-link disabled tooltip", title=_('Deleting files allowed only when on branch head commit'))}
59 59 %endif
60 60 %endif
61 61 </div>
62 62 </div>
63 63 <div id="file_history_container"></div>
64 64 <div class="code-body">
65 65 %if c.file.is_binary:
66 66 <% rendered_binary = h.render_binary(c.repo_name, c.file)%>
67 67 % if rendered_binary:
68 68 ${rendered_binary}
69 69 % else:
70 70 <div>
71 71 ${_('Binary file (%s)') % c.file.mimetype}
72 72 </div>
73 73 % endif
74 74 %else:
75 75 % if c.file.size < c.visual.cut_off_limit_file:
76 76 %if c.renderer and not c.annotate:
77 ${h.render(c.file.content, renderer=c.renderer, relative_url=h.route_path('repo_file_raw',repo_name=c.repo_name,commit_id=c.commit.raw_id,f_path=c.f_path))}
77 <%
78 relative_urls = {
79 'raw': h.route_path('repo_file_raw',repo_name=c.repo_name,commit_id=c.commit.raw_id,f_path=c.f_path),
80 'standard': h.route_path('repo_files',repo_name=c.repo_name,commit_id=c.commit.raw_id,f_path=c.f_path),
81 }
82 %>
83 ${h.render(c.file.content, renderer=c.renderer, relative_urls=relative_urls)}
78 84 %else:
79 85 <table class="cb codehilite">
80 86 %if c.annotate:
81 87 <% color_hasher = h.color_hasher() %>
82 88 %for annotation, lines in c.annotated_lines:
83 89 ${sourceblock.render_annotation_lines(annotation, lines, color_hasher)}
84 90 %endfor
85 91 %else:
86 92 %for line_num, tokens in enumerate(c.lines, 1):
87 93 ${sourceblock.render_line(line_num, tokens)}
88 94 %endfor
89 95 %endif
90 96 </table>
91 97 %endif
92 98 %else:
93 99 ${_('File size {} is bigger than the allowed limit {}. ').format(h.format_byte_size_binary(c.file.size), h.format_byte_size_binary(c.visual.cut_off_limit_file))} ${h.link_to(_('Show as raw'),
94 100 h.route_path('repo_file_raw',repo_name=c.repo_name,commit_id=c.commit.raw_id,f_path=c.f_path))}
95 101 %endif
96 102 %endif
97 103 </div>
98 104 </div> No newline at end of file
@@ -1,255 +1,258 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2010-2017 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21 import pytest
22 22
23 23 from rhodecode.lib.markup_renderer import (
24 24 MarkupRenderer, RstTemplateRenderer, relative_path, relative_links)
25 25
26 26
27 27 @pytest.mark.parametrize(
28 28 "filename, expected_renderer",
29 29 [
30 30 ('readme.md', 'markdown'),
31 31 ('readme.Md', 'markdown'),
32 32 ('readme.MdoWn', 'markdown'),
33 33 ('readme.rst', 'rst'),
34 34 ('readme.Rst', 'rst'),
35 35 ('readme.rest', 'rst'),
36 36 ('readme.rest', 'rst'),
37 37
38 38 ('markdown.xml', 'plain'),
39 39 ('rest.xml', 'plain'),
40 40 ('readme.xml', 'plain'),
41 41
42 42 ('readme', 'plain'),
43 43 ('README', 'plain'),
44 44 ('readme.mdx', 'plain'),
45 45 ('readme.rstx', 'plain'),
46 46 ('readmex', 'plain'),
47 47 ])
48 48 def test_detect_renderer(filename, expected_renderer):
49 49 detected_renderer = MarkupRenderer()._detect_renderer(
50 50 '', filename=filename).__name__
51 51 assert expected_renderer == detected_renderer
52 52
53 53
54 54 def test_markdown_xss_link():
55 55 xss_md = "[link](javascript:alert('XSS: pwned!'))"
56 56 rendered_html = MarkupRenderer.markdown(xss_md)
57 57 assert 'href="javascript:alert(\'XSS: pwned!\')"' not in rendered_html
58 58
59 59
60 60 def test_markdown_xss_inline_html():
61 61 xss_md = '\n'.join([
62 62 '> <a name="n"',
63 63 '> href="javascript:alert(\'XSS: pwned!\')">link</a>'])
64 64 rendered_html = MarkupRenderer.markdown(xss_md)
65 65 assert 'href="javascript:alert(\'XSS: pwned!\')">' not in rendered_html
66 66
67 67
68 68 def test_markdown_inline_html():
69 69 xss_md = '\n'.join(['> <a name="n"',
70 70 '> href="https://rhodecode.com">link</a>'])
71 71 rendered_html = MarkupRenderer.markdown(xss_md)
72 72 assert '[HTML_REMOVED]link[HTML_REMOVED]' in rendered_html
73 73
74 74
75 75 def test_rst_xss_link():
76 76 xss_rst = "`Link<javascript:alert('XSS: pwned!')>`_"
77 77 rendered_html = MarkupRenderer.rst(xss_rst)
78 78 assert "href=javascript:alert('XSS: pwned!')" not in rendered_html
79 79
80 80
81 81 @pytest.mark.xfail(reason='Bug in docutils. Waiting answer from the author')
82 82 def test_rst_xss_inline_html():
83 83 xss_rst = '<a href="javascript:alert(\'XSS: pwned!\')">link</a>'
84 84 rendered_html = MarkupRenderer.rst(xss_rst)
85 85 assert 'href="javascript:alert(' not in rendered_html
86 86
87 87
88 88 def test_rst_xss_raw_directive():
89 89 xss_rst = '\n'.join([
90 90 '.. raw:: html',
91 91 '',
92 92 ' <a href="javascript:alert(\'XSS: pwned!\')">link</a>'])
93 93 rendered_html = MarkupRenderer.rst(xss_rst)
94 94 assert 'href="javascript:alert(' not in rendered_html
95 95
96 96
97 97 def test_render_rst_template_without_files():
98 98 expected = u'''\
99 99 Pull request updated. Auto status change to |under_review|
100 100
101 101 .. role:: added
102 102 .. role:: removed
103 103 .. parsed-literal::
104 104
105 105 Changed commits:
106 106 * :added:`2 added`
107 107 * :removed:`3 removed`
108 108
109 109 No file changes found
110 110
111 111 .. |under_review| replace:: *"NEW STATUS"*'''
112 112
113 113 params = {
114 114 'under_review_label': 'NEW STATUS',
115 115 'added_commits': ['a', 'b'],
116 116 'removed_commits': ['a', 'b', 'c'],
117 117 'changed_files': [],
118 118 'added_files': [],
119 119 'modified_files': [],
120 120 'removed_files': [],
121 121 }
122 122 renderer = RstTemplateRenderer()
123 123 rendered = renderer.render('pull_request_update.mako', **params)
124 124 assert expected == rendered
125 125
126 126
127 127 def test_render_rst_template_with_files():
128 128 expected = u'''\
129 129 Pull request updated. Auto status change to |under_review|
130 130
131 131 .. role:: added
132 132 .. role:: removed
133 133 .. parsed-literal::
134 134
135 135 Changed commits:
136 136 * :added:`1 added`
137 137 * :removed:`3 removed`
138 138
139 139 Changed files:
140 140 * `A /path/a.py <#a_c--68ed34923b68>`_
141 141 * `A /path/b.js <#a_c--64f90608b607>`_
142 142 * `M /path/d.js <#a_c--85842bf30c6e>`_
143 143 * `M /path/ę.py <#a_c--d713adf009cd>`_
144 144 * R /path/ź.py
145 145
146 146 .. |under_review| replace:: *"NEW STATUS"*'''
147 147
148 148 added = ['/path/a.py', '/path/b.js']
149 149 modified = ['/path/d.js', u'/path/ę.py']
150 150 removed = [u'/path/ź.py']
151 151
152 152 params = {
153 153 'under_review_label': 'NEW STATUS',
154 154 'added_commits': ['a'],
155 155 'removed_commits': ['a', 'b', 'c'],
156 156 'changed_files': added + modified + removed,
157 157 'added_files': added,
158 158 'modified_files': modified,
159 159 'removed_files': removed,
160 160 }
161 161 renderer = RstTemplateRenderer()
162 162 rendered = renderer.render('pull_request_update.mako', **params)
163 163
164 164 assert expected == rendered
165 165
166 166
167 167 def test_render_rst_auto_status_template():
168 168 expected = u'''\
169 169 Auto status change to |new_status|
170 170
171 171 .. |new_status| replace:: *"NEW STATUS"*'''
172 172
173 173 params = {
174 174 'new_status_label': 'NEW STATUS',
175 175 'pull_request': None,
176 176 'commit_id': None,
177 177 }
178 178 renderer = RstTemplateRenderer()
179 179 rendered = renderer.render('auto_status_change.mako', **params)
180 180 assert expected == rendered
181 181
182 182
183 183 @pytest.mark.parametrize(
184 184 "src_path, server_path, is_path, expected",
185 185 [
186 186 ('source.png', '/repo/files/path', lambda p: False,
187 187 '/repo/files/path/source.png'),
188 188
189 189 ('source.png', 'mk/git/blob/master/README.md', lambda p: True,
190 190 '/mk/git/blob/master/source.png'),
191 191
192 192 ('./source.png', 'mk/git/blob/master/README.md', lambda p: True,
193 193 '/mk/git/blob/master/source.png'),
194 194
195 195 ('/source.png', 'mk/git/blob/master/README.md', lambda p: True,
196 196 '/mk/git/blob/master/source.png'),
197 197
198 198 ('./source.png', 'repo/files/path/source.md', lambda p: True,
199 199 '/repo/files/path/source.png'),
200 200
201 201 ('./source.png', '/repo/files/path/file.md', lambda p: True,
202 202 '/repo/files/path/source.png'),
203 203
204 204 ('../source.png', '/repo/files/path/file.md', lambda p: True,
205 205 '/repo/files/source.png'),
206 206
207 207 ('./../source.png', '/repo/files/path/file.md', lambda p: True,
208 208 '/repo/files/source.png'),
209 209
210 210 ('./source.png', '/repo/files/path/file.md', lambda p: True,
211 211 '/repo/files/path/source.png'),
212 212
213 213 ('../../../source.png', 'path/file.md', lambda p: True,
214 214 '/source.png'),
215 215
216 216 ('../../../../../source.png', '/path/file.md', None,
217 217 '/source.png'),
218 218
219 219 ('../../../../../source.png', 'files/path/file.md', None,
220 220 '/source.png'),
221 221
222 222 ('../../../../../https://google.com/image.png', 'files/path/file.md', None,
223 223 '/https://google.com/image.png'),
224 224
225 225 ('https://google.com/image.png', 'files/path/file.md', None,
226 226 'https://google.com/image.png'),
227 227
228 228 ('://foo', '/files/path/file.md', None,
229 229 '://foo'),
230 230
231 231 (u'한글.png', '/files/path/file.md', None,
232 232 u'/files/path/한글.png'),
233 233
234 234 ('my custom image.png', '/files/path/file.md', None,
235 235 '/files/path/my custom image.png'),
236 236 ])
237 237 def test_relative_path(src_path, server_path, is_path, expected):
238 238 path = relative_path(src_path, server_path, is_path)
239 239 assert path == expected
240 240
241 241
242 242 @pytest.mark.parametrize(
243 243 "src_html, expected_html",
244 244 [
245 245 ('<div></div>', '<div></div>'),
246 246 ('<img src="/file.png"></img>', '<img src="/path/raw/file.png">'),
247 247 ('<img src="data:abcd"/>', '<img src="data:abcd">'),
248 ('<a href="/file.png"></a>', '<a href="/path/raw/file.png"></a>'),
248 ('<a href="/file.png?raw=1"></a>', '<a href="/path/raw/file.png?raw=1"></a>'),
249 ('<a href="/file.png"></a>', '<a href="/path/file.png"></a>'),
249 250 ('<a href="#anchor"></a>', '<a href="#anchor"></a>'),
250 ('<a href="./README.md"></a>', '<a href="/path/raw/README.md"></a>'),
251 ('<a href="../README.md"></a>', '<a href="/path/README.md"></a>'),
251 ('<a href="./README.md?raw=1"></a>', '<a href="/path/raw/README.md?raw=1"></a>'),
252 ('<a href="./README.md"></a>', '<a href="/path/README.md"></a>'),
253 ('<a href="../README.md"></a>', '<a href="/README.md"></a>'),
252 254
253 255 ])
254 256 def test_relative_links(src_html, expected_html):
255 assert relative_links(src_html, '/path/raw/file.md') == expected_html
257 server_paths = {'raw': '/path/raw/file.md', 'standard': '/path/file.md'}
258 assert relative_links(src_html, server_paths=server_paths) == expected_html