file-renderer: escape alt text to prevent XSS on binary files with bad filenames.
marcink -
r2993:97626a52 default
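Note: the excerpt below shows only the top of the changed file (the new `bleach` import is the one visible addition); the alt-text escaping described in the commit message is not part of this excerpt. As a rough, hypothetical sketch of the idea (illustrative names only, not the actual RhodeCode change), the fix amounts to escaping the user-controlled filename before it is embedded as the alt attribute of an <img> tag:

    from webhelpers.html import escape, literal  # helpers this module already imports

    def render_binary_image(src_url, filename):
        # A filename such as '"><script>alert(1)</script>.png' must not be able to
        # break out of the alt attribute, so both values are escaped first.
        return literal('<img class="rendered-binary" alt="%s" src="%s"/>' % (
            escape(filename), escape(src_url)))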
@@ -1,2101 +1,2105 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2010-2018 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21 """
22 22 Helper functions
23 23
24 24 Consists of functions typically used within templates, but also
25 25 available to Controllers. This module is available to both as 'h'.
26 26 """
27 27
28 28 import os
29 29 import random
30 30 import hashlib
31 31 import StringIO
32 32 import textwrap
33 33 import urllib
34 34 import math
35 35 import logging
36 36 import re
37 37 import urlparse
38 38 import time
39 39 import string
40 40 import hashlib
41 41 from collections import OrderedDict
42 42
43 43 import pygments
44 44 import itertools
45 45 import fnmatch
46 import bleach
46 47
47 48 from datetime import datetime
48 49 from functools import partial
49 50 from pygments.formatters.html import HtmlFormatter
50 51 from pygments import highlight as code_highlight
51 52 from pygments.lexers import (
52 53 get_lexer_by_name, get_lexer_for_filename, get_lexer_for_mimetype)
53 54
54 55 from pyramid.threadlocal import get_current_request
55 56
56 57 from webhelpers.html import literal, HTML, escape
57 58 from webhelpers.html.tools import *
58 59 from webhelpers.html.builder import make_tag
59 60 from webhelpers.html.tags import auto_discovery_link, checkbox, css_classes, \
60 61 end_form, file, form as wh_form, hidden, image, javascript_link, link_to, \
61 62 link_to_if, link_to_unless, ol, required_legend, select, stylesheet_link, \
62 63 submit, text, password, textarea, title, ul, xml_declaration, radio
63 64 from webhelpers.html.tools import auto_link, button_to, highlight, \
64 65 js_obfuscate, mail_to, strip_links, strip_tags, tag_re
65 66 from webhelpers.text import chop_at, collapse, convert_accented_entities, \
66 67 convert_misc_entities, lchop, plural, rchop, remove_formatting, \
67 68 replace_whitespace, urlify, truncate, wrap_paragraphs
68 69 from webhelpers.date import time_ago_in_words
69 70 from webhelpers.paginate import Page as _Page
70 71 from webhelpers.html.tags import _set_input_attrs, _set_id_attr, \
71 72 convert_boolean_attrs, NotGiven, _make_safe_id_component
72 73 from webhelpers2.number import format_byte_size
73 74
74 75 from rhodecode.lib.action_parser import action_parser
75 76 from rhodecode.lib.ext_json import json
76 77 from rhodecode.lib.utils import repo_name_slug, get_custom_lexer
77 78 from rhodecode.lib.utils2 import str2bool, safe_unicode, safe_str, \
78 79 get_commit_safe, datetime_to_time, time_to_datetime, time_to_utcdatetime, \
79 80 AttributeDict, safe_int, md5, md5_safe
80 81 from rhodecode.lib.markup_renderer import MarkupRenderer, relative_links
81 82 from rhodecode.lib.vcs.exceptions import CommitDoesNotExistError
82 83 from rhodecode.lib.vcs.backends.base import BaseChangeset, EmptyCommit
83 84 from rhodecode.config.conf import DATE_FORMAT, DATETIME_FORMAT
84 85 from rhodecode.model.changeset_status import ChangesetStatusModel
85 86 from rhodecode.model.db import Permission, User, Repository
86 87 from rhodecode.model.repo_group import RepoGroupModel
87 88 from rhodecode.model.settings import IssueTrackerSettingsModel
88 89
89 90 log = logging.getLogger(__name__)
90 91
91 92
92 93 DEFAULT_USER = User.DEFAULT_USER
93 94 DEFAULT_USER_EMAIL = User.DEFAULT_USER_EMAIL
94 95
95 96
96 97 def asset(path, ver=None, **kwargs):
97 98 """
98 99 Helper to generate a static asset file path for rhodecode assets
99 100
100 101 eg. h.asset('images/image.png', ver='3923')
101 102
102 103 :param path: path of asset
103 104 :param ver: optional version query param to append as ?ver=
104 105 """
105 106 request = get_current_request()
106 107 query = {}
107 108 query.update(kwargs)
108 109 if ver:
109 110 query = {'ver': ver}
110 111 return request.static_path(
111 112 'rhodecode:public/{}'.format(path), _query=query)
112 113
113 114
114 115 default_html_escape_table = {
115 116 ord('&'): u'&amp;',
116 117 ord('<'): u'&lt;',
117 118 ord('>'): u'&gt;',
118 119 ord('"'): u'&quot;',
119 120 ord("'"): u'&#39;',
120 121 }
121 122
122 123
123 124 def html_escape(text, html_escape_table=default_html_escape_table):
124 125 """Produce entities within text."""
125 126 return text.translate(html_escape_table)
126 127
127 128
128 129 def chop_at_smart(s, sub, inclusive=False, suffix_if_chopped=None):
129 130 """
130 131 Truncate string ``s`` at the first occurrence of ``sub``.
131 132
132 133 If ``inclusive`` is true, truncate just after ``sub`` rather than at it.
133 134 """
134 135 suffix_if_chopped = suffix_if_chopped or ''
135 136 pos = s.find(sub)
136 137 if pos == -1:
137 138 return s
138 139
139 140 if inclusive:
140 141 pos += len(sub)
141 142
142 143 chopped = s[:pos]
143 144 left = s[pos:].strip()
144 145
145 146 if left and suffix_if_chopped:
146 147 chopped += suffix_if_chopped
147 148
148 149 return chopped
149 150
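# Illustrative usage (editorial example, not part of the original module):
#   >>> chop_at_smart('plutarch heros', 'l')
#   'p'
#   >>> chop_at_smart('plutarch heros', 'l', inclusive=True)
#   'pl'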
150 151
151 152 def shorter(text, size=20):
152 153 postfix = '...'
153 154 if len(text) > size:
154 155 return text[:size - len(postfix)] + postfix
155 156 return text
156 157
157 158
158 159 def _reset(name, value=None, id=NotGiven, type="reset", **attrs):
159 160 """
160 161 Reset button
161 162 """
162 163 _set_input_attrs(attrs, type, name, value)
163 164 _set_id_attr(attrs, id, name)
164 165 convert_boolean_attrs(attrs, ["disabled"])
165 166 return HTML.input(**attrs)
166 167
167 168 reset = _reset
168 169 safeid = _make_safe_id_component
169 170
170 171
171 172 def branding(name, length=40):
172 173 return truncate(name, length, indicator="")
173 174
174 175
175 176 def FID(raw_id, path):
176 177 """
177 178 Creates a unique ID for a filenode based on the hash of its path and commit;
178 179 it's safe to use in urls
179 180
180 181 :param raw_id:
181 182 :param path:
182 183 """
183 184
184 185 return 'c-%s-%s' % (short_id(raw_id), md5_safe(path)[:12])
185 186
186 187
187 188 class _GetError(object):
188 189 """Get error from form_errors, and represent it as span wrapped error
189 190 message
190 191
191 192 :param field_name: field to fetch errors for
192 193 :param form_errors: form errors dict
193 194 """
194 195
195 196 def __call__(self, field_name, form_errors):
196 197 tmpl = """<span class="error_msg">%s</span>"""
197 198 if form_errors and field_name in form_errors:
198 199 return literal(tmpl % form_errors.get(field_name))
199 200
200 201 get_error = _GetError()
201 202
202 203
203 204 class _ToolTip(object):
204 205
205 206 def __call__(self, tooltip_title, trim_at=50):
206 207 """
207 208 Special function just to wrap our text into nicely formatted,
208 209 auto-wrapped text
209 210
210 211 :param tooltip_title:
211 212 """
212 213 tooltip_title = escape(tooltip_title)
213 214 tooltip_title = tooltip_title.replace('<', '&lt;').replace('>', '&gt;')
214 215 return tooltip_title
215 216 tooltip = _ToolTip()
216 217
217 218
218 219 def files_breadcrumbs(repo_name, commit_id, file_path):
219 220 if isinstance(file_path, str):
220 221 file_path = safe_unicode(file_path)
221 222
222 223 # TODO: johbo: Is this always a url like path, or is this operating
223 224 # system dependent?
224 225 path_segments = file_path.split('/')
225 226
226 227 repo_name_html = escape(repo_name)
227 228 if len(path_segments) == 1 and path_segments[0] == '':
228 229 url_segments = [repo_name_html]
229 230 else:
230 231 url_segments = [
231 232 link_to(
232 233 repo_name_html,
233 234 route_path(
234 235 'repo_files',
235 236 repo_name=repo_name,
236 237 commit_id=commit_id,
237 238 f_path=''),
238 239 class_='pjax-link')]
239 240
240 241 last_cnt = len(path_segments) - 1
241 242 for cnt, segment in enumerate(path_segments):
242 243 if not segment:
243 244 continue
244 245 segment_html = escape(segment)
245 246
246 247 if cnt != last_cnt:
247 248 url_segments.append(
248 249 link_to(
249 250 segment_html,
250 251 route_path(
251 252 'repo_files',
252 253 repo_name=repo_name,
253 254 commit_id=commit_id,
254 255 f_path='/'.join(path_segments[:cnt + 1])),
255 256 class_='pjax-link'))
256 257 else:
257 258 url_segments.append(segment_html)
258 259
259 260 return literal('/'.join(url_segments))
260 261
261 262
262 263 class CodeHtmlFormatter(HtmlFormatter):
263 264 """
264 265 My code Html Formatter for source codes
265 266 """
266 267
267 268 def wrap(self, source, outfile):
268 269 return self._wrap_div(self._wrap_pre(self._wrap_code(source)))
269 270
270 271 def _wrap_code(self, source):
271 272 for cnt, it in enumerate(source):
272 273 i, t = it
273 274 t = '<div id="L%s">%s</div>' % (cnt + 1, t)
274 275 yield i, t
275 276
276 277 def _wrap_tablelinenos(self, inner):
277 278 dummyoutfile = StringIO.StringIO()
278 279 lncount = 0
279 280 for t, line in inner:
280 281 if t:
281 282 lncount += 1
282 283 dummyoutfile.write(line)
283 284
284 285 fl = self.linenostart
285 286 mw = len(str(lncount + fl - 1))
286 287 sp = self.linenospecial
287 288 st = self.linenostep
288 289 la = self.lineanchors
289 290 aln = self.anchorlinenos
290 291 nocls = self.noclasses
291 292 if sp:
292 293 lines = []
293 294
294 295 for i in range(fl, fl + lncount):
295 296 if i % st == 0:
296 297 if i % sp == 0:
297 298 if aln:
298 299 lines.append('<a href="#%s%d" class="special">%*d</a>' %
299 300 (la, i, mw, i))
300 301 else:
301 302 lines.append('<span class="special">%*d</span>' % (mw, i))
302 303 else:
303 304 if aln:
304 305 lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
305 306 else:
306 307 lines.append('%*d' % (mw, i))
307 308 else:
308 309 lines.append('')
309 310 ls = '\n'.join(lines)
310 311 else:
311 312 lines = []
312 313 for i in range(fl, fl + lncount):
313 314 if i % st == 0:
314 315 if aln:
315 316 lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
316 317 else:
317 318 lines.append('%*d' % (mw, i))
318 319 else:
319 320 lines.append('')
320 321 ls = '\n'.join(lines)
321 322
322 323 # in case you wonder about the seemingly redundant <div> here: since the
323 324 # content in the other cell also is wrapped in a div, some browsers in
324 325 # some configurations seem to mess up the formatting...
325 326 if nocls:
326 327 yield 0, ('<table class="%stable">' % self.cssclass +
327 328 '<tr><td><div class="linenodiv" '
328 329 'style="background-color: #f0f0f0; padding-right: 10px">'
329 330 '<pre style="line-height: 125%">' +
330 331 ls + '</pre></div></td><td id="hlcode" class="code">')
331 332 else:
332 333 yield 0, ('<table class="%stable">' % self.cssclass +
333 334 '<tr><td class="linenos"><div class="linenodiv"><pre>' +
334 335 ls + '</pre></div></td><td id="hlcode" class="code">')
335 336 yield 0, dummyoutfile.getvalue()
336 337 yield 0, '</td></tr></table>'
337 338
338 339
339 340 class SearchContentCodeHtmlFormatter(CodeHtmlFormatter):
340 341 def __init__(self, **kw):
341 342 # only show these line numbers if set
342 343 self.only_lines = kw.pop('only_line_numbers', [])
343 344 self.query_terms = kw.pop('query_terms', [])
344 345 self.max_lines = kw.pop('max_lines', 5)
345 346 self.line_context = kw.pop('line_context', 3)
346 347 self.url = kw.pop('url', None)
347 348
348 349 super(CodeHtmlFormatter, self).__init__(**kw)
349 350
350 351 def _wrap_code(self, source):
351 352 for cnt, it in enumerate(source):
352 353 i, t = it
353 354 t = '<pre>%s</pre>' % t
354 355 yield i, t
355 356
356 357 def _wrap_tablelinenos(self, inner):
357 358 yield 0, '<table class="code-highlight %stable">' % self.cssclass
358 359
359 360 last_shown_line_number = 0
360 361 current_line_number = 1
361 362
362 363 for t, line in inner:
363 364 if not t:
364 365 yield t, line
365 366 continue
366 367
367 368 if current_line_number in self.only_lines:
368 369 if last_shown_line_number + 1 != current_line_number:
369 370 yield 0, '<tr>'
370 371 yield 0, '<td class="line">...</td>'
371 372 yield 0, '<td id="hlcode" class="code"></td>'
372 373 yield 0, '</tr>'
373 374
374 375 yield 0, '<tr>'
375 376 if self.url:
376 377 yield 0, '<td class="line"><a href="%s#L%i">%i</a></td>' % (
377 378 self.url, current_line_number, current_line_number)
378 379 else:
379 380 yield 0, '<td class="line"><a href="">%i</a></td>' % (
380 381 current_line_number)
381 382 yield 0, '<td id="hlcode" class="code">' + line + '</td>'
382 383 yield 0, '</tr>'
383 384
384 385 last_shown_line_number = current_line_number
385 386
386 387 current_line_number += 1
387 388
388 389
389 390 yield 0, '</table>'
390 391
391 392
392 393 def extract_phrases(text_query):
393 394 """
394 395 Extracts phrases from a search term string, making sure phrases
395 396 contained in double quotes are kept together, and discarding empty values
396 397 or whitespace-only values, eg.
397 398
398 399 'some text "a phrase" more' => ['some', 'text', 'a phrase', 'more']
399 400
400 401 """
401 402
402 403 in_phrase = False
403 404 buf = ''
404 405 phrases = []
405 406 for char in text_query:
406 407 if in_phrase:
407 408 if char == '"': # end phrase
408 409 phrases.append(buf)
409 410 buf = ''
410 411 in_phrase = False
411 412 continue
412 413 else:
413 414 buf += char
414 415 continue
415 416 else:
416 417 if char == '"': # start phrase
417 418 in_phrase = True
418 419 phrases.append(buf)
419 420 buf = ''
420 421 continue
421 422 elif char == ' ':
422 423 phrases.append(buf)
423 424 buf = ''
424 425 continue
425 426 else:
426 427 buf += char
427 428
428 429 phrases.append(buf)
429 430 phrases = [phrase.strip() for phrase in phrases if phrase.strip()]
430 431 return phrases
431 432
432 433
433 434 def get_matching_offsets(text, phrases):
434 435 """
435 436 Returns a list of string offsets in `text` that the list of `phrases` match
436 437
437 438 >>> get_matching_offsets('some text here', ['some', 'here'])
438 439 [(0, 4), (10, 14)]
439 440
440 441 """
441 442 offsets = []
442 443 for phrase in phrases:
443 444 for match in re.finditer(phrase, text):
444 445 offsets.append((match.start(), match.end()))
445 446
446 447 return offsets
447 448
448 449
449 450 def normalize_text_for_matching(x):
450 451 """
451 452 Replaces all non-alphanumeric characters with spaces and lower-cases the string;
452 453 useful for comparing two text strings without punctuation
453 454 """
454 455 return re.sub(r'[^\w]', ' ', x.lower())
455 456
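# Illustrative usage (editorial example, not part of the original module); every
# non-word character, including punctuation, is replaced with a space:
#   >>> normalize_text_for_matching('Hello, World!')
#   'hello  world '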
456 457
457 458 def get_matching_line_offsets(lines, terms):
458 459 """ Return a set of `lines` indices (starting from 1) matching a
459 460 text search query, along with `context` lines above/below matching lines
460 461
461 462 :param lines: list of strings representing lines
462 463 :param terms: search term string to match in lines eg. 'some text'
463 464 :param context: number of lines above/below a matching line to add to result
464 465 :param max_lines: cut off for lines of interest
465 466 eg.
466 467
467 468 text = '''
468 469 words words words
469 470 words words words
470 471 some text some
471 472 words words words
472 473 words words words
473 474 text here what
474 475 '''
475 476 get_matching_line_offsets(text, 'text', context=1)
476 477 {3: [(5, 9)], 6: [(0, 4)]}
477 478
478 479 """
479 480 matching_lines = {}
480 481 phrases = [normalize_text_for_matching(phrase)
481 482 for phrase in extract_phrases(terms)]
482 483
483 484 for line_index, line in enumerate(lines, start=1):
484 485 match_offsets = get_matching_offsets(
485 486 normalize_text_for_matching(line), phrases)
486 487 if match_offsets:
487 488 matching_lines[line_index] = match_offsets
488 489
489 490 return matching_lines
490 491
491 492
492 493 def hsv_to_rgb(h, s, v):
493 494 """ Convert hsv color values to rgb """
494 495
495 496 if s == 0.0:
496 497 return v, v, v
497 498 i = int(h * 6.0) # XXX assume int() truncates!
498 499 f = (h * 6.0) - i
499 500 p = v * (1.0 - s)
500 501 q = v * (1.0 - s * f)
501 502 t = v * (1.0 - s * (1.0 - f))
502 503 i = i % 6
503 504 if i == 0:
504 505 return v, t, p
505 506 if i == 1:
506 507 return q, v, p
507 508 if i == 2:
508 509 return p, v, t
509 510 if i == 3:
510 511 return p, q, v
511 512 if i == 4:
512 513 return t, p, v
513 514 if i == 5:
514 515 return v, p, q
515 516
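# Illustrative usage (editorial example, not part of the original module); h, s
# and v are expected to be in the 0..1 range:
#   >>> hsv_to_rgb(0.0, 1.0, 1.0)
#   (1.0, 0.0, 0.0)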
516 517
517 518 def unique_color_generator(n=10000, saturation=0.10, lightness=0.95):
518 519 """
519 520 Generator for getting n evenly distributed colors using
520 521 hsv color and the golden ratio. It always returns the same order of colors
521 522
522 523 :param n: number of colors to generate
523 524 :param saturation: saturation of returned colors
524 525 :param lightness: lightness of returned colors
525 526 :returns: RGB tuple
526 527 """
527 528
528 529 golden_ratio = 0.618033988749895
529 530 h = 0.22717784590367374
530 531
531 532 for _ in xrange(n):
532 533 h += golden_ratio
533 534 h %= 1
534 535 HSV_tuple = [h, saturation, lightness]
535 536 RGB_tuple = hsv_to_rgb(*HSV_tuple)
536 537 yield map(lambda x: str(int(x * 256)), RGB_tuple)
537 538
538 539
539 540 def color_hasher(n=10000, saturation=0.10, lightness=0.95):
540 541 """
541 542 Returns a function which, when called with an argument, returns a unique
542 543 color for that argument, eg.
543 544
544 545 :param n: number of colors to generate
545 546 :param saturation: saturation of returned colors
546 547 :param lightness: lightness of returned colors
547 548 :returns: css RGB string
548 549
549 550 >>> color_hash = color_hasher()
550 551 >>> color_hash('hello')
551 552 'rgb(34, 12, 59)'
552 553 >>> color_hash('hello')
553 554 'rgb(34, 12, 59)'
554 555 >>> color_hash('other')
555 556 'rgb(90, 224, 159)'
556 557 """
557 558
558 559 color_dict = {}
559 560 cgenerator = unique_color_generator(
560 561 saturation=saturation, lightness=lightness)
561 562
562 563 def get_color_string(thing):
563 564 if thing in color_dict:
564 565 col = color_dict[thing]
565 566 else:
566 567 col = color_dict[thing] = cgenerator.next()
567 568 return "rgb(%s)" % (', '.join(col))
568 569
569 570 return get_color_string
570 571
571 572
572 573 def get_lexer_safe(mimetype=None, filepath=None):
573 574 """
574 575 Tries to return a relevant pygments lexer using mimetype/filepath name,
575 576 defaulting to plain text if none could be found
576 577 """
577 578 lexer = None
578 579 try:
579 580 if mimetype:
580 581 lexer = get_lexer_for_mimetype(mimetype)
581 582 if not lexer:
582 583 lexer = get_lexer_for_filename(filepath)
583 584 except pygments.util.ClassNotFound:
584 585 pass
585 586
586 587 if not lexer:
587 588 lexer = get_lexer_by_name('text')
588 589
589 590 return lexer
590 591
591 592
592 593 def get_lexer_for_filenode(filenode):
593 594 lexer = get_custom_lexer(filenode.extension) or filenode.lexer
594 595 return lexer
595 596
596 597
597 598 def pygmentize(filenode, **kwargs):
598 599 """
599 600 pygmentize function using pygments
600 601
601 602 :param filenode:
602 603 """
603 604 lexer = get_lexer_for_filenode(filenode)
604 605 return literal(code_highlight(filenode.content, lexer,
605 606 CodeHtmlFormatter(**kwargs)))
606 607
607 608
608 609 def is_following_repo(repo_name, user_id):
609 610 from rhodecode.model.scm import ScmModel
610 611 return ScmModel().is_following_repo(repo_name, user_id)
611 612
612 613
613 614 class _Message(object):
614 615 """A message returned by ``Flash.pop_messages()``.
615 616
616 617 Converting the message to a string returns the message text. Instances
617 618 also have the following attributes:
618 619
619 620 * ``message``: the message text.
620 621 * ``category``: the category specified when the message was created.
621 622 """
622 623
623 624 def __init__(self, category, message):
624 625 self.category = category
625 626 self.message = message
626 627
627 628 def __str__(self):
628 629 return self.message
629 630
630 631 __unicode__ = __str__
631 632
632 633 def __html__(self):
633 634 return escape(safe_unicode(self.message))
634 635
635 636
636 637 class Flash(object):
637 638 # List of allowed categories. If None, allow any category.
638 639 categories = ["warning", "notice", "error", "success"]
639 640
640 641 # Default category if none is specified.
641 642 default_category = "notice"
642 643
643 644 def __init__(self, session_key="flash", categories=None,
644 645 default_category=None):
645 646 """
646 647 Instantiate a ``Flash`` object.
647 648
648 649 ``session_key`` is the key to save the messages under in the user's
649 650 session.
650 651
651 652 ``categories`` is an optional list which overrides the default list
652 653 of categories.
653 654
654 655 ``default_category`` overrides the default category used for messages
655 656 when none is specified.
656 657 """
657 658 self.session_key = session_key
658 659 if categories is not None:
659 660 self.categories = categories
660 661 if default_category is not None:
661 662 self.default_category = default_category
662 663 if self.categories and self.default_category not in self.categories:
663 664 raise ValueError(
664 665 "unrecognized default category %r" % (self.default_category,))
665 666
666 667 def pop_messages(self, session=None, request=None):
667 668 """
668 669 Return all accumulated messages and delete them from the session.
669 670
670 671 The return value is a list of ``Message`` objects.
671 672 """
672 673 messages = []
673 674
674 675 if not session:
675 676 if not request:
676 677 request = get_current_request()
677 678 session = request.session
678 679
679 680 # Pop the 'old' pylons flash messages. They are tuples of the form
680 681 # (category, message)
681 682 for cat, msg in session.pop(self.session_key, []):
682 683 messages.append(_Message(cat, msg))
683 684
684 685 # Pop the 'new' pyramid flash messages for each category as list
685 686 # of strings.
686 687 for cat in self.categories:
687 688 for msg in session.pop_flash(queue=cat):
688 689 messages.append(_Message(cat, msg))
689 690 # Map messages from the default queue to the 'notice' category.
690 691 for msg in session.pop_flash():
691 692 messages.append(_Message('notice', msg))
692 693
693 694 session.save()
694 695 return messages
695 696
696 697 def json_alerts(self, session=None, request=None):
697 698 payloads = []
698 699 messages = flash.pop_messages(session=session, request=request)
699 700 if messages:
700 701 for message in messages:
701 702 subdata = {}
702 703 if hasattr(message.message, 'rsplit'):
703 704 flash_data = message.message.rsplit('|DELIM|', 1)
704 705 org_message = flash_data[0]
705 706 if len(flash_data) > 1:
706 707 subdata = json.loads(flash_data[1])
707 708 else:
708 709 org_message = message.message
709 710 payloads.append({
710 711 'message': {
711 712 'message': u'{}'.format(org_message),
712 713 'level': message.category,
713 714 'force': True,
714 715 'subdata': subdata
715 716 }
716 717 })
717 718 return json.dumps(payloads)
718 719
719 720 def __call__(self, message, category=None, ignore_duplicate=False,
720 721 session=None, request=None):
721 722
722 723 if not session:
723 724 if not request:
724 725 request = get_current_request()
725 726 session = request.session
726 727
727 728 session.flash(
728 729 message, queue=category, allow_duplicate=not ignore_duplicate)
729 730
730 731
731 732 flash = Flash()
732 733
733 734 #==============================================================================
734 735 # SCM FILTERS available via h.
735 736 #==============================================================================
736 737 from rhodecode.lib.vcs.utils import author_name, author_email
737 738 from rhodecode.lib.utils2 import credentials_filter, age as _age
738 739 from rhodecode.model.db import User, ChangesetStatus
739 740
740 741 age = _age
741 742 capitalize = lambda x: x.capitalize()
742 743 email = author_email
743 744 short_id = lambda x: x[:12]
744 745 hide_credentials = lambda x: ''.join(credentials_filter(x))
745 746
746 747
747 748 import pytz
748 749 import tzlocal
749 750 local_timezone = tzlocal.get_localzone()
750 751
751 752
752 753 def age_component(datetime_iso, value=None, time_is_local=False):
753 754 title = value or format_date(datetime_iso)
754 755 tzinfo = '+00:00'
755 756
756 757 # detect if we have a timezone info, otherwise, add it
757 758 if time_is_local and isinstance(datetime_iso, datetime) and not datetime_iso.tzinfo:
758 759 force_timezone = os.environ.get('RC_TIMEZONE', '')
759 760 if force_timezone:
760 761 force_timezone = pytz.timezone(force_timezone)
761 762 timezone = force_timezone or local_timezone
762 763 offset = timezone.localize(datetime_iso).strftime('%z')
763 764 tzinfo = '{}:{}'.format(offset[:-2], offset[-2:])
764 765
765 766 return literal(
766 767 '<time class="timeago tooltip" '
767 768 'title="{1}{2}" datetime="{0}{2}">{1}</time>'.format(
768 769 datetime_iso, title, tzinfo))
769 770
770 771
771 772 def _shorten_commit_id(commit_id):
772 773 from rhodecode import CONFIG
773 774 def_len = safe_int(CONFIG.get('rhodecode_show_sha_length', 12))
774 775 return commit_id[:def_len]
775 776
776 777
777 778 def show_id(commit):
778 779 """
779 780 Configurable function that shows ID;
780 781 by default it's r123:fffeeefffeee
781 782
782 783 :param commit: commit instance
783 784 """
784 785 from rhodecode import CONFIG
785 786 show_idx = str2bool(CONFIG.get('rhodecode_show_revision_number', True))
786 787
787 788 raw_id = _shorten_commit_id(commit.raw_id)
788 789 if show_idx:
789 790 return 'r%s:%s' % (commit.idx, raw_id)
790 791 else:
791 792 return '%s' % (raw_id, )
792 793
793 794
794 795 def format_date(date):
795 796 """
796 797 use a standardized formatting for dates used in RhodeCode
797 798
798 799 :param date: date/datetime object
799 800 :return: formatted date
800 801 """
801 802
802 803 if date:
803 804 _fmt = "%a, %d %b %Y %H:%M:%S"
804 805 return safe_unicode(date.strftime(_fmt))
805 806
806 807 return u""
807 808
808 809
809 810 class _RepoChecker(object):
810 811
811 812 def __init__(self, backend_alias):
812 813 self._backend_alias = backend_alias
813 814
814 815 def __call__(self, repository):
815 816 if hasattr(repository, 'alias'):
816 817 _type = repository.alias
817 818 elif hasattr(repository, 'repo_type'):
818 819 _type = repository.repo_type
819 820 else:
820 821 _type = repository
821 822 return _type == self._backend_alias
822 823
823 824 is_git = _RepoChecker('git')
824 825 is_hg = _RepoChecker('hg')
825 826 is_svn = _RepoChecker('svn')
826 827
827 828
828 829 def get_repo_type_by_name(repo_name):
829 830 repo = Repository.get_by_repo_name(repo_name)
830 831 return repo.repo_type
831 832
832 833
833 834 def is_svn_without_proxy(repository):
834 835 if is_svn(repository):
835 836 from rhodecode.model.settings import VcsSettingsModel
836 837 conf = VcsSettingsModel().get_ui_settings_as_config_obj()
837 838 return not str2bool(conf.get('vcs_svn_proxy', 'http_requests_enabled'))
838 839 return False
839 840
840 841
841 842 def discover_user(author):
842 843 """
843 844 Tries to discover RhodeCode User based on the author string. Author string
844 845 is typically `FirstName LastName <email@address.com>`
845 846 """
846 847
847 848 # if author is already an instance use it for extraction
848 849 if isinstance(author, User):
849 850 return author
850 851
851 852 # Valid email in the attribute passed, see if they're in the system
852 853 _email = author_email(author)
853 854 if _email != '':
854 855 user = User.get_by_email(_email, case_insensitive=True, cache=True)
855 856 if user is not None:
856 857 return user
857 858
858 859 # Maybe it's a username, we try to extract it and fetch by username ?
859 860 _author = author_name(author)
860 861 user = User.get_by_username(_author, case_insensitive=True, cache=True)
861 862 if user is not None:
862 863 return user
863 864
864 865 return None
865 866
866 867
867 868 def email_or_none(author):
868 869 # extract email from the commit string
869 870 _email = author_email(author)
870 871
871 872 # If we have an email, use it, otherwise
872 873 # see if it contains a username we can get an email from
873 874 if _email != '':
874 875 return _email
875 876 else:
876 877 user = User.get_by_username(
877 878 author_name(author), case_insensitive=True, cache=True)
878 879
879 880 if user is not None:
880 881 return user.email
881 882
882 883 # No valid email, not a valid user in the system, none!
883 884 return None
884 885
885 886
886 887 def link_to_user(author, length=0, **kwargs):
887 888 user = discover_user(author)
888 889 # user can be None, but if we have it already it means we can re-use it
889 890 # in the person() function, so we save 1 intensive-query
890 891 if user:
891 892 author = user
892 893
893 894 display_person = person(author, 'username_or_name_or_email')
894 895 if length:
895 896 display_person = shorter(display_person, length)
896 897
897 898 if user:
898 899 return link_to(
899 900 escape(display_person),
900 901 route_path('user_profile', username=user.username),
901 902 **kwargs)
902 903 else:
903 904 return escape(display_person)
904 905
905 906
906 907 def link_to_group(users_group_name, **kwargs):
907 908 return link_to(
908 909 escape(users_group_name),
909 910 route_path('user_group_profile', user_group_name=users_group_name),
910 911 **kwargs)
911 912
912 913
913 914 def person(author, show_attr="username_and_name"):
914 915 user = discover_user(author)
915 916 if user:
916 917 return getattr(user, show_attr)
917 918 else:
918 919 _author = author_name(author)
919 920 _email = email(author)
920 921 return _author or _email
921 922
922 923
923 924 def author_string(email):
924 925 if email:
925 926 user = User.get_by_email(email, case_insensitive=True, cache=True)
926 927 if user:
927 928 if user.first_name or user.last_name:
928 929 return '%s %s &lt;%s&gt;' % (
929 930 user.first_name, user.last_name, email)
930 931 else:
931 932 return email
932 933 else:
933 934 return email
934 935 else:
935 936 return None
936 937
937 938
938 939 def person_by_id(id_, show_attr="username_and_name"):
939 940 # attr to return from fetched user
940 941 person_getter = lambda usr: getattr(usr, show_attr)
941 942
942 943 #maybe it's an ID ?
943 944 if str(id_).isdigit() or isinstance(id_, int):
944 945 id_ = int(id_)
945 946 user = User.get(id_)
946 947 if user is not None:
947 948 return person_getter(user)
948 949 return id_
949 950
950 951
951 952 def gravatar_with_user(request, author, show_disabled=False):
952 953 _render = request.get_partial_renderer(
953 954 'rhodecode:templates/base/base.mako')
954 955 return _render('gravatar_with_user', author, show_disabled=show_disabled)
955 956
956 957
957 958 tags_paterns = OrderedDict((
958 959 ('lang', (re.compile(r'\[(lang|language)\ \=\&gt;\ *([a-zA-Z\-\/\#\+\.]*)\]'),
959 960 '<div class="metatag" tag="lang">\\2</div>')),
960 961
961 962 ('see', (re.compile(r'\[see\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]'),
962 963 '<div class="metatag" tag="see">see: \\1 </div>')),
963 964
964 965 ('url', (re.compile(r'\[url\ \=\&gt;\ \[([a-zA-Z0-9\ \.\-\_]+)\]\((http://|https://|/)(.*?)\)\]'),
965 966 '<div class="metatag" tag="url"> <a href="\\2\\3">\\1</a> </div>')),
966 967
967 968 ('license', (re.compile(r'\[license\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]'),
968 969 '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/\\1">\\1</a></div>')),
969 970
970 971 ('ref', (re.compile(r'\[(requires|recommends|conflicts|base)\ \=\&gt;\ *([a-zA-Z0-9\-\/]*)\]'),
971 972 '<div class="metatag" tag="ref \\1">\\1: <a href="/\\2">\\2</a></div>')),
972 973
973 974 ('state', (re.compile(r'\[(stable|featured|stale|dead|dev|deprecated)\]'),
974 975 '<div class="metatag" tag="state \\1">\\1</div>')),
975 976
976 977 # label in grey
977 978 ('label', (re.compile(r'\[([a-z]+)\]'),
978 979 '<div class="metatag" tag="label">\\1</div>')),
979 980
980 981 # generic catch all in grey
981 982 ('generic', (re.compile(r'\[([a-zA-Z0-9\.\-\_]+)\]'),
982 983 '<div class="metatag" tag="generic">\\1</div>')),
983 984 ))
984 985
985 986
986 987 def extract_metatags(value):
987 988 """
988 989 Extract supported meta-tags from given text value
989 990 """
990 991 tags = []
991 992 if not value:
992 993 return tags, ''
993 994
994 995 for key, val in tags_paterns.items():
995 996 pat, replace_html = val
996 997 tags.extend([(key, x.group()) for x in pat.finditer(value)])
997 998 value = pat.sub('', value)
998 999
999 1000 return tags, value
1000 1001
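# Illustrative usage (editorial example, not part of the original module):
#   >>> extract_metatags('[stable] docs repo')
#   ([('state', '[stable]')], ' docs repo')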
1001 1002
1002 1003 def style_metatag(tag_type, value):
1003 1004 """
1004 1005 converts tags from value into html equivalent
1005 1006 """
1006 1007 if not value:
1007 1008 return ''
1008 1009
1009 1010 html_value = value
1010 1011 tag_data = tags_paterns.get(tag_type)
1011 1012 if tag_data:
1012 1013 pat, replace_html = tag_data
1013 1014 # convert to plain `unicode` instead of a markup tag to be used in
1014 1015 # regex expressions. safe_unicode doesn't work here
1015 1016 html_value = pat.sub(replace_html, unicode(value))
1016 1017
1017 1018 return html_value
1018 1019
1019 1020
1020 1021 def bool2icon(value):
1021 1022 """
1022 1023 Returns the boolean value of a given value, represented as an html element
1023 1024 with classes that represent icons
1024 1025
1025 1026 :param value: given value to convert to html node
1026 1027 """
1027 1028
1028 1029 if value: # does bool conversion
1029 1030 return HTML.tag('i', class_="icon-true")
1030 1031 else: # not true as bool
1031 1032 return HTML.tag('i', class_="icon-false")
1032 1033
1033 1034
1034 1035 #==============================================================================
1035 1036 # PERMS
1036 1037 #==============================================================================
1037 1038 from rhodecode.lib.auth import HasPermissionAny, HasPermissionAll, \
1038 1039 HasRepoPermissionAny, HasRepoPermissionAll, HasRepoGroupPermissionAll, \
1039 1040 HasRepoGroupPermissionAny, HasRepoPermissionAnyApi, get_csrf_token, \
1040 1041 csrf_token_key
1041 1042
1042 1043
1043 1044 #==============================================================================
1044 1045 # GRAVATAR URL
1045 1046 #==============================================================================
1046 1047 class InitialsGravatar(object):
1047 1048 def __init__(self, email_address, first_name, last_name, size=30,
1048 1049 background=None, text_color='#fff'):
1049 1050 self.size = size
1050 1051 self.first_name = first_name
1051 1052 self.last_name = last_name
1052 1053 self.email_address = email_address
1053 1054 self.background = background or self.str2color(email_address)
1054 1055 self.text_color = text_color
1055 1056
1056 1057 def get_color_bank(self):
1057 1058 """
1058 1059 returns a predefined list of colors that gravatars can use.
1059 1060 Those are randomized distinct colors that guarantee readability and
1060 1061 uniqueness.
1061 1062
1062 1063 generated with: http://phrogz.net/css/distinct-colors.html
1063 1064 """
1064 1065 return [
1065 1066 '#bf3030', '#a67f53', '#00ff00', '#5989b3', '#392040', '#d90000',
1066 1067 '#402910', '#204020', '#79baf2', '#a700b3', '#bf6060', '#7f5320',
1067 1068 '#008000', '#003059', '#ee00ff', '#ff0000', '#8c4b00', '#007300',
1068 1069 '#005fb3', '#de73e6', '#ff4040', '#ffaa00', '#3df255', '#203140',
1069 1070 '#47004d', '#591616', '#664400', '#59b365', '#0d2133', '#83008c',
1070 1071 '#592d2d', '#bf9f60', '#73e682', '#1d3f73', '#73006b', '#402020',
1071 1072 '#b2862d', '#397341', '#597db3', '#e600d6', '#a60000', '#736039',
1072 1073 '#00b318', '#79aaf2', '#330d30', '#ff8080', '#403010', '#16591f',
1073 1074 '#002459', '#8c4688', '#e50000', '#ffbf40', '#00732e', '#102340',
1074 1075 '#bf60ac', '#8c4646', '#cc8800', '#00a642', '#1d3473', '#b32d98',
1075 1076 '#660e00', '#ffd580', '#80ffb2', '#7391e6', '#733967', '#d97b6c',
1076 1077 '#8c5e00', '#59b389', '#3967e6', '#590047', '#73281d', '#665200',
1077 1078 '#00e67a', '#2d50b3', '#8c2377', '#734139', '#b2982d', '#16593a',
1078 1079 '#001859', '#ff00aa', '#a65e53', '#ffcc00', '#0d3321', '#2d3959',
1079 1080 '#731d56', '#401610', '#4c3d00', '#468c6c', '#002ca6', '#d936a3',
1080 1081 '#d94c36', '#403920', '#36d9a3', '#0d1733', '#592d4a', '#993626',
1081 1082 '#cca300', '#00734d', '#46598c', '#8c005e', '#7f1100', '#8c7000',
1082 1083 '#00a66f', '#7382e6', '#b32d74', '#d9896c', '#ffe680', '#1d7362',
1083 1084 '#364cd9', '#73003d', '#d93a00', '#998a4d', '#59b3a1', '#5965b3',
1084 1085 '#e5007a', '#73341d', '#665f00', '#00b38f', '#0018b3', '#59163a',
1085 1086 '#b2502d', '#bfb960', '#00ffcc', '#23318c', '#a6537f', '#734939',
1086 1087 '#b2a700', '#104036', '#3d3df2', '#402031', '#e56739', '#736f39',
1087 1088 '#79f2ea', '#000059', '#401029', '#4c1400', '#ffee00', '#005953',
1088 1089 '#101040', '#990052', '#402820', '#403d10', '#00ffee', '#0000d9',
1089 1090 '#ff80c4', '#a66953', '#eeff00', '#00ccbe', '#8080ff', '#e673a1',
1090 1091 '#a62c00', '#474d00', '#1a3331', '#46468c', '#733950', '#662900',
1091 1092 '#858c23', '#238c85', '#0f0073', '#b20047', '#d9986c', '#becc00',
1092 1093 '#396f73', '#281d73', '#ff0066', '#ff6600', '#dee673', '#59adb3',
1093 1094 '#6559b3', '#590024', '#b2622d', '#98b32d', '#36ced9', '#332d59',
1094 1095 '#40001a', '#733f1d', '#526600', '#005359', '#242040', '#bf6079',
1095 1096 '#735039', '#cef23d', '#007780', '#5630bf', '#66001b', '#b24700',
1096 1097 '#acbf60', '#1d6273', '#25008c', '#731d34', '#a67453', '#50592d',
1097 1098 '#00ccff', '#6600ff', '#ff0044', '#4c1f00', '#8a994d', '#79daf2',
1098 1099 '#a173e6', '#d93662', '#402310', '#aaff00', '#2d98b3', '#8c40ff',
1099 1100 '#592d39', '#ff8c40', '#354020', '#103640', '#1a0040', '#331a20',
1100 1101 '#331400', '#334d00', '#1d5673', '#583973', '#7f0022', '#4c3626',
1101 1102 '#88cc00', '#36a3d9', '#3d0073', '#d9364c', '#33241a', '#698c23',
1102 1103 '#5995b3', '#300059', '#e57382', '#7f3300', '#366600', '#00aaff',
1103 1104 '#3a1659', '#733941', '#663600', '#74b32d', '#003c59', '#7f53a6',
1104 1105 '#73000f', '#ff8800', '#baf279', '#79caf2', '#291040', '#a6293a',
1105 1106 '#b2742d', '#587339', '#0077b3', '#632699', '#400009', '#d9a66c',
1106 1107 '#294010', '#2d4a59', '#aa00ff', '#4c131b', '#b25f00', '#5ce600',
1107 1108 '#267399', '#a336d9', '#990014', '#664e33', '#86bf60', '#0088ff',
1108 1109 '#7700b3', '#593a16', '#073300', '#1d4b73', '#ac60bf', '#e59539',
1109 1110 '#4f8c46', '#368dd9', '#5c0073'
1110 1111 ]
1111 1112
1112 1113 def rgb_to_hex_color(self, rgb_tuple):
1113 1114 """
1114 1115 Converts a passed rgb_tuple to a hex color.
1115 1116
1116 1117 :param rgb_tuple: tuple with 3 ints represents rgb color space
1117 1118 """
1118 1119 return '#' + ("".join(map(chr, rgb_tuple)).encode('hex'))
1119 1120
1120 1121 def email_to_int_list(self, email_str):
1121 1122 """
1122 1123 Gets every byte of the hex digest value of the email and turns it into an integer.
1123 1124 It's always going to be between 0-255
1124 1125 """
1125 1126 digest = md5_safe(email_str.lower())
1126 1127 return [int(digest[i * 2:i * 2 + 2], 16) for i in range(16)]
1127 1128
1128 1129 def pick_color_bank_index(self, email_str, color_bank):
1129 1130 return self.email_to_int_list(email_str)[0] % len(color_bank)
1130 1131
1131 1132 def str2color(self, email_str):
1132 1133 """
1133 1134 Tries to map an email to a color using a stable algorithm
1134 1135
1135 1136 :param email_str:
1136 1137 """
1137 1138 color_bank = self.get_color_bank()
1138 1139 # pick position (modulo its length so we always find it in the
1139 1140 # bank even if it's smaller than 256 values)
1140 1141 pos = self.pick_color_bank_index(email_str, color_bank)
1141 1142 return color_bank[pos]
1142 1143
1143 1144 def normalize_email(self, email_address):
1144 1145 import unicodedata
1145 1146 # default host used to fill in the fake/missing email
1146 1147 default_host = u'localhost'
1147 1148
1148 1149 if not email_address:
1149 1150 email_address = u'%s@%s' % (User.DEFAULT_USER, default_host)
1150 1151
1151 1152 email_address = safe_unicode(email_address)
1152 1153
1153 1154 if u'@' not in email_address:
1154 1155 email_address = u'%s@%s' % (email_address, default_host)
1155 1156
1156 1157 if email_address.endswith(u'@'):
1157 1158 email_address = u'%s%s' % (email_address, default_host)
1158 1159
1159 1160 email_address = unicodedata.normalize('NFKD', email_address)\
1160 1161 .encode('ascii', 'ignore')
1161 1162 return email_address
1162 1163
1163 1164 def get_initials(self):
1164 1165 """
1165 1166 Returns 2 letter initials calculated based on the input.
1166 1167 The algorithm picks the first given email address, and takes the first letter
1167 1168 of the part before @, and then the first letter of the server name. In case
1168 1169 the part before @ is in a format of `somestring.somestring2` it replaces
1169 1170 the server letter with the first letter of somestring2
1170 1171
1171 1172 In case the function was initialized with both first and last name, this
1172 1173 overrides the extraction from email with the first letter of the first and
1173 1174 last name. We add special logic to that functionality: in case the full name
1174 1175 is compound, like Guido Von Rossum, we use the last part of the last name
1175 1176 (Von Rossum), picking `R`.
1176 1177
1177 1178 The function also normalizes non-ascii characters to their ascii
1178 1179 representation, eg Ą => A
1179 1180 """
1180 1181 import unicodedata
1181 1182 # replace non-ascii to ascii
1182 1183 first_name = unicodedata.normalize(
1183 1184 'NFKD', safe_unicode(self.first_name)).encode('ascii', 'ignore')
1184 1185 last_name = unicodedata.normalize(
1185 1186 'NFKD', safe_unicode(self.last_name)).encode('ascii', 'ignore')
1186 1187
1187 1188 # do NFKD encoding, and also make sure email has proper format
1188 1189 email_address = self.normalize_email(self.email_address)
1189 1190
1190 1191 # first push the email initials
1191 1192 prefix, server = email_address.split('@', 1)
1192 1193
1193 1194 # check if prefix is maybe a 'first_name.last_name' syntax
1194 1195 _dot_split = prefix.rsplit('.', 1)
1195 1196 if len(_dot_split) == 2 and _dot_split[1]:
1196 1197 initials = [_dot_split[0][0], _dot_split[1][0]]
1197 1198 else:
1198 1199 initials = [prefix[0], server[0]]
1199 1200
1200 1201 # then try to replace either first_name or last_name
1201 1202 fn_letter = (first_name or " ")[0].strip()
1202 1203 ln_letter = (last_name.split(' ', 1)[-1] or " ")[0].strip()
1203 1204
1204 1205 if fn_letter:
1205 1206 initials[0] = fn_letter
1206 1207
1207 1208 if ln_letter:
1208 1209 initials[1] = ln_letter
1209 1210
1210 1211 return ''.join(initials).upper()
1211 1212
1212 1213 def get_img_data_by_type(self, font_family, img_type):
1213 1214 default_user = """
1214 1215 <svg xmlns="http://www.w3.org/2000/svg"
1215 1216 version="1.1" x="0px" y="0px" width="{size}" height="{size}"
1216 1217 viewBox="-15 -10 439.165 429.164"
1217 1218
1218 1219 xml:space="preserve"
1219 1220 style="background:{background};" >
1220 1221
1221 1222 <path d="M204.583,216.671c50.664,0,91.74-48.075,
1222 1223 91.74-107.378c0-82.237-41.074-107.377-91.74-107.377
1223 1224 c-50.668,0-91.74,25.14-91.74,107.377C112.844,
1224 1225 168.596,153.916,216.671,
1225 1226 204.583,216.671z" fill="{text_color}"/>
1226 1227 <path d="M407.164,374.717L360.88,
1227 1228 270.454c-2.117-4.771-5.836-8.728-10.465-11.138l-71.83-37.392
1228 1229 c-1.584-0.823-3.502-0.663-4.926,0.415c-20.316,
1229 1230 15.366-44.203,23.488-69.076,23.488c-24.877,
1230 1231 0-48.762-8.122-69.078-23.488
1231 1232 c-1.428-1.078-3.346-1.238-4.93-0.415L58.75,
1232 1233 259.316c-4.631,2.41-8.346,6.365-10.465,11.138L2.001,374.717
1233 1234 c-3.191,7.188-2.537,15.412,1.75,22.005c4.285,
1234 1235 6.592,11.537,10.526,19.4,10.526h362.861c7.863,0,15.117-3.936,
1235 1236 19.402-10.527 C409.699,390.129,
1236 1237 410.355,381.902,407.164,374.717z" fill="{text_color}"/>
1237 1238 </svg>""".format(
1238 1239 size=self.size,
1239 1240 background='#979797', # @grey4
1240 1241 text_color=self.text_color,
1241 1242 font_family=font_family)
1242 1243
1243 1244 return {
1244 1245 "default_user": default_user
1245 1246 }[img_type]
1246 1247
1247 1248 def get_img_data(self, svg_type=None):
1248 1249 """
1249 1250 generates the svg metadata for image
1250 1251 """
1251 1252
1252 1253 font_family = ','.join([
1253 1254 'proximanovaregular',
1254 1255 'Proxima Nova Regular',
1255 1256 'Proxima Nova',
1256 1257 'Arial',
1257 1258 'Lucida Grande',
1258 1259 'sans-serif'
1259 1260 ])
1260 1261 if svg_type:
1261 1262 return self.get_img_data_by_type(font_family, svg_type)
1262 1263
1263 1264 initials = self.get_initials()
1264 1265 img_data = """
1265 1266 <svg xmlns="http://www.w3.org/2000/svg" pointer-events="none"
1266 1267 width="{size}" height="{size}"
1267 1268 style="width: 100%; height: 100%; background-color: {background}"
1268 1269 viewBox="0 0 {size} {size}">
1269 1270 <text text-anchor="middle" y="50%" x="50%" dy="0.35em"
1270 1271 pointer-events="auto" fill="{text_color}"
1271 1272 font-family="{font_family}"
1272 1273 style="font-weight: 400; font-size: {f_size}px;">{text}
1273 1274 </text>
1274 1275 </svg>""".format(
1275 1276 size=self.size,
1276 1277 f_size=self.size/1.85, # scale the text inside the box nicely
1277 1278 background=self.background,
1278 1279 text_color=self.text_color,
1279 1280 text=initials.upper(),
1280 1281 font_family=font_family)
1281 1282
1282 1283 return img_data
1283 1284
1284 1285 def generate_svg(self, svg_type=None):
1285 1286 img_data = self.get_img_data(svg_type)
1286 1287 return "data:image/svg+xml;base64,%s" % img_data.encode('base64')
1287 1288
1288 1289
1289 1290 def initials_gravatar(email_address, first_name, last_name, size=30):
1290 1291 svg_type = None
1291 1292 if email_address == User.DEFAULT_USER_EMAIL:
1292 1293 svg_type = 'default_user'
1293 1294 klass = InitialsGravatar(email_address, first_name, last_name, size)
1294 1295 return klass.generate_svg(svg_type=svg_type)
1295 1296
1296 1297
1297 1298 def gravatar_url(email_address, size=30, request=None):
1298 1299 request = get_current_request()
1299 1300 _use_gravatar = request.call_context.visual.use_gravatar
1300 1301 _gravatar_url = request.call_context.visual.gravatar_url
1301 1302
1302 1303 _gravatar_url = _gravatar_url or User.DEFAULT_GRAVATAR_URL
1303 1304
1304 1305 email_address = email_address or User.DEFAULT_USER_EMAIL
1305 1306 if isinstance(email_address, unicode):
1306 1307 # hashlib crashes on unicode items
1307 1308 email_address = safe_str(email_address)
1308 1309
1309 1310 # empty email or default user
1310 1311 if not email_address or email_address == User.DEFAULT_USER_EMAIL:
1311 1312 return initials_gravatar(User.DEFAULT_USER_EMAIL, '', '', size=size)
1312 1313
1313 1314 if _use_gravatar:
1314 1315 # TODO: Disuse pyramid thread locals. Think about another solution to
1315 1316 # get the host and schema here.
1316 1317 request = get_current_request()
1317 1318 tmpl = safe_str(_gravatar_url)
1318 1319 tmpl = tmpl.replace('{email}', email_address)\
1319 1320 .replace('{md5email}', md5_safe(email_address.lower())) \
1320 1321 .replace('{netloc}', request.host)\
1321 1322 .replace('{scheme}', request.scheme)\
1322 1323 .replace('{size}', safe_str(size))
1323 1324 return tmpl
1324 1325 else:
1325 1326 return initials_gravatar(email_address, '', '', size=size)
1326 1327
1327 1328
1328 1329 class Page(_Page):
1329 1330 """
1330 1331 Custom pager to match rendering style with paginator
1331 1332 """
1332 1333
1333 1334 def _get_pos(self, cur_page, max_page, items):
1334 1335 edge = (items / 2) + 1
1335 1336 if (cur_page <= edge):
1336 1337 radius = max(items / 2, items - cur_page)
1337 1338 elif (max_page - cur_page) < edge:
1338 1339 radius = (items - 1) - (max_page - cur_page)
1339 1340 else:
1340 1341 radius = items / 2
1341 1342
1342 1343 left = max(1, (cur_page - (radius)))
1343 1344 right = min(max_page, cur_page + (radius))
1344 1345 return left, cur_page, right
1345 1346
1346 1347 def _range(self, regexp_match):
1347 1348 """
1348 1349 Return range of linked pages (e.g. '1 2 [3] 4 5 6 7 8').
1349 1350
1350 1351 Arguments:
1351 1352
1352 1353 regexp_match
1353 1354 A "re" (regular expressions) match object containing the
1354 1355 radius of linked pages around the current page in
1355 1356 regexp_match.group(1) as a string
1356 1357
1357 1358 This function is supposed to be called as a callable in
1358 1359 re.sub.
1359 1360
1360 1361 """
1361 1362 radius = int(regexp_match.group(1))
1362 1363
1363 1364 # Compute the first and last page number within the radius
1364 1365 # e.g. '1 .. 5 6 [7] 8 9 .. 12'
1365 1366 # -> leftmost_page = 5
1366 1367 # -> rightmost_page = 9
1367 1368 leftmost_page, _cur, rightmost_page = self._get_pos(self.page,
1368 1369 self.last_page,
1369 1370 (radius * 2) + 1)
1370 1371 nav_items = []
1371 1372
1372 1373 # Create a link to the first page (unless we are on the first page
1373 1374 # or there would be no need to insert '..' spacers)
1374 1375 if self.page != self.first_page and self.first_page < leftmost_page:
1375 1376 nav_items.append(self._pagerlink(self.first_page, self.first_page))
1376 1377
1377 1378 # Insert dots if there are pages between the first page
1378 1379 # and the currently displayed page range
1379 1380 if leftmost_page - self.first_page > 1:
1380 1381 # Wrap in a SPAN tag if nolink_attr is set
1381 1382 text = '..'
1382 1383 if self.dotdot_attr:
1383 1384 text = HTML.span(c=text, **self.dotdot_attr)
1384 1385 nav_items.append(text)
1385 1386
1386 1387 for thispage in xrange(leftmost_page, rightmost_page + 1):
1387 1388 # Highlight the current page number and do not use a link
1388 1389 if thispage == self.page:
1389 1390 text = '%s' % (thispage,)
1390 1391 # Wrap in a SPAN tag if nolink_attr is set
1391 1392 if self.curpage_attr:
1392 1393 text = HTML.span(c=text, **self.curpage_attr)
1393 1394 nav_items.append(text)
1394 1395 # Otherwise create just a link to that page
1395 1396 else:
1396 1397 text = '%s' % (thispage,)
1397 1398 nav_items.append(self._pagerlink(thispage, text))
1398 1399
1399 1400 # Insert dots if there are pages between the displayed
1400 1401 # page numbers and the end of the page range
1401 1402 if self.last_page - rightmost_page > 1:
1402 1403 text = '..'
1403 1404 # Wrap in a SPAN tag if nolink_attr is set
1404 1405 if self.dotdot_attr:
1405 1406 text = HTML.span(c=text, **self.dotdot_attr)
1406 1407 nav_items.append(text)
1407 1408
1408 1409 # Create a link to the very last page (unless we are on the last
1409 1410 # page or there would be no need to insert '..' spacers)
1410 1411 if self.page != self.last_page and rightmost_page < self.last_page:
1411 1412 nav_items.append(self._pagerlink(self.last_page, self.last_page))
1412 1413
1413 1414 ## prerender links
1414 1415 #_page_link = url.current()
1415 1416 #nav_items.append(literal('<link rel="prerender" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
1416 1417 #nav_items.append(literal('<link rel="prefetch" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
1417 1418 return self.separator.join(nav_items)
1418 1419
1419 1420 def pager(self, format='~2~', page_param='page', partial_param='partial',
1420 1421 show_if_single_page=False, separator=' ', onclick=None,
1421 1422 symbol_first='<<', symbol_last='>>',
1422 1423 symbol_previous='<', symbol_next='>',
1423 1424 link_attr={'class': 'pager_link', 'rel': 'prerender'},
1424 1425 curpage_attr={'class': 'pager_curpage'},
1425 1426 dotdot_attr={'class': 'pager_dotdot'}, **kwargs):
1426 1427
1427 1428 self.curpage_attr = curpage_attr
1428 1429 self.separator = separator
1429 1430 self.pager_kwargs = kwargs
1430 1431 self.page_param = page_param
1431 1432 self.partial_param = partial_param
1432 1433 self.onclick = onclick
1433 1434 self.link_attr = link_attr
1434 1435 self.dotdot_attr = dotdot_attr
1435 1436
1436 1437 # Don't show navigator if there is no more than one page
1437 1438 if self.page_count == 0 or (self.page_count == 1 and not show_if_single_page):
1438 1439 return ''
1439 1440
1440 1441 from string import Template
1441 1442 # Replace ~...~ in token format by range of pages
1442 1443 result = re.sub(r'~(\d+)~', self._range, format)
1443 1444
1444 1445 # Interpolate '%' variables
1445 1446 result = Template(result).safe_substitute({
1446 1447 'first_page': self.first_page,
1447 1448 'last_page': self.last_page,
1448 1449 'page': self.page,
1449 1450 'page_count': self.page_count,
1450 1451 'items_per_page': self.items_per_page,
1451 1452 'first_item': self.first_item,
1452 1453 'last_item': self.last_item,
1453 1454 'item_count': self.item_count,
1454 1455 'link_first': self.page > self.first_page and \
1455 1456 self._pagerlink(self.first_page, symbol_first) or '',
1456 1457 'link_last': self.page < self.last_page and \
1457 1458 self._pagerlink(self.last_page, symbol_last) or '',
1458 1459 'link_previous': self.previous_page and \
1459 1460 self._pagerlink(self.previous_page, symbol_previous) \
1460 1461 or HTML.span(symbol_previous, class_="pg-previous disabled"),
1461 1462 'link_next': self.next_page and \
1462 1463 self._pagerlink(self.next_page, symbol_next) \
1463 1464 or HTML.span(symbol_next, class_="pg-next disabled")
1464 1465 })
1465 1466
1466 1467 return literal(result)
1467 1468
1468 1469
1469 1470 #==============================================================================
1470 1471 # REPO PAGER, PAGER FOR REPOSITORY
1471 1472 #==============================================================================
1472 1473 class RepoPage(Page):
1473 1474
1474 1475 def __init__(self, collection, page=1, items_per_page=20,
1475 1476 item_count=None, url=None, **kwargs):
1476 1477
1477 1478 """Create a "RepoPage" instance. special pager for paging
1478 1479 repository
1479 1480 """
1480 1481 self._url_generator = url
1481 1482
1482 1483 # Save the kwargs class-wide so they can be used in the pager() method
1483 1484 self.kwargs = kwargs
1484 1485
1485 1486 # Save a reference to the collection
1486 1487 self.original_collection = collection
1487 1488
1488 1489 self.collection = collection
1489 1490
1490 1491 # The self.page is the number of the current page.
1491 1492 # The first page has the number 1!
1492 1493 try:
1493 1494 self.page = int(page) # make it int() if we get it as a string
1494 1495 except (ValueError, TypeError):
1495 1496 self.page = 1
1496 1497
1497 1498 self.items_per_page = items_per_page
1498 1499
1499 1500 # Unless the user tells us how many items the collections has
1500 1501 # we calculate that ourselves.
1501 1502 if item_count is not None:
1502 1503 self.item_count = item_count
1503 1504 else:
1504 1505 self.item_count = len(self.collection)
1505 1506
1506 1507 # Compute the number of the first and last available page
1507 1508 if self.item_count > 0:
1508 1509 self.first_page = 1
1509 1510 self.page_count = int(math.ceil(float(self.item_count) /
1510 1511 self.items_per_page))
1511 1512 self.last_page = self.first_page + self.page_count - 1
1512 1513
1513 1514 # Make sure that the requested page number is in the range of
1514 1515 # valid pages
1515 1516 if self.page > self.last_page:
1516 1517 self.page = self.last_page
1517 1518 elif self.page < self.first_page:
1518 1519 self.page = self.first_page
1519 1520
1520 1521 # Note: the number of items on this page can be less than
1521 1522 # items_per_page if the last page is not full
1522 1523 self.first_item = max(0, (self.item_count) - (self.page *
1523 1524 items_per_page))
1524 1525 self.last_item = ((self.item_count - 1) - items_per_page *
1525 1526 (self.page - 1))
1526 1527
1527 1528 self.items = list(self.collection[self.first_item:self.last_item + 1])
1528 1529
1529 1530 # Links to previous and next page
1530 1531 if self.page > self.first_page:
1531 1532 self.previous_page = self.page - 1
1532 1533 else:
1533 1534 self.previous_page = None
1534 1535
1535 1536 if self.page < self.last_page:
1536 1537 self.next_page = self.page + 1
1537 1538 else:
1538 1539 self.next_page = None
1539 1540
1540 1541 # No items available
1541 1542 else:
1542 1543 self.first_page = None
1543 1544 self.page_count = 0
1544 1545 self.last_page = None
1545 1546 self.first_item = None
1546 1547 self.last_item = None
1547 1548 self.previous_page = None
1548 1549 self.next_page = None
1549 1550 self.items = []
1550 1551
1551 1552 # This is a subclass of the 'list' type. Initialise the list now.
1552 1553 list.__init__(self, reversed(self.items))
1553 1554
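# --- Editor's sketch (not part of the original diff) ------------------------
# A minimal, standalone illustration of the reverse slicing arithmetic that
# RepoPage uses above: the collection is assumed to be ordered oldest-first,
# so page 1 shows the newest `items_per_page` entries, newest first. All names
# and values below are made up for illustration.
def _repo_page_slice_sketch(page=1, items_per_page=10):
    import math
    collection = list(range(25))  # pretend commit indexes, oldest first
    item_count = len(collection)
    page_count = int(math.ceil(float(item_count) / items_per_page))
    first_item = max(0, item_count - page * items_per_page)
    last_item = (item_count - 1) - items_per_page * (page - 1)
    items = list(reversed(collection[first_item:last_item + 1]))
    return page_count, items

# _repo_page_slice_sketch(page=1) -> (3, [24, 23, ..., 15])  the newest ten
# _repo_page_slice_sketch(page=3) -> (3, [4, 3, 2, 1, 0])    the oldest five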
1554 1555
1555 1556 def breadcrumb_repo_link(repo):
1556 1557 """
1557 1558 Makes a breadcrumb path link to a repo
1558 1559
1559 1560 ex::
1560 1561 group >> subgroup >> repo
1561 1562
1562 1563 :param repo: a Repository instance
1563 1564 """
1564 1565
1565 1566 path = [
1566 1567 link_to(group.name, route_path('repo_group_home', repo_group_name=group.group_name))
1567 1568 for group in repo.groups_with_parents
1568 1569 ] + [
1569 1570 link_to(repo.just_name, route_path('repo_summary', repo_name=repo.repo_name))
1570 1571 ]
1571 1572
1572 1573 return literal(' &raquo; '.join(path))
1573 1574
1574 1575
1575 1576 def format_byte_size_binary(file_size):
1576 1577 """
1577 1578 Formats file/folder sizes using binary (base-2) units.
1578 1579 """
1579 1580 if file_size is None:
1580 1581 file_size = 0
1581 1582
1582 1583 formatted_size = format_byte_size(file_size, binary=True)
1583 1584 return formatted_size
1584 1585
1585 1586
1586 1587 def urlify_text(text_, safe=True):
1587 1588 """
1588 1589 Extract URLs from text and make HTML links out of them.
1589 1590
1590 1591 :param text_:
1591 1592 """
1592 1593
1593 1594 url_pat = re.compile(r'''(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@#.&+]'''
1594 1595 '''|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)''')
1595 1596
1596 1597 def url_func(match_obj):
1597 1598 url_full = match_obj.groups()[0]
1598 1599 return '<a href="%(url)s">%(url)s</a>' % ({'url': url_full})
1599 1600 _newtext = url_pat.sub(url_func, text_)
1600 1601 if safe:
1601 1602 return literal(_newtext)
1602 1603 return _newtext
1603 1604
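# --- Editor's sketch (not part of the original diff) ------------------------
# A standalone illustration of the substitution approach urlify_text() takes:
# plain http(s) URLs are wrapped in anchor tags via re.sub. The pattern here
# is deliberately simplified compared to the one above, and no escaping is
# performed, matching the helper's behaviour of trusting its input.
def _urlify_text_sketch(text):
    import re
    url_pat = re.compile(r'(https?://\S+)')
    return url_pat.sub(r'<a href="\1">\1</a>', text)

# _urlify_text_sketch('see http://example.com for details')
# -> 'see <a href="http://example.com">http://example.com</a> for details'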
1604 1605
1605 1606 def urlify_commits(text_, repository):
1606 1607 """
1607 1608 Extract commit ids from text and make links from them.
1608 1609
1609 1610 :param text_:
1610 1611 :param repository: repo name to build the URL with
1611 1612 """
1612 1613
1613 1614 URL_PAT = re.compile(r'(^|\s)([0-9a-fA-F]{12,40})($|\s)')
1614 1615
1615 1616 def url_func(match_obj):
1616 1617 commit_id = match_obj.groups()[1]
1617 1618 pref = match_obj.groups()[0]
1618 1619 suf = match_obj.groups()[2]
1619 1620
1620 1621 tmpl = (
1621 1622 '%(pref)s<a class="%(cls)s" href="%(url)s">'
1622 1623 '%(commit_id)s</a>%(suf)s'
1623 1624 )
1624 1625 return tmpl % {
1625 1626 'pref': pref,
1626 1627 'cls': 'revision-link',
1627 1628 'url': route_url('repo_commit', repo_name=repository,
1628 1629 commit_id=commit_id),
1629 1630 'commit_id': commit_id,
1630 1631 'suf': suf
1631 1632 }
1632 1633
1633 1634 newtext = URL_PAT.sub(url_func, text_)
1634 1635
1635 1636 return newtext
1636 1637
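# --- Editor's sketch (not part of the original diff) ------------------------
# A standalone illustration of the commit-id linking done by urlify_commits().
# The URL shape below is an assumption for the example only; the real helper
# builds it with route_url('repo_commit', ...).
def _urlify_commits_sketch(text, repo_name):
    import re
    commit_pat = re.compile(r'(^|\s)([0-9a-fA-F]{12,40})($|\s)')

    def _link(match_obj):
        pref, commit_id, suf = match_obj.groups()
        url = '/{0}/changeset/{1}'.format(repo_name, commit_id)  # assumed URL shape
        return '{0}<a class="revision-link" href="{1}">{2}</a>{3}'.format(
            pref, url, commit_id, suf)

    return commit_pat.sub(_link, text)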
1637 1638
1638 1639 def _process_url_func(match_obj, repo_name, uid, entry,
1639 1640 return_raw_data=False, link_format='html'):
1640 1641 pref = ''
1641 1642 if match_obj.group().startswith(' '):
1642 1643 pref = ' '
1643 1644
1644 1645 issue_id = ''.join(match_obj.groups())
1645 1646
1646 1647 if link_format == 'html':
1647 1648 tmpl = (
1648 1649 '%(pref)s<a class="%(cls)s" href="%(url)s">'
1649 1650 '%(issue-prefix)s%(id-repr)s'
1650 1651 '</a>')
1651 1652 elif link_format == 'rst':
1652 1653 tmpl = '`%(issue-prefix)s%(id-repr)s <%(url)s>`_'
1653 1654 elif link_format == 'markdown':
1654 1655 tmpl = '[%(issue-prefix)s%(id-repr)s](%(url)s)'
1655 1656 else:
1656 1657 raise ValueError('Bad link_format:{}'.format(link_format))
1657 1658
1658 1659 (repo_name_cleaned,
1659 1660 parent_group_name) = RepoGroupModel().\
1660 1661 _get_group_name_and_parent(repo_name)
1661 1662
1662 1663 # variables replacement
1663 1664 named_vars = {
1664 1665 'id': issue_id,
1665 1666 'repo': repo_name,
1666 1667 'repo_name': repo_name_cleaned,
1667 1668 'group_name': parent_group_name
1668 1669 }
1669 1670 # named regex variables
1670 1671 named_vars.update(match_obj.groupdict())
1671 1672 _url = string.Template(entry['url']).safe_substitute(**named_vars)
1672 1673
1673 1674 data = {
1674 1675 'pref': pref,
1675 1676 'cls': 'issue-tracker-link',
1676 1677 'url': _url,
1677 1678 'id-repr': issue_id,
1678 1679 'issue-prefix': entry['pref'],
1679 1680 'serv': entry['url'],
1680 1681 }
1681 1682 if return_raw_data:
1682 1683 return {
1683 1684 'id': issue_id,
1684 1685 'url': _url
1685 1686 }
1686 1687 return tmpl % data
1687 1688
1688 1689
1689 1690 def get_active_pattern_entries(repo_name):
1690 1691 repo = None
1691 1692 if repo_name:
1692 1693 # Retrieve the repo so an invalid repo_name does not explode in
1693 1694 # IssueTrackerSettingsModel; the (possibly invalid) name is still passed further down
1694 1695 repo = Repository.get_by_repo_name(repo_name, cache=True)
1695 1696
1696 1697 settings_model = IssueTrackerSettingsModel(repo=repo)
1697 1698 active_entries = settings_model.get_settings(cache=True)
1698 1699 return active_entries
1699 1700
1700 1701
1701 1702 def process_patterns(text_string, repo_name, link_format='html',
1702 1703 active_entries=None):
1703 1704
1704 1705 allowed_formats = ['html', 'rst', 'markdown']
1705 1706 if link_format not in allowed_formats:
1706 1707 raise ValueError('Link format can be only one of:{} got {}'.format(
1707 1708 allowed_formats, link_format))
1708 1709
1709 1710 active_entries = active_entries or get_active_pattern_entries(repo_name)
1710 1711 issues_data = []
1711 1712 newtext = text_string
1712 1713
1713 1714 for uid, entry in active_entries.items():
1714 1715 log.debug('found issue tracker entry with uid %s' % (uid,))
1715 1716
1716 1717 if not (entry['pat'] and entry['url']):
1717 1718 log.debug('skipping due to missing data')
1718 1719 continue
1719 1720
1720 1721 log.debug('issue tracker entry: uid: `%s` PAT:%s URL:%s PREFIX:%s'
1721 1722 % (uid, entry['pat'], entry['url'], entry['pref']))
1722 1723
1723 1724 try:
1724 1725 pattern = re.compile(r'%s' % entry['pat'])
1725 1726 except re.error:
1726 1727 log.exception(
1727 1728 'issue tracker pattern: `%s` failed to compile',
1728 1729 entry['pat'])
1729 1730 continue
1730 1731
1731 1732 data_func = partial(
1732 1733 _process_url_func, repo_name=repo_name, entry=entry, uid=uid,
1733 1734 return_raw_data=True)
1734 1735
1735 1736 for match_obj in pattern.finditer(text_string):
1736 1737 issues_data.append(data_func(match_obj))
1737 1738
1738 1739 url_func = partial(
1739 1740 _process_url_func, repo_name=repo_name, entry=entry, uid=uid,
1740 1741 link_format=link_format)
1741 1742
1742 1743 newtext = pattern.sub(url_func, newtext)
1743 1744 log.debug('processed prefix:uid `%s`' % (uid,))
1744 1745
1745 1746 return newtext, issues_data
1746 1747
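# --- Editor's sketch (not part of the original diff) ------------------------
# A standalone illustration of what process_patterns() does per configured
# entry: a regex finds issue references and a URL template is filled in via
# string.Template. The entry dict below is a made-up example configuration,
# not a real RhodeCode setting.
def _issue_link_sketch(text):
    import re
    import string
    entry = {
        'pat': r'#(?P<issue_id>\d+)',
        'url': 'https://issues.example.com/browse/${issue_id}',
        'pref': '#',
    }
    pattern = re.compile(entry['pat'])

    def _link(match_obj):
        named_vars = match_obj.groupdict()
        url = string.Template(entry['url']).safe_substitute(**named_vars)
        return '<a class="issue-tracker-link" href="{0}">{1}{2}</a>'.format(
            url, entry['pref'], named_vars['issue_id'])

    return pattern.sub(_link, text)

# _issue_link_sketch('fixes #123')
# -> 'fixes <a class="issue-tracker-link"
#     href="https://issues.example.com/browse/123">#123</a>'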
1747 1748
1748 1749 def urlify_commit_message(commit_text, repository=None,
1749 1750 active_pattern_entries=None):
1750 1751 """
1751 1752 Parses the given text message and makes proper links.
1752 1753 Issues are linked to the configured issue tracker, and commit ids become commit links.
1753 1754
1754 1755 :param commit_text:
1755 1756 :param repository:
1756 1757 """
1757 1758 def escaper(string):
1758 1759 return string.replace('<', '&lt;').replace('>', '&gt;')
1759 1760
1760 1761 newtext = escaper(commit_text)
1761 1762
1762 1763 # extract http/https links and make them real urls
1763 1764 newtext = urlify_text(newtext, safe=False)
1764 1765
1765 1766 # urlify commits - extract commit ids and make link out of them, if we have
1766 1767 # the scope of repository present.
1767 1768 if repository:
1768 1769 newtext = urlify_commits(newtext, repository)
1769 1770
1770 1771 # process issue tracker patterns
1771 1772 newtext, issues = process_patterns(newtext, repository or '',
1772 1773 active_entries=active_pattern_entries)
1773 1774
1774 1775 return literal(newtext)
1775 1776
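# --- Editor's sketch (not part of the original diff) ------------------------
# The ordering in urlify_commit_message() is the important part: angle
# brackets are escaped first, and only then are URLs / commit ids / issue
# references linkified on the already-escaped text, so any markup in the
# result comes from the helpers themselves. A condensed, standalone version
# of the first two steps:
def _commit_message_sketch(message):
    import re
    escaped = message.replace('<', '&lt;').replace('>', '&gt;')
    url_pat = re.compile(r'(https?://\S+)')
    return url_pat.sub(r'<a href="\1">\1</a>', escaped)

# _commit_message_sketch('<b>see</b> http://example.com')
# -> '&lt;b&gt;see&lt;/b&gt; <a href="http://example.com">http://example.com</a>'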
1776 1777
1777 1778 def render_binary(repo_name, file_obj):
1778 1779 """
1779 1780 Choose how to render a binary file
1780 1781 """
1782
1781 1783 filename = file_obj.name
1782 1784
1783 1785 # images
1784 1786 for ext in ['*.png', '*.jpg', '*.ico', '*.gif']:
1785 1787 if fnmatch.fnmatch(filename, pat=ext):
1786 alt = filename
1788 alt = escape(filename)
1787 1789 src = route_path(
1788 1790 'repo_file_raw', repo_name=repo_name,
1789 commit_id=file_obj.commit.raw_id, f_path=file_obj.path)
1790 return literal('<img class="rendered-binary" alt="{}" src="{}">'.format(alt, src))
1791 commit_id=file_obj.commit.raw_id,
1792 f_path=file_obj.path)
1793 return literal(
1794 '<img class="rendered-binary" alt="{}" src="{}">'.format(alt, src))
1791 1795
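# --- Editor's sketch (not part of the original diff) ------------------------
# Why the change above matters: a file named e.g. '"><script>x</script>.png'
# would otherwise be interpolated verbatim into the alt attribute. A minimal
# standalone version of the escaping (webhelpers' escape() behaves similarly):
def _escaped_img_tag_sketch(filename, src):
    alt = (filename.replace('&', '&amp;').replace('<', '&lt;')
                   .replace('>', '&gt;').replace('"', '&quot;'))
    return '<img class="rendered-binary" alt="{0}" src="{1}">'.format(alt, src)

# _escaped_img_tag_sketch('"><script>x</script>.png', '/raw/logo.png')
# -> '<img class="rendered-binary"
#     alt="&quot;&gt;&lt;script&gt;x&lt;/script&gt;.png" src="/raw/logo.png">'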
1792 1796
1793 1797 def renderer_from_filename(filename, exclude=None):
1794 1798 """
1795 1799 Choose a renderer based on filename; this works only for text-based files.
1796 1800 """
1797 1801
1798 1802 # ipython
1799 1803 for ext in ['*.ipynb']:
1800 1804 if fnmatch.fnmatch(filename, pat=ext):
1801 1805 return 'jupyter'
1802 1806
1803 1807 is_markup = MarkupRenderer.renderer_from_filename(filename, exclude=exclude)
1804 1808 if is_markup:
1805 1809 return is_markup
1806 1810 return None
1807 1811
1808 1812
1809 1813 def render(source, renderer='rst', mentions=False, relative_urls=None,
1810 1814 repo_name=None):
1811 1815
1812 1816 def maybe_convert_relative_links(html_source):
1813 1817 if relative_urls:
1814 1818 return relative_links(html_source, relative_urls)
1815 1819 return html_source
1816 1820
1817 1821 if renderer == 'plain':
1818 1822 return literal(
1819 1823 MarkupRenderer.plain(source, leading_newline=False))
1820 1824
1821 1825 elif renderer == 'rst':
1822 1826 if repo_name:
1823 1827 # process patterns on comments if we pass in repo name
1824 1828 source, issues = process_patterns(
1825 1829 source, repo_name, link_format='rst')
1826 1830
1827 1831 return literal(
1828 1832 '<div class="rst-block">%s</div>' %
1829 1833 maybe_convert_relative_links(
1830 1834 MarkupRenderer.rst(source, mentions=mentions)))
1831 1835
1832 1836 elif renderer == 'markdown':
1833 1837 if repo_name:
1834 1838 # process patterns on comments if we pass in repo name
1835 1839 source, issues = process_patterns(
1836 1840 source, repo_name, link_format='markdown')
1837 1841
1838 1842 return literal(
1839 1843 '<div class="markdown-block">%s</div>' %
1840 1844 maybe_convert_relative_links(
1841 1845 MarkupRenderer.markdown(source, flavored=True,
1842 1846 mentions=mentions)))
1843 1847
1844 1848 elif renderer == 'jupyter':
1845 1849 return literal(
1846 1850 '<div class="ipynb">%s</div>' %
1847 1851 maybe_convert_relative_links(
1848 1852 MarkupRenderer.jupyter(source)))
1849 1853
1850 1854 # None means just show the file-source
1851 1855 return None
1852 1856
1853 1857
1854 1858 def commit_status(repo, commit_id):
1855 1859 return ChangesetStatusModel().get_status(repo, commit_id)
1856 1860
1857 1861
1858 1862 def commit_status_lbl(commit_status):
1859 1863 return dict(ChangesetStatus.STATUSES).get(commit_status)
1860 1864
1861 1865
1862 1866 def commit_time(repo_name, commit_id):
1863 1867 repo = Repository.get_by_repo_name(repo_name)
1864 1868 commit = repo.get_commit(commit_id=commit_id)
1865 1869 return commit.date
1866 1870
1867 1871
1868 1872 def get_permission_name(key):
1869 1873 return dict(Permission.PERMS).get(key)
1870 1874
1871 1875
1872 1876 def journal_filter_help(request):
1873 1877 _ = request.translate
1874 1878 from rhodecode.lib.audit_logger import ACTIONS
1875 1879 actions = '\n'.join(textwrap.wrap(', '.join(sorted(ACTIONS.keys())), 80))
1876 1880
1877 1881 return _(
1878 1882 'Example filter terms:\n' +
1879 1883 ' repository:vcs\n' +
1880 1884 ' username:marcin\n' +
1881 1885 ' username:(NOT marcin)\n' +
1882 1886 ' action:*push*\n' +
1883 1887 ' ip:127.0.0.1\n' +
1884 1888 ' date:20120101\n' +
1885 1889 ' date:[20120101100000 TO 20120102]\n' +
1886 1890 '\n' +
1887 1891 'Actions: {actions}\n' +
1888 1892 '\n' +
1889 1893 'Generate wildcards using \'*\' character:\n' +
1890 1894 ' "repository:vcs*" - search everything starting with \'vcs\'\n' +
1891 1895 ' "repository:*vcs*" - search for repository containing \'vcs\'\n' +
1892 1896 '\n' +
1893 1897 'Optional AND / OR operators in queries\n' +
1894 1898 ' "repository:vcs OR repository:test"\n' +
1895 1899 ' "username:test AND repository:test*"\n'
1896 1900 ).format(actions=actions)
1897 1901
1898 1902
1899 1903 def search_filter_help(searcher, request):
1900 1904 _ = request.translate
1901 1905
1902 1906 terms = ''
1903 1907 return _(
1904 1908 'Example filter terms for `{searcher}` search:\n' +
1905 1909 '{terms}\n' +
1906 1910 'Generate wildcards using \'*\' character:\n' +
1907 1911 ' "repo_name:vcs*" - search everything starting with \'vcs\'\n' +
1908 1912 ' "repo_name:*vcs*" - search for repository containing \'vcs\'\n' +
1909 1913 '\n' +
1910 1914 'Optional AND / OR operators in queries\n' +
1911 1915 ' "repo_name:vcs OR repo_name:test"\n' +
1912 1916 ' "owner:test AND repo_name:test*"\n' +
1913 1917 'More: {search_doc}'
1914 1918 ).format(searcher=searcher.name,
1915 1919 terms=terms, search_doc=searcher.query_lang_doc)
1916 1920
1917 1921
1918 1922 def not_mapped_error(repo_name):
1919 1923 from rhodecode.translation import _
1920 1924 flash(_('%s repository is not mapped to db; perhaps'
1921 1925 ' it was created or renamed from the filesystem.'
1922 1926 ' Please run the application again'
1923 1927 ' in order to rescan repositories') % repo_name, category='error')
1924 1928
1925 1929
1926 1930 def ip_range(ip_addr):
1927 1931 from rhodecode.model.db import UserIpMap
1928 1932 s, e = UserIpMap._get_ip_range(ip_addr)
1929 1933 return '%s - %s' % (s, e)
1930 1934
1931 1935
1932 1936 def form(url, method='post', needs_csrf_token=True, **attrs):
1933 1937 """Wrapper around webhelpers.tags.form to prevent CSRF attacks."""
1934 1938 if method.lower() != 'get' and needs_csrf_token:
1935 1939 raise Exception(
1936 1940 'Forms to POST/PUT/DELETE endpoints should have (in general) a ' +
1937 1941 'CSRF token. If the endpoint does not require such token you can ' +
1938 1942 'explicitly set the parameter needs_csrf_token to false.')
1939 1943
1940 1944 return wh_form(url, method=method, **attrs)
1941 1945
1942 1946
1943 1947 def secure_form(form_url, method="POST", multipart=False, **attrs):
1944 1948 """Start a form tag that points the action to an url. This
1945 1949 form tag will also include the hidden field containing
1946 1950 the auth token.
1947 1951
1948 1952 The url options should be given either as a string, or as a
1949 1953 ``url()`` function. The method for the form defaults to POST.
1950 1954
1951 1955 Options:
1952 1956
1953 1957 ``multipart``
1954 1958 If set to True, the enctype is set to "multipart/form-data".
1955 1959 ``method``
1956 1960 The method to use when submitting the form, usually either
1957 1961 "GET" or "POST". If "PUT", "DELETE", or another verb is used, a
1958 1962 hidden input with name _method is added to simulate the verb
1959 1963 over POST.
1960 1964
1961 1965 """
1962 1966 from webhelpers.pylonslib.secure_form import insecure_form
1963 1967
1964 1968 if 'request' in attrs:
1965 1969 session = attrs['request'].session
1966 1970 del attrs['request']
1967 1971 else:
1968 1972 raise ValueError(
1969 1973 'Calling this form requires request= to be passed as argument')
1970 1974
1971 1975 form = insecure_form(form_url, method, multipart, **attrs)
1972 1976 token = literal(
1973 1977 '<input type="hidden" id="{}" name="{}" value="{}">'.format(
1974 1978 csrf_token_key, csrf_token_key, get_csrf_token(session)))
1975 1979
1976 1980 return literal("%s\n%s" % (form, token))
1977 1981
1978 1982
1979 1983 def dropdownmenu(name, selected, options, enable_filter=False, **attrs):
1980 1984 select_html = select(name, selected, options, **attrs)
1981 1985 select2 = """
1982 1986 <script>
1983 1987 $(document).ready(function() {
1984 1988 $('#%s').select2({
1985 1989 containerCssClass: 'drop-menu',
1986 1990 dropdownCssClass: 'drop-menu-dropdown',
1987 1991 dropdownAutoWidth: true%s
1988 1992 });
1989 1993 });
1990 1994 </script>
1991 1995 """
1992 1996 filter_option = """,
1993 1997 minimumResultsForSearch: -1
1994 1998 """
1995 1999 input_id = attrs.get('id') or name
1996 2000 filter_enabled = "" if enable_filter else filter_option
1997 2001 select_script = literal(select2 % (input_id, filter_enabled))
1998 2002
1999 2003 return literal(select_html+select_script)
2000 2004
2001 2005
2002 2006 def get_visual_attr(tmpl_context_var, attr_name):
2003 2007 """
2004 2008 A safe way to get an attribute from the `visual` variable of the template context
2005 2009
2006 2010 :param tmpl_context_var: instance of tmpl_context, usually present as `c`
2007 2011 :param attr_name: name of the attribute we fetch from the c.visual
2008 2012 """
2009 2013 visual = getattr(tmpl_context_var, 'visual', None)
2010 2014 if not visual:
2011 2015 return
2012 2016 else:
2013 2017 return getattr(visual, attr_name, None)
2014 2018
2015 2019
2016 2020 def get_last_path_part(file_node):
2017 2021 if not file_node.path:
2018 2022 return u''
2019 2023
2020 2024 path = safe_unicode(file_node.path.split('/')[-1])
2021 2025 return u'../' + path
2022 2026
2023 2027
2024 2028 def route_url(*args, **kwargs):
2025 2029 """
2026 2030 Wrapper around Pyramid's `route_url` (fully qualified URL) function.
2027 2031 """
2028 2032 req = get_current_request()
2029 2033 return req.route_url(*args, **kwargs)
2030 2034
2031 2035
2032 2036 def route_path(*args, **kwargs):
2033 2037 """
2034 2038 Wrapper around Pyramid's `route_path` function.
2035 2039 """
2036 2040 req = get_current_request()
2037 2041 return req.route_path(*args, **kwargs)
2038 2042
2039 2043
2040 2044 def route_path_or_none(*args, **kwargs):
2041 2045 try:
2042 2046 return route_path(*args, **kwargs)
2043 2047 except KeyError:
2044 2048 return None
2045 2049
2046 2050
2047 2051 def current_route_path(request, **kw):
2048 2052 new_args = request.GET.mixed()
2049 2053 new_args.update(kw)
2050 2054 return request.current_route_path(_query=new_args)
2051 2055
2052 2056
2053 2057 def api_call_example(method, args):
2054 2058 """
2055 2059 Generates an API call example via curl.
2056 2060 """
2057 2061 args_json = json.dumps(OrderedDict([
2058 2062 ('id', 1),
2059 2063 ('auth_token', 'SECRET'),
2060 2064 ('method', method),
2061 2065 ('args', args)
2062 2066 ]))
2063 2067 return literal(
2064 2068 "curl {api_url} -X POST -H 'content-type:text/plain' --data-binary '{data}'"
2065 2069 "<br/><br/>SECRET can be found in <a href=\"{token_url}\">auth-tokens</a> page, "
2066 2070 "and needs to be of `api calls` role."
2067 2071 .format(
2068 2072 api_url=route_url('apiv2'),
2069 2073 token_url=route_url('my_account_auth_tokens'),
2070 2074 data=args_json))
2071 2075
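# --- Editor's sketch (not part of the original diff) ------------------------
# The payload shape documented by api_call_example(), assembled standalone.
# The endpoint URL, method name and arguments below are placeholders; the
# helper resolves the real endpoint via route_url('apiv2').
def _api_payload_sketch():
    import json
    from collections import OrderedDict
    payload = json.dumps(OrderedDict([
        ('id', 1),
        ('auth_token', 'SECRET'),
        ('method', 'get_repo'),            # example method name
        ('args', {'repoid': 'my-repo'}),   # example arguments
    ]))
    return ("curl https://code.example.com/_admin/api -X POST "
            "-H 'content-type:text/plain' --data-binary '{0}'".format(payload))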
2072 2076
2073 2077 def notification_description(notification, request):
2074 2078 """
2075 2079 Generate a human-readable notification description based on the notification type
2076 2080 """
2077 2081 from rhodecode.model.notification import NotificationModel
2078 2082 return NotificationModel().make_description(
2079 2083 notification, translate=request.translate)
2080 2084
2081 2085
2082 2086 def go_import_header(request, db_repo=None):
2083 2087 """
2084 2088 Creates a go-import meta header used by Go's remote import path mechanism
2085 2089 """
2086 2090
2087 2091 if not db_repo:
2088 2092 return
2089 2093 if 'go-get' not in request.GET:
2090 2094 return
2091 2095
2092 2096 clone_url = db_repo.clone_url()
2093 2097 prefix = re.split(r'^https?:\/\/', clone_url)[-1]
2094 2098 # we have a repo and the go-get flag, so emit the go-import meta tag
2095 2099 return literal('<meta name="go-import" content="{} {} {}">'.format(
2096 2100 prefix, db_repo.repo_type, clone_url))
2097 2101
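# --- Editor's sketch (not part of the original diff) ------------------------
# The tag shape go_import_header() emits, with made-up values: `go get` reads
# this meta tag to map an import path prefix to a repository clone URL.
def _go_import_meta_sketch(prefix, repo_type, clone_url):
    return '<meta name="go-import" content="{0} {1} {2}">'.format(
        prefix, repo_type, clone_url)

# _go_import_meta_sketch('code.example.com/group/repo', 'git',
#                        'https://code.example.com/group/repo')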
2098 2102
2099 2103 def reviewer_as_json(*args, **kwargs):
2100 2104 from rhodecode.apps.repository.utils import reviewer_as_json as _reviewer_as_json
2101 2105 return _reviewer_as_json(*args, **kwargs)