helpers: add timezone info into tooltip....
marcink
r1457:8e79c99b default
@@ -1,2004 +1,2004 @@
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2010-2017 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21 """
22 22 Helper functions
23 23
24 24 Consists of functions typically used within templates, but also
25 25 available to Controllers. This module is available to both as 'h'.
26 26 """
27 27
28 28 import random
29 29 import hashlib
30 30 import StringIO
31 31 import urllib
32 32 import math
33 33 import logging
34 34 import re
35 35 import urlparse
36 36 import time
37 37 import string
38 38 import hashlib
39 39 import pygments
40 40 import itertools
41 41
42 42 from datetime import datetime
43 43 from functools import partial
44 44 from pygments.formatters.html import HtmlFormatter
45 45 from pygments import highlight as code_highlight
46 46 from pygments.lexers import (
47 47 get_lexer_by_name, get_lexer_for_filename, get_lexer_for_mimetype)
48 48 from pylons import url as pylons_url
49 49 from pylons.i18n.translation import _, ungettext
50 50 from pyramid.threadlocal import get_current_request
51 51
52 52 from webhelpers.html import literal, HTML, escape
53 53 from webhelpers.html.tools import *
54 54 from webhelpers.html.builder import make_tag
55 55 from webhelpers.html.tags import auto_discovery_link, checkbox, css_classes, \
56 56 end_form, file, form as wh_form, hidden, image, javascript_link, link_to, \
57 57 link_to_if, link_to_unless, ol, required_legend, select, stylesheet_link, \
58 58 submit, text, password, textarea, title, ul, xml_declaration, radio
59 59 from webhelpers.html.tools import auto_link, button_to, highlight, \
60 60 js_obfuscate, mail_to, strip_links, strip_tags, tag_re
61 61 from webhelpers.pylonslib import Flash as _Flash
62 62 from webhelpers.text import chop_at, collapse, convert_accented_entities, \
63 63 convert_misc_entities, lchop, plural, rchop, remove_formatting, \
64 64 replace_whitespace, urlify, truncate, wrap_paragraphs
65 65 from webhelpers.date import time_ago_in_words
66 66 from webhelpers.paginate import Page as _Page
67 67 from webhelpers.html.tags import _set_input_attrs, _set_id_attr, \
68 68 convert_boolean_attrs, NotGiven, _make_safe_id_component
69 69 from webhelpers2.number import format_byte_size
70 70
71 71 from rhodecode.lib.action_parser import action_parser
72 72 from rhodecode.lib.ext_json import json
73 73 from rhodecode.lib.utils import repo_name_slug, get_custom_lexer
74 74 from rhodecode.lib.utils2 import str2bool, safe_unicode, safe_str, \
75 75 get_commit_safe, datetime_to_time, time_to_datetime, time_to_utcdatetime, \
76 76 AttributeDict, safe_int, md5, md5_safe
77 77 from rhodecode.lib.markup_renderer import MarkupRenderer
78 78 from rhodecode.lib.vcs.exceptions import CommitDoesNotExistError
79 79 from rhodecode.lib.vcs.backends.base import BaseChangeset, EmptyCommit
80 80 from rhodecode.config.conf import DATE_FORMAT, DATETIME_FORMAT
81 81 from rhodecode.model.changeset_status import ChangesetStatusModel
82 82 from rhodecode.model.db import Permission, User, Repository
83 83 from rhodecode.model.repo_group import RepoGroupModel
84 84 from rhodecode.model.settings import IssueTrackerSettingsModel
85 85
86 86 log = logging.getLogger(__name__)
87 87
88 88
89 89 DEFAULT_USER = User.DEFAULT_USER
90 90 DEFAULT_USER_EMAIL = User.DEFAULT_USER_EMAIL
91 91
92 92
93 93 def url(*args, **kw):
94 94 return pylons_url(*args, **kw)
95 95
96 96
97 97 def pylons_url_current(*args, **kw):
98 98 """
99 99 This function overrides pylons.url.current(), which returns the current
100 100 path, so that it will also work from a pyramid-only context. This
101 101 should be removed once the port to pyramid is complete.
102 102 """
103 103 if not args and not kw:
104 104 request = get_current_request()
105 105 return request.path
106 106 return pylons_url.current(*args, **kw)
107 107
108 108 url.current = pylons_url_current
109 109
110 110
111 111 def url_replace(**qargs):
112 112 """ Returns the current request url while replacing query string args """
113 113
114 114 request = get_current_request()
115 115 new_args = request.GET.mixed()
116 116 new_args.update(qargs)
117 117 return url('', **new_args)
118 118
119 119
120 120 def asset(path, ver=None):
121 121 """
122 122 Helper to generate a static asset file path for rhodecode assets
123 123
124 124 eg. h.asset('images/image.png', ver='3923')
125 125
126 126 :param path: path of asset
127 127 :param ver: optional version query param to append as ?ver=
128 128 """
129 129 request = get_current_request()
130 130 query = {}
131 131 if ver:
132 132 query = {'ver': ver}
133 133 return request.static_path(
134 134 'rhodecode:public/{}'.format(path), _query=query)
135 135
136 136
137 137 default_html_escape_table = {
138 138 ord('&'): u'&amp;',
139 139 ord('<'): u'&lt;',
140 140 ord('>'): u'&gt;',
141 141 ord('"'): u'&quot;',
142 142 ord("'"): u'&#39;',
143 143 }
144 144
145 145
146 146 def html_escape(text, html_escape_table=default_html_escape_table):
147 147 """Produce entities within text."""
148 148 return text.translate(html_escape_table)
149 149
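# Illustrative usage sketch (not part of the original module); note that the
# dict-based translate() used above requires a unicode string in Python 2:
#
#   >>> html_escape(u'<b>"quotes" & ampersands</b>')
#   u'&lt;b&gt;&quot;quotes&quot; &amp; ampersands&lt;/b&gt;'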
150 150
151 151 def chop_at_smart(s, sub, inclusive=False, suffix_if_chopped=None):
152 152 """
153 153 Truncate string ``s`` at the first occurrence of ``sub``.
154 154
155 155 If ``inclusive`` is true, truncate just after ``sub`` rather than at it.
156 156 """
157 157 suffix_if_chopped = suffix_if_chopped or ''
158 158 pos = s.find(sub)
159 159 if pos == -1:
160 160 return s
161 161
162 162 if inclusive:
163 163 pos += len(sub)
164 164
165 165 chopped = s[:pos]
166 166 left = s[pos:].strip()
167 167
168 168 if left and suffix_if_chopped:
169 169 chopped += suffix_if_chopped
170 170
171 171 return chopped
172 172
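# Illustrative usage sketch (assumed inputs, not part of the original module):
#
#   >>> chop_at_smart('this is a string', 'is')
#   'th'
#   >>> chop_at_smart('this is a string', 'is', inclusive=True)
#   'this'
#   >>> chop_at_smart('this is a string', 'is', suffix_if_chopped='...')
#   'th...'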
173 173
174 174 def shorter(text, size=20):
175 175 postfix = '...'
176 176 if len(text) > size:
177 177 return text[:size - len(postfix)] + postfix
178 178 return text
179 179
180 180
181 181 def _reset(name, value=None, id=NotGiven, type="reset", **attrs):
182 182 """
183 183 Reset button
184 184 """
185 185 _set_input_attrs(attrs, type, name, value)
186 186 _set_id_attr(attrs, id, name)
187 187 convert_boolean_attrs(attrs, ["disabled"])
188 188 return HTML.input(**attrs)
189 189
190 190 reset = _reset
191 191 safeid = _make_safe_id_component
192 192
193 193
194 194 def branding(name, length=40):
195 195 return truncate(name, length, indicator="")
196 196
197 197
198 198 def FID(raw_id, path):
199 199 """
200 200 Creates a unique ID for a filenode based on the hash of its path and commit;
201 201 it's safe to use in URLs.
202 202
203 203 :param raw_id:
204 204 :param path:
205 205 """
206 206
207 207 return 'c-%s-%s' % (short_id(raw_id), md5_safe(path)[:12])
208 208
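# Illustrative sketch (hypothetical values, not part of the original module):
# FID combines the shortened commit id with the first 12 characters of the
# md5 of the path, so a call like
#
#   FID('9c390eb52cd6', 'docs/index.rst')
#
# yields an id of the form 'c-9c390eb52cd6-<first 12 hex chars of md5(path)>'.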
209 209
210 210 class _GetError(object):
211 211 """Get error from form_errors, and represent it as span wrapped error
212 212 message
213 213
214 214 :param field_name: field to fetch errors for
215 215 :param form_errors: form errors dict
216 216 """
217 217
218 218 def __call__(self, field_name, form_errors):
219 219 tmpl = """<span class="error_msg">%s</span>"""
220 220 if form_errors and field_name in form_errors:
221 221 return literal(tmpl % form_errors.get(field_name))
222 222
223 223 get_error = _GetError()
224 224
225 225
226 226 class _ToolTip(object):
227 227
228 228 def __call__(self, tooltip_title, trim_at=50):
229 229 """
230 230 Special function to wrap our text into nicely formatted,
231 231 auto-wrapped text
232 232
233 233 :param tooltip_title:
234 234 """
235 235 tooltip_title = escape(tooltip_title)
236 236 tooltip_title = tooltip_title.replace('<', '&lt;').replace('>', '&gt;')
237 237 return tooltip_title
238 238 tooltip = _ToolTip()
239 239
240 240
241 241 def files_breadcrumbs(repo_name, commit_id, file_path):
242 242 if isinstance(file_path, str):
243 243 file_path = safe_unicode(file_path)
244 244
245 245 # TODO: johbo: Is this always a url like path, or is this operating
246 246 # system dependent?
247 247 path_segments = file_path.split('/')
248 248
249 249 repo_name_html = escape(repo_name)
250 250 if len(path_segments) == 1 and path_segments[0] == '':
251 251 url_segments = [repo_name_html]
252 252 else:
253 253 url_segments = [
254 254 link_to(
255 255 repo_name_html,
256 256 url('files_home',
257 257 repo_name=repo_name,
258 258 revision=commit_id,
259 259 f_path=''),
260 260 class_='pjax-link')]
261 261
262 262 last_cnt = len(path_segments) - 1
263 263 for cnt, segment in enumerate(path_segments):
264 264 if not segment:
265 265 continue
266 266 segment_html = escape(segment)
267 267
268 268 if cnt != last_cnt:
269 269 url_segments.append(
270 270 link_to(
271 271 segment_html,
272 272 url('files_home',
273 273 repo_name=repo_name,
274 274 revision=commit_id,
275 275 f_path='/'.join(path_segments[:cnt + 1])),
276 276 class_='pjax-link'))
277 277 else:
278 278 url_segments.append(segment_html)
279 279
280 280 return literal('/'.join(url_segments))
281 281
282 282
283 283 class CodeHtmlFormatter(HtmlFormatter):
284 284 """
285 285 My code Html Formatter for source codes
286 286 """
287 287
288 288 def wrap(self, source, outfile):
289 289 return self._wrap_div(self._wrap_pre(self._wrap_code(source)))
290 290
291 291 def _wrap_code(self, source):
292 292 for cnt, it in enumerate(source):
293 293 i, t = it
294 294 t = '<div id="L%s">%s</div>' % (cnt + 1, t)
295 295 yield i, t
296 296
297 297 def _wrap_tablelinenos(self, inner):
298 298 dummyoutfile = StringIO.StringIO()
299 299 lncount = 0
300 300 for t, line in inner:
301 301 if t:
302 302 lncount += 1
303 303 dummyoutfile.write(line)
304 304
305 305 fl = self.linenostart
306 306 mw = len(str(lncount + fl - 1))
307 307 sp = self.linenospecial
308 308 st = self.linenostep
309 309 la = self.lineanchors
310 310 aln = self.anchorlinenos
311 311 nocls = self.noclasses
312 312 if sp:
313 313 lines = []
314 314
315 315 for i in range(fl, fl + lncount):
316 316 if i % st == 0:
317 317 if i % sp == 0:
318 318 if aln:
319 319 lines.append('<a href="#%s%d" class="special">%*d</a>' %
320 320 (la, i, mw, i))
321 321 else:
322 322 lines.append('<span class="special">%*d</span>' % (mw, i))
323 323 else:
324 324 if aln:
325 325 lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
326 326 else:
327 327 lines.append('%*d' % (mw, i))
328 328 else:
329 329 lines.append('')
330 330 ls = '\n'.join(lines)
331 331 else:
332 332 lines = []
333 333 for i in range(fl, fl + lncount):
334 334 if i % st == 0:
335 335 if aln:
336 336 lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
337 337 else:
338 338 lines.append('%*d' % (mw, i))
339 339 else:
340 340 lines.append('')
341 341 ls = '\n'.join(lines)
342 342
343 343 # in case you wonder about the seemingly redundant <div> here: since the
344 344 # content in the other cell also is wrapped in a div, some browsers in
345 345 # some configurations seem to mess up the formatting...
346 346 if nocls:
347 347 yield 0, ('<table class="%stable">' % self.cssclass +
348 348 '<tr><td><div class="linenodiv" '
349 349 'style="background-color: #f0f0f0; padding-right: 10px">'
350 350 '<pre style="line-height: 125%">' +
351 351 ls + '</pre></div></td><td id="hlcode" class="code">')
352 352 else:
353 353 yield 0, ('<table class="%stable">' % self.cssclass +
354 354 '<tr><td class="linenos"><div class="linenodiv"><pre>' +
355 355 ls + '</pre></div></td><td id="hlcode" class="code">')
356 356 yield 0, dummyoutfile.getvalue()
357 357 yield 0, '</td></tr></table>'
358 358
359 359
360 360 class SearchContentCodeHtmlFormatter(CodeHtmlFormatter):
361 361 def __init__(self, **kw):
362 362 # only show these line numbers if set
363 363 self.only_lines = kw.pop('only_line_numbers', [])
364 364 self.query_terms = kw.pop('query_terms', [])
365 365 self.max_lines = kw.pop('max_lines', 5)
366 366 self.line_context = kw.pop('line_context', 3)
367 367 self.url = kw.pop('url', None)
368 368
369 369 super(CodeHtmlFormatter, self).__init__(**kw)
370 370
371 371 def _wrap_code(self, source):
372 372 for cnt, it in enumerate(source):
373 373 i, t = it
374 374 t = '<pre>%s</pre>' % t
375 375 yield i, t
376 376
377 377 def _wrap_tablelinenos(self, inner):
378 378 yield 0, '<table class="code-highlight %stable">' % self.cssclass
379 379
380 380 last_shown_line_number = 0
381 381 current_line_number = 1
382 382
383 383 for t, line in inner:
384 384 if not t:
385 385 yield t, line
386 386 continue
387 387
388 388 if current_line_number in self.only_lines:
389 389 if last_shown_line_number + 1 != current_line_number:
390 390 yield 0, '<tr>'
391 391 yield 0, '<td class="line">...</td>'
392 392 yield 0, '<td id="hlcode" class="code"></td>'
393 393 yield 0, '</tr>'
394 394
395 395 yield 0, '<tr>'
396 396 if self.url:
397 397 yield 0, '<td class="line"><a href="%s#L%i">%i</a></td>' % (
398 398 self.url, current_line_number, current_line_number)
399 399 else:
400 400 yield 0, '<td class="line"><a href="">%i</a></td>' % (
401 401 current_line_number)
402 402 yield 0, '<td id="hlcode" class="code">' + line + '</td>'
403 403 yield 0, '</tr>'
404 404
405 405 last_shown_line_number = current_line_number
406 406
407 407 current_line_number += 1
408 408
409 409
410 410 yield 0, '</table>'
411 411
412 412
413 413 def extract_phrases(text_query):
414 414 """
415 415 Extracts phrases from a search term string, making sure phrases
416 416 contained in double quotes are kept together, and discarding empty
417 417 or whitespace-only values, eg.
418 418
419 419 'some text "a phrase" more' => ['some', 'text', 'a phrase', 'more']
420 420
421 421 """
422 422
423 423 in_phrase = False
424 424 buf = ''
425 425 phrases = []
426 426 for char in text_query:
427 427 if in_phrase:
428 428 if char == '"': # end phrase
429 429 phrases.append(buf)
430 430 buf = ''
431 431 in_phrase = False
432 432 continue
433 433 else:
434 434 buf += char
435 435 continue
436 436 else:
437 437 if char == '"': # start phrase
438 438 in_phrase = True
439 439 phrases.append(buf)
440 440 buf = ''
441 441 continue
442 442 elif char == ' ':
443 443 phrases.append(buf)
444 444 buf = ''
445 445 continue
446 446 else:
447 447 buf += char
448 448
449 449 phrases.append(buf)
450 450 phrases = [phrase.strip() for phrase in phrases if phrase.strip()]
451 451 return phrases
452 452
453 453
454 454 def get_matching_offsets(text, phrases):
455 455 """
456 456 Returns a list of string offsets in `text` that the list of `phrases` match
457 457
458 458 >>> get_matching_offsets('some text here', ['some', 'here'])
459 459 [(0, 4), (10, 14)]
460 460
461 461 """
462 462 offsets = []
463 463 for phrase in phrases:
464 464 for match in re.finditer(phrase, text):
465 465 offsets.append((match.start(), match.end()))
466 466
467 467 return offsets
468 468
469 469
470 470 def normalize_text_for_matching(x):
471 471 """
472 472 Replaces all non-alphanumeric characters with spaces and lower-cases the
473 473 string, useful for comparing two text strings without punctuation
474 474 """
475 475 return re.sub(r'[^\w]', ' ', x.lower())
476 476
477 477
478 478 def get_matching_line_offsets(lines, terms):
479 479 """ Return a set of `lines` indices (starting from 1) matching a
480 480 text search query, along with `context` lines above/below matching lines
481 481
482 482 :param lines: list of strings representing lines
483 483 :param terms: search term string to match in lines eg. 'some text'
484 484 :param context: number of lines above/below a matching line to add to result
485 485 :param max_lines: cut off for lines of interest
486 486 eg.
487 487
488 488 text = '''
489 489 words words words
490 490 words words words
491 491 some text some
492 492 words words words
493 493 words words words
494 494 text here what
495 495 '''
496 496 get_matching_line_offsets(text, 'text', context=1)
497 497 {3: [(5, 9)], 6: [(0, 4)]}
498 498
499 499 """
500 500 matching_lines = {}
501 501 phrases = [normalize_text_for_matching(phrase)
502 502 for phrase in extract_phrases(terms)]
503 503
504 504 for line_index, line in enumerate(lines, start=1):
505 505 match_offsets = get_matching_offsets(
506 506 normalize_text_for_matching(line), phrases)
507 507 if match_offsets:
508 508 matching_lines[line_index] = match_offsets
509 509
510 510 return matching_lines
511 511
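# Illustrative usage sketch (assumed inputs, not part of the original module):
#
#   >>> get_matching_line_offsets(
#   ...     ['words words', 'some text some', 'words'], 'text')
#   {2: [(5, 9)]}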
512 512
513 513 def hsv_to_rgb(h, s, v):
514 514 """ Convert hsv color values to rgb """
515 515
516 516 if s == 0.0:
517 517 return v, v, v
518 518 i = int(h * 6.0) # XXX assume int() truncates!
519 519 f = (h * 6.0) - i
520 520 p = v * (1.0 - s)
521 521 q = v * (1.0 - s * f)
522 522 t = v * (1.0 - s * (1.0 - f))
523 523 i = i % 6
524 524 if i == 0:
525 525 return v, t, p
526 526 if i == 1:
527 527 return q, v, p
528 528 if i == 2:
529 529 return p, v, t
530 530 if i == 3:
531 531 return p, q, v
532 532 if i == 4:
533 533 return t, p, v
534 534 if i == 5:
535 535 return v, p, q
536 536
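# Illustrative worked example (not part of the original module): with full
# saturation and value, a hue of 0.0 maps to pure red:
#
#   >>> hsv_to_rgb(0.0, 1.0, 1.0)
#   (1.0, 0.0, 0.0)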
537 537
538 538 def unique_color_generator(n=10000, saturation=0.10, lightness=0.95):
539 539 """
540 540 Generator for getting n evenly distributed colors using
541 541 hsv color space and the golden ratio. It always returns the same order of colors
542 542
543 543 :param n: number of colors to generate
544 544 :param saturation: saturation of returned colors
545 545 :param lightness: lightness of returned colors
546 546 :returns: RGB tuple
547 547 """
548 548
549 549 golden_ratio = 0.618033988749895
550 550 h = 0.22717784590367374
551 551
552 552 for _ in xrange(n):
553 553 h += golden_ratio
554 554 h %= 1
555 555 HSV_tuple = [h, saturation, lightness]
556 556 RGB_tuple = hsv_to_rgb(*HSV_tuple)
557 557 yield map(lambda x: str(int(x * 256)), RGB_tuple)
558 558
559 559
560 560 def color_hasher(n=10000, saturation=0.10, lightness=0.95):
561 561 """
562 562 Returns a function which when called with an argument returns a unique
563 563 color for that argument, eg.
564 564
565 565 :param n: number of colors to generate
566 566 :param saturation: saturation of returned colors
567 567 :param lightness: lightness of returned colors
568 568 :returns: css RGB string
569 569
570 570 >>> color_hash = color_hasher()
571 571 >>> color_hash('hello')
572 572 'rgb(34, 12, 59)'
573 573 >>> color_hash('hello')
574 574 'rgb(34, 12, 59)'
575 575 >>> color_hash('other')
576 576 'rgb(90, 224, 159)'
577 577 """
578 578
579 579 color_dict = {}
580 580 cgenerator = unique_color_generator(
581 581 saturation=saturation, lightness=lightness)
582 582
583 583 def get_color_string(thing):
584 584 if thing in color_dict:
585 585 col = color_dict[thing]
586 586 else:
587 587 col = color_dict[thing] = cgenerator.next()
588 588 return "rgb(%s)" % (', '.join(col))
589 589
590 590 return get_color_string
591 591
592 592
593 593 def get_lexer_safe(mimetype=None, filepath=None):
594 594 """
595 595 Tries to return a relevant pygments lexer using mimetype/filepath name,
596 596 defaulting to plain text if none could be found
597 597 """
598 598 lexer = None
599 599 try:
600 600 if mimetype:
601 601 lexer = get_lexer_for_mimetype(mimetype)
602 602 if not lexer:
603 603 lexer = get_lexer_for_filename(filepath)
604 604 except pygments.util.ClassNotFound:
605 605 pass
606 606
607 607 if not lexer:
608 608 lexer = get_lexer_by_name('text')
609 609
610 610 return lexer
611 611
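# Illustrative usage sketch (assumed inputs, not part of the original module):
# an unknown mimetype/filename falls back to the plain text lexer instead of
# raising:
#
#   >>> get_lexer_safe(mimetype='text/x-python').name
#   'Python'
#   >>> get_lexer_safe(filepath='file.some-unknown-ext').name
#   'Text only'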
612 612
613 613 def get_lexer_for_filenode(filenode):
614 614 lexer = get_custom_lexer(filenode.extension) or filenode.lexer
615 615 return lexer
616 616
617 617
618 618 def pygmentize(filenode, **kwargs):
619 619 """
620 620 pygmentize function using pygments
621 621
622 622 :param filenode:
623 623 """
624 624 lexer = get_lexer_for_filenode(filenode)
625 625 return literal(code_highlight(filenode.content, lexer,
626 626 CodeHtmlFormatter(**kwargs)))
627 627
628 628
629 629 def is_following_repo(repo_name, user_id):
630 630 from rhodecode.model.scm import ScmModel
631 631 return ScmModel().is_following_repo(repo_name, user_id)
632 632
633 633
634 634 class _Message(object):
635 635 """A message returned by ``Flash.pop_messages()``.
636 636
637 637 Converting the message to a string returns the message text. Instances
638 638 also have the following attributes:
639 639
640 640 * ``message``: the message text.
641 641 * ``category``: the category specified when the message was created.
642 642 """
643 643
644 644 def __init__(self, category, message):
645 645 self.category = category
646 646 self.message = message
647 647
648 648 def __str__(self):
649 649 return self.message
650 650
651 651 __unicode__ = __str__
652 652
653 653 def __html__(self):
654 654 return escape(safe_unicode(self.message))
655 655
656 656
657 657 class Flash(_Flash):
658 658
659 659 def pop_messages(self):
660 660 """Return all accumulated messages and delete them from the session.
661 661
662 662 The return value is a list of ``Message`` objects.
663 663 """
664 664 from pylons import session
665 665
666 666 messages = []
667 667
668 668 # Pop the 'old' pylons flash messages. They are tuples of the form
669 669 # (category, message)
670 670 for cat, msg in session.pop(self.session_key, []):
671 671 messages.append(_Message(cat, msg))
672 672
673 673 # Pop the 'new' pyramid flash messages for each category as list
674 674 # of strings.
675 675 for cat in self.categories:
676 676 for msg in session.pop_flash(queue=cat):
677 677 messages.append(_Message(cat, msg))
678 678 # Map messages from the default queue to the 'notice' category.
679 679 for msg in session.pop_flash():
680 680 messages.append(_Message('notice', msg))
681 681
682 682 session.save()
683 683 return messages
684 684
685 685 def json_alerts(self):
686 686 payloads = []
687 687 messages = flash.pop_messages()
688 688 if messages:
689 689 for message in messages:
690 690 subdata = {}
691 691 if hasattr(message.message, 'rsplit'):
692 692 flash_data = message.message.rsplit('|DELIM|', 1)
693 693 org_message = flash_data[0]
694 694 if len(flash_data) > 1:
695 695 subdata = json.loads(flash_data[1])
696 696 else:
697 697 org_message = message.message
698 698 payloads.append({
699 699 'message': {
700 700 'message': u'{}'.format(org_message),
701 701 'level': message.category,
702 702 'force': True,
703 703 'subdata': subdata
704 704 }
705 705 })
706 706 return json.dumps(payloads)
707 707
708 708 flash = Flash()
709 709
710 710 #==============================================================================
711 711 # SCM FILTERS available via h.
712 712 #==============================================================================
713 713 from rhodecode.lib.vcs.utils import author_name, author_email
714 714 from rhodecode.lib.utils2 import credentials_filter, age as _age
715 715 from rhodecode.model.db import User, ChangesetStatus
716 716
717 717 age = _age
718 718 capitalize = lambda x: x.capitalize()
719 719 email = author_email
720 720 short_id = lambda x: x[:12]
721 721 hide_credentials = lambda x: ''.join(credentials_filter(x))
722 722
723 723
724 724 def age_component(datetime_iso, value=None, time_is_local=False):
725 725 title = value or format_date(datetime_iso)
726 726 tzinfo = '+00:00'
727 727
728 728 # detect if we have a timezone info, otherwise, add it
729 729 if isinstance(datetime_iso, datetime) and not datetime_iso.tzinfo:
730 730 if time_is_local:
731 731 tzinfo = time.strftime("+%H:%M",
732 732 time.gmtime(
733 733 (datetime.now() - datetime.utcnow()).seconds + 1
734 734 )
735 735 )
736 736
737 737 return literal(
738 738 '<time class="timeago tooltip" '
739 'title="{1}" datetime="{0}{2}">{1}</time>'.format(
739 'title="{1}{2}" datetime="{0}{2}">{1}</time>'.format(
740 740 datetime_iso, title, tzinfo))
741 741
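# Illustrative sketch of the rendered output after this change (assumed
# example values, not part of the original module): the timezone offset is
# now appended to the tooltip title as well as the datetime attribute, e.g.
#
#   <time class="timeago tooltip"
#         title="Sat, 25 Feb 2017 14:05:12+01:00"
#         datetime="2017-02-25T14:05:12+01:00">Sat, 25 Feb 2017 14:05:12</time>
#
# (the actual offset depends on time_is_local and the server's timezone)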
742 742
743 743 def _shorten_commit_id(commit_id):
744 744 from rhodecode import CONFIG
745 745 def_len = safe_int(CONFIG.get('rhodecode_show_sha_length', 12))
746 746 return commit_id[:def_len]
747 747
748 748
749 749 def show_id(commit):
750 750 """
751 751 Configurable function that shows the commit ID;
752 752 by default it's r123:fffeeefffeee
753 753
754 754 :param commit: commit instance
755 755 """
756 756 from rhodecode import CONFIG
757 757 show_idx = str2bool(CONFIG.get('rhodecode_show_revision_number', True))
758 758
759 759 raw_id = _shorten_commit_id(commit.raw_id)
760 760 if show_idx:
761 761 return 'r%s:%s' % (commit.idx, raw_id)
762 762 else:
763 763 return '%s' % (raw_id, )
764 764
765 765
766 766 def format_date(date):
767 767 """
768 768 use a standardized formatting for dates used in RhodeCode
769 769
770 770 :param date: date/datetime object
771 771 :return: formatted date
772 772 """
773 773
774 774 if date:
775 775 _fmt = "%a, %d %b %Y %H:%M:%S"
776 776 return safe_unicode(date.strftime(_fmt))
777 777
778 778 return u""
779 779
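# Illustrative usage sketch (assumed date and default locale, not part of the
# original module):
#
#   >>> format_date(datetime(2017, 2, 25, 14, 5, 12))
#   u'Sat, 25 Feb 2017 14:05:12'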
780 780
781 781 class _RepoChecker(object):
782 782
783 783 def __init__(self, backend_alias):
784 784 self._backend_alias = backend_alias
785 785
786 786 def __call__(self, repository):
787 787 if hasattr(repository, 'alias'):
788 788 _type = repository.alias
789 789 elif hasattr(repository, 'repo_type'):
790 790 _type = repository.repo_type
791 791 else:
792 792 _type = repository
793 793 return _type == self._backend_alias
794 794
795 795 is_git = _RepoChecker('git')
796 796 is_hg = _RepoChecker('hg')
797 797 is_svn = _RepoChecker('svn')
798 798
799 799
800 800 def get_repo_type_by_name(repo_name):
801 801 repo = Repository.get_by_repo_name(repo_name)
802 802 return repo.repo_type
803 803
804 804
805 805 def is_svn_without_proxy(repository):
806 806 if is_svn(repository):
807 807 from rhodecode.model.settings import VcsSettingsModel
808 808 conf = VcsSettingsModel().get_ui_settings_as_config_obj()
809 809 return not str2bool(conf.get('vcs_svn_proxy', 'http_requests_enabled'))
810 810 return False
811 811
812 812
813 813 def discover_user(author):
814 814 """
815 815 Tries to discover a RhodeCode User based on the author string. The author string
816 816 is typically `FirstName LastName <email@address.com>`
817 817 """
818 818
819 819 # if author is already an instance use it for extraction
820 820 if isinstance(author, User):
821 821 return author
822 822
823 823 # Valid email in the attribute passed, see if they're in the system
824 824 _email = author_email(author)
825 825 if _email != '':
826 826 user = User.get_by_email(_email, case_insensitive=True, cache=True)
827 827 if user is not None:
828 828 return user
829 829
830 830 # Maybe it's a username? We try to extract it and fetch by username
831 831 _author = author_name(author)
832 832 user = User.get_by_username(_author, case_insensitive=True, cache=True)
833 833 if user is not None:
834 834 return user
835 835
836 836 return None
837 837
838 838
839 839 def email_or_none(author):
840 840 # extract email from the commit string
841 841 _email = author_email(author)
842 842
843 843 # If we have an email, use it, otherwise
844 844 # see if it contains a username we can get an email from
845 845 if _email != '':
846 846 return _email
847 847 else:
848 848 user = User.get_by_username(
849 849 author_name(author), case_insensitive=True, cache=True)
850 850
851 851 if user is not None:
852 852 return user.email
853 853
854 854 # No valid email, not a valid user in the system, none!
855 855 return None
856 856
857 857
858 858 def link_to_user(author, length=0, **kwargs):
859 859 user = discover_user(author)
860 860 # user can be None, but if we have it already it means we can re-use it
861 861 # in the person() function, so we save one expensive query
862 862 if user:
863 863 author = user
864 864
865 865 display_person = person(author, 'username_or_name_or_email')
866 866 if length:
867 867 display_person = shorter(display_person, length)
868 868
869 869 if user:
870 870 return link_to(
871 871 escape(display_person),
872 872 url('user_profile', username=user.username),
873 873 **kwargs)
874 874 else:
875 875 return escape(display_person)
876 876
877 877
878 878 def person(author, show_attr="username_and_name"):
879 879 user = discover_user(author)
880 880 if user:
881 881 return getattr(user, show_attr)
882 882 else:
883 883 _author = author_name(author)
884 884 _email = email(author)
885 885 return _author or _email
886 886
887 887
888 888 def author_string(email):
889 889 if email:
890 890 user = User.get_by_email(email, case_insensitive=True, cache=True)
891 891 if user:
892 892 if user.firstname or user.lastname:
893 893 return '%s %s &lt;%s&gt;' % (user.firstname, user.lastname, email)
894 894 else:
895 895 return email
896 896 else:
897 897 return email
898 898 else:
899 899 return None
900 900
901 901
902 902 def person_by_id(id_, show_attr="username_and_name"):
903 903 # attr to return from fetched user
904 904 person_getter = lambda usr: getattr(usr, show_attr)
905 905
906 906 # maybe it's an ID?
907 907 if str(id_).isdigit() or isinstance(id_, int):
908 908 id_ = int(id_)
909 909 user = User.get(id_)
910 910 if user is not None:
911 911 return person_getter(user)
912 912 return id_
913 913
914 914
915 915 def gravatar_with_user(author, show_disabled=False):
916 916 from rhodecode.lib.utils import PartialRenderer
917 917 _render = PartialRenderer('base/base.mako')
918 918 return _render('gravatar_with_user', author, show_disabled=show_disabled)
919 919
920 920
921 921 def desc_stylize(value):
922 922 """
923 923 converts tags from value into html equivalent
924 924
925 925 :param value:
926 926 """
927 927 if not value:
928 928 return ''
929 929
930 930 value = re.sub(r'\[see\ \=\>\ *([a-zA-Z0-9\/\=\?\&\ \:\/\.\-]*)\]',
931 931 '<div class="metatag" tag="see">see =&gt; \\1 </div>', value)
932 932 value = re.sub(r'\[license\ \=\>\ *([a-zA-Z0-9\/\=\?\&\ \:\/\.\-]*)\]',
933 933 '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/\\1">\\1</a></div>', value)
934 934 value = re.sub(r'\[(requires|recommends|conflicts|base)\ \=\>\ *([a-zA-Z0-9\-\/]*)\]',
935 935 '<div class="metatag" tag="\\1">\\1 =&gt; <a href="/\\2">\\2</a></div>', value)
936 936 value = re.sub(r'\[(lang|language)\ \=\>\ *([a-zA-Z\-\/\#\+]*)\]',
937 937 '<div class="metatag" tag="lang">\\2</div>', value)
938 938 value = re.sub(r'\[([a-z]+)\]',
939 939 '<div class="metatag" tag="\\1">\\1</div>', value)
940 940
941 941 return value
942 942
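# Illustrative usage sketch (assumed input, not part of the original module):
# a "[lang => ...]" tag is turned into a metatag div, while tags such as
# "[requires => repo-name]" additionally get a link to the referenced repo:
#
#   >>> desc_stylize('[lang => python]')
#   '<div class="metatag" tag="lang">python</div>'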
943 943
944 944 def escaped_stylize(value):
945 945 """
946 946 converts tags from value into html equivalent, but escaping its value first
947 947 """
948 948 if not value:
949 949 return ''
950 950
951 951 # Using the default webhelper escape method, but we have to force it into
952 952 # plain unicode instead of a markup tag to be used in regex expressions
953 953 value = unicode(escape(safe_unicode(value)))
954 954
955 955 value = re.sub(r'\[see\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]',
956 956 '<div class="metatag" tag="see">see =&gt; \\1 </div>', value)
957 957 value = re.sub(r'\[license\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]',
958 958 '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/\\1">\\1</a></div>', value)
959 959 value = re.sub(r'\[(requires|recommends|conflicts|base)\ \=\&gt;\ *([a-zA-Z0-9\-\/]*)\]',
960 960 '<div class="metatag" tag="\\1">\\1 =&gt; <a href="/\\2">\\2</a></div>', value)
961 961 value = re.sub(r'\[(lang|language)\ \=\&gt;\ *([a-zA-Z\-\/\#\+]*)\]',
962 962 '<div class="metatag" tag="lang">\\2</div>', value)
963 963 value = re.sub(r'\[([a-z]+)\]',
964 964 '<div class="metatag" tag="\\1">\\1</div>', value)
965 965
966 966 return value
967 967
968 968
969 969 def bool2icon(value):
970 970 """
971 971 Returns the boolean value of a given value, represented as an html element
972 972 with classes that will represent icons
973 973
974 974 :param value: given value to convert to html node
975 975 """
976 976
977 977 if value: # does bool conversion
978 978 return HTML.tag('i', class_="icon-true")
979 979 else: # not true as bool
980 980 return HTML.tag('i', class_="icon-false")
981 981
982 982
983 983 #==============================================================================
984 984 # PERMS
985 985 #==============================================================================
986 986 from rhodecode.lib.auth import HasPermissionAny, HasPermissionAll, \
987 987 HasRepoPermissionAny, HasRepoPermissionAll, HasRepoGroupPermissionAll, \
988 988 HasRepoGroupPermissionAny, HasRepoPermissionAnyApi, get_csrf_token, \
989 989 csrf_token_key
990 990
991 991
992 992 #==============================================================================
993 993 # GRAVATAR URL
994 994 #==============================================================================
995 995 class InitialsGravatar(object):
996 996 def __init__(self, email_address, first_name, last_name, size=30,
997 997 background=None, text_color='#fff'):
998 998 self.size = size
999 999 self.first_name = first_name
1000 1000 self.last_name = last_name
1001 1001 self.email_address = email_address
1002 1002 self.background = background or self.str2color(email_address)
1003 1003 self.text_color = text_color
1004 1004
1005 1005 def get_color_bank(self):
1006 1006 """
1007 1007 returns a predefined list of colors that gravatars can use.
1008 1008 Those are randomized distinct colors that guarantee readability and
1009 1009 uniqueness.
1010 1010
1011 1011 generated with: http://phrogz.net/css/distinct-colors.html
1012 1012 """
1013 1013 return [
1014 1014 '#bf3030', '#a67f53', '#00ff00', '#5989b3', '#392040', '#d90000',
1015 1015 '#402910', '#204020', '#79baf2', '#a700b3', '#bf6060', '#7f5320',
1016 1016 '#008000', '#003059', '#ee00ff', '#ff0000', '#8c4b00', '#007300',
1017 1017 '#005fb3', '#de73e6', '#ff4040', '#ffaa00', '#3df255', '#203140',
1018 1018 '#47004d', '#591616', '#664400', '#59b365', '#0d2133', '#83008c',
1019 1019 '#592d2d', '#bf9f60', '#73e682', '#1d3f73', '#73006b', '#402020',
1020 1020 '#b2862d', '#397341', '#597db3', '#e600d6', '#a60000', '#736039',
1021 1021 '#00b318', '#79aaf2', '#330d30', '#ff8080', '#403010', '#16591f',
1022 1022 '#002459', '#8c4688', '#e50000', '#ffbf40', '#00732e', '#102340',
1023 1023 '#bf60ac', '#8c4646', '#cc8800', '#00a642', '#1d3473', '#b32d98',
1024 1024 '#660e00', '#ffd580', '#80ffb2', '#7391e6', '#733967', '#d97b6c',
1025 1025 '#8c5e00', '#59b389', '#3967e6', '#590047', '#73281d', '#665200',
1026 1026 '#00e67a', '#2d50b3', '#8c2377', '#734139', '#b2982d', '#16593a',
1027 1027 '#001859', '#ff00aa', '#a65e53', '#ffcc00', '#0d3321', '#2d3959',
1028 1028 '#731d56', '#401610', '#4c3d00', '#468c6c', '#002ca6', '#d936a3',
1029 1029 '#d94c36', '#403920', '#36d9a3', '#0d1733', '#592d4a', '#993626',
1030 1030 '#cca300', '#00734d', '#46598c', '#8c005e', '#7f1100', '#8c7000',
1031 1031 '#00a66f', '#7382e6', '#b32d74', '#d9896c', '#ffe680', '#1d7362',
1032 1032 '#364cd9', '#73003d', '#d93a00', '#998a4d', '#59b3a1', '#5965b3',
1033 1033 '#e5007a', '#73341d', '#665f00', '#00b38f', '#0018b3', '#59163a',
1034 1034 '#b2502d', '#bfb960', '#00ffcc', '#23318c', '#a6537f', '#734939',
1035 1035 '#b2a700', '#104036', '#3d3df2', '#402031', '#e56739', '#736f39',
1036 1036 '#79f2ea', '#000059', '#401029', '#4c1400', '#ffee00', '#005953',
1037 1037 '#101040', '#990052', '#402820', '#403d10', '#00ffee', '#0000d9',
1038 1038 '#ff80c4', '#a66953', '#eeff00', '#00ccbe', '#8080ff', '#e673a1',
1039 1039 '#a62c00', '#474d00', '#1a3331', '#46468c', '#733950', '#662900',
1040 1040 '#858c23', '#238c85', '#0f0073', '#b20047', '#d9986c', '#becc00',
1041 1041 '#396f73', '#281d73', '#ff0066', '#ff6600', '#dee673', '#59adb3',
1042 1042 '#6559b3', '#590024', '#b2622d', '#98b32d', '#36ced9', '#332d59',
1043 1043 '#40001a', '#733f1d', '#526600', '#005359', '#242040', '#bf6079',
1044 1044 '#735039', '#cef23d', '#007780', '#5630bf', '#66001b', '#b24700',
1045 1045 '#acbf60', '#1d6273', '#25008c', '#731d34', '#a67453', '#50592d',
1046 1046 '#00ccff', '#6600ff', '#ff0044', '#4c1f00', '#8a994d', '#79daf2',
1047 1047 '#a173e6', '#d93662', '#402310', '#aaff00', '#2d98b3', '#8c40ff',
1048 1048 '#592d39', '#ff8c40', '#354020', '#103640', '#1a0040', '#331a20',
1049 1049 '#331400', '#334d00', '#1d5673', '#583973', '#7f0022', '#4c3626',
1050 1050 '#88cc00', '#36a3d9', '#3d0073', '#d9364c', '#33241a', '#698c23',
1051 1051 '#5995b3', '#300059', '#e57382', '#7f3300', '#366600', '#00aaff',
1052 1052 '#3a1659', '#733941', '#663600', '#74b32d', '#003c59', '#7f53a6',
1053 1053 '#73000f', '#ff8800', '#baf279', '#79caf2', '#291040', '#a6293a',
1054 1054 '#b2742d', '#587339', '#0077b3', '#632699', '#400009', '#d9a66c',
1055 1055 '#294010', '#2d4a59', '#aa00ff', '#4c131b', '#b25f00', '#5ce600',
1056 1056 '#267399', '#a336d9', '#990014', '#664e33', '#86bf60', '#0088ff',
1057 1057 '#7700b3', '#593a16', '#073300', '#1d4b73', '#ac60bf', '#e59539',
1058 1058 '#4f8c46', '#368dd9', '#5c0073'
1059 1059 ]
1060 1060
1061 1061 def rgb_to_hex_color(self, rgb_tuple):
1062 1062 """
1063 1063 Converts a passed rgb_tuple to a hex color.
1064 1064
1065 1065 :param rgb_tuple: tuple with 3 ints representing rgb color space
1066 1066 """
1067 1067 return '#' + ("".join(map(chr, rgb_tuple)).encode('hex'))
1068 1068
1069 1069 def email_to_int_list(self, email_str):
1070 1070 """
1071 1071 Get every byte of the hex digest value of the email and turn it into an integer.
1072 1072 It's always going to be between 0-255
1073 1073 """
1074 1074 digest = md5_safe(email_str.lower())
1075 1075 return [int(digest[i * 2:i * 2 + 2], 16) for i in range(16)]
1076 1076
1077 1077 def pick_color_bank_index(self, email_str, color_bank):
1078 1078 return self.email_to_int_list(email_str)[0] % len(color_bank)
1079 1079
1080 1080 def str2color(self, email_str):
1081 1081 """
1082 1082 Tries to map an email to a color using a stable algorithm
1083 1083
1084 1084 :param email_str:
1085 1085 """
1086 1086 color_bank = self.get_color_bank()
1087 1087 # pick position (modulo its length so we always find it in the
1088 1088 # bank even if it's smaller than 256 values)
1089 1089 pos = self.pick_color_bank_index(email_str, color_bank)
1090 1090 return color_bank[pos]
1091 1091
1092 1092 def normalize_email(self, email_address):
1093 1093 import unicodedata
1094 1094 # default host used to fill in the fake/missing email
1095 1095 default_host = u'localhost'
1096 1096
1097 1097 if not email_address:
1098 1098 email_address = u'%s@%s' % (User.DEFAULT_USER, default_host)
1099 1099
1100 1100 email_address = safe_unicode(email_address)
1101 1101
1102 1102 if u'@' not in email_address:
1103 1103 email_address = u'%s@%s' % (email_address, default_host)
1104 1104
1105 1105 if email_address.endswith(u'@'):
1106 1106 email_address = u'%s%s' % (email_address, default_host)
1107 1107
1108 1108 email_address = unicodedata.normalize('NFKD', email_address)\
1109 1109 .encode('ascii', 'ignore')
1110 1110 return email_address
1111 1111
1112 1112 def get_initials(self):
1113 1113 """
1114 1114 Returns 2 letter initials calculated based on the input.
1115 1115 The algorithm picks the first given email address and takes the first letter
1116 1116 of the part before @, then the first letter of the server name. In case
1117 1117 the part before @ is in a format of `somestring.somestring2`, it replaces
1118 1118 the server letter with the first letter of somestring2
1119 1119
1120 1120 In case the function was initialized with both first and last name, this
1121 1121 overrides the extraction from email with the first letters of the first and
1122 1122 last name. We add special logic to that functionality: in case the full name
1123 1123 is compound, like Guido Von Rossum, we use the last part of the last name
1124 1124 (Von Rossum), picking `R`.
1125 1125
1126 1126 The function also normalizes non-ascii characters to their ascii
1127 1127 representation, eg Ą => A
1128 1128 """
1129 1129 import unicodedata
1130 1130 # replace non-ascii to ascii
1131 1131 first_name = unicodedata.normalize(
1132 1132 'NFKD', safe_unicode(self.first_name)).encode('ascii', 'ignore')
1133 1133 last_name = unicodedata.normalize(
1134 1134 'NFKD', safe_unicode(self.last_name)).encode('ascii', 'ignore')
1135 1135
1136 1136 # do NFKD encoding, and also make sure email has proper format
1137 1137 email_address = self.normalize_email(self.email_address)
1138 1138
1139 1139 # first push the email initials
1140 1140 prefix, server = email_address.split('@', 1)
1141 1141
1142 1142 # check if prefix is maybe a 'firstname.lastname' syntax
1143 1143 _dot_split = prefix.rsplit('.', 1)
1144 1144 if len(_dot_split) == 2:
1145 1145 initials = [_dot_split[0][0], _dot_split[1][0]]
1146 1146 else:
1147 1147 initials = [prefix[0], server[0]]
1148 1148
1149 1149 # then try to replace with either firstname or lastname
1150 1150 fn_letter = (first_name or " ")[0].strip()
1151 1151 ln_letter = (last_name.split(' ', 1)[-1] or " ")[0].strip()
1152 1152
1153 1153 if fn_letter:
1154 1154 initials[0] = fn_letter
1155 1155
1156 1156 if ln_letter:
1157 1157 initials[1] = ln_letter
1158 1158
1159 1159 return ''.join(initials).upper()
1160 1160
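# Illustrative examples (assumed values, not part of the original module):
# with no first/last name set, 'john.doe@example.com' yields 'JD'; with
# first_name='Guido' and last_name='Von Rossum' the result is 'GR', since
# the last part of a compound last name is used.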
1161 1161 def get_img_data_by_type(self, font_family, img_type):
1162 1162 default_user = """
1163 1163 <svg xmlns="http://www.w3.org/2000/svg"
1164 1164 version="1.1" x="0px" y="0px" width="{size}" height="{size}"
1165 1165 viewBox="-15 -10 439.165 429.164"
1166 1166
1167 1167 xml:space="preserve"
1168 1168 style="background:{background};" >
1169 1169
1170 1170 <path d="M204.583,216.671c50.664,0,91.74-48.075,
1171 1171 91.74-107.378c0-82.237-41.074-107.377-91.74-107.377
1172 1172 c-50.668,0-91.74,25.14-91.74,107.377C112.844,
1173 1173 168.596,153.916,216.671,
1174 1174 204.583,216.671z" fill="{text_color}"/>
1175 1175 <path d="M407.164,374.717L360.88,
1176 1176 270.454c-2.117-4.771-5.836-8.728-10.465-11.138l-71.83-37.392
1177 1177 c-1.584-0.823-3.502-0.663-4.926,0.415c-20.316,
1178 1178 15.366-44.203,23.488-69.076,23.488c-24.877,
1179 1179 0-48.762-8.122-69.078-23.488
1180 1180 c-1.428-1.078-3.346-1.238-4.93-0.415L58.75,
1181 1181 259.316c-4.631,2.41-8.346,6.365-10.465,11.138L2.001,374.717
1182 1182 c-3.191,7.188-2.537,15.412,1.75,22.005c4.285,
1183 1183 6.592,11.537,10.526,19.4,10.526h362.861c7.863,0,15.117-3.936,
1184 1184 19.402-10.527 C409.699,390.129,
1185 1185 410.355,381.902,407.164,374.717z" fill="{text_color}"/>
1186 1186 </svg>""".format(
1187 1187 size=self.size,
1188 1188 background='#979797', # @grey4
1189 1189 text_color=self.text_color,
1190 1190 font_family=font_family)
1191 1191
1192 1192 return {
1193 1193 "default_user": default_user
1194 1194 }[img_type]
1195 1195
1196 1196 def get_img_data(self, svg_type=None):
1197 1197 """
1198 1198 generates the svg metadata for image
1199 1199 """
1200 1200
1201 1201 font_family = ','.join([
1202 1202 'proximanovaregular',
1203 1203 'Proxima Nova Regular',
1204 1204 'Proxima Nova',
1205 1205 'Arial',
1206 1206 'Lucida Grande',
1207 1207 'sans-serif'
1208 1208 ])
1209 1209 if svg_type:
1210 1210 return self.get_img_data_by_type(font_family, svg_type)
1211 1211
1212 1212 initials = self.get_initials()
1213 1213 img_data = """
1214 1214 <svg xmlns="http://www.w3.org/2000/svg" pointer-events="none"
1215 1215 width="{size}" height="{size}"
1216 1216 style="width: 100%; height: 100%; background-color: {background}"
1217 1217 viewBox="0 0 {size} {size}">
1218 1218 <text text-anchor="middle" y="50%" x="50%" dy="0.35em"
1219 1219 pointer-events="auto" fill="{text_color}"
1220 1220 font-family="{font_family}"
1221 1221 style="font-weight: 400; font-size: {f_size}px;">{text}
1222 1222 </text>
1223 1223 </svg>""".format(
1224 1224 size=self.size,
1225 1225 f_size=self.size/1.85, # scale the text inside the box nicely
1226 1226 background=self.background,
1227 1227 text_color=self.text_color,
1228 1228 text=initials.upper(),
1229 1229 font_family=font_family)
1230 1230
1231 1231 return img_data
1232 1232
1233 1233 def generate_svg(self, svg_type=None):
1234 1234 img_data = self.get_img_data(svg_type)
1235 1235 return "data:image/svg+xml;base64,%s" % img_data.encode('base64')
1236 1236
1237 1237
1238 1238 def initials_gravatar(email_address, first_name, last_name, size=30):
1239 1239 svg_type = None
1240 1240 if email_address == User.DEFAULT_USER_EMAIL:
1241 1241 svg_type = 'default_user'
1242 1242 klass = InitialsGravatar(email_address, first_name, last_name, size)
1243 1243 return klass.generate_svg(svg_type=svg_type)
1244 1244
1245 1245
1246 1246 def gravatar_url(email_address, size=30):
1247 1247 # doh, we need to re-import those to mock it later
1248 1248 from pylons import tmpl_context as c
1249 1249
1250 1250 _use_gravatar = c.visual.use_gravatar
1251 1251 _gravatar_url = c.visual.gravatar_url or User.DEFAULT_GRAVATAR_URL
1252 1252
1253 1253 email_address = email_address or User.DEFAULT_USER_EMAIL
1254 1254 if isinstance(email_address, unicode):
1255 1255 # hashlib crashes on unicode items
1256 1256 email_address = safe_str(email_address)
1257 1257
1258 1258 # empty email or default user
1259 1259 if not email_address or email_address == User.DEFAULT_USER_EMAIL:
1260 1260 return initials_gravatar(User.DEFAULT_USER_EMAIL, '', '', size=size)
1261 1261
1262 1262 if _use_gravatar:
1263 1263 # TODO: Disuse pyramid thread locals. Think about another solution to
1264 1264 # get the host and schema here.
1265 1265 request = get_current_request()
1266 1266 tmpl = safe_str(_gravatar_url)
1267 1267 tmpl = tmpl.replace('{email}', email_address)\
1268 1268 .replace('{md5email}', md5_safe(email_address.lower())) \
1269 1269 .replace('{netloc}', request.host)\
1270 1270 .replace('{scheme}', request.scheme)\
1271 1271 .replace('{size}', safe_str(size))
1272 1272 return tmpl
1273 1273 else:
1274 1274 return initials_gravatar(email_address, '', '', size=size)
1275 1275
1276 1276
1277 1277 class Page(_Page):
1278 1278 """
1279 1279 Custom pager to match rendering style with paginator
1280 1280 """
1281 1281
1282 1282 def _get_pos(self, cur_page, max_page, items):
1283 1283 edge = (items / 2) + 1
1284 1284 if (cur_page <= edge):
1285 1285 radius = max(items / 2, items - cur_page)
1286 1286 elif (max_page - cur_page) < edge:
1287 1287 radius = (items - 1) - (max_page - cur_page)
1288 1288 else:
1289 1289 radius = items / 2
1290 1290
1291 1291 left = max(1, (cur_page - (radius)))
1292 1292 right = min(max_page, cur_page + (radius))
1293 1293 return left, cur_page, right
1294 1294
1295 1295 def _range(self, regexp_match):
1296 1296 """
1297 1297 Return range of linked pages (e.g. '1 2 [3] 4 5 6 7 8').
1298 1298
1299 1299 Arguments:
1300 1300
1301 1301 regexp_match
1302 1302 A "re" (regular expressions) match object containing the
1303 1303 radius of linked pages around the current page in
1304 1304 regexp_match.group(1) as a string
1305 1305
1306 1306 This function is supposed to be called as a callable in
1307 1307 re.sub.
1308 1308
1309 1309 """
1310 1310 radius = int(regexp_match.group(1))
1311 1311
1312 1312 # Compute the first and last page number within the radius
1313 1313 # e.g. '1 .. 5 6 [7] 8 9 .. 12'
1314 1314 # -> leftmost_page = 5
1315 1315 # -> rightmost_page = 9
1316 1316 leftmost_page, _cur, rightmost_page = self._get_pos(self.page,
1317 1317 self.last_page,
1318 1318 (radius * 2) + 1)
1319 1319 nav_items = []
1320 1320
1321 1321 # Create a link to the first page (unless we are on the first page
1322 1322 # or there would be no need to insert '..' spacers)
1323 1323 if self.page != self.first_page and self.first_page < leftmost_page:
1324 1324 nav_items.append(self._pagerlink(self.first_page, self.first_page))
1325 1325
1326 1326 # Insert dots if there are pages between the first page
1327 1327 # and the currently displayed page range
1328 1328 if leftmost_page - self.first_page > 1:
1329 1329 # Wrap in a SPAN tag if nolink_attr is set
1330 1330 text = '..'
1331 1331 if self.dotdot_attr:
1332 1332 text = HTML.span(c=text, **self.dotdot_attr)
1333 1333 nav_items.append(text)
1334 1334
1335 1335 for thispage in xrange(leftmost_page, rightmost_page + 1):
1336 1336 # Highlight the current page number and do not use a link
1337 1337 if thispage == self.page:
1338 1338 text = '%s' % (thispage,)
1339 1339 # Wrap in a SPAN tag if nolink_attr is set
1340 1340 if self.curpage_attr:
1341 1341 text = HTML.span(c=text, **self.curpage_attr)
1342 1342 nav_items.append(text)
1343 1343 # Otherwise create just a link to that page
1344 1344 else:
1345 1345 text = '%s' % (thispage,)
1346 1346 nav_items.append(self._pagerlink(thispage, text))
1347 1347
1348 1348 # Insert dots if there are pages between the displayed
1349 1349 # page numbers and the end of the page range
1350 1350 if self.last_page - rightmost_page > 1:
1351 1351 text = '..'
1352 1352 # Wrap in a SPAN tag if nolink_attr is set
1353 1353 if self.dotdot_attr:
1354 1354 text = HTML.span(c=text, **self.dotdot_attr)
1355 1355 nav_items.append(text)
1356 1356
1357 1357 # Create a link to the very last page (unless we are on the last
1358 1358 # page or there would be no need to insert '..' spacers)
1359 1359 if self.page != self.last_page and rightmost_page < self.last_page:
1360 1360 nav_items.append(self._pagerlink(self.last_page, self.last_page))
1361 1361
1362 1362 ## prerender links
1363 1363 #_page_link = url.current()
1364 1364 #nav_items.append(literal('<link rel="prerender" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
1365 1365 #nav_items.append(literal('<link rel="prefetch" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
1366 1366 return self.separator.join(nav_items)
1367 1367
1368 1368 def pager(self, format='~2~', page_param='page', partial_param='partial',
1369 1369 show_if_single_page=False, separator=' ', onclick=None,
1370 1370 symbol_first='<<', symbol_last='>>',
1371 1371 symbol_previous='<', symbol_next='>',
1372 1372 link_attr={'class': 'pager_link', 'rel': 'prerender'},
1373 1373 curpage_attr={'class': 'pager_curpage'},
1374 1374 dotdot_attr={'class': 'pager_dotdot'}, **kwargs):
1375 1375
1376 1376 self.curpage_attr = curpage_attr
1377 1377 self.separator = separator
1378 1378 self.pager_kwargs = kwargs
1379 1379 self.page_param = page_param
1380 1380 self.partial_param = partial_param
1381 1381 self.onclick = onclick
1382 1382 self.link_attr = link_attr
1383 1383 self.dotdot_attr = dotdot_attr
1384 1384
1385 1385 # Don't show navigator if there is no more than one page
1386 1386 if self.page_count == 0 or (self.page_count == 1 and not show_if_single_page):
1387 1387 return ''
1388 1388
1389 1389 from string import Template
1390 1390 # Replace ~...~ in token format by range of pages
1391 1391 result = re.sub(r'~(\d+)~', self._range, format)
1392 1392
1393 1393 # Interpolate '%' variables
1394 1394 result = Template(result).safe_substitute({
1395 1395 'first_page': self.first_page,
1396 1396 'last_page': self.last_page,
1397 1397 'page': self.page,
1398 1398 'page_count': self.page_count,
1399 1399 'items_per_page': self.items_per_page,
1400 1400 'first_item': self.first_item,
1401 1401 'last_item': self.last_item,
1402 1402 'item_count': self.item_count,
1403 1403 'link_first': self.page > self.first_page and \
1404 1404 self._pagerlink(self.first_page, symbol_first) or '',
1405 1405 'link_last': self.page < self.last_page and \
1406 1406 self._pagerlink(self.last_page, symbol_last) or '',
1407 1407 'link_previous': self.previous_page and \
1408 1408 self._pagerlink(self.previous_page, symbol_previous) \
1409 1409 or HTML.span(symbol_previous, class_="pg-previous disabled"),
1410 1410 'link_next': self.next_page and \
1411 1411 self._pagerlink(self.next_page, symbol_next) \
1412 1412 or HTML.span(symbol_next, class_="pg-next disabled")
1413 1413 })
1414 1414
1415 1415 return literal(result)
1416 1416
1417 1417
1418 1418 #==============================================================================
1419 1419 # REPO PAGER, PAGER FOR REPOSITORY
1420 1420 #==============================================================================
1421 1421 class RepoPage(Page):
1422 1422
1423 1423 def __init__(self, collection, page=1, items_per_page=20,
1424 1424 item_count=None, url=None, **kwargs):
1425 1425
1426 1426 """Create a "RepoPage" instance. special pager for paging
1427 1427 repository
1428 1428 """
1429 1429 self._url_generator = url
1430 1430
1431 1431 # Save the kwargs class-wide so they can be used in the pager() method
1432 1432 self.kwargs = kwargs
1433 1433
1434 1434 # Save a reference to the collection
1435 1435 self.original_collection = collection
1436 1436
1437 1437 self.collection = collection
1438 1438
1439 1439 # The self.page is the number of the current page.
1440 1440 # The first page has the number 1!
1441 1441 try:
1442 1442 self.page = int(page) # make it int() if we get it as a string
1443 1443 except (ValueError, TypeError):
1444 1444 self.page = 1
1445 1445
1446 1446 self.items_per_page = items_per_page
1447 1447
1448 1448 # Unless the user tells us how many items the collection has
1449 1449 # we calculate that ourselves.
1450 1450 if item_count is not None:
1451 1451 self.item_count = item_count
1452 1452 else:
1453 1453 self.item_count = len(self.collection)
1454 1454
1455 1455 # Compute the number of the first and last available page
1456 1456 if self.item_count > 0:
1457 1457 self.first_page = 1
1458 1458 self.page_count = int(math.ceil(float(self.item_count) /
1459 1459 self.items_per_page))
1460 1460 self.last_page = self.first_page + self.page_count - 1
1461 1461
1462 1462 # Make sure that the requested page number is in the range of
1463 1463 # valid pages
1464 1464 if self.page > self.last_page:
1465 1465 self.page = self.last_page
1466 1466 elif self.page < self.first_page:
1467 1467 self.page = self.first_page
1468 1468
1469 1469 # Note: the number of items on this page can be less than
1470 1470 # items_per_page if the last page is not full
1471 1471 self.first_item = max(0, (self.item_count) - (self.page *
1472 1472 items_per_page))
1473 1473 self.last_item = ((self.item_count - 1) - items_per_page *
1474 1474 (self.page - 1))
1475 1475
1476 1476 self.items = list(self.collection[self.first_item:self.last_item + 1])
1477 1477
1478 1478 # Links to previous and next page
1479 1479 if self.page > self.first_page:
1480 1480 self.previous_page = self.page - 1
1481 1481 else:
1482 1482 self.previous_page = None
1483 1483
1484 1484 if self.page < self.last_page:
1485 1485 self.next_page = self.page + 1
1486 1486 else:
1487 1487 self.next_page = None
1488 1488
1489 1489 # No items available
1490 1490 else:
1491 1491 self.first_page = None
1492 1492 self.page_count = 0
1493 1493 self.last_page = None
1494 1494 self.first_item = None
1495 1495 self.last_item = None
1496 1496 self.previous_page = None
1497 1497 self.next_page = None
1498 1498 self.items = []
1499 1499
1500 1500 # This is a subclass of the 'list' type. Initialise the list now.
1501 1501 list.__init__(self, reversed(self.items))
1502 1502
1503 1503
1504 1504 def changed_tooltip(nodes):
1505 1505 """
1506 1506 Generates an html string for changed nodes on the commit page.
1507 1507 It limits the output to 30 entries
1508 1508
1509 1509 :param nodes: LazyNodesGenerator
1510 1510 """
1511 1511 if nodes:
1512 1512 pref = ': <br/> '
1513 1513 suf = ''
1514 1514 if len(nodes) > 30:
1515 1515 suf = '<br/>' + _(' and %s more') % (len(nodes) - 30)
1516 1516 return literal(pref + '<br/> '.join([safe_unicode(x.path)
1517 1517 for x in nodes[:30]]) + suf)
1518 1518 else:
1519 1519 return ': ' + _('No Files')
1520 1520
1521 1521
1522 1522 def breadcrumb_repo_link(repo):
1523 1523 """
1524 1524 Makes a breadcrumbs path link to repo
1525 1525
1526 1526 ex::
1527 1527 group >> subgroup >> repo
1528 1528
1529 1529 :param repo: a Repository instance
1530 1530 """
1531 1531
1532 1532 path = [
1533 1533 link_to(group.name, url('repo_group_home', group_name=group.group_name))
1534 1534 for group in repo.groups_with_parents
1535 1535 ] + [
1536 1536 link_to(repo.just_name, url('summary_home', repo_name=repo.repo_name))
1537 1537 ]
1538 1538
1539 1539 return literal(' &raquo; '.join(path))
1540 1540
1541 1541
1542 1542 def format_byte_size_binary(file_size):
1543 1543 """
1544 1544 Formats file/folder sizes to a standard representation.
1545 1545 """
1546 1546 formatted_size = format_byte_size(file_size, binary=True)
1547 1547 return formatted_size
1548 1548
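# Editor's note: a hedged usage sketch. With binary=True webhelpers2 uses
# 1024-based (IEC) units, so for example:
#   format_byte_size_binary(2048)          -> roughly '2 KiB'
#   format_byte_size_binary(5 * 1024 ** 2) -> roughly '5 MiB'
# (the exact string, e.g. decimal precision, is whatever webhelpers2 produces).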
1549 1549
1550 1550 def fancy_file_stats(stats):
1551 1551 """
1552 1552 Displays a fancy two-colored bar for the number of added/deleted
1553 1553 lines of code in a file
1554 1554
1555 1555 :param stats: dict with 'added', 'deleted', 'binary' and 'ops' information
1556 1556 """
1557 1557 from rhodecode.lib.diffs import NEW_FILENODE, DEL_FILENODE, \
1558 1558 MOD_FILENODE, RENAMED_FILENODE, CHMOD_FILENODE, BIN_FILENODE
1559 1559
1560 1560 def cgen(l_type, a_v, d_v):
1561 1561 mapping = {'tr': 'top-right-rounded-corner-mid',
1562 1562 'tl': 'top-left-rounded-corner-mid',
1563 1563 'br': 'bottom-right-rounded-corner-mid',
1564 1564 'bl': 'bottom-left-rounded-corner-mid'}
1565 1565 map_getter = lambda x: mapping[x]
1566 1566
1567 1567 if l_type == 'a' and d_v:
1568 1568 # case when added and deleted are present
1569 1569 return ' '.join(map(map_getter, ['tl', 'bl']))
1570 1570
1571 1571 if l_type == 'a' and not d_v:
1572 1572 return ' '.join(map(map_getter, ['tr', 'br', 'tl', 'bl']))
1573 1573
1574 1574 if l_type == 'd' and a_v:
1575 1575 return ' '.join(map(map_getter, ['tr', 'br']))
1576 1576
1577 1577 if l_type == 'd' and not a_v:
1578 1578 return ' '.join(map(map_getter, ['tr', 'br', 'tl', 'bl']))
1579 1579
1580 1580 a, d = stats['added'], stats['deleted']
1581 1581 width = 100
1582 1582
1583 1583 if stats['binary']: # binary operations like chmod/rename etc
1584 1584 lbl = []
1585 1585 bin_op = 0 # undefined
1586 1586
1587 1587 # prefix with bin for binary files
1588 1588 if BIN_FILENODE in stats['ops']:
1589 1589 lbl += ['bin']
1590 1590
1591 1591 if NEW_FILENODE in stats['ops']:
1592 1592 lbl += [_('new file')]
1593 1593 bin_op = NEW_FILENODE
1594 1594 elif MOD_FILENODE in stats['ops']:
1595 1595 lbl += [_('mod')]
1596 1596 bin_op = MOD_FILENODE
1597 1597 elif DEL_FILENODE in stats['ops']:
1598 1598 lbl += [_('del')]
1599 1599 bin_op = DEL_FILENODE
1600 1600 elif RENAMED_FILENODE in stats['ops']:
1601 1601 lbl += [_('rename')]
1602 1602 bin_op = RENAMED_FILENODE
1603 1603
1604 1604 # chmod can go with other operations, so we add a + to lbl if needed
1605 1605 if CHMOD_FILENODE in stats['ops']:
1606 1606 lbl += [_('chmod')]
1607 1607 if bin_op == 0:
1608 1608 bin_op = CHMOD_FILENODE
1609 1609
1610 1610 lbl = '+'.join(lbl)
1611 1611 b_a = '<div class="bin bin%s %s" style="width:100%%">%s</div>' \
1612 1612 % (bin_op, cgen('a', a_v='', d_v=0), lbl)
1613 1613 b_d = '<div class="bin bin1" style="width:0%"></div>'
1614 1614 return literal('<div style="width:%spx">%s%s</div>' % (width, b_a, b_d))
1615 1615
1616 1616 t = stats['added'] + stats['deleted']
1617 1617 unit = float(width) / (t or 1)
1618 1618
1619 1619 # each bar needs at least 9% of the width to be visible, or 0 to be hidden
1620 1620 a_p = max(9, unit * a) if a > 0 else 0
1621 1621 d_p = max(9, unit * d) if d > 0 else 0
1622 1622 p_sum = a_p + d_p
1623 1623
1624 1624 if p_sum > width:
1625 1625 # adjust the percentages to sum to 100% since we clamped each to a 9% minimum
1626 1626 if a_p > d_p:
1627 1627 a_p = a_p - (p_sum - width)
1628 1628 else:
1629 1629 d_p = d_p - (p_sum - width)
1630 1630
1631 1631 a_v = a if a > 0 else ''
1632 1632 d_v = d if d > 0 else ''
1633 1633
1634 1634 d_a = '<div class="added %s" style="width:%s%%">%s</div>' % (
1635 1635 cgen('a', a_v, d_v), a_p, a_v
1636 1636 )
1637 1637 d_d = '<div class="deleted %s" style="width:%s%%">%s</div>' % (
1638 1638 cgen('d', a_v, d_v), d_p, d_v
1639 1639 )
1640 1640 return literal('<div style="width:%spx">%s%s</div>' % (width, d_a, d_d))
1641 1641
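# Editor's note: a worked example of the width math above, with hypothetical
# numbers. For stats = {'added': 10, 'deleted': 2, 'binary': False, 'ops': {}}:
#   unit  = 100 / 12.0 ~= 8.33
#   a_p   = max(9, 8.33 * 10) ~= 83.3    d_p = max(9, 8.33 * 2) ~= 16.7
#   p_sum = 100, so no re-adjustment is needed, and the rendered bar is an
#   'added' div ~83% wide next to a 'deleted' div ~17% wide inside a 100px box.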
1642 1642
1643 1643 def urlify_text(text_, safe=True):
1644 1644 """
1645 1645 Extract URLs from text and make HTML links out of them
1646 1646
1647 1647 :param text_:
1648 1648 """
1649 1649
1650 1650 url_pat = re.compile(r'''(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@#.&+]'''
1651 1651 '''|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)''')
1652 1652
1653 1653 def url_func(match_obj):
1654 1654 url_full = match_obj.groups()[0]
1655 1655 return '<a href="%(url)s">%(url)s</a>' % ({'url': url_full})
1656 1656 _newtext = url_pat.sub(url_func, text_)
1657 1657 if safe:
1658 1658 return literal(_newtext)
1659 1659 return _newtext
1660 1660
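# Editor's note: illustrative usage, not part of the original module:
#   urlify_text('docs at http://example.com/guide')
# returns a literal() wrapping
#   'docs at <a href="http://example.com/guide">http://example.com/guide</a>'
# while text without an http(s):// URL is returned unchanged.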
1661 1661
1662 1662 def urlify_commits(text_, repository):
1663 1663 """
1664 1664 Extract commit ids from text and make links from them
1665 1665
1666 1666 :param text_:
1667 1667 :param repository: repo name to build the URL with
1668 1668 """
1669 1669 from pylons import url # doh, we need to re-import url to mock it later
1670 1670 URL_PAT = re.compile(r'(^|\s)([0-9a-fA-F]{12,40})($|\s)')
1671 1671
1672 1672 def url_func(match_obj):
1673 1673 commit_id = match_obj.groups()[1]
1674 1674 pref = match_obj.groups()[0]
1675 1675 suf = match_obj.groups()[2]
1676 1676
1677 1677 tmpl = (
1678 1678 '%(pref)s<a class="%(cls)s" href="%(url)s">'
1679 1679 '%(commit_id)s</a>%(suf)s'
1680 1680 )
1681 1681 return tmpl % {
1682 1682 'pref': pref,
1683 1683 'cls': 'revision-link',
1684 1684 'url': url('changeset_home', repo_name=repository,
1685 1685 revision=commit_id, qualified=True),
1686 1686 'commit_id': commit_id,
1687 1687 'suf': suf
1688 1688 }
1689 1689
1690 1690 newtext = URL_PAT.sub(url_func, text_)
1691 1691
1692 1692 return newtext
1693 1693
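# Editor's note: illustrative usage with a hypothetical hash and repo name:
#   urlify_commits('fixed in deadbeefcafe', 'my-repo')
# wraps the 12+ character hex run in
#   <a class="revision-link" href="...">deadbeefcafe</a>
# where the href is built from the 'changeset_home' route; shorter hex runs
# are left untouched.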
1694 1694
1695 1695 def _process_url_func(match_obj, repo_name, uid, entry,
1696 1696 return_raw_data=False):
1697 1697 pref = ''
1698 1698 if match_obj.group().startswith(' '):
1699 1699 pref = ' '
1700 1700
1701 1701 issue_id = ''.join(match_obj.groups())
1702 1702 tmpl = (
1703 1703 '%(pref)s<a class="%(cls)s" href="%(url)s">'
1704 1704 '%(issue-prefix)s%(id-repr)s'
1705 1705 '</a>')
1706 1706
1707 1707 (repo_name_cleaned,
1708 1708 parent_group_name) = RepoGroupModel().\
1709 1709 _get_group_name_and_parent(repo_name)
1710 1710
1711 1711 # variables replacement
1712 1712 named_vars = {
1713 1713 'id': issue_id,
1714 1714 'repo': repo_name,
1715 1715 'repo_name': repo_name_cleaned,
1716 1716 'group_name': parent_group_name
1717 1717 }
1718 1718 # named regex variables
1719 1719 named_vars.update(match_obj.groupdict())
1720 1720 _url = string.Template(entry['url']).safe_substitute(**named_vars)
1721 1721
1722 1722 data = {
1723 1723 'pref': pref,
1724 1724 'cls': 'issue-tracker-link',
1725 1725 'url': _url,
1726 1726 'id-repr': issue_id,
1727 1727 'issue-prefix': entry['pref'],
1728 1728 'serv': entry['url'],
1729 1729 }
1730 1730 if return_raw_data:
1731 1731 return {
1732 1732 'id': issue_id,
1733 1733 'url': _url
1734 1734 }
1735 1735 return tmpl % data
1736 1736
1737 1737
1738 1738 def process_patterns(text_string, repo_name, config=None):
1739 1739 repo = None
1740 1740 if repo_name:
1741 1741 # Retrieve the repo object so that an invalid repo_name does not explode in
1742 1742 # IssueTrackerSettingsModel; the (possibly invalid) name is still passed further down
1743 1743 repo = Repository.get_by_repo_name(repo_name, cache=True)
1744 1744
1745 1745 settings_model = IssueTrackerSettingsModel(repo=repo)
1746 1746 active_entries = settings_model.get_settings(cache=True)
1747 1747
1748 1748 issues_data = []
1749 1749 newtext = text_string
1750 1750 for uid, entry in active_entries.items():
1751 1751 log.debug('found issue tracker entry with uid %s', uid)
1752 1752
1753 1753 if not (entry['pat'] and entry['url']):
1754 1754 log.debug('skipping due to missing data')
1755 1755 continue
1756 1756
1757 1757 log.debug('issue tracker entry: uid: `%s` PAT:%s URL:%s PREFIX:%s',
1758 1758 uid, entry['pat'], entry['url'], entry['pref'])
1759 1759
1760 1760 try:
1761 1761 pattern = re.compile(r'%s' % entry['pat'])
1762 1762 except re.error:
1763 1763 log.exception(
1764 1764 'issue tracker pattern: `%s` failed to compile',
1765 1765 entry['pat'])
1766 1766 continue
1767 1767
1768 1768 data_func = partial(
1769 1769 _process_url_func, repo_name=repo_name, entry=entry, uid=uid,
1770 1770 return_raw_data=True)
1771 1771
1772 1772 for match_obj in pattern.finditer(text_string):
1773 1773 issues_data.append(data_func(match_obj))
1774 1774
1775 1775 url_func = partial(
1776 1776 _process_url_func, repo_name=repo_name, entry=entry, uid=uid)
1777 1777
1778 1778 newtext = pattern.sub(url_func, newtext)
1779 1779 log.debug('processed prefix:uid `%s`', uid)
1780 1780
1781 1781 return newtext, issues_data
1782 1782
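# Editor's note: a hedged sketch of how an issue-tracker entry is applied,
# using a hypothetical configuration (not from the original module). For an
# active entry like
#   {'pat': r'#(?P<issue>\d+)', 'url': 'https://tracker.example.com/${issue}',
#    'pref': '#'}
# process_patterns('fixes #42', 'my-repo') returns the text with '#42'
# replaced by an <a class="issue-tracker-link"> pointing at
# https://tracker.example.com/42, plus issues_data = [{'id': '42', 'url': ...}].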
1783 1783
1784 1784 def urlify_commit_message(commit_text, repository=None):
1785 1785 """
1786 1786 Parses the given text message and makes proper links.
1787 1787 Issues are linked to the configured issue tracker, and commit ids become commit links
1788 1788
1789 1789 :param commit_text:
1790 1790 :param repository:
1791 1791 """
1792 1792 from pylons import url # doh, we need to re-import url to mock it later
1793 1793
1794 1794 def escaper(input_str):
1795 1795 return input_str.replace('<', '&lt;').replace('>', '&gt;')
1796 1796
1797 1797 newtext = escaper(commit_text)
1798 1798
1799 1799 # extract http/https links and make them real urls
1800 1800 newtext = urlify_text(newtext, safe=False)
1801 1801
1802 1802 # urlify commits - extract commit ids and make link out of them, if we have
1803 1803 # the scope of repository present.
1804 1804 if repository:
1805 1805 newtext = urlify_commits(newtext, repository)
1806 1806
1807 1807 # process issue tracker patterns
1808 1808 newtext, issues = process_patterns(newtext, repository or '')
1809 1809
1810 1810 return literal(newtext)
1811 1811
1812 1812
1813 1813 def rst(source, mentions=False):
1814 1814 return literal('<div class="rst-block">%s</div>' %
1815 1815 MarkupRenderer.rst(source, mentions=mentions))
1816 1816
1817 1817
1818 1818 def markdown(source, mentions=False):
1819 1819 return literal('<div class="markdown-block">%s</div>' %
1820 1820 MarkupRenderer.markdown(source, flavored=True,
1821 1821 mentions=mentions))
1822 1822
1823 1823 def renderer_from_filename(filename, exclude=None):
1824 1824 return MarkupRenderer.renderer_from_filename(filename, exclude=exclude)
1825 1825
1826 1826
1827 1827 def render(source, renderer='rst', mentions=False):
1828 1828 if renderer == 'rst':
1829 1829 return rst(source, mentions=mentions)
1830 1830 if renderer == 'markdown':
1831 1831 return markdown(source, mentions=mentions)
1832 1832
1833 1833
1834 1834 def commit_status(repo, commit_id):
1835 1835 return ChangesetStatusModel().get_status(repo, commit_id)
1836 1836
1837 1837
1838 1838 def commit_status_lbl(commit_status):
1839 1839 return dict(ChangesetStatus.STATUSES).get(commit_status)
1840 1840
1841 1841
1842 1842 def commit_time(repo_name, commit_id):
1843 1843 repo = Repository.get_by_repo_name(repo_name)
1844 1844 commit = repo.get_commit(commit_id=commit_id)
1845 1845 return commit.date
1846 1846
1847 1847
1848 1848 def get_permission_name(key):
1849 1849 return dict(Permission.PERMS).get(key)
1850 1850
1851 1851
1852 1852 def journal_filter_help():
1853 1853 return _(
1854 1854 'Example filter terms:\n' +
1855 1855 ' repository:vcs\n' +
1856 1856 ' username:marcin\n' +
1857 1857 ' action:*push*\n' +
1858 1858 ' ip:127.0.0.1\n' +
1859 1859 ' date:20120101\n' +
1860 1860 ' date:[20120101100000 TO 20120102]\n' +
1861 1861 '\n' +
1862 1862 'Generate wildcards using \'*\' character:\n' +
1863 1863 ' "repository:vcs*" - search everything starting with \'vcs\'\n' +
1864 1864 ' "repository:*vcs*" - search for repository containing \'vcs\'\n' +
1865 1865 '\n' +
1866 1866 'Optional AND / OR operators in queries\n' +
1867 1867 ' "repository:vcs OR repository:test"\n' +
1868 1868 ' "username:test AND repository:test*"\n'
1869 1869 )
1870 1870
1871 1871
1872 1872 def not_mapped_error(repo_name):
1873 1873 flash(_('%s repository is not mapped to the database; perhaps'
1874 1874 ' it was created or renamed from the filesystem.'
1875 1875 ' Please run the application again'
1876 1876 ' in order to rescan repositories') % repo_name, category='error')
1877 1877
1878 1878
1879 1879 def ip_range(ip_addr):
1880 1880 from rhodecode.model.db import UserIpMap
1881 1881 s, e = UserIpMap._get_ip_range(ip_addr)
1882 1882 return '%s - %s' % (s, e)
1883 1883
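# Editor's note: illustrative only; this assumes UserIpMap._get_ip_range
# expands a CIDR mask to the network's first and last address, e.g.
#   ip_range('192.168.1.0/24')  ->  '192.168.1.0 - 192.168.1.255'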
1884 1884
1885 1885 def form(url, method='post', needs_csrf_token=True, **attrs):
1886 1886 """Wrapper around webhelpers.tags.form to prevent CSRF attacks."""
1887 1887 if method.lower() != 'get' and needs_csrf_token:
1888 1888 raise Exception(
1889 1889 'Forms to POST/PUT/DELETE endpoints should have (in general) a ' +
1890 1890 'CSRF token. If the endpoint does not require such a token you can ' +
1891 1891 'explicitly set the parameter needs_csrf_token to False.')
1892 1892
1893 1893 return wh_form(url, method=method, **attrs)
1894 1894
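# Editor's note: illustrative usage with hypothetical URLs. A plain GET form
# needs no token:
#   form('/some/search', method='get')
# while form('/some/endpoint', method='post') raises the exception above
# unless needs_csrf_token=False is passed or secure_form() is used instead.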
1895 1895
1896 1896 def secure_form(url, method="POST", multipart=False, **attrs):
1897 1897 """Start a form tag that points the action to an url. This
1898 1898 form tag will also include the hidden field containing
1899 1899 the auth token.
1900 1900
1901 1901 The url options should be given either as a string, or as a
1902 1902 ``url()`` function. The method for the form defaults to POST.
1903 1903
1904 1904 Options:
1905 1905
1906 1906 ``multipart``
1907 1907 If set to True, the enctype is set to "multipart/form-data".
1908 1908 ``method``
1909 1909 The method to use when submitting the form, usually either
1910 1910 "GET" or "POST". If "PUT", "DELETE", or another verb is used, a
1911 1911 hidden input with name _method is added to simulate the verb
1912 1912 over POST.
1913 1913
1914 1914 """
1915 1915 from webhelpers.pylonslib.secure_form import insecure_form
1916 1916 form = insecure_form(url, method, multipart, **attrs)
1917 1917 token = csrf_input()
1918 1918 return literal("%s\n%s" % (form, token))
1919 1919
1920 1920 def csrf_input():
1921 1921 return literal(
1922 1922 '<input type="hidden" id="{}" name="{}" value="{}">'.format(
1923 1923 csrf_token_key, csrf_token_key, get_csrf_token()))
1924 1924
1925 1925 def dropdownmenu(name, selected, options, enable_filter=False, **attrs):
1926 1926 select_html = select(name, selected, options, **attrs)
1927 1927 select2 = """
1928 1928 <script>
1929 1929 $(document).ready(function() {
1930 1930 $('#%s').select2({
1931 1931 containerCssClass: 'drop-menu',
1932 1932 dropdownCssClass: 'drop-menu-dropdown',
1933 1933 dropdownAutoWidth: true%s
1934 1934 });
1935 1935 });
1936 1936 </script>
1937 1937 """
1938 1938 filter_option = """,
1939 1939 minimumResultsForSearch: -1
1940 1940 """
1941 1941 input_id = attrs.get('id') or name
1942 1942 filter_enabled = "" if enable_filter else filter_option
1943 1943 select_script = literal(select2 % (input_id, filter_enabled))
1944 1944
1945 1945 return literal(select_html+select_script)
1946 1946
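# Editor's note: illustrative usage with hypothetical field name and options:
#   dropdownmenu('landing_rev', 'rev:tip',
#                [('rev:tip', 'latest tip'), ('branch:default', 'default')])
# renders the plain <select> plus a small script turning it into a select2
# drop-down; pass enable_filter=True to keep the search box (otherwise
# minimumResultsForSearch: -1 hides it).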
1947 1947
1948 1948 def get_visual_attr(tmpl_context_var, attr_name):
1949 1949 """
1950 1950 A safe way to get an attribute from the `visual` variable of the template context
1951 1951
1952 1952 :param tmpl_context_var: instance of tmpl_context, usually present as `c`
1953 1953 :param attr_name: name of the attribute we fetch from the c.visual
1954 1954 """
1955 1955 visual = getattr(tmpl_context_var, 'visual', None)
1956 1956 if not visual:
1957 1957 return
1958 1958 else:
1959 1959 return getattr(visual, attr_name, None)
1960 1960
1961 1961
1962 1962 def get_last_path_part(file_node):
1963 1963 if not file_node.path:
1964 1964 return u''
1965 1965
1966 1966 path = safe_unicode(file_node.path.split('/')[-1])
1967 1967 return u'../' + path
1968 1968
1969 1969
1970 1970 def route_path(*args, **kwds):
1971 1971 """
1972 1972 Wrapper around pyramid's `route_path` function. It is used to generate
1973 1973 URLs from within pylons views or templates. This will be removed when
1974 1974 the pyramid migration is finished.
1975 1975 """
1976 1976 req = get_current_request()
1977 1977 return req.route_path(*args, **kwds)
1978 1978
1979 1979
1980 1980 def route_path_or_none(*args, **kwargs):
1981 1981 try:
1982 1982 return route_path(*args, **kwargs)
1983 1983 except KeyError:
1984 1984 return None
1985 1985
1986 1986
1987 1987 def static_url(*args, **kwds):
1988 1988 """
1989 1989 Wrapper around pyramid's `static_url` function. It is used to generate
1990 1990 URLs from within pylons views or templates. This will be removed when
1991 1991 the pyramid migration is finished.
1992 1992 """
1993 1993 req = get_current_request()
1994 1994 return req.static_url(*args, **kwds)
1995 1995
1996 1996
1997 1997 def resource_path(*args, **kwds):
1998 1998 """
1999 1999 Wrapper around pyramid's `resource_path` function. It is used to generate
2000 2000 URLs from within pylons views or templates. This will be removed when
2001 2001 the pyramid migration is finished.
2002 2002 """
2003 2003 req = get_current_request()
2004 2004 return req.resource_path(*args, **kwds)