helpers: fix tzinfo can be undefined if wrong object is passed.
marcink
r1352:7f65a6d5 default
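The change below fixes an UnboundLocalError in the age_component() helper: previously tzinfo was only assigned inside the isinstance(datetime_iso, datetime) branch, so passing a "wrong" object (for example a plain ISO string) could reach the final .format() call with tzinfo never defined. The fix hoists the '+00:00' default above the check. A minimal, self-contained sketch of the fixed pattern, using only the standard library (the _tz_suffix name and the sample value are illustrative, not part of the codebase):

import time
from datetime import datetime

def _tz_suffix(datetime_iso, time_is_local=False):
    # default first, so the name is always bound even for non-datetime input
    tzinfo = '+00:00'
    # only refine the offset for naive datetime objects
    if isinstance(datetime_iso, datetime) and not datetime_iso.tzinfo:
        if time_is_local:
            tzinfo = time.strftime(
                "+%H:%M",
                time.gmtime((datetime.now() - datetime.utcnow()).seconds + 1))
    return tzinfo

print(_tz_suffix("2017-01-01T10:00:00"))  # '+00:00' instead of UnboundLocalError

Before the fix, that default assignment lived only inside the isinstance() branch; the diff below moves it up.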
@@ -1,2004 +1,2003 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2010-2017 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21 """
22 22 Helper functions
23 23
24 24 Consists of functions typically used within templates, but also
25 25 available to Controllers. This module is available to both as 'h'.
26 26 """
27 27
28 28 import random
29 29 import hashlib
30 30 import StringIO
31 31 import urllib
32 32 import math
33 33 import logging
34 34 import re
35 35 import urlparse
36 36 import time
37 37 import string
38 38 import hashlib
39 39 import pygments
40 40
41 41 from datetime import datetime
42 42 from functools import partial
43 43 from pygments.formatters.html import HtmlFormatter
44 44 from pygments import highlight as code_highlight
45 45 from pygments.lexers import (
46 46 get_lexer_by_name, get_lexer_for_filename, get_lexer_for_mimetype)
47 47 from pylons import url as pylons_url
48 48 from pylons.i18n.translation import _, ungettext
49 49 from pyramid.threadlocal import get_current_request
50 50
51 51 from webhelpers.html import literal, HTML, escape
52 52 from webhelpers.html.tools import *
53 53 from webhelpers.html.builder import make_tag
54 54 from webhelpers.html.tags import auto_discovery_link, checkbox, css_classes, \
55 55 end_form, file, form as wh_form, hidden, image, javascript_link, link_to, \
56 56 link_to_if, link_to_unless, ol, required_legend, select, stylesheet_link, \
57 57 submit, text, password, textarea, title, ul, xml_declaration, radio
58 58 from webhelpers.html.tools import auto_link, button_to, highlight, \
59 59 js_obfuscate, mail_to, strip_links, strip_tags, tag_re
60 60 from webhelpers.pylonslib import Flash as _Flash
61 61 from webhelpers.text import chop_at, collapse, convert_accented_entities, \
62 62 convert_misc_entities, lchop, plural, rchop, remove_formatting, \
63 63 replace_whitespace, urlify, truncate, wrap_paragraphs
64 64 from webhelpers.date import time_ago_in_words
65 65 from webhelpers.paginate import Page as _Page
66 66 from webhelpers.html.tags import _set_input_attrs, _set_id_attr, \
67 67 convert_boolean_attrs, NotGiven, _make_safe_id_component
68 68 from webhelpers2.number import format_byte_size
69 69
70 70 from rhodecode.lib.action_parser import action_parser
71 71 from rhodecode.lib.ext_json import json
72 72 from rhodecode.lib.utils import repo_name_slug, get_custom_lexer
73 73 from rhodecode.lib.utils2 import str2bool, safe_unicode, safe_str, \
74 74 get_commit_safe, datetime_to_time, time_to_datetime, time_to_utcdatetime, \
75 75 AttributeDict, safe_int, md5, md5_safe
76 76 from rhodecode.lib.markup_renderer import MarkupRenderer
77 77 from rhodecode.lib.vcs.exceptions import CommitDoesNotExistError
78 78 from rhodecode.lib.vcs.backends.base import BaseChangeset, EmptyCommit
79 79 from rhodecode.config.conf import DATE_FORMAT, DATETIME_FORMAT
80 80 from rhodecode.model.changeset_status import ChangesetStatusModel
81 81 from rhodecode.model.db import Permission, User, Repository
82 82 from rhodecode.model.repo_group import RepoGroupModel
83 83 from rhodecode.model.settings import IssueTrackerSettingsModel
84 84
85 85 log = logging.getLogger(__name__)
86 86
87 87
88 88 DEFAULT_USER = User.DEFAULT_USER
89 89 DEFAULT_USER_EMAIL = User.DEFAULT_USER_EMAIL
90 90
91 91
92 92 def url(*args, **kw):
93 93 return pylons_url(*args, **kw)
94 94
95 95
96 96 def pylons_url_current(*args, **kw):
97 97 """
98 98 This function overrides pylons.url.current() which returns the current
99 99 path so that it will also work from a pyramid-only context. This
100 100 should be removed once the port to pyramid is complete.
101 101 """
102 102 if not args and not kw:
103 103 request = get_current_request()
104 104 return request.path
105 105 return pylons_url.current(*args, **kw)
106 106
107 107 url.current = pylons_url_current
108 108
109 109
110 110 def url_replace(**qargs):
111 111 """ Returns the current request url while replacing query string args """
112 112
113 113 request = get_current_request()
114 114 new_args = request.GET.mixed()
115 115 new_args.update(qargs)
116 116 return url('', **new_args)
117 117
118 118
119 119 def asset(path, ver=None):
120 120 """
121 121 Helper to generate a static asset file path for rhodecode assets
122 122
123 123 eg. h.asset('images/image.png', ver='3923')
124 124
125 125 :param path: path of asset
126 126 :param ver: optional version query param to append as ?ver=
127 127 """
128 128 request = get_current_request()
129 129 query = {}
130 130 if ver:
131 131 query = {'ver': ver}
132 132 return request.static_path(
133 133 'rhodecode:public/{}'.format(path), _query=query)
134 134
135 135
136 136 default_html_escape_table = {
137 137 ord('&'): u'&amp;',
138 138 ord('<'): u'&lt;',
139 139 ord('>'): u'&gt;',
140 140 ord('"'): u'&quot;',
141 141 ord("'"): u'&#39;',
142 142 }
143 143
144 144
145 145 def html_escape(text, html_escape_table=default_html_escape_table):
146 146 """Produce entities within text."""
147 147 return text.translate(html_escape_table)
148 148
149 149
150 150 def chop_at_smart(s, sub, inclusive=False, suffix_if_chopped=None):
151 151 """
152 152 Truncate string ``s`` at the first occurrence of ``sub``.
153 153
154 154 If ``inclusive`` is true, truncate just after ``sub`` rather than at it.
155 155 """
156 156 suffix_if_chopped = suffix_if_chopped or ''
157 157 pos = s.find(sub)
158 158 if pos == -1:
159 159 return s
160 160
161 161 if inclusive:
162 162 pos += len(sub)
163 163
164 164 chopped = s[:pos]
165 165 left = s[pos:].strip()
166 166
167 167 if left and suffix_if_chopped:
168 168 chopped += suffix_if_chopped
169 169
170 170 return chopped
171 171
172 172
173 173 def shorter(text, size=20):
174 174 postfix = '...'
175 175 if len(text) > size:
176 176 return text[:size - len(postfix)] + postfix
177 177 return text
178 178
179 179
180 180 def _reset(name, value=None, id=NotGiven, type="reset", **attrs):
181 181 """
182 182 Reset button
183 183 """
184 184 _set_input_attrs(attrs, type, name, value)
185 185 _set_id_attr(attrs, id, name)
186 186 convert_boolean_attrs(attrs, ["disabled"])
187 187 return HTML.input(**attrs)
188 188
189 189 reset = _reset
190 190 safeid = _make_safe_id_component
191 191
192 192
193 193 def branding(name, length=40):
194 194 return truncate(name, length, indicator="")
195 195
196 196
197 197 def FID(raw_id, path):
198 198 """
199 199 Creates a unique ID for a filenode based on the hash of its path and commit;
200 200 it's safe to use in urls
201 201
202 202 :param raw_id:
203 203 :param path:
204 204 """
205 205
206 206 return 'c-%s-%s' % (short_id(raw_id), md5_safe(path)[:12])
207 207
208 208
209 209 class _GetError(object):
210 210 """Get error from form_errors, and represent it as span wrapped error
211 211 message
212 212
213 213 :param field_name: field to fetch errors for
214 214 :param form_errors: form errors dict
215 215 """
216 216
217 217 def __call__(self, field_name, form_errors):
218 218 tmpl = """<span class="error_msg">%s</span>"""
219 219 if form_errors and field_name in form_errors:
220 220 return literal(tmpl % form_errors.get(field_name))
221 221
222 222 get_error = _GetError()
223 223
224 224
225 225 class _ToolTip(object):
226 226
227 227 def __call__(self, tooltip_title, trim_at=50):
228 228 """
229 229 Special function just to wrap our text into nicely formatted,
230 230 autowrapped text
231 231
232 232 :param tooltip_title:
233 233 """
234 234 tooltip_title = escape(tooltip_title)
235 235 tooltip_title = tooltip_title.replace('<', '&lt;').replace('>', '&gt;')
236 236 return tooltip_title
237 237 tooltip = _ToolTip()
238 238
239 239
240 240 def files_breadcrumbs(repo_name, commit_id, file_path):
241 241 if isinstance(file_path, str):
242 242 file_path = safe_unicode(file_path)
243 243
244 244 # TODO: johbo: Is this always a url like path, or is this operating
245 245 # system dependent?
246 246 path_segments = file_path.split('/')
247 247
248 248 repo_name_html = escape(repo_name)
249 249 if len(path_segments) == 1 and path_segments[0] == '':
250 250 url_segments = [repo_name_html]
251 251 else:
252 252 url_segments = [
253 253 link_to(
254 254 repo_name_html,
255 255 url('files_home',
256 256 repo_name=repo_name,
257 257 revision=commit_id,
258 258 f_path=''),
259 259 class_='pjax-link')]
260 260
261 261 last_cnt = len(path_segments) - 1
262 262 for cnt, segment in enumerate(path_segments):
263 263 if not segment:
264 264 continue
265 265 segment_html = escape(segment)
266 266
267 267 if cnt != last_cnt:
268 268 url_segments.append(
269 269 link_to(
270 270 segment_html,
271 271 url('files_home',
272 272 repo_name=repo_name,
273 273 revision=commit_id,
274 274 f_path='/'.join(path_segments[:cnt + 1])),
275 275 class_='pjax-link'))
276 276 else:
277 277 url_segments.append(segment_html)
278 278
279 279 return literal('/'.join(url_segments))
280 280
281 281
282 282 class CodeHtmlFormatter(HtmlFormatter):
283 283 """
284 284 My code Html Formatter for source codes
285 285 """
286 286
287 287 def wrap(self, source, outfile):
288 288 return self._wrap_div(self._wrap_pre(self._wrap_code(source)))
289 289
290 290 def _wrap_code(self, source):
291 291 for cnt, it in enumerate(source):
292 292 i, t = it
293 293 t = '<div id="L%s">%s</div>' % (cnt + 1, t)
294 294 yield i, t
295 295
296 296 def _wrap_tablelinenos(self, inner):
297 297 dummyoutfile = StringIO.StringIO()
298 298 lncount = 0
299 299 for t, line in inner:
300 300 if t:
301 301 lncount += 1
302 302 dummyoutfile.write(line)
303 303
304 304 fl = self.linenostart
305 305 mw = len(str(lncount + fl - 1))
306 306 sp = self.linenospecial
307 307 st = self.linenostep
308 308 la = self.lineanchors
309 309 aln = self.anchorlinenos
310 310 nocls = self.noclasses
311 311 if sp:
312 312 lines = []
313 313
314 314 for i in range(fl, fl + lncount):
315 315 if i % st == 0:
316 316 if i % sp == 0:
317 317 if aln:
318 318 lines.append('<a href="#%s%d" class="special">%*d</a>' %
319 319 (la, i, mw, i))
320 320 else:
321 321 lines.append('<span class="special">%*d</span>' % (mw, i))
322 322 else:
323 323 if aln:
324 324 lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
325 325 else:
326 326 lines.append('%*d' % (mw, i))
327 327 else:
328 328 lines.append('')
329 329 ls = '\n'.join(lines)
330 330 else:
331 331 lines = []
332 332 for i in range(fl, fl + lncount):
333 333 if i % st == 0:
334 334 if aln:
335 335 lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
336 336 else:
337 337 lines.append('%*d' % (mw, i))
338 338 else:
339 339 lines.append('')
340 340 ls = '\n'.join(lines)
341 341
342 342 # in case you wonder about the seemingly redundant <div> here: since the
343 343 # content in the other cell also is wrapped in a div, some browsers in
344 344 # some configurations seem to mess up the formatting...
345 345 if nocls:
346 346 yield 0, ('<table class="%stable">' % self.cssclass +
347 347 '<tr><td><div class="linenodiv" '
348 348 'style="background-color: #f0f0f0; padding-right: 10px">'
349 349 '<pre style="line-height: 125%">' +
350 350 ls + '</pre></div></td><td id="hlcode" class="code">')
351 351 else:
352 352 yield 0, ('<table class="%stable">' % self.cssclass +
353 353 '<tr><td class="linenos"><div class="linenodiv"><pre>' +
354 354 ls + '</pre></div></td><td id="hlcode" class="code">')
355 355 yield 0, dummyoutfile.getvalue()
356 356 yield 0, '</td></tr></table>'
357 357
358 358
359 359 class SearchContentCodeHtmlFormatter(CodeHtmlFormatter):
360 360 def __init__(self, **kw):
361 361 # only show these line numbers if set
362 362 self.only_lines = kw.pop('only_line_numbers', [])
363 363 self.query_terms = kw.pop('query_terms', [])
364 364 self.max_lines = kw.pop('max_lines', 5)
365 365 self.line_context = kw.pop('line_context', 3)
366 366 self.url = kw.pop('url', None)
367 367
368 368 super(CodeHtmlFormatter, self).__init__(**kw)
369 369
370 370 def _wrap_code(self, source):
371 371 for cnt, it in enumerate(source):
372 372 i, t = it
373 373 t = '<pre>%s</pre>' % t
374 374 yield i, t
375 375
376 376 def _wrap_tablelinenos(self, inner):
377 377 yield 0, '<table class="code-highlight %stable">' % self.cssclass
378 378
379 379 last_shown_line_number = 0
380 380 current_line_number = 1
381 381
382 382 for t, line in inner:
383 383 if not t:
384 384 yield t, line
385 385 continue
386 386
387 387 if current_line_number in self.only_lines:
388 388 if last_shown_line_number + 1 != current_line_number:
389 389 yield 0, '<tr>'
390 390 yield 0, '<td class="line">...</td>'
391 391 yield 0, '<td id="hlcode" class="code"></td>'
392 392 yield 0, '</tr>'
393 393
394 394 yield 0, '<tr>'
395 395 if self.url:
396 396 yield 0, '<td class="line"><a href="%s#L%i">%i</a></td>' % (
397 397 self.url, current_line_number, current_line_number)
398 398 else:
399 399 yield 0, '<td class="line"><a href="">%i</a></td>' % (
400 400 current_line_number)
401 401 yield 0, '<td id="hlcode" class="code">' + line + '</td>'
402 402 yield 0, '</tr>'
403 403
404 404 last_shown_line_number = current_line_number
405 405
406 406 current_line_number += 1
407 407
408 408
409 409 yield 0, '</table>'
410 410
411 411
412 412 def extract_phrases(text_query):
413 413 """
413 413 Extracts phrases from a search term string, making sure phrases
414 414 contained in double quotes are kept together, and discarding empty
415 415 or whitespace-only values, eg.
417 417
418 418 'some text "a phrase" more' => ['some', 'text', 'a phrase', 'more']
419 419
420 420 """
421 421
422 422 in_phrase = False
423 423 buf = ''
424 424 phrases = []
425 425 for char in text_query:
426 426 if in_phrase:
427 427 if char == '"': # end phrase
428 428 phrases.append(buf)
429 429 buf = ''
430 430 in_phrase = False
431 431 continue
432 432 else:
433 433 buf += char
434 434 continue
435 435 else:
436 436 if char == '"': # start phrase
437 437 in_phrase = True
438 438 phrases.append(buf)
439 439 buf = ''
440 440 continue
441 441 elif char == ' ':
442 442 phrases.append(buf)
443 443 buf = ''
444 444 continue
445 445 else:
446 446 buf += char
447 447
448 448 phrases.append(buf)
449 449 phrases = [phrase.strip() for phrase in phrases if phrase.strip()]
450 450 return phrases
451 451
452 452
453 453 def get_matching_offsets(text, phrases):
454 454 """
455 455 Returns a list of string offsets in `text` that the list of `phrases` match
456 456
457 457 >>> get_matching_offsets('some text here', ['some', 'here'])
458 458 [(0, 4), (10, 14)]
459 459
460 460 """
461 461 offsets = []
462 462 for phrase in phrases:
463 463 for match in re.finditer(phrase, text):
464 464 offsets.append((match.start(), match.end()))
465 465
466 466 return offsets
467 467
468 468
469 469 def normalize_text_for_matching(x):
470 470 """
471 471 Replaces all non-alnum characters with spaces and lowercases the string;
472 472 useful for comparing two text strings without punctuation
473 473 """
474 474 return re.sub(r'[^\w]', ' ', x.lower())
475 475
476 476
477 477 def get_matching_line_offsets(lines, terms):
478 478 """ Return a set of `lines` indices (starting from 1) matching a
479 479 text search query, along with `context` lines above/below matching lines
480 480
481 481 :param lines: list of strings representing lines
482 482 :param terms: search term string to match in lines eg. 'some text'
483 483 :param context: number of lines above/below a matching line to add to result
484 484 :param max_lines: cut off for lines of interest
485 485 eg.
486 486
487 487 text = '''
488 488 words words words
489 489 words words words
490 490 some text some
491 491 words words words
492 492 words words words
493 493 text here what
494 494 '''
495 495 get_matching_line_offsets(text, 'text', context=1)
496 496 {3: [(5, 9)], 6: [(0, 4)]}
497 497
498 498 """
499 499 matching_lines = {}
500 500 phrases = [normalize_text_for_matching(phrase)
501 501 for phrase in extract_phrases(terms)]
502 502
503 503 for line_index, line in enumerate(lines, start=1):
504 504 match_offsets = get_matching_offsets(
505 505 normalize_text_for_matching(line), phrases)
506 506 if match_offsets:
507 507 matching_lines[line_index] = match_offsets
508 508
509 509 return matching_lines
510 510
511 511
512 512 def hsv_to_rgb(h, s, v):
513 513 """ Convert hsv color values to rgb """
514 514
515 515 if s == 0.0:
516 516 return v, v, v
517 517 i = int(h * 6.0) # XXX assume int() truncates!
518 518 f = (h * 6.0) - i
519 519 p = v * (1.0 - s)
520 520 q = v * (1.0 - s * f)
521 521 t = v * (1.0 - s * (1.0 - f))
522 522 i = i % 6
523 523 if i == 0:
524 524 return v, t, p
525 525 if i == 1:
526 526 return q, v, p
527 527 if i == 2:
528 528 return p, v, t
529 529 if i == 3:
530 530 return p, q, v
531 531 if i == 4:
532 532 return t, p, v
533 533 if i == 5:
534 534 return v, p, q
535 535
536 536
537 537 def unique_color_generator(n=10000, saturation=0.10, lightness=0.95):
538 538 """
539 539 Generator for getting n evenly distributed colors using
540 540 hsv color and golden ratio. It always returns the same order of colors
541 541
542 542 :param n: number of colors to generate
543 543 :param saturation: saturation of returned colors
544 544 :param lightness: lightness of returned colors
545 545 :returns: RGB tuple
546 546 """
547 547
548 548 golden_ratio = 0.618033988749895
549 549 h = 0.22717784590367374
550 550
551 551 for _ in xrange(n):
552 552 h += golden_ratio
553 553 h %= 1
554 554 HSV_tuple = [h, saturation, lightness]
555 555 RGB_tuple = hsv_to_rgb(*HSV_tuple)
556 556 yield map(lambda x: str(int(x * 256)), RGB_tuple)
557 557
558 558
559 559 def color_hasher(n=10000, saturation=0.10, lightness=0.95):
560 560 """
561 561 Returns a function which, when called with an argument, returns a unique
562 562 color for that argument, eg.
563 563
564 564 :param n: number of colors to generate
565 565 :param saturation: saturation of returned colors
566 566 :param lightness: lightness of returned colors
567 567 :returns: css RGB string
568 568
569 569 >>> color_hash = color_hasher()
570 570 >>> color_hash('hello')
571 571 'rgb(34, 12, 59)'
572 572 >>> color_hash('hello')
573 573 'rgb(34, 12, 59)'
574 574 >>> color_hash('other')
575 575 'rgb(90, 224, 159)'
576 576 """
577 577
578 578 color_dict = {}
579 579 cgenerator = unique_color_generator(
580 580 saturation=saturation, lightness=lightness)
581 581
582 582 def get_color_string(thing):
583 583 if thing in color_dict:
584 584 col = color_dict[thing]
585 585 else:
586 586 col = color_dict[thing] = cgenerator.next()
587 587 return "rgb(%s)" % (', '.join(col))
588 588
589 589 return get_color_string
590 590
591 591
592 592 def get_lexer_safe(mimetype=None, filepath=None):
593 593 """
594 594 Tries to return a relevant pygments lexer using mimetype/filepath name,
595 595 defaulting to plain text if none could be found
596 596 """
597 597 lexer = None
598 598 try:
599 599 if mimetype:
600 600 lexer = get_lexer_for_mimetype(mimetype)
601 601 if not lexer:
602 602 lexer = get_lexer_for_filename(filepath)
603 603 except pygments.util.ClassNotFound:
604 604 pass
605 605
606 606 if not lexer:
607 607 lexer = get_lexer_by_name('text')
608 608
609 609 return lexer
610 610
611 611
612 612 def get_lexer_for_filenode(filenode):
613 613 lexer = get_custom_lexer(filenode.extension) or filenode.lexer
614 614 return lexer
615 615
616 616
617 617 def pygmentize(filenode, **kwargs):
618 618 """
619 619 pygmentize function using pygments
620 620
621 621 :param filenode:
622 622 """
623 623 lexer = get_lexer_for_filenode(filenode)
624 624 return literal(code_highlight(filenode.content, lexer,
625 625 CodeHtmlFormatter(**kwargs)))
626 626
627 627
628 628 def is_following_repo(repo_name, user_id):
629 629 from rhodecode.model.scm import ScmModel
630 630 return ScmModel().is_following_repo(repo_name, user_id)
631 631
632 632
633 633 class _Message(object):
634 634 """A message returned by ``Flash.pop_messages()``.
635 635
636 636 Converting the message to a string returns the message text. Instances
637 637 also have the following attributes:
638 638
639 639 * ``message``: the message text.
640 640 * ``category``: the category specified when the message was created.
641 641 """
642 642
643 643 def __init__(self, category, message):
644 644 self.category = category
645 645 self.message = message
646 646
647 647 def __str__(self):
648 648 return self.message
649 649
650 650 __unicode__ = __str__
651 651
652 652 def __html__(self):
653 653 return escape(safe_unicode(self.message))
654 654
655 655
656 656 class Flash(_Flash):
657 657
658 658 def pop_messages(self):
659 659 """Return all accumulated messages and delete them from the session.
660 660
661 661 The return value is a list of ``Message`` objects.
662 662 """
663 663 from pylons import session
664 664
665 665 messages = []
666 666
667 667 # Pop the 'old' pylons flash messages. They are tuples of the form
668 668 # (category, message)
669 669 for cat, msg in session.pop(self.session_key, []):
670 670 messages.append(_Message(cat, msg))
671 671
672 672 # Pop the 'new' pyramid flash messages for each category as list
673 673 # of strings.
674 674 for cat in self.categories:
675 675 for msg in session.pop_flash(queue=cat):
676 676 messages.append(_Message(cat, msg))
677 677 # Map messages from the default queue to the 'notice' category.
678 678 for msg in session.pop_flash():
679 679 messages.append(_Message('notice', msg))
680 680
681 681 session.save()
682 682 return messages
683 683
684 684 def json_alerts(self):
685 685 payloads = []
686 686 messages = flash.pop_messages()
687 687 if messages:
688 688 for message in messages:
689 689 subdata = {}
690 690 if hasattr(message.message, 'rsplit'):
691 691 flash_data = message.message.rsplit('|DELIM|', 1)
692 692 org_message = flash_data[0]
693 693 if len(flash_data) > 1:
694 694 subdata = json.loads(flash_data[1])
695 695 else:
696 696 org_message = message.message
697 697 payloads.append({
698 698 'message': {
699 699 'message': u'{}'.format(org_message),
700 700 'level': message.category,
701 701 'force': True,
702 702 'subdata': subdata
703 703 }
704 704 })
705 705 return json.dumps(payloads)
706 706
707 707 flash = Flash()
708 708
709 709 #==============================================================================
710 710 # SCM FILTERS available via h.
711 711 #==============================================================================
712 712 from rhodecode.lib.vcs.utils import author_name, author_email
713 713 from rhodecode.lib.utils2 import credentials_filter, age as _age
714 714 from rhodecode.model.db import User, ChangesetStatus
715 715
716 716 age = _age
717 717 capitalize = lambda x: x.capitalize()
718 718 email = author_email
719 719 short_id = lambda x: x[:12]
720 720 hide_credentials = lambda x: ''.join(credentials_filter(x))
721 721
722 722
723 723 def age_component(datetime_iso, value=None, time_is_local=False):
724 724 title = value or format_date(datetime_iso)
725 tzinfo = '+00:00'
725 726
726 727 # detect if we have a timezone info, otherwise, add it
727 728 if isinstance(datetime_iso, datetime) and not datetime_iso.tzinfo:
728 tzinfo = '+00:00'
729
730 729 if time_is_local:
731 730 tzinfo = time.strftime("+%H:%M",
732 731 time.gmtime(
733 732 (datetime.now() - datetime.utcnow()).seconds + 1
734 733 )
735 734 )
736 735
737 736 return literal(
738 737 '<time class="timeago tooltip" '
739 738 'title="{1}" datetime="{0}{2}">{1}</time>'.format(
740 739 datetime_iso, title, tzinfo))
741 740
742 741
743 742 def _shorten_commit_id(commit_id):
744 743 from rhodecode import CONFIG
745 744 def_len = safe_int(CONFIG.get('rhodecode_show_sha_length', 12))
746 745 return commit_id[:def_len]
747 746
748 747
749 748 def show_id(commit):
750 749 """
751 750 Configurable function that shows the ID;
752 751 by default it's r123:fffeeefffeee
753 752
754 753 :param commit: commit instance
755 754 """
756 755 from rhodecode import CONFIG
757 756 show_idx = str2bool(CONFIG.get('rhodecode_show_revision_number', True))
758 757
759 758 raw_id = _shorten_commit_id(commit.raw_id)
760 759 if show_idx:
761 760 return 'r%s:%s' % (commit.idx, raw_id)
762 761 else:
763 762 return '%s' % (raw_id, )
764 763
765 764
766 765 def format_date(date):
767 766 """
768 767 use a standardized formatting for dates used in RhodeCode
769 768
770 769 :param date: date/datetime object
771 770 :return: formatted date
772 771 """
773 772
774 773 if date:
775 774 _fmt = "%a, %d %b %Y %H:%M:%S"
776 775 return safe_unicode(date.strftime(_fmt))
777 776
778 777 return u""
779 778
780 779
781 780 class _RepoChecker(object):
782 781
783 782 def __init__(self, backend_alias):
784 783 self._backend_alias = backend_alias
785 784
786 785 def __call__(self, repository):
787 786 if hasattr(repository, 'alias'):
788 787 _type = repository.alias
789 788 elif hasattr(repository, 'repo_type'):
790 789 _type = repository.repo_type
791 790 else:
792 791 _type = repository
793 792 return _type == self._backend_alias
794 793
795 794 is_git = _RepoChecker('git')
796 795 is_hg = _RepoChecker('hg')
797 796 is_svn = _RepoChecker('svn')
798 797
799 798
800 799 def get_repo_type_by_name(repo_name):
801 800 repo = Repository.get_by_repo_name(repo_name)
802 801 return repo.repo_type
803 802
804 803
805 804 def is_svn_without_proxy(repository):
806 805 if is_svn(repository):
807 806 from rhodecode.model.settings import VcsSettingsModel
808 807 conf = VcsSettingsModel().get_ui_settings_as_config_obj()
809 808 return not str2bool(conf.get('vcs_svn_proxy', 'http_requests_enabled'))
810 809 return False
811 810
812 811
813 812 def discover_user(author):
814 813 """
815 814 Tries to discover a RhodeCode User based on the author string. The author string
816 815 is typically `FirstName LastName <email@address.com>`
817 816 """
818 817
819 818 # if author is already an instance use it for extraction
820 819 if isinstance(author, User):
821 820 return author
822 821
823 822 # Valid email in the attribute passed, see if they're in the system
824 823 _email = author_email(author)
825 824 if _email != '':
826 825 user = User.get_by_email(_email, case_insensitive=True, cache=True)
827 826 if user is not None:
828 827 return user
829 828
830 829 # Maybe it's a username, we try to extract it and fetch by username ?
831 830 _author = author_name(author)
832 831 user = User.get_by_username(_author, case_insensitive=True, cache=True)
833 832 if user is not None:
834 833 return user
835 834
836 835 return None
837 836
838 837
839 838 def email_or_none(author):
840 839 # extract email from the commit string
841 840 _email = author_email(author)
842 841
843 842 # If we have an email, use it, otherwise
844 843 # see if it contains a username we can get an email from
845 844 if _email != '':
846 845 return _email
847 846 else:
848 847 user = User.get_by_username(
849 848 author_name(author), case_insensitive=True, cache=True)
850 849
851 850 if user is not None:
852 851 return user.email
853 852
854 853 # No valid email, not a valid user in the system, none!
855 854 return None
856 855
857 856
858 857 def link_to_user(author, length=0, **kwargs):
859 858 user = discover_user(author)
860 859 # user can be None, but if we have it already it means we can re-use it
861 860 # in the person() function, so we save 1 intensive-query
862 861 if user:
863 862 author = user
864 863
865 864 display_person = person(author, 'username_or_name_or_email')
866 865 if length:
867 866 display_person = shorter(display_person, length)
868 867
869 868 if user:
870 869 return link_to(
871 870 escape(display_person),
872 871 url('user_profile', username=user.username),
873 872 **kwargs)
874 873 else:
875 874 return escape(display_person)
876 875
877 876
878 877 def person(author, show_attr="username_and_name"):
879 878 user = discover_user(author)
880 879 if user:
881 880 return getattr(user, show_attr)
882 881 else:
883 882 _author = author_name(author)
884 883 _email = email(author)
885 884 return _author or _email
886 885
887 886
888 887 def author_string(email):
889 888 if email:
890 889 user = User.get_by_email(email, case_insensitive=True, cache=True)
891 890 if user:
892 891 if user.firstname or user.lastname:
893 892 return '%s %s &lt;%s&gt;' % (user.firstname, user.lastname, email)
894 893 else:
895 894 return email
896 895 else:
897 896 return email
898 897 else:
899 898 return None
900 899
901 900
902 901 def person_by_id(id_, show_attr="username_and_name"):
903 902 # attr to return from fetched user
904 903 person_getter = lambda usr: getattr(usr, show_attr)
905 904
906 905 #maybe it's an ID ?
907 906 if str(id_).isdigit() or isinstance(id_, int):
908 907 id_ = int(id_)
909 908 user = User.get(id_)
910 909 if user is not None:
911 910 return person_getter(user)
912 911 return id_
913 912
914 913
915 914 def gravatar_with_user(author, show_disabled=False):
916 915 from rhodecode.lib.utils import PartialRenderer
917 916 _render = PartialRenderer('base/base.mako')
918 917 return _render('gravatar_with_user', author, show_disabled=show_disabled)
919 918
920 919
921 920 def desc_stylize(value):
922 921 """
923 922 converts tags from value into html equivalent
924 923
925 924 :param value:
926 925 """
927 926 if not value:
928 927 return ''
929 928
930 929 value = re.sub(r'\[see\ \=\>\ *([a-zA-Z0-9\/\=\?\&\ \:\/\.\-]*)\]',
931 930 '<div class="metatag" tag="see">see =&gt; \\1 </div>', value)
932 931 value = re.sub(r'\[license\ \=\>\ *([a-zA-Z0-9\/\=\?\&\ \:\/\.\-]*)\]',
933 932 '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/\\1">\\1</a></div>', value)
934 933 value = re.sub(r'\[(requires|recommends|conflicts|base)\ \=\>\ *([a-zA-Z0-9\-\/]*)\]',
935 934 '<div class="metatag" tag="\\1">\\1 =&gt; <a href="/\\2">\\2</a></div>', value)
936 935 value = re.sub(r'\[(lang|language)\ \=\>\ *([a-zA-Z\-\/\#\+]*)\]',
937 936 '<div class="metatag" tag="lang">\\2</div>', value)
938 937 value = re.sub(r'\[([a-z]+)\]',
939 938 '<div class="metatag" tag="\\1">\\1</div>', value)
940 939
941 940 return value
942 941
943 942
944 943 def escaped_stylize(value):
945 944 """
946 945 converts tags from value into html equivalent, but escaping its value first
947 946 """
948 947 if not value:
949 948 return ''
950 949
951 950 # Using the default webhelper escape method, but we have to force it to a
952 951 # plain unicode instead of a markup tag to be used in regex expressions
953 952 value = unicode(escape(safe_unicode(value)))
954 953
955 954 value = re.sub(r'\[see\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]',
956 955 '<div class="metatag" tag="see">see =&gt; \\1 </div>', value)
957 956 value = re.sub(r'\[license\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]',
958 957 '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/\\1">\\1</a></div>', value)
959 958 value = re.sub(r'\[(requires|recommends|conflicts|base)\ \=\&gt;\ *([a-zA-Z0-9\-\/]*)\]',
960 959 '<div class="metatag" tag="\\1">\\1 =&gt; <a href="/\\2">\\2</a></div>', value)
961 960 value = re.sub(r'\[(lang|language)\ \=\&gt;\ *([a-zA-Z\-\/\#\+]*)\]',
962 961 '<div class="metatag" tag="lang">\\2</div>', value)
963 962 value = re.sub(r'\[([a-z]+)\]',
964 963 '<div class="metatag" tag="\\1">\\1</div>', value)
965 964
966 965 return value
967 966
968 967
969 968 def bool2icon(value):
970 969 """
971 970 Returns boolean value of a given value, represented as html element with
972 971 classes that will represent icons
973 972
974 973 :param value: given value to convert to html node
975 974 """
976 975
977 976 if value: # does bool conversion
978 977 return HTML.tag('i', class_="icon-true")
979 978 else: # not true as bool
980 979 return HTML.tag('i', class_="icon-false")
981 980
982 981
983 982 #==============================================================================
984 983 # PERMS
985 984 #==============================================================================
986 985 from rhodecode.lib.auth import HasPermissionAny, HasPermissionAll, \
987 986 HasRepoPermissionAny, HasRepoPermissionAll, HasRepoGroupPermissionAll, \
988 987 HasRepoGroupPermissionAny, HasRepoPermissionAnyApi, get_csrf_token, \
989 988 csrf_token_key
990 989
991 990
992 991 #==============================================================================
993 992 # GRAVATAR URL
994 993 #==============================================================================
995 994 class InitialsGravatar(object):
996 995 def __init__(self, email_address, first_name, last_name, size=30,
997 996 background=None, text_color='#fff'):
998 997 self.size = size
999 998 self.first_name = first_name
1000 999 self.last_name = last_name
1001 1000 self.email_address = email_address
1002 1001 self.background = background or self.str2color(email_address)
1003 1002 self.text_color = text_color
1004 1003
1005 1004 def get_color_bank(self):
1006 1005 """
1007 1006 returns a predefined list of colors that gravatars can use.
1008 1007 Those are randomized distinct colors that guarantee readability and
1009 1008 uniqueness.
1010 1009
1011 1010 generated with: http://phrogz.net/css/distinct-colors.html
1012 1011 """
1013 1012 return [
1014 1013 '#bf3030', '#a67f53', '#00ff00', '#5989b3', '#392040', '#d90000',
1015 1014 '#402910', '#204020', '#79baf2', '#a700b3', '#bf6060', '#7f5320',
1016 1015 '#008000', '#003059', '#ee00ff', '#ff0000', '#8c4b00', '#007300',
1017 1016 '#005fb3', '#de73e6', '#ff4040', '#ffaa00', '#3df255', '#203140',
1018 1017 '#47004d', '#591616', '#664400', '#59b365', '#0d2133', '#83008c',
1019 1018 '#592d2d', '#bf9f60', '#73e682', '#1d3f73', '#73006b', '#402020',
1020 1019 '#b2862d', '#397341', '#597db3', '#e600d6', '#a60000', '#736039',
1021 1020 '#00b318', '#79aaf2', '#330d30', '#ff8080', '#403010', '#16591f',
1022 1021 '#002459', '#8c4688', '#e50000', '#ffbf40', '#00732e', '#102340',
1023 1022 '#bf60ac', '#8c4646', '#cc8800', '#00a642', '#1d3473', '#b32d98',
1024 1023 '#660e00', '#ffd580', '#80ffb2', '#7391e6', '#733967', '#d97b6c',
1025 1024 '#8c5e00', '#59b389', '#3967e6', '#590047', '#73281d', '#665200',
1026 1025 '#00e67a', '#2d50b3', '#8c2377', '#734139', '#b2982d', '#16593a',
1027 1026 '#001859', '#ff00aa', '#a65e53', '#ffcc00', '#0d3321', '#2d3959',
1028 1027 '#731d56', '#401610', '#4c3d00', '#468c6c', '#002ca6', '#d936a3',
1029 1028 '#d94c36', '#403920', '#36d9a3', '#0d1733', '#592d4a', '#993626',
1030 1029 '#cca300', '#00734d', '#46598c', '#8c005e', '#7f1100', '#8c7000',
1031 1030 '#00a66f', '#7382e6', '#b32d74', '#d9896c', '#ffe680', '#1d7362',
1032 1031 '#364cd9', '#73003d', '#d93a00', '#998a4d', '#59b3a1', '#5965b3',
1033 1032 '#e5007a', '#73341d', '#665f00', '#00b38f', '#0018b3', '#59163a',
1034 1033 '#b2502d', '#bfb960', '#00ffcc', '#23318c', '#a6537f', '#734939',
1035 1034 '#b2a700', '#104036', '#3d3df2', '#402031', '#e56739', '#736f39',
1036 1035 '#79f2ea', '#000059', '#401029', '#4c1400', '#ffee00', '#005953',
1037 1036 '#101040', '#990052', '#402820', '#403d10', '#00ffee', '#0000d9',
1038 1037 '#ff80c4', '#a66953', '#eeff00', '#00ccbe', '#8080ff', '#e673a1',
1039 1038 '#a62c00', '#474d00', '#1a3331', '#46468c', '#733950', '#662900',
1040 1039 '#858c23', '#238c85', '#0f0073', '#b20047', '#d9986c', '#becc00',
1041 1040 '#396f73', '#281d73', '#ff0066', '#ff6600', '#dee673', '#59adb3',
1042 1041 '#6559b3', '#590024', '#b2622d', '#98b32d', '#36ced9', '#332d59',
1043 1042 '#40001a', '#733f1d', '#526600', '#005359', '#242040', '#bf6079',
1044 1043 '#735039', '#cef23d', '#007780', '#5630bf', '#66001b', '#b24700',
1045 1044 '#acbf60', '#1d6273', '#25008c', '#731d34', '#a67453', '#50592d',
1046 1045 '#00ccff', '#6600ff', '#ff0044', '#4c1f00', '#8a994d', '#79daf2',
1047 1046 '#a173e6', '#d93662', '#402310', '#aaff00', '#2d98b3', '#8c40ff',
1048 1047 '#592d39', '#ff8c40', '#354020', '#103640', '#1a0040', '#331a20',
1049 1048 '#331400', '#334d00', '#1d5673', '#583973', '#7f0022', '#4c3626',
1050 1049 '#88cc00', '#36a3d9', '#3d0073', '#d9364c', '#33241a', '#698c23',
1051 1050 '#5995b3', '#300059', '#e57382', '#7f3300', '#366600', '#00aaff',
1052 1051 '#3a1659', '#733941', '#663600', '#74b32d', '#003c59', '#7f53a6',
1053 1052 '#73000f', '#ff8800', '#baf279', '#79caf2', '#291040', '#a6293a',
1054 1053 '#b2742d', '#587339', '#0077b3', '#632699', '#400009', '#d9a66c',
1055 1054 '#294010', '#2d4a59', '#aa00ff', '#4c131b', '#b25f00', '#5ce600',
1056 1055 '#267399', '#a336d9', '#990014', '#664e33', '#86bf60', '#0088ff',
1057 1056 '#7700b3', '#593a16', '#073300', '#1d4b73', '#ac60bf', '#e59539',
1058 1057 '#4f8c46', '#368dd9', '#5c0073'
1059 1058 ]
1060 1059
1061 1060 def rgb_to_hex_color(self, rgb_tuple):
1062 1061 """
1063 1062 Converts a passed rgb_tuple to a hex color.
1064 1063
1065 1064 :param rgb_tuple: tuple with 3 ints represents rgb color space
1066 1065 """
1067 1066 return '#' + ("".join(map(chr, rgb_tuple)).encode('hex'))
1068 1067
1069 1068 def email_to_int_list(self, email_str):
1070 1069 """
1071 1070 Get every byte of the hex digest value of email and turn it to integer.
1072 1071 It will always be between 0 and 255
1073 1072 """
1074 1073 digest = md5_safe(email_str.lower())
1075 1074 return [int(digest[i * 2:i * 2 + 2], 16) for i in range(16)]
1076 1075
1077 1076 def pick_color_bank_index(self, email_str, color_bank):
1078 1077 return self.email_to_int_list(email_str)[0] % len(color_bank)
1079 1078
1080 1079 def str2color(self, email_str):
1081 1080 """
1082 1081 Tries to map an email to a color using a stable algorithm
1083 1082
1084 1083 :param email_str:
1085 1084 """
1086 1085 color_bank = self.get_color_bank()
1087 1086 # pick position (modulo its length so we always find it in the
1088 1087 # bank even if it's smaller than 256 values)
1089 1088 pos = self.pick_color_bank_index(email_str, color_bank)
1090 1089 return color_bank[pos]
1091 1090
1092 1091 def normalize_email(self, email_address):
1093 1092 import unicodedata
1094 1093 # default host used to fill in the fake/missing email
1095 1094 default_host = u'localhost'
1096 1095
1097 1096 if not email_address:
1098 1097 email_address = u'%s@%s' % (User.DEFAULT_USER, default_host)
1099 1098
1100 1099 email_address = safe_unicode(email_address)
1101 1100
1102 1101 if u'@' not in email_address:
1103 1102 email_address = u'%s@%s' % (email_address, default_host)
1104 1103
1105 1104 if email_address.endswith(u'@'):
1106 1105 email_address = u'%s%s' % (email_address, default_host)
1107 1106
1108 1107 email_address = unicodedata.normalize('NFKD', email_address)\
1109 1108 .encode('ascii', 'ignore')
1110 1109 return email_address
1111 1110
1112 1111 def get_initials(self):
1113 1112 """
1114 1113 Returns 2 letter initials calculated based on the input.
1115 1114 The algorithm picks first given email address, and takes first letter
1116 1115 of part before @, and then the first letter of server name. In case
1117 1116 the part before @ is in a format of `somestring.somestring2` it replaces
1118 1117 the server letter with first letter of somestring2
1119 1118
1120 1119 In case the function was initialized with both first and last name, this
1121 1120 overrides the extraction from email with the first letters of the first and
1122 1121 last name. We add special logic to that functionality: in case the full name
1123 1122 is compound, like Guido Von Rossum, we use the last part of the last name
1124 1123 (Von Rossum), picking `R`.
1125 1124
1126 1125 The function also normalizes non-ascii characters to their ascii
1127 1126 representation, eg Ą => A
1128 1127 """
1129 1128 import unicodedata
1130 1129 # replace non-ascii to ascii
1131 1130 first_name = unicodedata.normalize(
1132 1131 'NFKD', safe_unicode(self.first_name)).encode('ascii', 'ignore')
1133 1132 last_name = unicodedata.normalize(
1134 1133 'NFKD', safe_unicode(self.last_name)).encode('ascii', 'ignore')
1135 1134
1136 1135 # do NFKD encoding, and also make sure email has proper format
1137 1136 email_address = self.normalize_email(self.email_address)
1138 1137
1139 1138 # first push the email initials
1140 1139 prefix, server = email_address.split('@', 1)
1141 1140
1142 1141 # check if prefix is maybe a 'firstname.lastname' syntax
1143 1142 _dot_split = prefix.rsplit('.', 1)
1144 1143 if len(_dot_split) == 2:
1145 1144 initials = [_dot_split[0][0], _dot_split[1][0]]
1146 1145 else:
1147 1146 initials = [prefix[0], server[0]]
1148 1147
1149 1148 # then try to replace either firstname or lastname
1150 1149 fn_letter = (first_name or " ")[0].strip()
1151 1150 ln_letter = (last_name.split(' ', 1)[-1] or " ")[0].strip()
1152 1151
1153 1152 if fn_letter:
1154 1153 initials[0] = fn_letter
1155 1154
1156 1155 if ln_letter:
1157 1156 initials[1] = ln_letter
1158 1157
1159 1158 return ''.join(initials).upper()
1160 1159
1161 1160 def get_img_data_by_type(self, font_family, img_type):
1162 1161 default_user = """
1163 1162 <svg xmlns="http://www.w3.org/2000/svg"
1164 1163 version="1.1" x="0px" y="0px" width="{size}" height="{size}"
1165 1164 viewBox="-15 -10 439.165 429.164"
1166 1165
1167 1166 xml:space="preserve"
1168 1167 style="background:{background};" >
1169 1168
1170 1169 <path d="M204.583,216.671c50.664,0,91.74-48.075,
1171 1170 91.74-107.378c0-82.237-41.074-107.377-91.74-107.377
1172 1171 c-50.668,0-91.74,25.14-91.74,107.377C112.844,
1173 1172 168.596,153.916,216.671,
1174 1173 204.583,216.671z" fill="{text_color}"/>
1175 1174 <path d="M407.164,374.717L360.88,
1176 1175 270.454c-2.117-4.771-5.836-8.728-10.465-11.138l-71.83-37.392
1177 1176 c-1.584-0.823-3.502-0.663-4.926,0.415c-20.316,
1178 1177 15.366-44.203,23.488-69.076,23.488c-24.877,
1179 1178 0-48.762-8.122-69.078-23.488
1180 1179 c-1.428-1.078-3.346-1.238-4.93-0.415L58.75,
1181 1180 259.316c-4.631,2.41-8.346,6.365-10.465,11.138L2.001,374.717
1182 1181 c-3.191,7.188-2.537,15.412,1.75,22.005c4.285,
1183 1182 6.592,11.537,10.526,19.4,10.526h362.861c7.863,0,15.117-3.936,
1184 1183 19.402-10.527 C409.699,390.129,
1185 1184 410.355,381.902,407.164,374.717z" fill="{text_color}"/>
1186 1185 </svg>""".format(
1187 1186 size=self.size,
1188 1187 background='#979797', # @grey4
1189 1188 text_color=self.text_color,
1190 1189 font_family=font_family)
1191 1190
1192 1191 return {
1193 1192 "default_user": default_user
1194 1193 }[img_type]
1195 1194
1196 1195 def get_img_data(self, svg_type=None):
1197 1196 """
1198 1197 generates the svg metadata for image
1199 1198 """
1200 1199
1201 1200 font_family = ','.join([
1202 1201 'proximanovaregular',
1203 1202 'Proxima Nova Regular',
1204 1203 'Proxima Nova',
1205 1204 'Arial',
1206 1205 'Lucida Grande',
1207 1206 'sans-serif'
1208 1207 ])
1209 1208 if svg_type:
1210 1209 return self.get_img_data_by_type(font_family, svg_type)
1211 1210
1212 1211 initials = self.get_initials()
1213 1212 img_data = """
1214 1213 <svg xmlns="http://www.w3.org/2000/svg" pointer-events="none"
1215 1214 width="{size}" height="{size}"
1216 1215 style="width: 100%; height: 100%; background-color: {background}"
1217 1216 viewBox="0 0 {size} {size}">
1218 1217 <text text-anchor="middle" y="50%" x="50%" dy="0.35em"
1219 1218 pointer-events="auto" fill="{text_color}"
1220 1219 font-family="{font_family}"
1221 1220 style="font-weight: 400; font-size: {f_size}px;">{text}
1222 1221 </text>
1223 1222 </svg>""".format(
1224 1223 size=self.size,
1225 1224 f_size=self.size/1.85, # scale the text inside the box nicely
1226 1225 background=self.background,
1227 1226 text_color=self.text_color,
1228 1227 text=initials.upper(),
1229 1228 font_family=font_family)
1230 1229
1231 1230 return img_data
1232 1231
1233 1232 def generate_svg(self, svg_type=None):
1234 1233 img_data = self.get_img_data(svg_type)
1235 1234 return "data:image/svg+xml;base64,%s" % img_data.encode('base64')
1236 1235
1237 1236
1238 1237 def initials_gravatar(email_address, first_name, last_name, size=30):
1239 1238 svg_type = None
1240 1239 if email_address == User.DEFAULT_USER_EMAIL:
1241 1240 svg_type = 'default_user'
1242 1241 klass = InitialsGravatar(email_address, first_name, last_name, size)
1243 1242 return klass.generate_svg(svg_type=svg_type)
1244 1243
1245 1244
1246 1245 def gravatar_url(email_address, size=30):
1247 1246 # doh, we need to re-import those to mock it later
1248 1247 from pylons import tmpl_context as c
1249 1248
1250 1249 _use_gravatar = c.visual.use_gravatar
1251 1250 _gravatar_url = c.visual.gravatar_url or User.DEFAULT_GRAVATAR_URL
1252 1251
1253 1252 email_address = email_address or User.DEFAULT_USER_EMAIL
1254 1253 if isinstance(email_address, unicode):
1255 1254 # hashlib crashes on unicode items
1256 1255 email_address = safe_str(email_address)
1257 1256
1258 1257 # empty email or default user
1259 1258 if not email_address or email_address == User.DEFAULT_USER_EMAIL:
1260 1259 return initials_gravatar(User.DEFAULT_USER_EMAIL, '', '', size=size)
1261 1260
1262 1261 if _use_gravatar:
1263 1262 # TODO: Disuse pyramid thread locals. Think about another solution to
1264 1263 # get the host and schema here.
1265 1264 request = get_current_request()
1266 1265 tmpl = safe_str(_gravatar_url)
1267 1266 tmpl = tmpl.replace('{email}', email_address)\
1268 1267 .replace('{md5email}', md5_safe(email_address.lower())) \
1269 1268 .replace('{netloc}', request.host)\
1270 1269 .replace('{scheme}', request.scheme)\
1271 1270 .replace('{size}', safe_str(size))
1272 1271 return tmpl
1273 1272 else:
1274 1273 return initials_gravatar(email_address, '', '', size=size)
1275 1274
1276 1275
1277 1276 class Page(_Page):
1278 1277 """
1279 1278 Custom pager to match rendering style with paginator
1280 1279 """
1281 1280
1282 1281 def _get_pos(self, cur_page, max_page, items):
1283 1282 edge = (items / 2) + 1
1284 1283 if (cur_page <= edge):
1285 1284 radius = max(items / 2, items - cur_page)
1286 1285 elif (max_page - cur_page) < edge:
1287 1286 radius = (items - 1) - (max_page - cur_page)
1288 1287 else:
1289 1288 radius = items / 2
1290 1289
1291 1290 left = max(1, (cur_page - (radius)))
1292 1291 right = min(max_page, cur_page + (radius))
1293 1292 return left, cur_page, right
1294 1293
1295 1294 def _range(self, regexp_match):
1296 1295 """
1297 1296 Return range of linked pages (e.g. '1 2 [3] 4 5 6 7 8').
1298 1297
1299 1298 Arguments:
1300 1299
1301 1300 regexp_match
1302 1301 A "re" (regular expressions) match object containing the
1303 1302 radius of linked pages around the current page in
1304 1303 regexp_match.group(1) as a string
1305 1304
1306 1305 This function is supposed to be called as a callable in
1307 1306 re.sub.
1308 1307
1309 1308 """
1310 1309 radius = int(regexp_match.group(1))
1311 1310
1312 1311 # Compute the first and last page number within the radius
1313 1312 # e.g. '1 .. 5 6 [7] 8 9 .. 12'
1314 1313 # -> leftmost_page = 5
1315 1314 # -> rightmost_page = 9
1316 1315 leftmost_page, _cur, rightmost_page = self._get_pos(self.page,
1317 1316 self.last_page,
1318 1317 (radius * 2) + 1)
1319 1318 nav_items = []
1320 1319
1321 1320 # Create a link to the first page (unless we are on the first page
1322 1321 # or there would be no need to insert '..' spacers)
1323 1322 if self.page != self.first_page and self.first_page < leftmost_page:
1324 1323 nav_items.append(self._pagerlink(self.first_page, self.first_page))
1325 1324
1326 1325 # Insert dots if there are pages between the first page
1327 1326 # and the currently displayed page range
1328 1327 if leftmost_page - self.first_page > 1:
1329 1328 # Wrap in a SPAN tag if dotdot_attr is set
1330 1329 text = '..'
1331 1330 if self.dotdot_attr:
1332 1331 text = HTML.span(c=text, **self.dotdot_attr)
1333 1332 nav_items.append(text)
1334 1333
1335 1334 for thispage in xrange(leftmost_page, rightmost_page + 1):
1336 1335 # Highlight the current page number and do not use a link
1337 1336 if thispage == self.page:
1338 1337 text = '%s' % (thispage,)
1339 1338 # Wrap in a SPAN tag if curpage_attr is set
1340 1339 if self.curpage_attr:
1341 1340 text = HTML.span(c=text, **self.curpage_attr)
1342 1341 nav_items.append(text)
1343 1342 # Otherwise create just a link to that page
1344 1343 else:
1345 1344 text = '%s' % (thispage,)
1346 1345 nav_items.append(self._pagerlink(thispage, text))
1347 1346
1348 1347 # Insert dots if there are pages between the displayed
1349 1348 # page numbers and the end of the page range
1350 1349 if self.last_page - rightmost_page > 1:
1351 1350 text = '..'
1352 1351 # Wrap in a SPAN tag if dotdot_attr is set
1353 1352 if self.dotdot_attr:
1354 1353 text = HTML.span(c=text, **self.dotdot_attr)
1355 1354 nav_items.append(text)
1356 1355
1357 1356 # Create a link to the very last page (unless we are on the last
1358 1357 # page or there would be no need to insert '..' spacers)
1359 1358 if self.page != self.last_page and rightmost_page < self.last_page:
1360 1359 nav_items.append(self._pagerlink(self.last_page, self.last_page))
1361 1360
1362 1361 ## prerender links
1363 1362 #_page_link = url.current()
1364 1363 #nav_items.append(literal('<link rel="prerender" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
1365 1364 #nav_items.append(literal('<link rel="prefetch" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
1366 1365 return self.separator.join(nav_items)
1367 1366
1368 1367 def pager(self, format='~2~', page_param='page', partial_param='partial',
1369 1368 show_if_single_page=False, separator=' ', onclick=None,
1370 1369 symbol_first='<<', symbol_last='>>',
1371 1370 symbol_previous='<', symbol_next='>',
1372 1371 link_attr={'class': 'pager_link', 'rel': 'prerender'},
1373 1372 curpage_attr={'class': 'pager_curpage'},
1374 1373 dotdot_attr={'class': 'pager_dotdot'}, **kwargs):
1375 1374
1376 1375 self.curpage_attr = curpage_attr
1377 1376 self.separator = separator
1378 1377 self.pager_kwargs = kwargs
1379 1378 self.page_param = page_param
1380 1379 self.partial_param = partial_param
1381 1380 self.onclick = onclick
1382 1381 self.link_attr = link_attr
1383 1382 self.dotdot_attr = dotdot_attr
1384 1383
1385 1384 # Don't show navigator if there is no more than one page
1386 1385 if self.page_count == 0 or (self.page_count == 1 and not show_if_single_page):
1387 1386 return ''
1388 1387
1389 1388 from string import Template
1390 1389 # Replace ~...~ in token format by range of pages
1391 1390 result = re.sub(r'~(\d+)~', self._range, format)
1392 1391
1393 1392 # Interpolate '%' variables
1394 1393 result = Template(result).safe_substitute({
1395 1394 'first_page': self.first_page,
1396 1395 'last_page': self.last_page,
1397 1396 'page': self.page,
1398 1397 'page_count': self.page_count,
1399 1398 'items_per_page': self.items_per_page,
1400 1399 'first_item': self.first_item,
1401 1400 'last_item': self.last_item,
1402 1401 'item_count': self.item_count,
1403 1402 'link_first': self.page > self.first_page and \
1404 1403 self._pagerlink(self.first_page, symbol_first) or '',
1405 1404 'link_last': self.page < self.last_page and \
1406 1405 self._pagerlink(self.last_page, symbol_last) or '',
1407 1406 'link_previous': self.previous_page and \
1408 1407 self._pagerlink(self.previous_page, symbol_previous) \
1409 1408 or HTML.span(symbol_previous, class_="pg-previous disabled"),
1410 1409 'link_next': self.next_page and \
1411 1410 self._pagerlink(self.next_page, symbol_next) \
1412 1411 or HTML.span(symbol_next, class_="pg-next disabled")
1413 1412 })
1414 1413
1415 1414 return literal(result)
1416 1415
1417 1416
1418 1417 #==============================================================================
1419 1418 # REPO PAGER, PAGER FOR REPOSITORY
1420 1419 #==============================================================================
1421 1420 class RepoPage(Page):
1422 1421
1423 1422 def __init__(self, collection, page=1, items_per_page=20,
1424 1423 item_count=None, url=None, **kwargs):
1425 1424
1426 1425 """Create a "RepoPage" instance. special pager for paging
1427 1426 repository
1428 1427 """
1429 1428 self._url_generator = url
1430 1429
1431 1430 # Save the kwargs class-wide so they can be used in the pager() method
1432 1431 self.kwargs = kwargs
1433 1432
1434 1433 # Save a reference to the collection
1435 1434 self.original_collection = collection
1436 1435
1437 1436 self.collection = collection
1438 1437
1439 1438 # The self.page is the number of the current page.
1440 1439 # The first page has the number 1!
1441 1440 try:
1442 1441 self.page = int(page) # make it int() if we get it as a string
1443 1442 except (ValueError, TypeError):
1444 1443 self.page = 1
1445 1444
1446 1445 self.items_per_page = items_per_page
1447 1446
1448 1447 # Unless the user tells us how many items the collections has
1449 1448 # we calculate that ourselves.
1450 1449 if item_count is not None:
1451 1450 self.item_count = item_count
1452 1451 else:
1453 1452 self.item_count = len(self.collection)
1454 1453
1455 1454 # Compute the number of the first and last available page
1456 1455 if self.item_count > 0:
1457 1456 self.first_page = 1
1458 1457 self.page_count = int(math.ceil(float(self.item_count) /
1459 1458 self.items_per_page))
1460 1459 self.last_page = self.first_page + self.page_count - 1
1461 1460
1462 1461 # Make sure that the requested page number is in the range of
1463 1462 # valid pages
1464 1463 if self.page > self.last_page:
1465 1464 self.page = self.last_page
1466 1465 elif self.page < self.first_page:
1467 1466 self.page = self.first_page
1468 1467
1469 1468 # Note: the number of items on this page can be less than
1470 1469 # items_per_page if the last page is not full
1471 1470 self.first_item = max(0, (self.item_count) - (self.page *
1472 1471 items_per_page))
1473 1472 self.last_item = ((self.item_count - 1) - items_per_page *
1474 1473 (self.page - 1))
1475 1474
1476 1475 self.items = list(self.collection[self.first_item:self.last_item + 1])
1477 1476
1478 1477 # Links to previous and next page
1479 1478 if self.page > self.first_page:
1480 1479 self.previous_page = self.page - 1
1481 1480 else:
1482 1481 self.previous_page = None
1483 1482
1484 1483 if self.page < self.last_page:
1485 1484 self.next_page = self.page + 1
1486 1485 else:
1487 1486 self.next_page = None
1488 1487
1489 1488 # No items available
1490 1489 else:
1491 1490 self.first_page = None
1492 1491 self.page_count = 0
1493 1492 self.last_page = None
1494 1493 self.first_item = None
1495 1494 self.last_item = None
1496 1495 self.previous_page = None
1497 1496 self.next_page = None
1498 1497 self.items = []
1499 1498
1500 1499 # This is a subclass of the 'list' type. Initialise the list now.
1501 1500 list.__init__(self, reversed(self.items))
1502 1501
1503 1502
1504 1503 def changed_tooltip(nodes):
1505 1504 """
1506 1505 Generates a html string for changed nodes in commit page.
1507 1506 It limits the output to 30 entries
1508 1507
1509 1508 :param nodes: LazyNodesGenerator
1510 1509 """
1511 1510 if nodes:
1512 1511 pref = ': <br/> '
1513 1512 suf = ''
1514 1513 if len(nodes) > 30:
1515 1514 suf = '<br/>' + _(' and %s more') % (len(nodes) - 30)
1516 1515 return literal(pref + '<br/> '.join([safe_unicode(x.path)
1517 1516 for x in nodes[:30]]) + suf)
1518 1517 else:
1519 1518 return ': ' + _('No Files')
1520 1519
1521 1520
1522 1521 def breadcrumb_repo_link(repo):
1523 1522 """
1524 1523 Makes a breadcrumbs path link to repo
1525 1524
1526 1525 ex::
1527 1526 group >> subgroup >> repo
1528 1527
1529 1528 :param repo: a Repository instance
1530 1529 """
1531 1530
1532 1531 path = [
1533 1532 link_to(group.name, url('repo_group_home', group_name=group.group_name))
1534 1533 for group in repo.groups_with_parents
1535 1534 ] + [
1536 1535 link_to(repo.just_name, url('summary_home', repo_name=repo.repo_name))
1537 1536 ]
1538 1537
1539 1538 return literal(' &raquo; '.join(path))
1540 1539
1541 1540
1542 1541 def format_byte_size_binary(file_size):
1543 1542 """
1544 1543 Formats file/folder sizes to standard.
1545 1544 """
1546 1545 formatted_size = format_byte_size(file_size, binary=True)
1547 1546 return formatted_size
1548 1547
1549 1548
1550 1549 def fancy_file_stats(stats):
1551 1550 """
1552 1551     Displays a fancy two-colored bar for the number of added/deleted
1553 1552     lines of code in a file
1554 1553
1555 1554     :param stats: dict with 'added', 'deleted', 'binary' and 'ops' keys
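
    Illustrative example (assumed stats shape, output truncated)::

        fancy_file_stats({'added': 10, 'deleted': 2,
                          'binary': False, 'ops': {}})
        # -> literal('<div style="width:100px">...</div>')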
1556 1555 """
1557 1556 from rhodecode.lib.diffs import NEW_FILENODE, DEL_FILENODE, \
1558 1557 MOD_FILENODE, RENAMED_FILENODE, CHMOD_FILENODE, BIN_FILENODE
1559 1558
1560 1559 def cgen(l_type, a_v, d_v):
1561 1560 mapping = {'tr': 'top-right-rounded-corner-mid',
1562 1561 'tl': 'top-left-rounded-corner-mid',
1563 1562 'br': 'bottom-right-rounded-corner-mid',
1564 1563 'bl': 'bottom-left-rounded-corner-mid'}
1565 1564 map_getter = lambda x: mapping[x]
1566 1565
1567 1566 if l_type == 'a' and d_v:
1568 1567             # case when added and deleted are present
1569 1568 return ' '.join(map(map_getter, ['tl', 'bl']))
1570 1569
1571 1570 if l_type == 'a' and not d_v:
1572 1571 return ' '.join(map(map_getter, ['tr', 'br', 'tl', 'bl']))
1573 1572
1574 1573 if l_type == 'd' and a_v:
1575 1574 return ' '.join(map(map_getter, ['tr', 'br']))
1576 1575
1577 1576 if l_type == 'd' and not a_v:
1578 1577 return ' '.join(map(map_getter, ['tr', 'br', 'tl', 'bl']))
1579 1578
1580 1579 a, d = stats['added'], stats['deleted']
1581 1580 width = 100
1582 1581
1583 1582 if stats['binary']: # binary operations like chmod/rename etc
1584 1583 lbl = []
1585 1584 bin_op = 0 # undefined
1586 1585
1587 1586 # prefix with bin for binary files
1588 1587 if BIN_FILENODE in stats['ops']:
1589 1588 lbl += ['bin']
1590 1589
1591 1590 if NEW_FILENODE in stats['ops']:
1592 1591 lbl += [_('new file')]
1593 1592 bin_op = NEW_FILENODE
1594 1593 elif MOD_FILENODE in stats['ops']:
1595 1594 lbl += [_('mod')]
1596 1595 bin_op = MOD_FILENODE
1597 1596 elif DEL_FILENODE in stats['ops']:
1598 1597 lbl += [_('del')]
1599 1598 bin_op = DEL_FILENODE
1600 1599 elif RENAMED_FILENODE in stats['ops']:
1601 1600 lbl += [_('rename')]
1602 1601 bin_op = RENAMED_FILENODE
1603 1602
1604 1603 # chmod can go with other operations, so we add a + to lbl if needed
1605 1604 if CHMOD_FILENODE in stats['ops']:
1606 1605 lbl += [_('chmod')]
1607 1606 if bin_op == 0:
1608 1607 bin_op = CHMOD_FILENODE
1609 1608
1610 1609 lbl = '+'.join(lbl)
1611 1610 b_a = '<div class="bin bin%s %s" style="width:100%%">%s</div>' \
1612 1611 % (bin_op, cgen('a', a_v='', d_v=0), lbl)
1613 1612 b_d = '<div class="bin bin1" style="width:0%%"></div>'
1614 1613 return literal('<div style="width:%spx">%s%s</div>' % (width, b_a, b_d))
1615 1614
1616 1615 t = stats['added'] + stats['deleted']
1617 1616 unit = float(width) / (t or 1)
1618 1617
1619 1618     # each bar needs at least 9% of the width to be visible, or 0 when hidden
1620 1619 a_p = max(9, unit * a) if a > 0 else 0
1621 1620 d_p = max(9, unit * d) if d > 0 else 0
1622 1621 p_sum = a_p + d_p
1623 1622
1624 1623 if p_sum > width:
1625 1624             # adjust the percentages to sum to 100% since we raised them to a 9% minimum
1626 1625 if a_p > d_p:
1627 1626 a_p = a_p - (p_sum - width)
1628 1627 else:
1629 1628 d_p = d_p - (p_sum - width)
1630 1629
1631 1630 a_v = a if a > 0 else ''
1632 1631 d_v = d if d > 0 else ''
1633 1632
1634 1633 d_a = '<div class="added %s" style="width:%s%%">%s</div>' % (
1635 1634 cgen('a', a_v, d_v), a_p, a_v
1636 1635 )
1637 1636 d_d = '<div class="deleted %s" style="width:%s%%">%s</div>' % (
1638 1637 cgen('d', a_v, d_v), d_p, d_v
1639 1638 )
1640 1639 return literal('<div style="width:%spx">%s%s</div>' % (width, d_a, d_d))
1641 1640
1642 1641
1643 1642 def urlify_text(text_, safe=True):
1644 1643 """
1645 1644     Extract URLs from text and make HTML links out of them.
1646 1645
1647 1646     :param text_: text to search for URLs
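
    Illustrative example::

        urlify_text('see http://example.com for details')
        # -> literal('see <a href="http://example.com">'
        #            'http://example.com</a> for details')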
1648 1647 """
1649 1648
1650 1649 url_pat = re.compile(r'''(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@#.&+]'''
1651 1650 '''|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)''')
1652 1651
1653 1652 def url_func(match_obj):
1654 1653 url_full = match_obj.groups()[0]
1655 1654 return '<a href="%(url)s">%(url)s</a>' % ({'url': url_full})
1656 1655 _newtext = url_pat.sub(url_func, text_)
1657 1656 if safe:
1658 1657 return literal(_newtext)
1659 1658 return _newtext
1660 1659
1661 1660
1662 1661 def urlify_commits(text_, repository):
1663 1662 """
1664 1663     Extract commit ids from text and make links out of them.
1665 1664
1666 1665     :param text_: text to search for commit ids
1667 1666 :param repository: repo name to build the URL with
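
    Illustrative example (hypothetical commit id and repo name)::

        urlify_commits('fixed in deadbeefcafe', 'my-repo')
        # -> 'fixed in <a class="revision-link" href="...">deadbeefcafe</a>'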
1668 1667 """
1669 1668 from pylons import url # doh, we need to re-import url to mock it later
1670 1669 URL_PAT = re.compile(r'(^|\s)([0-9a-fA-F]{12,40})($|\s)')
1671 1670
1672 1671 def url_func(match_obj):
1673 1672 commit_id = match_obj.groups()[1]
1674 1673 pref = match_obj.groups()[0]
1675 1674 suf = match_obj.groups()[2]
1676 1675
1677 1676 tmpl = (
1678 1677 '%(pref)s<a class="%(cls)s" href="%(url)s">'
1679 1678 '%(commit_id)s</a>%(suf)s'
1680 1679 )
1681 1680 return tmpl % {
1682 1681 'pref': pref,
1683 1682 'cls': 'revision-link',
1684 1683 'url': url('changeset_home', repo_name=repository,
1685 1684 revision=commit_id, qualified=True),
1686 1685 'commit_id': commit_id,
1687 1686 'suf': suf
1688 1687 }
1689 1688
1690 1689 newtext = URL_PAT.sub(url_func, text_)
1691 1690
1692 1691 return newtext
1693 1692
1694 1693
1695 1694 def _process_url_func(match_obj, repo_name, uid, entry,
1696 1695 return_raw_data=False):
1697 1696 pref = ''
1698 1697 if match_obj.group().startswith(' '):
1699 1698 pref = ' '
1700 1699
1701 1700 issue_id = ''.join(match_obj.groups())
1702 1701 tmpl = (
1703 1702 '%(pref)s<a class="%(cls)s" href="%(url)s">'
1704 1703 '%(issue-prefix)s%(id-repr)s'
1705 1704 '</a>')
1706 1705
1707 1706 (repo_name_cleaned,
1708 1707 parent_group_name) = RepoGroupModel().\
1709 1708 _get_group_name_and_parent(repo_name)
1710 1709
1711 1710 # variables replacement
1712 1711 named_vars = {
1713 1712 'id': issue_id,
1714 1713 'repo': repo_name,
1715 1714 'repo_name': repo_name_cleaned,
1716 1715 'group_name': parent_group_name
1717 1716 }
1718 1717 # named regex variables
1719 1718 named_vars.update(match_obj.groupdict())
1720 1719 _url = string.Template(entry['url']).safe_substitute(**named_vars)
1721 1720
1722 1721 data = {
1723 1722 'pref': pref,
1724 1723 'cls': 'issue-tracker-link',
1725 1724 'url': _url,
1726 1725 'id-repr': issue_id,
1727 1726 'issue-prefix': entry['pref'],
1728 1727 'serv': entry['url'],
1729 1728 }
1730 1729 if return_raw_data:
1731 1730 return {
1732 1731 'id': issue_id,
1733 1732 'url': _url
1734 1733 }
1735 1734 return tmpl % data
1736 1735
1737 1736
1738 1737 def process_patterns(text_string, repo_name, config=None):
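    """
    Finds configured issue-tracker patterns in ``text_string``, replaces them
    with links and returns the new text together with the raw issue data.

    Illustrative example (assumes an issue-tracker entry whose pattern
    captures the number in ``#42`` is configured; URL is hypothetical)::

        newtext, issues = process_patterns('fixes #42', 'my-repo')
        # issues -> [{'id': '42', 'url': 'https://tracker.example.com/42'}]
    """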
1739 1738 repo = None
1740 1739 if repo_name:
1741 1740         # Retrieve the repo to avoid an invalid repo_name exploding in
1742 1741         # IssueTrackerSettingsModel, while still passing the invalid name further down
1743 1742 repo = Repository.get_by_repo_name(repo_name, cache=True)
1744 1743
1745 1744 settings_model = IssueTrackerSettingsModel(repo=repo)
1746 1745 active_entries = settings_model.get_settings(cache=True)
1747 1746
1748 1747 issues_data = []
1749 1748 newtext = text_string
1750 1749 for uid, entry in active_entries.items():
1751 1750         log.debug('found issue tracker entry with uid %s', uid)
1752 1751
1753 1752 if not (entry['pat'] and entry['url']):
1754 1753 log.debug('skipping due to missing data')
1755 1754 continue
1756 1755
1757 1756         log.debug('issue tracker entry: uid: `%s` PAT:%s URL:%s PREFIX:%s',
1758 1757                   uid, entry['pat'], entry['url'], entry['pref'])
1759 1758
1760 1759 try:
1761 1760 pattern = re.compile(r'%s' % entry['pat'])
1762 1761 except re.error:
1763 1762 log.exception(
1764 1763 'issue tracker pattern: `%s` failed to compile',
1765 1764 entry['pat'])
1766 1765 continue
1767 1766
1768 1767 data_func = partial(
1769 1768 _process_url_func, repo_name=repo_name, entry=entry, uid=uid,
1770 1769 return_raw_data=True)
1771 1770
1772 1771 for match_obj in pattern.finditer(text_string):
1773 1772 issues_data.append(data_func(match_obj))
1774 1773
1775 1774 url_func = partial(
1776 1775 _process_url_func, repo_name=repo_name, entry=entry, uid=uid)
1777 1776
1778 1777 newtext = pattern.sub(url_func, newtext)
1779 1778         log.debug('processed prefix:uid `%s`', uid)
1780 1779
1781 1780 return newtext, issues_data
1782 1781
1783 1782
1784 1783 def urlify_commit_message(commit_text, repository=None):
1785 1784 """
1786 1785     Parses the given text message and makes proper links.
1787 1786     Issues are linked to the given issue server, and commit ids become commit links.
1788 1787
1789 1788     :param commit_text: commit message to parse
1790 1789     :param repository: repo name used to build commit links
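
    Illustrative example (hypothetical commit id and repo name)::

        urlify_commit_message('fix <bug> in deadbeefcafe', 'my-repo')
        # -> literal('fix &lt;bug&gt; in '
        #            '<a class="revision-link" href="...">deadbeefcafe</a>')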
1791 1790 """
1792 1791 from pylons import url # doh, we need to re-import url to mock it later
1793 1792
1794 1793 def escaper(string):
1795 1794 return string.replace('<', '&lt;').replace('>', '&gt;')
1796 1795
1797 1796 newtext = escaper(commit_text)
1798 1797
1799 1798 # extract http/https links and make them real urls
1800 1799 newtext = urlify_text(newtext, safe=False)
1801 1800
1802 1801 # urlify commits - extract commit ids and make link out of them, if we have
1803 1802 # the scope of repository present.
1804 1803 if repository:
1805 1804 newtext = urlify_commits(newtext, repository)
1806 1805
1807 1806 # process issue tracker patterns
1808 1807 newtext, issues = process_patterns(newtext, repository or '')
1809 1808
1810 1809 return literal(newtext)
1811 1810
1812 1811
1813 1812 def rst(source, mentions=False):
1814 1813 return literal('<div class="rst-block">%s</div>' %
1815 1814 MarkupRenderer.rst(source, mentions=mentions))
1816 1815
1817 1816
1818 1817 def markdown(source, mentions=False):
1819 1818 return literal('<div class="markdown-block">%s</div>' %
1820 1819 MarkupRenderer.markdown(source, flavored=True,
1821 1820 mentions=mentions))
1822 1821
1823 1822 def renderer_from_filename(filename, exclude=None):
1824 1823 return MarkupRenderer.renderer_from_filename(filename, exclude=exclude)
1825 1824
1826 1825
1827 1826 def render(source, renderer='rst', mentions=False):
1828 1827 if renderer == 'rst':
1829 1828 return rst(source, mentions=mentions)
1830 1829 if renderer == 'markdown':
1831 1830 return markdown(source, mentions=mentions)
1832 1831
1833 1832
1834 1833 def commit_status(repo, commit_id):
1835 1834 return ChangesetStatusModel().get_status(repo, commit_id)
1836 1835
1837 1836
1838 1837 def commit_status_lbl(commit_status):
1839 1838 return dict(ChangesetStatus.STATUSES).get(commit_status)
1840 1839
1841 1840
1842 1841 def commit_time(repo_name, commit_id):
1843 1842 repo = Repository.get_by_repo_name(repo_name)
1844 1843 commit = repo.get_commit(commit_id=commit_id)
1845 1844 return commit.date
1846 1845
1847 1846
1848 1847 def get_permission_name(key):
1849 1848 return dict(Permission.PERMS).get(key)
1850 1849
1851 1850
1852 1851 def journal_filter_help():
1853 1852 return _(
1854 1853 'Example filter terms:\n' +
1855 1854 ' repository:vcs\n' +
1856 1855 ' username:marcin\n' +
1857 1856 ' action:*push*\n' +
1858 1857 ' ip:127.0.0.1\n' +
1859 1858 ' date:20120101\n' +
1860 1859 ' date:[20120101100000 TO 20120102]\n' +
1861 1860 '\n' +
1862 1861 'Generate wildcards using \'*\' character:\n' +
1863 1862 ' "repository:vcs*" - search everything starting with \'vcs\'\n' +
1864 1863 ' "repository:*vcs*" - search for repository containing \'vcs\'\n' +
1865 1864 '\n' +
1866 1865 'Optional AND / OR operators in queries\n' +
1867 1866 ' "repository:vcs OR repository:test"\n' +
1868 1867 ' "username:test AND repository:test*"\n'
1869 1868 )
1870 1869
1871 1870
1872 1871 def not_mapped_error(repo_name):
1873 1872     flash(_('%s repository is not mapped to db, perhaps'
1874 1873             ' it was created or renamed from the filesystem;'
1875 1874             ' please run the application again'
1876 1875             ' in order to rescan repositories') % repo_name, category='error')
1877 1876
1878 1877
1879 1878 def ip_range(ip_addr):
1880 1879 from rhodecode.model.db import UserIpMap
1881 1880 s, e = UserIpMap._get_ip_range(ip_addr)
1882 1881 return '%s - %s' % (s, e)
1883 1882
1884 1883
1885 1884 def form(url, method='post', needs_csrf_token=True, **attrs):
1886 1885 """Wrapper around webhelpers.tags.form to prevent CSRF attacks."""
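    # Illustrative usage (hypothetical endpoint): forms that POST to an
    # endpoint which genuinely does not require a CSRF token must opt out
    # explicitly:
    #   h.form(url('some_csrf_exempt_endpoint'), needs_csrf_token=False)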
1887 1886 if method.lower() != 'get' and needs_csrf_token:
1888 1887 raise Exception(
1889 1888 'Forms to POST/PUT/DELETE endpoints should have (in general) a ' +
1890 1889             'CSRF token. If the endpoint does not require such a token you can ' +
1891 1890 'explicitly set the parameter needs_csrf_token to false.')
1892 1891
1893 1892 return wh_form(url, method=method, **attrs)
1894 1893
1895 1894
1896 1895 def secure_form(url, method="POST", multipart=False, **attrs):
1897 1896     """Start a form tag that points the action to a url. This
1898 1897 form tag will also include the hidden field containing
1899 1898 the auth token.
1900 1899
1901 1900 The url options should be given either as a string, or as a
1902 1901 ``url()`` function. The method for the form defaults to POST.
1903 1902
1904 1903 Options:
1905 1904
1906 1905 ``multipart``
1907 1906 If set to True, the enctype is set to "multipart/form-data".
1908 1907 ``method``
1909 1908 The method to use when submitting the form, usually either
1910 1909 "GET" or "POST". If "PUT", "DELETE", or another verb is used, a
1911 1910 hidden input with name _method is added to simulate the verb
1912 1911 over POST.
1913 1912
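    Illustrative example (hypothetical route name)::

        secure_form(url('edit_repo', repo_name='my-repo'), method='PUT')
        # renders the <form> tag plus a hidden CSRF token input
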
1914 1913 """
1915 1914 from webhelpers.pylonslib.secure_form import insecure_form
1916 1915 form = insecure_form(url, method, multipart, **attrs)
1917 1916 token = csrf_input()
1918 1917 return literal("%s\n%s" % (form, token))
1919 1918
1920 1919 def csrf_input():
1921 1920 return literal(
1922 1921 '<input type="hidden" id="{}" name="{}" value="{}">'.format(
1923 1922 csrf_token_key, csrf_token_key, get_csrf_token()))
1924 1923
1925 1924 def dropdownmenu(name, selected, options, enable_filter=False, **attrs):
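    """
    Renders a ``<select>`` input wired up with the select2 widget.

    Illustrative example (hypothetical field name and options)::

        dropdownmenu('diffmode', 'sideside', ['sideside', 'unified'])
    """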
1926 1925 select_html = select(name, selected, options, **attrs)
1927 1926 select2 = """
1928 1927 <script>
1929 1928 $(document).ready(function() {
1930 1929 $('#%s').select2({
1931 1930 containerCssClass: 'drop-menu',
1932 1931 dropdownCssClass: 'drop-menu-dropdown',
1933 1932 dropdownAutoWidth: true%s
1934 1933 });
1935 1934 });
1936 1935 </script>
1937 1936 """
1938 1937 filter_option = """,
1939 1938 minimumResultsForSearch: -1
1940 1939 """
1941 1940 input_id = attrs.get('id') or name
1942 1941 filter_enabled = "" if enable_filter else filter_option
1943 1942 select_script = literal(select2 % (input_id, filter_enabled))
1944 1943
1945 1944     return literal(select_html + select_script)
1946 1945
1947 1946
1948 1947 def get_visual_attr(tmpl_context_var, attr_name):
1949 1948 """
1950 1949     A safe way to get an attribute from the visual variable of the template context
1951 1950
1952 1951 :param tmpl_context_var: instance of tmpl_context, usually present as `c`
1953 1952 :param attr_name: name of the attribute we fetch from the c.visual
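
    Illustrative example (hypothetical attribute name)::

        get_visual_attr(c, 'show_public_icon')  # -> the value, or None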
1954 1953 """
1955 1954 visual = getattr(tmpl_context_var, 'visual', None)
1956 1955 if not visual:
1957 1956 return
1958 1957 else:
1959 1958 return getattr(visual, attr_name, None)
1960 1959
1961 1960
1962 1961 def get_last_path_part(file_node):
1963 1962 if not file_node.path:
1964 1963 return u''
1965 1964
1966 1965 path = safe_unicode(file_node.path.split('/')[-1])
1967 1966 return u'../' + path
1968 1967
1969 1968
1970 1969 def route_path(*args, **kwds):
1971 1970 """
1972 1971     Wrapper around pyramid's `route_path` function. It is used to generate
1973 1972     URLs from within pylons views or templates. This will be removed when
1974 1973     the pyramid migration is finished.
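
    Illustrative example (hypothetical route name and pattern)::

        route_path('user_profile', username='admin')
        # -> the matching URL path, e.g. '/_profiles/admin'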
1975 1974 """
1976 1975 req = get_current_request()
1977 1976 return req.route_path(*args, **kwds)
1978 1977
1979 1978
1980 1979 def route_path_or_none(*args, **kwargs):
1981 1980 try:
1982 1981 return route_path(*args, **kwargs)
1983 1982 except KeyError:
1984 1983 return None
1985 1984
1986 1985
1987 1986 def static_url(*args, **kwds):
1988 1987 """
1989 1988     Wrapper around pyramid's `static_url` function. It is used to generate
1990 1989     URLs from within pylons views or templates. This will be removed when
1991 1990     the pyramid migration is finished.
1992 1991 """
1993 1992 req = get_current_request()
1994 1993 return req.static_url(*args, **kwds)
1995 1994
1996 1995
1997 1996 def resource_path(*args, **kwds):
1998 1997 """
1999 1998     Wrapper around pyramid's `resource_path` function. It is used to generate
2000 1999     URLs from within pylons views or templates. This will be removed when
2001 2000     the pyramid migration is finished.
2002 2001 """
2003 2002 req = get_current_request()
2004 2003 return req.resource_path(*args, **kwds)