statics: don't generate empty ver= if no ver was given.
marcink
r542:043cce4d default
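The change below updates the `asset()` helper so that the `?ver=` query string is only appended when a version is actually supplied. A rough sketch of the resulting behaviour (paths shortened for illustration; the exact URL prefix comes from Pyramid's `request.static_url`):

    h.asset('images/image.png', ver='3923')  # -> .../rhodecode/images/image.png?ver=3923
    h.asset('images/image.png')              # previously ended in an empty '?ver='; now no query string is added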
@@ -1,1963 +1,1966 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2010-2016 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21 """
22 22 Helper functions
23 23
24 24 Consists of functions typically used within templates, but also
25 25 available to Controllers. This module is available to both as 'h'.
26 26 """
27 27
28 28 import random
29 29 import hashlib
30 30 import StringIO
31 31 import urllib
32 32 import math
33 33 import logging
34 34 import re
35 35 import urlparse
36 36 import time
37 37 import string
38 38 import hashlib
39 39 import pygments
40 40
41 41 from datetime import datetime
42 42 from functools import partial
43 43 from pygments.formatters.html import HtmlFormatter
44 44 from pygments import highlight as code_highlight
45 45 from pygments.lexers import (
46 46 get_lexer_by_name, get_lexer_for_filename, get_lexer_for_mimetype)
47 47 from pylons import url as pylons_url
48 48 from pylons.i18n.translation import _, ungettext
49 49 from pyramid.threadlocal import get_current_request
50 50
51 51 from webhelpers.html import literal, HTML, escape
52 52 from webhelpers.html.tools import *
53 53 from webhelpers.html.builder import make_tag
54 54 from webhelpers.html.tags import auto_discovery_link, checkbox, css_classes, \
55 55 end_form, file, form as wh_form, hidden, image, javascript_link, link_to, \
56 56 link_to_if, link_to_unless, ol, required_legend, select, stylesheet_link, \
57 57 submit, text, password, textarea, title, ul, xml_declaration, radio
58 58 from webhelpers.html.tools import auto_link, button_to, highlight, \
59 59 js_obfuscate, mail_to, strip_links, strip_tags, tag_re
60 60 from webhelpers.pylonslib import Flash as _Flash
61 61 from webhelpers.text import chop_at, collapse, convert_accented_entities, \
62 62 convert_misc_entities, lchop, plural, rchop, remove_formatting, \
63 63 replace_whitespace, urlify, truncate, wrap_paragraphs
64 64 from webhelpers.date import time_ago_in_words
65 65 from webhelpers.paginate import Page as _Page
66 66 from webhelpers.html.tags import _set_input_attrs, _set_id_attr, \
67 67 convert_boolean_attrs, NotGiven, _make_safe_id_component
68 68 from webhelpers2.number import format_byte_size
69 69
70 70 from rhodecode.lib.annotate import annotate_highlight
71 71 from rhodecode.lib.action_parser import action_parser
72 72 from rhodecode.lib.ext_json import json
73 73 from rhodecode.lib.utils import repo_name_slug, get_custom_lexer
74 74 from rhodecode.lib.utils2 import str2bool, safe_unicode, safe_str, \
75 75 get_commit_safe, datetime_to_time, time_to_datetime, time_to_utcdatetime, \
76 76 AttributeDict, safe_int, md5, md5_safe
77 77 from rhodecode.lib.markup_renderer import MarkupRenderer
78 78 from rhodecode.lib.vcs.exceptions import CommitDoesNotExistError
79 79 from rhodecode.lib.vcs.backends.base import BaseChangeset, EmptyCommit
80 80 from rhodecode.config.conf import DATE_FORMAT, DATETIME_FORMAT
81 81 from rhodecode.model.changeset_status import ChangesetStatusModel
82 82 from rhodecode.model.db import Permission, User, Repository
83 83 from rhodecode.model.repo_group import RepoGroupModel
84 84 from rhodecode.model.settings import IssueTrackerSettingsModel
85 85
86 86 log = logging.getLogger(__name__)
87 87
88 88
89 89 DEFAULT_USER = User.DEFAULT_USER
90 90 DEFAULT_USER_EMAIL = User.DEFAULT_USER_EMAIL
91 91
92 92
93 93 def url(*args, **kw):
94 94 return pylons_url(*args, **kw)
95 95
96 96
97 97 def pylons_url_current(*args, **kw):
98 98 """
99 99 This function overrides pylons.url.current() which returns the current
100 100 path so that it will also work from a pyramid only context. This
101 101 should be removed once port to pyramid is complete.
102 102 """
103 103 if not args and not kw:
104 104 request = get_current_request()
105 105 return request.path
106 106 return pylons_url.current(*args, **kw)
107 107
108 108 url.current = pylons_url_current
109 109
110 110
111 111 def asset(path, ver=None):
112 112 """
113 113 Helper to generate a static asset file path for rhodecode assets
114 114
115 115 eg. h.asset('images/image.png', ver='3923')
116 116
117 117 :param path: path of asset
118 118 :param ver: optional version query param to append as ?ver=
119 119 """
120 120 request = get_current_request()
121 query = {}
122 if ver:
123 query = {'ver': ver}
121 124 return request.static_url(
122 'rhodecode:public/{}'.format(path), _query={'ver': ver})
125 'rhodecode:public/{}'.format(path), _query=query)
123 126
124 127
125 128 def html_escape(text, html_escape_table=None):
126 129 """Produce entities within text."""
127 130 if not html_escape_table:
128 131 html_escape_table = {
129 132 "&": "&amp;",
130 133 '"': "&quot;",
131 134 "'": "&apos;",
132 135 ">": "&gt;",
133 136 "<": "&lt;",
134 137 }
135 138 return "".join(html_escape_table.get(c, c) for c in text)
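# usage sketch, following the escape table above:
# html_escape('<img src="x">') == '&lt;img src=&quot;x&quot;&gt;'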
136 139
137 140
138 141 def chop_at_smart(s, sub, inclusive=False, suffix_if_chopped=None):
139 142 """
140 143 Truncate string ``s`` at the first occurrence of ``sub``.
141 144
142 145 If ``inclusive`` is true, truncate just after ``sub`` rather than at it.
143 146 """
144 147 suffix_if_chopped = suffix_if_chopped or ''
145 148 pos = s.find(sub)
146 149 if pos == -1:
147 150 return s
148 151
149 152 if inclusive:
150 153 pos += len(sub)
151 154
152 155 chopped = s[:pos]
153 156 left = s[pos:].strip()
154 157
155 158 if left and suffix_if_chopped:
156 159 chopped += suffix_if_chopped
157 160
158 161 return chopped
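# usage sketch:
# chop_at_smart('10/folder/file.txt', '/', suffix_if_chopped='...') == '10...'
# chop_at_smart('10/folder/file.txt', '/', inclusive=True) == '10/'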
159 162
160 163
161 164 def shorter(text, size=20):
162 165 postfix = '...'
163 166 if len(text) > size:
164 167 return text[:size - len(postfix)] + postfix
165 168 return text
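# usage sketch:
# shorter('this-is-a-very-long-filename.txt', size=20) == 'this-is-a-very-lo...'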
166 169
167 170
168 171 def _reset(name, value=None, id=NotGiven, type="reset", **attrs):
169 172 """
170 173 Reset button
171 174 """
172 175 _set_input_attrs(attrs, type, name, value)
173 176 _set_id_attr(attrs, id, name)
174 177 convert_boolean_attrs(attrs, ["disabled"])
175 178 return HTML.input(**attrs)
176 179
177 180 reset = _reset
178 181 safeid = _make_safe_id_component
179 182
180 183
181 184 def branding(name, length=40):
182 185 return truncate(name, length, indicator="")
183 186
184 187
185 188 def FID(raw_id, path):
186 189 """
187 190 Creates a unique ID for a filenode based on a hash of its path and commit;
188 191 it is safe to use in URLs
189 192
190 193 :param raw_id:
191 194 :param path:
192 195 """
193 196
194 197 return 'c-%s-%s' % (short_id(raw_id), md5_safe(path)[:12])
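# usage sketch (commit hash shortened for illustration):
# FID('9fdeadbeefcafe0123', u'docs/index.rst')
# == 'c-9fdeadbeefca-' + first 12 hex chars of md5('docs/index.rst')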
195 198
196 199
197 200 class _GetError(object):
198 201 """Get error from form_errors, and represent it as span wrapped error
199 202 message
200 203
201 204 :param field_name: field to fetch errors for
202 205 :param form_errors: form errors dict
203 206 """
204 207
205 208 def __call__(self, field_name, form_errors):
206 209 tmpl = """<span class="error_msg">%s</span>"""
207 210 if form_errors and field_name in form_errors:
208 211 return literal(tmpl % form_errors.get(field_name))
209 212
210 213 get_error = _GetError()
211 214
212 215
213 216 class _ToolTip(object):
214 217
215 218 def __call__(self, tooltip_title, trim_at=50):
216 219 """
217 220 Special function just to wrap our text into nice formatted
218 221 autowrapped text
219 222
220 223 :param tooltip_title:
221 224 """
222 225 tooltip_title = escape(tooltip_title)
223 226 tooltip_title = tooltip_title.replace('<', '&lt;').replace('>', '&gt;')
224 227 return tooltip_title
225 228 tooltip = _ToolTip()
226 229
227 230
228 231 def files_breadcrumbs(repo_name, commit_id, file_path):
229 232 if isinstance(file_path, str):
230 233 file_path = safe_unicode(file_path)
231 234
232 235 # TODO: johbo: Is this always a url like path, or is this operating
233 236 # system dependent?
234 237 path_segments = file_path.split('/')
235 238
236 239 repo_name_html = escape(repo_name)
237 240 if len(path_segments) == 1 and path_segments[0] == '':
238 241 url_segments = [repo_name_html]
239 242 else:
240 243 url_segments = [
241 244 link_to(
242 245 repo_name_html,
243 246 url('files_home',
244 247 repo_name=repo_name,
245 248 revision=commit_id,
246 249 f_path=''),
247 250 class_='pjax-link')]
248 251
249 252 last_cnt = len(path_segments) - 1
250 253 for cnt, segment in enumerate(path_segments):
251 254 if not segment:
252 255 continue
253 256 segment_html = escape(segment)
254 257
255 258 if cnt != last_cnt:
256 259 url_segments.append(
257 260 link_to(
258 261 segment_html,
259 262 url('files_home',
260 263 repo_name=repo_name,
261 264 revision=commit_id,
262 265 f_path='/'.join(path_segments[:cnt + 1])),
263 266 class_='pjax-link'))
264 267 else:
265 268 url_segments.append(segment_html)
266 269
267 270 return literal('/'.join(url_segments))
268 271
269 272
270 273 class CodeHtmlFormatter(HtmlFormatter):
271 274 """
272 275 My code Html Formatter for source codes
273 276 """
274 277
275 278 def wrap(self, source, outfile):
276 279 return self._wrap_div(self._wrap_pre(self._wrap_code(source)))
277 280
278 281 def _wrap_code(self, source):
279 282 for cnt, it in enumerate(source):
280 283 i, t = it
281 284 t = '<div id="L%s">%s</div>' % (cnt + 1, t)
282 285 yield i, t
283 286
284 287 def _wrap_tablelinenos(self, inner):
285 288 dummyoutfile = StringIO.StringIO()
286 289 lncount = 0
287 290 for t, line in inner:
288 291 if t:
289 292 lncount += 1
290 293 dummyoutfile.write(line)
291 294
292 295 fl = self.linenostart
293 296 mw = len(str(lncount + fl - 1))
294 297 sp = self.linenospecial
295 298 st = self.linenostep
296 299 la = self.lineanchors
297 300 aln = self.anchorlinenos
298 301 nocls = self.noclasses
299 302 if sp:
300 303 lines = []
301 304
302 305 for i in range(fl, fl + lncount):
303 306 if i % st == 0:
304 307 if i % sp == 0:
305 308 if aln:
306 309 lines.append('<a href="#%s%d" class="special">%*d</a>' %
307 310 (la, i, mw, i))
308 311 else:
309 312 lines.append('<span class="special">%*d</span>' % (mw, i))
310 313 else:
311 314 if aln:
312 315 lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
313 316 else:
314 317 lines.append('%*d' % (mw, i))
315 318 else:
316 319 lines.append('')
317 320 ls = '\n'.join(lines)
318 321 else:
319 322 lines = []
320 323 for i in range(fl, fl + lncount):
321 324 if i % st == 0:
322 325 if aln:
323 326 lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
324 327 else:
325 328 lines.append('%*d' % (mw, i))
326 329 else:
327 330 lines.append('')
328 331 ls = '\n'.join(lines)
329 332
330 333 # in case you wonder about the seemingly redundant <div> here: since the
331 334 # content in the other cell also is wrapped in a div, some browsers in
332 335 # some configurations seem to mess up the formatting...
333 336 if nocls:
334 337 yield 0, ('<table class="%stable">' % self.cssclass +
335 338 '<tr><td><div class="linenodiv" '
336 339 'style="background-color: #f0f0f0; padding-right: 10px">'
337 340 '<pre style="line-height: 125%">' +
338 341 ls + '</pre></div></td><td id="hlcode" class="code">')
339 342 else:
340 343 yield 0, ('<table class="%stable">' % self.cssclass +
341 344 '<tr><td class="linenos"><div class="linenodiv"><pre>' +
342 345 ls + '</pre></div></td><td id="hlcode" class="code">')
343 346 yield 0, dummyoutfile.getvalue()
344 347 yield 0, '</td></tr></table>'
345 348
346 349
347 350 class SearchContentCodeHtmlFormatter(CodeHtmlFormatter):
348 351 def __init__(self, **kw):
349 352 # only show these line numbers if set
350 353 self.only_lines = kw.pop('only_line_numbers', [])
351 354 self.query_terms = kw.pop('query_terms', [])
352 355 self.max_lines = kw.pop('max_lines', 5)
353 356 self.line_context = kw.pop('line_context', 3)
354 357 self.url = kw.pop('url', None)
355 358
356 359 super(CodeHtmlFormatter, self).__init__(**kw)
357 360
358 361 def _wrap_code(self, source):
359 362 for cnt, it in enumerate(source):
360 363 i, t = it
361 364 t = '<pre>%s</pre>' % t
362 365 yield i, t
363 366
364 367 def _wrap_tablelinenos(self, inner):
365 368 yield 0, '<table class="code-highlight %stable">' % self.cssclass
366 369
367 370 last_shown_line_number = 0
368 371 current_line_number = 1
369 372
370 373 for t, line in inner:
371 374 if not t:
372 375 yield t, line
373 376 continue
374 377
375 378 if current_line_number in self.only_lines:
376 379 if last_shown_line_number + 1 != current_line_number:
377 380 yield 0, '<tr>'
378 381 yield 0, '<td class="line">...</td>'
379 382 yield 0, '<td id="hlcode" class="code"></td>'
380 383 yield 0, '</tr>'
381 384
382 385 yield 0, '<tr>'
383 386 if self.url:
384 387 yield 0, '<td class="line"><a href="%s#L%i">%i</a></td>' % (
385 388 self.url, current_line_number, current_line_number)
386 389 else:
387 390 yield 0, '<td class="line"><a href="">%i</a></td>' % (
388 391 current_line_number)
389 392 yield 0, '<td id="hlcode" class="code">' + line + '</td>'
390 393 yield 0, '</tr>'
391 394
392 395 last_shown_line_number = current_line_number
393 396
394 397 current_line_number += 1
395 398
396 399
397 400 yield 0, '</table>'
398 401
399 402
400 403 def extract_phrases(text_query):
401 404 """
402 405 Extracts phrases from search term string making sure phrases
403 406 contained in double quotes are kept together - and discarding empty values
404 407 or fully whitespace values eg.
405 408
406 409 'some text "a phrase" more' => ['some', 'text', 'a phrase', 'more']
407 410
408 411 """
409 412
410 413 in_phrase = False
411 414 buf = ''
412 415 phrases = []
413 416 for char in text_query:
414 417 if in_phrase:
415 418 if char == '"': # end phrase
416 419 phrases.append(buf)
417 420 buf = ''
418 421 in_phrase = False
419 422 continue
420 423 else:
421 424 buf += char
422 425 continue
423 426 else:
424 427 if char == '"': # start phrase
425 428 in_phrase = True
426 429 phrases.append(buf)
427 430 buf = ''
428 431 continue
429 432 elif char == ' ':
430 433 phrases.append(buf)
431 434 buf = ''
432 435 continue
433 436 else:
434 437 buf += char
435 438
436 439 phrases.append(buf)
437 440 phrases = [phrase.strip() for phrase in phrases if phrase.strip()]
438 441 return phrases
439 442
440 443
441 444 def get_matching_offsets(text, phrases):
442 445 """
443 446 Returns a list of string offsets in `text` that the list of `terms` match
444 447
445 448 >>> get_matching_offsets('some text here', ['some', 'here'])
446 449 [(0, 4), (10, 14)]
447 450
448 451 """
449 452 offsets = []
450 453 for phrase in phrases:
451 454 for match in re.finditer(phrase, text):
452 455 offsets.append((match.start(), match.end()))
453 456
454 457 return offsets
455 458
456 459
457 460 def normalize_text_for_matching(x):
458 461 """
459 462 Replaces all non alnum characters to spaces and lower cases the string,
460 463 useful for comparing two text strings without punctuation
461 464 """
462 465 return re.sub(r'[^\w]', ' ', x.lower())
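# usage sketch:
# normalize_text_for_matching('Foo.Bar') == 'foo bar'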
463 466
464 467
465 468 def get_matching_line_offsets(lines, terms):
466 469 """ Return a set of `lines` indices (starting from 1) matching a
467 470 text search query, along with `context` lines above/below matching lines
468 471
469 472 :param lines: list of strings representing lines
470 473 :param terms: search term string to match in lines eg. 'some text'
471 474 :param context: number of lines above/below a matching line to add to result
472 475 :param max_lines: cut off for lines of interest
473 476 eg.
474 477
475 478 text = '''
476 479 words words words
477 480 words words words
478 481 some text some
479 482 words words words
480 483 words words words
481 484 text here what
482 485 '''
483 486 get_matching_line_offsets(text, 'text', context=1)
484 487 {3: [(5, 9)], 6: [(0, 4)]}
485 488
486 489 """
487 490 matching_lines = {}
488 491 phrases = [normalize_text_for_matching(phrase)
489 492 for phrase in extract_phrases(terms)]
490 493
491 494 for line_index, line in enumerate(lines, start=1):
492 495 match_offsets = get_matching_offsets(
493 496 normalize_text_for_matching(line), phrases)
494 497 if match_offsets:
495 498 matching_lines[line_index] = match_offsets
496 499
497 500 return matching_lines
498 501
499 502
500 503 def get_lexer_safe(mimetype=None, filepath=None):
501 504 """
502 505 Tries to return a relevant pygments lexer using mimetype/filepath name,
503 506 defaulting to plain text if none could be found
504 507 """
505 508 lexer = None
506 509 try:
507 510 if mimetype:
508 511 lexer = get_lexer_for_mimetype(mimetype)
509 512 if not lexer:
510 513 lexer = get_lexer_for_filename(filepath)
511 514 except pygments.util.ClassNotFound:
512 515 pass
513 516
514 517 if not lexer:
515 518 lexer = get_lexer_by_name('text')
516 519
517 520 return lexer
518 521
519 522
520 523 def pygmentize(filenode, **kwargs):
521 524 """
522 525 pygmentize function using pygments
523 526
524 527 :param filenode:
525 528 """
526 529 lexer = get_custom_lexer(filenode.extension) or filenode.lexer
527 530 return literal(code_highlight(filenode.content, lexer,
528 531 CodeHtmlFormatter(**kwargs)))
529 532
530 533
531 534 def pygmentize_annotation(repo_name, filenode, **kwargs):
532 535 """
533 536 pygmentize function for annotation
534 537
535 538 :param filenode:
536 539 """
537 540
538 541 color_dict = {}
539 542
540 543 def gen_color(n=10000):
541 544 """generator for getting n evenly distributed colors using
542 545 hsv color and the golden ratio. It always returns the same order of colors
543 546
544 547 :returns: RGB tuple
545 548 """
546 549
547 550 def hsv_to_rgb(h, s, v):
548 551 if s == 0.0:
549 552 return v, v, v
550 553 i = int(h * 6.0) # XXX assume int() truncates!
551 554 f = (h * 6.0) - i
552 555 p = v * (1.0 - s)
553 556 q = v * (1.0 - s * f)
554 557 t = v * (1.0 - s * (1.0 - f))
555 558 i = i % 6
556 559 if i == 0:
557 560 return v, t, p
558 561 if i == 1:
559 562 return q, v, p
560 563 if i == 2:
561 564 return p, v, t
562 565 if i == 3:
563 566 return p, q, v
564 567 if i == 4:
565 568 return t, p, v
566 569 if i == 5:
567 570 return v, p, q
568 571
569 572 golden_ratio = 0.618033988749895
570 573 h = 0.22717784590367374
571 574
572 575 for _ in xrange(n):
573 576 h += golden_ratio
574 577 h %= 1
575 578 HSV_tuple = [h, 0.95, 0.95]
576 579 RGB_tuple = hsv_to_rgb(*HSV_tuple)
577 580 yield map(lambda x: str(int(x * 256)), RGB_tuple)
578 581
579 582 cgenerator = gen_color()
580 583
581 584 def get_color_string(commit_id):
582 585 if commit_id in color_dict:
583 586 col = color_dict[commit_id]
584 587 else:
585 588 col = color_dict[commit_id] = cgenerator.next()
586 589 return "color: rgb(%s)! important;" % (', '.join(col))
587 590
588 591 def url_func(repo_name):
589 592
590 593 def _url_func(commit):
591 594 author = commit.author
592 595 date = commit.date
593 596 message = tooltip(commit.message)
594 597
595 598 tooltip_html = ("<div style='font-size:0.8em'><b>Author:</b>"
596 599 " %s<br/><b>Date:</b> %s</b><br/><b>Message:"
597 600 "</b> %s<br/></div>")
598 601
599 602 tooltip_html = tooltip_html % (author, date, message)
600 603 lnk_format = '%5s:%s' % ('r%s' % commit.idx, commit.short_id)
601 604 uri = link_to(
602 605 lnk_format,
603 606 url('changeset_home', repo_name=repo_name,
604 607 revision=commit.raw_id),
605 608 style=get_color_string(commit.raw_id),
606 609 class_='tooltip',
607 610 title=tooltip_html
608 611 )
609 612
610 613 uri += '\n'
611 614 return uri
612 615 return _url_func
613 616
614 617 return literal(annotate_highlight(filenode, url_func(repo_name), **kwargs))
615 618
616 619
617 620 def is_following_repo(repo_name, user_id):
618 621 from rhodecode.model.scm import ScmModel
619 622 return ScmModel().is_following_repo(repo_name, user_id)
620 623
621 624
622 625 class _Message(object):
623 626 """A message returned by ``Flash.pop_messages()``.
624 627
625 628 Converting the message to a string returns the message text. Instances
626 629 also have the following attributes:
627 630
628 631 * ``message``: the message text.
629 632 * ``category``: the category specified when the message was created.
630 633 """
631 634
632 635 def __init__(self, category, message):
633 636 self.category = category
634 637 self.message = message
635 638
636 639 def __str__(self):
637 640 return self.message
638 641
639 642 __unicode__ = __str__
640 643
641 644 def __html__(self):
642 645 return escape(safe_unicode(self.message))
643 646
644 647
645 648 class Flash(_Flash):
646 649
647 650 def pop_messages(self):
648 651 """Return all accumulated messages and delete them from the session.
649 652
650 653 The return value is a list of ``Message`` objects.
651 654 """
652 655 from pylons import session
653 656
654 657 messages = []
655 658
656 659 # Pop the 'old' pylons flash messages. They are tuples of the form
657 660 # (category, message)
658 661 for cat, msg in session.pop(self.session_key, []):
659 662 messages.append(_Message(cat, msg))
660 663
661 664 # Pop the 'new' pyramid flash messages for each category as list
662 665 # of strings.
663 666 for cat in self.categories:
664 667 for msg in session.pop_flash(queue=cat):
665 668 messages.append(_Message(cat, msg))
666 669 # Map messages from the default queue to the 'notice' category.
667 670 for msg in session.pop_flash():
668 671 messages.append(_Message('notice', msg))
669 672
670 673 session.save()
671 674 return messages
672 675
673 676 flash = Flash()
674 677
675 678 #==============================================================================
676 679 # SCM FILTERS available via h.
677 680 #==============================================================================
678 681 from rhodecode.lib.vcs.utils import author_name, author_email
679 682 from rhodecode.lib.utils2 import credentials_filter, age as _age
680 683 from rhodecode.model.db import User, ChangesetStatus
681 684
682 685 age = _age
683 686 capitalize = lambda x: x.capitalize()
684 687 email = author_email
685 688 short_id = lambda x: x[:12]
686 689 hide_credentials = lambda x: ''.join(credentials_filter(x))
687 690
688 691
689 692 def age_component(datetime_iso, value=None, time_is_local=False):
690 693 title = value or format_date(datetime_iso)
691 694
692 695 # detect if we have a timezone info, otherwise, add it
693 696 if isinstance(datetime_iso, datetime) and not datetime_iso.tzinfo:
694 697 tzinfo = '+00:00'
695 698
696 699 if time_is_local:
697 700 tzinfo = time.strftime("+%H:%M",
698 701 time.gmtime(
699 702 (datetime.now() - datetime.utcnow()).seconds + 1
700 703 )
701 704 )
702 705
703 706 return literal(
704 707 '<time class="timeago tooltip" '
705 708 'title="{1}" datetime="{0}{2}">{1}</time>'.format(
706 709 datetime_iso, title, tzinfo))
707 710
708 711
709 712 def _shorten_commit_id(commit_id):
710 713 from rhodecode import CONFIG
711 714 def_len = safe_int(CONFIG.get('rhodecode_show_sha_length', 12))
712 715 return commit_id[:def_len]
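# usage sketch, with the default rhodecode_show_sha_length of 12:
# _shorten_commit_id('9fdeadbeefcafe0123') == '9fdeadbeefca'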
713 716
714 717
715 718 def show_id(commit):
716 719 """
717 720 Configurable function that shows ID
718 721 by default it's r123:fffeeefffeee
719 722
720 723 :param commit: commit instance
721 724 """
722 725 from rhodecode import CONFIG
723 726 show_idx = str2bool(CONFIG.get('rhodecode_show_revision_number', True))
724 727
725 728 raw_id = _shorten_commit_id(commit.raw_id)
726 729 if show_idx:
727 730 return 'r%s:%s' % (commit.idx, raw_id)
728 731 else:
729 732 return '%s' % (raw_id, )
730 733
731 734
732 735 def format_date(date):
733 736 """
734 737 use a standardized formatting for dates used in RhodeCode
735 738
736 739 :param date: date/datetime object
737 740 :return: formatted date
738 741 """
739 742
740 743 if date:
741 744 _fmt = "%a, %d %b %Y %H:%M:%S"
742 745 return safe_unicode(date.strftime(_fmt))
743 746
744 747 return u""
745 748
746 749
747 750 class _RepoChecker(object):
748 751
749 752 def __init__(self, backend_alias):
750 753 self._backend_alias = backend_alias
751 754
752 755 def __call__(self, repository):
753 756 if hasattr(repository, 'alias'):
754 757 _type = repository.alias
755 758 elif hasattr(repository, 'repo_type'):
756 759 _type = repository.repo_type
757 760 else:
758 761 _type = repository
759 762 return _type == self._backend_alias
760 763
761 764 is_git = _RepoChecker('git')
762 765 is_hg = _RepoChecker('hg')
763 766 is_svn = _RepoChecker('svn')
764 767
765 768
766 769 def get_repo_type_by_name(repo_name):
767 770 repo = Repository.get_by_repo_name(repo_name)
768 771 return repo.repo_type
769 772
770 773
771 774 def is_svn_without_proxy(repository):
772 775 from rhodecode import CONFIG
773 776 if is_svn(repository):
774 777 if not CONFIG.get('rhodecode_proxy_subversion_http_requests', False):
775 778 return True
776 779 return False
777 780
778 781
779 782 def discover_user(author):
780 783 """
781 784 Tries to discover RhodeCode User based on the author string. Author string
782 785 is typically `FirstName LastName <email@address.com>`
783 786 """
784 787
785 788 # if author is already an instance use it for extraction
786 789 if isinstance(author, User):
787 790 return author
788 791
789 792 # Valid email in the attribute passed, see if they're in the system
790 793 _email = author_email(author)
791 794 if _email != '':
792 795 user = User.get_by_email(_email, case_insensitive=True, cache=True)
793 796 if user is not None:
794 797 return user
795 798
796 799 # Maybe it's a username, we try to extract it and fetch by username ?
797 800 _author = author_name(author)
798 801 user = User.get_by_username(_author, case_insensitive=True, cache=True)
799 802 if user is not None:
800 803 return user
801 804
802 805 return None
803 806
804 807
805 808 def email_or_none(author):
806 809 # extract email from the commit string
807 810 _email = author_email(author)
808 811
809 812 # If we have an email, use it, otherwise
810 813 # see if it contains a username we can get an email from
811 814 if _email != '':
812 815 return _email
813 816 else:
814 817 user = User.get_by_username(
815 818 author_name(author), case_insensitive=True, cache=True)
816 819
817 820 if user is not None:
818 821 return user.email
819 822
820 823 # No valid email, not a valid user in the system, none!
821 824 return None
822 825
823 826
824 827 def link_to_user(author, length=0, **kwargs):
825 828 user = discover_user(author)
826 829 # user can be None, but if we have it already it means we can re-use it
827 830 # in the person() function, so we save 1 intensive-query
828 831 if user:
829 832 author = user
830 833
831 834 display_person = person(author, 'username_or_name_or_email')
832 835 if length:
833 836 display_person = shorter(display_person, length)
834 837
835 838 if user:
836 839 return link_to(
837 840 escape(display_person),
838 841 url('user_profile', username=user.username),
839 842 **kwargs)
840 843 else:
841 844 return escape(display_person)
842 845
843 846
844 847 def person(author, show_attr="username_and_name"):
845 848 user = discover_user(author)
846 849 if user:
847 850 return getattr(user, show_attr)
848 851 else:
849 852 _author = author_name(author)
850 853 _email = email(author)
851 854 return _author or _email
852 855
853 856
854 857 def author_string(email):
855 858 if email:
856 859 user = User.get_by_email(email, case_insensitive=True, cache=True)
857 860 if user:
858 861 if user.firstname or user.lastname:
859 862 return '%s %s &lt;%s&gt;' % (user.firstname, user.lastname, email)
860 863 else:
861 864 return email
862 865 else:
863 866 return email
864 867 else:
865 868 return None
866 869
867 870
868 871 def person_by_id(id_, show_attr="username_and_name"):
869 872 # attr to return from fetched user
870 873 person_getter = lambda usr: getattr(usr, show_attr)
871 874
872 875 #maybe it's an ID ?
873 876 if str(id_).isdigit() or isinstance(id_, int):
874 877 id_ = int(id_)
875 878 user = User.get(id_)
876 879 if user is not None:
877 880 return person_getter(user)
878 881 return id_
879 882
880 883
881 884 def gravatar_with_user(author, show_disabled=False):
882 885 from rhodecode.lib.utils import PartialRenderer
883 886 _render = PartialRenderer('base/base.html')
884 887 return _render('gravatar_with_user', author, show_disabled=show_disabled)
885 888
886 889
887 890 def desc_stylize(value):
888 891 """
889 892 converts tags from value into html equivalent
890 893
891 894 :param value:
892 895 """
893 896 if not value:
894 897 return ''
895 898
896 899 value = re.sub(r'\[see\ \=\>\ *([a-zA-Z0-9\/\=\?\&\ \:\/\.\-]*)\]',
897 900 '<div class="metatag" tag="see">see =&gt; \\1 </div>', value)
898 901 value = re.sub(r'\[license\ \=\>\ *([a-zA-Z0-9\/\=\?\&\ \:\/\.\-]*)\]',
899 902 '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/\\1">\\1</a></div>', value)
900 903 value = re.sub(r'\[(requires|recommends|conflicts|base)\ \=\>\ *([a-zA-Z0-9\-\/]*)\]',
901 904 '<div class="metatag" tag="\\1">\\1 =&gt; <a href="/\\2">\\2</a></div>', value)
902 905 value = re.sub(r'\[(lang|language)\ \=\>\ *([a-zA-Z\-\/\#\+]*)\]',
903 906 '<div class="metatag" tag="lang">\\2</div>', value)
904 907 value = re.sub(r'\[([a-z]+)\]',
905 908 '<div class="metatag" tag="\\1">\\1</div>', value)
906 909
907 910 return value
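# usage sketch:
# desc_stylize('[lang => python] [stable]') produces
# '<div class="metatag" tag="lang">python</div> <div class="metatag" tag="stable">stable</div>'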
908 911
909 912
910 913 def escaped_stylize(value):
911 914 """
912 915 converts tags from value into html equivalent, but escaping its value first
913 916 """
914 917 if not value:
915 918 return ''
916 919
917 920 # Using default webhelper escape method, but has to force it as a
918 921 # plain unicode instead of a markup tag to be used in regex expressions
919 922 value = unicode(escape(safe_unicode(value)))
920 923
921 924 value = re.sub(r'\[see\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]',
922 925 '<div class="metatag" tag="see">see =&gt; \\1 </div>', value)
923 926 value = re.sub(r'\[license\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]',
924 927 '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/\\1">\\1</a></div>', value)
925 928 value = re.sub(r'\[(requires|recommends|conflicts|base)\ \=\&gt;\ *([a-zA-Z0-9\-\/]*)\]',
926 929 '<div class="metatag" tag="\\1">\\1 =&gt; <a href="/\\2">\\2</a></div>', value)
927 930 value = re.sub(r'\[(lang|language)\ \=\&gt;\ *([a-zA-Z\-\/\#\+]*)\]',
928 931 '<div class="metatag" tag="lang">\\2</div>', value)
929 932 value = re.sub(r'\[([a-z]+)\]',
930 933 '<div class="metatag" tag="\\1">\\1</div>', value)
931 934
932 935 return value
933 936
934 937
935 938 def bool2icon(value):
936 939 """
937 940 Returns boolean value of a given value, represented as html element with
938 941 classes that will represent icons
939 942
940 943 :param value: given value to convert to html node
941 944 """
942 945
943 946 if value: # does bool conversion
944 947 return HTML.tag('i', class_="icon-true")
945 948 else: # not true as bool
946 949 return HTML.tag('i', class_="icon-false")
947 950
948 951
949 952 #==============================================================================
950 953 # PERMS
951 954 #==============================================================================
952 955 from rhodecode.lib.auth import HasPermissionAny, HasPermissionAll, \
953 956 HasRepoPermissionAny, HasRepoPermissionAll, HasRepoGroupPermissionAll, \
954 957 HasRepoGroupPermissionAny, HasRepoPermissionAnyApi, get_csrf_token, \
955 958 csrf_token_key
956 959
957 960
958 961 #==============================================================================
959 962 # GRAVATAR URL
960 963 #==============================================================================
961 964 class InitialsGravatar(object):
962 965 def __init__(self, email_address, first_name, last_name, size=30,
963 966 background=None, text_color='#fff'):
964 967 self.size = size
965 968 self.first_name = first_name
966 969 self.last_name = last_name
967 970 self.email_address = email_address
968 971 self.background = background or self.str2color(email_address)
969 972 self.text_color = text_color
970 973
971 974 def get_color_bank(self):
972 975 """
973 976 returns a predefined list of colors that gravatars can use.
974 977 Those are randomized distinct colors that guarantee readability and
975 978 uniqueness.
976 979
977 980 generated with: http://phrogz.net/css/distinct-colors.html
978 981 """
979 982 return [
980 983 '#bf3030', '#a67f53', '#00ff00', '#5989b3', '#392040', '#d90000',
981 984 '#402910', '#204020', '#79baf2', '#a700b3', '#bf6060', '#7f5320',
982 985 '#008000', '#003059', '#ee00ff', '#ff0000', '#8c4b00', '#007300',
983 986 '#005fb3', '#de73e6', '#ff4040', '#ffaa00', '#3df255', '#203140',
984 987 '#47004d', '#591616', '#664400', '#59b365', '#0d2133', '#83008c',
985 988 '#592d2d', '#bf9f60', '#73e682', '#1d3f73', '#73006b', '#402020',
986 989 '#b2862d', '#397341', '#597db3', '#e600d6', '#a60000', '#736039',
987 990 '#00b318', '#79aaf2', '#330d30', '#ff8080', '#403010', '#16591f',
988 991 '#002459', '#8c4688', '#e50000', '#ffbf40', '#00732e', '#102340',
989 992 '#bf60ac', '#8c4646', '#cc8800', '#00a642', '#1d3473', '#b32d98',
990 993 '#660e00', '#ffd580', '#80ffb2', '#7391e6', '#733967', '#d97b6c',
991 994 '#8c5e00', '#59b389', '#3967e6', '#590047', '#73281d', '#665200',
992 995 '#00e67a', '#2d50b3', '#8c2377', '#734139', '#b2982d', '#16593a',
993 996 '#001859', '#ff00aa', '#a65e53', '#ffcc00', '#0d3321', '#2d3959',
994 997 '#731d56', '#401610', '#4c3d00', '#468c6c', '#002ca6', '#d936a3',
995 998 '#d94c36', '#403920', '#36d9a3', '#0d1733', '#592d4a', '#993626',
996 999 '#cca300', '#00734d', '#46598c', '#8c005e', '#7f1100', '#8c7000',
997 1000 '#00a66f', '#7382e6', '#b32d74', '#d9896c', '#ffe680', '#1d7362',
998 1001 '#364cd9', '#73003d', '#d93a00', '#998a4d', '#59b3a1', '#5965b3',
999 1002 '#e5007a', '#73341d', '#665f00', '#00b38f', '#0018b3', '#59163a',
1000 1003 '#b2502d', '#bfb960', '#00ffcc', '#23318c', '#a6537f', '#734939',
1001 1004 '#b2a700', '#104036', '#3d3df2', '#402031', '#e56739', '#736f39',
1002 1005 '#79f2ea', '#000059', '#401029', '#4c1400', '#ffee00', '#005953',
1003 1006 '#101040', '#990052', '#402820', '#403d10', '#00ffee', '#0000d9',
1004 1007 '#ff80c4', '#a66953', '#eeff00', '#00ccbe', '#8080ff', '#e673a1',
1005 1008 '#a62c00', '#474d00', '#1a3331', '#46468c', '#733950', '#662900',
1006 1009 '#858c23', '#238c85', '#0f0073', '#b20047', '#d9986c', '#becc00',
1007 1010 '#396f73', '#281d73', '#ff0066', '#ff6600', '#dee673', '#59adb3',
1008 1011 '#6559b3', '#590024', '#b2622d', '#98b32d', '#36ced9', '#332d59',
1009 1012 '#40001a', '#733f1d', '#526600', '#005359', '#242040', '#bf6079',
1010 1013 '#735039', '#cef23d', '#007780', '#5630bf', '#66001b', '#b24700',
1011 1014 '#acbf60', '#1d6273', '#25008c', '#731d34', '#a67453', '#50592d',
1012 1015 '#00ccff', '#6600ff', '#ff0044', '#4c1f00', '#8a994d', '#79daf2',
1013 1016 '#a173e6', '#d93662', '#402310', '#aaff00', '#2d98b3', '#8c40ff',
1014 1017 '#592d39', '#ff8c40', '#354020', '#103640', '#1a0040', '#331a20',
1015 1018 '#331400', '#334d00', '#1d5673', '#583973', '#7f0022', '#4c3626',
1016 1019 '#88cc00', '#36a3d9', '#3d0073', '#d9364c', '#33241a', '#698c23',
1017 1020 '#5995b3', '#300059', '#e57382', '#7f3300', '#366600', '#00aaff',
1018 1021 '#3a1659', '#733941', '#663600', '#74b32d', '#003c59', '#7f53a6',
1019 1022 '#73000f', '#ff8800', '#baf279', '#79caf2', '#291040', '#a6293a',
1020 1023 '#b2742d', '#587339', '#0077b3', '#632699', '#400009', '#d9a66c',
1021 1024 '#294010', '#2d4a59', '#aa00ff', '#4c131b', '#b25f00', '#5ce600',
1022 1025 '#267399', '#a336d9', '#990014', '#664e33', '#86bf60', '#0088ff',
1023 1026 '#7700b3', '#593a16', '#073300', '#1d4b73', '#ac60bf', '#e59539',
1024 1027 '#4f8c46', '#368dd9', '#5c0073'
1025 1028 ]
1026 1029
1027 1030 def rgb_to_hex_color(self, rgb_tuple):
1028 1031 """
1029 1032 Converts a given rgb_tuple to a hex color.
1030 1033
1031 1034 :param rgb_tuple: tuple with 3 ints represents rgb color space
1032 1035 """
1033 1036 return '#' + ("".join(map(chr, rgb_tuple)).encode('hex'))
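# usage sketch:
# rgb_to_hex_color((191, 48, 48)) == '#bf3030'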
1034 1037
1035 1038 def email_to_int_list(self, email_str):
1036 1039 """
1037 1040 Get every byte of the hex digest value of email and turn it to integer.
1038 1041 It's going to be always between 0-255
1039 1042 """
1040 1043 digest = md5_safe(email_str.lower())
1041 1044 return [int(digest[i * 2:i * 2 + 2], 16) for i in range(16)]
1042 1045
1043 1046 def pick_color_bank_index(self, email_str, color_bank):
1044 1047 return self.email_to_int_list(email_str)[0] % len(color_bank)
1045 1048
1046 1049 def str2color(self, email_str):
1047 1050 """
1048 1051 Tries to map in a stable algorithm an email to color
1049 1052
1050 1053 :param email_str:
1051 1054 """
1052 1055 color_bank = self.get_color_bank()
1053 1056 # pick position (modulo its length so we always find it in the
1054 1057 # bank even if it's smaller than 256 values)
1055 1058 pos = self.pick_color_bank_index(email_str, color_bank)
1056 1059 return color_bank[pos]
1057 1060
1058 1061 def normalize_email(self, email_address):
1059 1062 import unicodedata
1060 1063 # default host used to fill in the fake/missing email
1061 1064 default_host = u'localhost'
1062 1065
1063 1066 if not email_address:
1064 1067 email_address = u'%s@%s' % (User.DEFAULT_USER, default_host)
1065 1068
1066 1069 email_address = safe_unicode(email_address)
1067 1070
1068 1071 if u'@' not in email_address:
1069 1072 email_address = u'%s@%s' % (email_address, default_host)
1070 1073
1071 1074 if email_address.endswith(u'@'):
1072 1075 email_address = u'%s%s' % (email_address, default_host)
1073 1076
1074 1077 email_address = unicodedata.normalize('NFKD', email_address)\
1075 1078 .encode('ascii', 'ignore')
1076 1079 return email_address
1077 1080
1078 1081 def get_initials(self):
1079 1082 """
1080 1083 Returns 2 letter initials calculated based on the input.
1081 1084 The algorithm picks first given email address, and takes first letter
1082 1085 of part before @, and then the first letter of server name. In case
1083 1086 the part before @ is in a format of `somestring.somestring2` it replaces
1084 1087 the server letter with first letter of somestring2
1085 1088
1086 1089 In case function was initialized with both first and lastname, this
1087 1090 overrides the extraction from email by first letter of the first and
1088 1091 last name. We add special logic to that functionality: in case the full name
1089 1092 is compound, like Guido Von Rossum, we use last part of the last name
1090 1093 (Von Rossum) picking `R`.
1091 1094
1092 1095 Function also normalizes the non-ascii characters to their ascii
1093 1096 representation, eg. Ą => A
1094 1097 """
1095 1098 import unicodedata
1096 1099 # replace non-ascii to ascii
1097 1100 first_name = unicodedata.normalize(
1098 1101 'NFKD', safe_unicode(self.first_name)).encode('ascii', 'ignore')
1099 1102 last_name = unicodedata.normalize(
1100 1103 'NFKD', safe_unicode(self.last_name)).encode('ascii', 'ignore')
1101 1104
1102 1105 # do NFKD encoding, and also make sure email has proper format
1103 1106 email_address = self.normalize_email(self.email_address)
1104 1107
1105 1108 # first push the email initials
1106 1109 prefix, server = email_address.split('@', 1)
1107 1110
1108 1111 # check if prefix is maybe a 'firstname.lastname' syntax
1109 1112 _dot_split = prefix.rsplit('.', 1)
1110 1113 if len(_dot_split) == 2:
1111 1114 initials = [_dot_split[0][0], _dot_split[1][0]]
1112 1115 else:
1113 1116 initials = [prefix[0], server[0]]
1114 1117
1115 1118 # then try to replace either firstname or lastname
1116 1119 fn_letter = (first_name or " ")[0].strip()
1117 1120 ln_letter = (last_name.split(' ', 1)[-1] or " ")[0].strip()
1118 1121
1119 1122 if fn_letter:
1120 1123 initials[0] = fn_letter
1121 1124
1122 1125 if ln_letter:
1123 1126 initials[1] = ln_letter
1124 1127
1125 1128 return ''.join(initials).upper()
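# usage sketch:
# InitialsGravatar('john.doe@example.com', '', '').get_initials() == 'JD'
# InitialsGravatar('guido@example.com', 'Guido', 'Von Rossum').get_initials() == 'GR'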
1126 1129
1127 1130 def get_img_data_by_type(self, font_family, img_type):
1128 1131 default_user = """
1129 1132 <svg xmlns="http://www.w3.org/2000/svg"
1130 1133 version="1.1" x="0px" y="0px" width="{size}" height="{size}"
1131 1134 viewBox="-15 -10 439.165 429.164"
1132 1135
1133 1136 xml:space="preserve"
1134 1137 style="background:{background};" >
1135 1138
1136 1139 <path d="M204.583,216.671c50.664,0,91.74-48.075,
1137 1140 91.74-107.378c0-82.237-41.074-107.377-91.74-107.377
1138 1141 c-50.668,0-91.74,25.14-91.74,107.377C112.844,
1139 1142 168.596,153.916,216.671,
1140 1143 204.583,216.671z" fill="{text_color}"/>
1141 1144 <path d="M407.164,374.717L360.88,
1142 1145 270.454c-2.117-4.771-5.836-8.728-10.465-11.138l-71.83-37.392
1143 1146 c-1.584-0.823-3.502-0.663-4.926,0.415c-20.316,
1144 1147 15.366-44.203,23.488-69.076,23.488c-24.877,
1145 1148 0-48.762-8.122-69.078-23.488
1146 1149 c-1.428-1.078-3.346-1.238-4.93-0.415L58.75,
1147 1150 259.316c-4.631,2.41-8.346,6.365-10.465,11.138L2.001,374.717
1148 1151 c-3.191,7.188-2.537,15.412,1.75,22.005c4.285,
1149 1152 6.592,11.537,10.526,19.4,10.526h362.861c7.863,0,15.117-3.936,
1150 1153 19.402-10.527 C409.699,390.129,
1151 1154 410.355,381.902,407.164,374.717z" fill="{text_color}"/>
1152 1155 </svg>""".format(
1153 1156 size=self.size,
1154 1157 background='#979797', # @grey4
1155 1158 text_color=self.text_color,
1156 1159 font_family=font_family)
1157 1160
1158 1161 return {
1159 1162 "default_user": default_user
1160 1163 }[img_type]
1161 1164
1162 1165 def get_img_data(self, svg_type=None):
1163 1166 """
1164 1167 generates the svg metadata for image
1165 1168 """
1166 1169
1167 1170 font_family = ','.join([
1168 1171 'proximanovaregular',
1169 1172 'Proxima Nova Regular',
1170 1173 'Proxima Nova',
1171 1174 'Arial',
1172 1175 'Lucida Grande',
1173 1176 'sans-serif'
1174 1177 ])
1175 1178 if svg_type:
1176 1179 return self.get_img_data_by_type(font_family, svg_type)
1177 1180
1178 1181 initials = self.get_initials()
1179 1182 img_data = """
1180 1183 <svg xmlns="http://www.w3.org/2000/svg" pointer-events="none"
1181 1184 width="{size}" height="{size}"
1182 1185 style="width: 100%; height: 100%; background-color: {background}"
1183 1186 viewBox="0 0 {size} {size}">
1184 1187 <text text-anchor="middle" y="50%" x="50%" dy="0.35em"
1185 1188 pointer-events="auto" fill="{text_color}"
1186 1189 font-family="{font_family}"
1187 1190 style="font-weight: 400; font-size: {f_size}px;">{text}
1188 1191 </text>
1189 1192 </svg>""".format(
1190 1193 size=self.size,
1191 1194 f_size=self.size/1.85, # scale the text inside the box nicely
1192 1195 background=self.background,
1193 1196 text_color=self.text_color,
1194 1197 text=initials.upper(),
1195 1198 font_family=font_family)
1196 1199
1197 1200 return img_data
1198 1201
1199 1202 def generate_svg(self, svg_type=None):
1200 1203 img_data = self.get_img_data(svg_type)
1201 1204 return "data:image/svg+xml;base64,%s" % img_data.encode('base64')
1202 1205
1203 1206
1204 1207 def initials_gravatar(email_address, first_name, last_name, size=30):
1205 1208 svg_type = None
1206 1209 if email_address == User.DEFAULT_USER_EMAIL:
1207 1210 svg_type = 'default_user'
1208 1211 klass = InitialsGravatar(email_address, first_name, last_name, size)
1209 1212 return klass.generate_svg(svg_type=svg_type)
1210 1213
1211 1214
1212 1215 def gravatar_url(email_address, size=30):
1213 1216 # doh, we need to re-import those to mock it later
1214 1217 from pylons import tmpl_context as c
1215 1218
1216 1219 _use_gravatar = c.visual.use_gravatar
1217 1220 _gravatar_url = c.visual.gravatar_url or User.DEFAULT_GRAVATAR_URL
1218 1221
1219 1222 email_address = email_address or User.DEFAULT_USER_EMAIL
1220 1223 if isinstance(email_address, unicode):
1221 1224 # hashlib crashes on unicode items
1222 1225 email_address = safe_str(email_address)
1223 1226
1224 1227 # empty email or default user
1225 1228 if not email_address or email_address == User.DEFAULT_USER_EMAIL:
1226 1229 return initials_gravatar(User.DEFAULT_USER_EMAIL, '', '', size=size)
1227 1230
1228 1231 if _use_gravatar:
1229 1232 # TODO: Disuse pyramid thread locals. Think about another solution to
1230 1233 # get the host and schema here.
1231 1234 request = get_current_request()
1232 1235 tmpl = safe_str(_gravatar_url)
1233 1236 tmpl = tmpl.replace('{email}', email_address)\
1234 1237 .replace('{md5email}', md5_safe(email_address.lower())) \
1235 1238 .replace('{netloc}', request.host)\
1236 1239 .replace('{scheme}', request.scheme)\
1237 1240 .replace('{size}', safe_str(size))
1238 1241 return tmpl
1239 1242 else:
1240 1243 return initials_gravatar(email_address, '', '', size=size)
1241 1244
1242 1245
1243 1246 class Page(_Page):
1244 1247 """
1245 1248 Custom pager to match rendering style with paginator
1246 1249 """
1247 1250
1248 1251 def _get_pos(self, cur_page, max_page, items):
1249 1252 edge = (items / 2) + 1
1250 1253 if (cur_page <= edge):
1251 1254 radius = max(items / 2, items - cur_page)
1252 1255 elif (max_page - cur_page) < edge:
1253 1256 radius = (items - 1) - (max_page - cur_page)
1254 1257 else:
1255 1258 radius = items / 2
1256 1259
1257 1260 left = max(1, (cur_page - (radius)))
1258 1261 right = min(max_page, cur_page + (radius))
1259 1262 return left, cur_page, right
1260 1263
1261 1264 def _range(self, regexp_match):
1262 1265 """
1263 1266 Return range of linked pages (e.g. '1 2 [3] 4 5 6 7 8').
1264 1267
1265 1268 Arguments:
1266 1269
1267 1270 regexp_match
1268 1271 A "re" (regular expressions) match object containing the
1269 1272 radius of linked pages around the current page in
1270 1273 regexp_match.group(1) as a string
1271 1274
1272 1275 This function is supposed to be called as a callable in
1273 1276 re.sub.
1274 1277
1275 1278 """
1276 1279 radius = int(regexp_match.group(1))
1277 1280
1278 1281 # Compute the first and last page number within the radius
1279 1282 # e.g. '1 .. 5 6 [7] 8 9 .. 12'
1280 1283 # -> leftmost_page = 5
1281 1284 # -> rightmost_page = 9
1282 1285 leftmost_page, _cur, rightmost_page = self._get_pos(self.page,
1283 1286 self.last_page,
1284 1287 (radius * 2) + 1)
1285 1288 nav_items = []
1286 1289
1287 1290 # Create a link to the first page (unless we are on the first page
1288 1291 # or there would be no need to insert '..' spacers)
1289 1292 if self.page != self.first_page and self.first_page < leftmost_page:
1290 1293 nav_items.append(self._pagerlink(self.first_page, self.first_page))
1291 1294
1292 1295 # Insert dots if there are pages between the first page
1293 1296 # and the currently displayed page range
1294 1297 if leftmost_page - self.first_page > 1:
1295 1298 # Wrap in a SPAN tag if nolink_attr is set
1296 1299 text = '..'
1297 1300 if self.dotdot_attr:
1298 1301 text = HTML.span(c=text, **self.dotdot_attr)
1299 1302 nav_items.append(text)
1300 1303
1301 1304 for thispage in xrange(leftmost_page, rightmost_page + 1):
1302 1305 # Highlight the current page number and do not use a link
1303 1306 if thispage == self.page:
1304 1307 text = '%s' % (thispage,)
1305 1308 # Wrap in a SPAN tag if nolink_attr is set
1306 1309 if self.curpage_attr:
1307 1310 text = HTML.span(c=text, **self.curpage_attr)
1308 1311 nav_items.append(text)
1309 1312 # Otherwise create just a link to that page
1310 1313 else:
1311 1314 text = '%s' % (thispage,)
1312 1315 nav_items.append(self._pagerlink(thispage, text))
1313 1316
1314 1317 # Insert dots if there are pages between the displayed
1315 1318 # page numbers and the end of the page range
1316 1319 if self.last_page - rightmost_page > 1:
1317 1320 text = '..'
1318 1321 # Wrap in a SPAN tag if nolink_attr is set
1319 1322 if self.dotdot_attr:
1320 1323 text = HTML.span(c=text, **self.dotdot_attr)
1321 1324 nav_items.append(text)
1322 1325
1323 1326 # Create a link to the very last page (unless we are on the last
1324 1327 # page or there would be no need to insert '..' spacers)
1325 1328 if self.page != self.last_page and rightmost_page < self.last_page:
1326 1329 nav_items.append(self._pagerlink(self.last_page, self.last_page))
1327 1330
1328 1331 ## prerender links
1329 1332 #_page_link = url.current()
1330 1333 #nav_items.append(literal('<link rel="prerender" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
1331 1334 #nav_items.append(literal('<link rel="prefetch" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
1332 1335 return self.separator.join(nav_items)
1333 1336
1334 1337 def pager(self, format='~2~', page_param='page', partial_param='partial',
1335 1338 show_if_single_page=False, separator=' ', onclick=None,
1336 1339 symbol_first='<<', symbol_last='>>',
1337 1340 symbol_previous='<', symbol_next='>',
1338 1341 link_attr={'class': 'pager_link', 'rel': 'prerender'},
1339 1342 curpage_attr={'class': 'pager_curpage'},
1340 1343 dotdot_attr={'class': 'pager_dotdot'}, **kwargs):
1341 1344
1342 1345 self.curpage_attr = curpage_attr
1343 1346 self.separator = separator
1344 1347 self.pager_kwargs = kwargs
1345 1348 self.page_param = page_param
1346 1349 self.partial_param = partial_param
1347 1350 self.onclick = onclick
1348 1351 self.link_attr = link_attr
1349 1352 self.dotdot_attr = dotdot_attr
1350 1353
1351 1354 # Don't show navigator if there is no more than one page
1352 1355 if self.page_count == 0 or (self.page_count == 1 and not show_if_single_page):
1353 1356 return ''
1354 1357
1355 1358 from string import Template
1356 1359 # Replace ~...~ in token format by range of pages
1357 1360 result = re.sub(r'~(\d+)~', self._range, format)
1358 1361
1359 1362 # Interpolate '%' variables
1360 1363 result = Template(result).safe_substitute({
1361 1364 'first_page': self.first_page,
1362 1365 'last_page': self.last_page,
1363 1366 'page': self.page,
1364 1367 'page_count': self.page_count,
1365 1368 'items_per_page': self.items_per_page,
1366 1369 'first_item': self.first_item,
1367 1370 'last_item': self.last_item,
1368 1371 'item_count': self.item_count,
1369 1372 'link_first': self.page > self.first_page and \
1370 1373 self._pagerlink(self.first_page, symbol_first) or '',
1371 1374 'link_last': self.page < self.last_page and \
1372 1375 self._pagerlink(self.last_page, symbol_last) or '',
1373 1376 'link_previous': self.previous_page and \
1374 1377 self._pagerlink(self.previous_page, symbol_previous) \
1375 1378 or HTML.span(symbol_previous, class_="pg-previous disabled"),
1376 1379 'link_next': self.next_page and \
1377 1380 self._pagerlink(self.next_page, symbol_next) \
1378 1381 or HTML.span(symbol_next, class_="pg-next disabled")
1379 1382 })
1380 1383
1381 1384 return literal(result)
1382 1385
1383 1386
1384 1387 #==============================================================================
1385 1388 # REPO PAGER, PAGER FOR REPOSITORY
1386 1389 #==============================================================================
1387 1390 class RepoPage(Page):
1388 1391
1389 1392 def __init__(self, collection, page=1, items_per_page=20,
1390 1393 item_count=None, url=None, **kwargs):
1391 1394
1392 1395 """Create a "RepoPage" instance, a special pager for paging
1393 1396 a repository
1394 1397 """
1395 1398 self._url_generator = url
1396 1399
1397 1400 # Save the kwargs class-wide so they can be used in the pager() method
1398 1401 self.kwargs = kwargs
1399 1402
1400 1403 # Save a reference to the collection
1401 1404 self.original_collection = collection
1402 1405
1403 1406 self.collection = collection
1404 1407
1405 1408 # The self.page is the number of the current page.
1406 1409 # The first page has the number 1!
1407 1410 try:
1408 1411 self.page = int(page) # make it int() if we get it as a string
1409 1412 except (ValueError, TypeError):
1410 1413 self.page = 1
1411 1414
1412 1415 self.items_per_page = items_per_page
1413 1416
1414 1417 # Unless the user tells us how many items the collections has
1415 1418 # we calculate that ourselves.
1416 1419 if item_count is not None:
1417 1420 self.item_count = item_count
1418 1421 else:
1419 1422 self.item_count = len(self.collection)
1420 1423
1421 1424 # Compute the number of the first and last available page
1422 1425 if self.item_count > 0:
1423 1426 self.first_page = 1
1424 1427 self.page_count = int(math.ceil(float(self.item_count) /
1425 1428 self.items_per_page))
1426 1429 self.last_page = self.first_page + self.page_count - 1
1427 1430
1428 1431 # Make sure that the requested page number is the range of
1429 1432 # valid pages
1430 1433 if self.page > self.last_page:
1431 1434 self.page = self.last_page
1432 1435 elif self.page < self.first_page:
1433 1436 self.page = self.first_page
1434 1437
1435 1438 # Note: the number of items on this page can be less than
1436 1439 # items_per_page if the last page is not full
1437 1440 self.first_item = max(0, (self.item_count) - (self.page *
1438 1441 items_per_page))
1439 1442 self.last_item = ((self.item_count - 1) - items_per_page *
1440 1443 (self.page - 1))
1441 1444
1442 1445 self.items = list(self.collection[self.first_item:self.last_item + 1])
1443 1446
1444 1447 # Links to previous and next page
1445 1448 if self.page > self.first_page:
1446 1449 self.previous_page = self.page - 1
1447 1450 else:
1448 1451 self.previous_page = None
1449 1452
1450 1453 if self.page < self.last_page:
1451 1454 self.next_page = self.page + 1
1452 1455 else:
1453 1456 self.next_page = None
1454 1457
1455 1458 # No items available
1456 1459 else:
1457 1460 self.first_page = None
1458 1461 self.page_count = 0
1459 1462 self.last_page = None
1460 1463 self.first_item = None
1461 1464 self.last_item = None
1462 1465 self.previous_page = None
1463 1466 self.next_page = None
1464 1467 self.items = []
1465 1468
1466 1469 # This is a subclass of the 'list' type. Initialise the list now.
1467 1470 list.__init__(self, reversed(self.items))
1468 1471
1469 1472
1470 1473 def changed_tooltip(nodes):
1471 1474 """
1472 1475 Generates a html string for changed nodes in commit page.
1473 1476 It limits the output to 30 entries
1474 1477
1475 1478 :param nodes: LazyNodesGenerator
1476 1479 """
1477 1480 if nodes:
1478 1481 pref = ': <br/> '
1479 1482 suf = ''
1480 1483 if len(nodes) > 30:
1481 1484 suf = '<br/>' + _(' and %s more') % (len(nodes) - 30)
1482 1485 return literal(pref + '<br/> '.join([safe_unicode(x.path)
1483 1486 for x in nodes[:30]]) + suf)
1484 1487 else:
1485 1488 return ': ' + _('No Files')
1486 1489
1487 1490
1488 1491 def breadcrumb_repo_link(repo):
1489 1492 """
1490 1493 Makes a breadcrumbs path link to repo
1491 1494
1492 1495 ex::
1493 1496 group >> subgroup >> repo
1494 1497
1495 1498 :param repo: a Repository instance
1496 1499 """
1497 1500
1498 1501 path = [
1499 1502 link_to(group.name, url('repo_group_home', group_name=group.group_name))
1500 1503 for group in repo.groups_with_parents
1501 1504 ] + [
1502 1505 link_to(repo.just_name, url('summary_home', repo_name=repo.repo_name))
1503 1506 ]
1504 1507
1505 1508 return literal(' &raquo; '.join(path))
1506 1509
1507 1510
1508 1511 def format_byte_size_binary(file_size):
1509 1512 """
1510 1513 Formats file/folder sizes to standard.
1511 1514 """
1512 1515 formatted_size = format_byte_size(file_size, binary=True)
1513 1516 return formatted_size
1514 1517
1515 1518
1516 1519 def fancy_file_stats(stats):
1517 1520 """
1518 1521 Displays a fancy two-colored bar for the number of added/deleted
1519 1522 lines of code in a file
1520 1523
1521 1524 :param stats: two element list of added/deleted lines of code
1522 1525 """
1523 1526 from rhodecode.lib.diffs import NEW_FILENODE, DEL_FILENODE, \
1524 1527 MOD_FILENODE, RENAMED_FILENODE, CHMOD_FILENODE, BIN_FILENODE
1525 1528
1526 1529 def cgen(l_type, a_v, d_v):
1527 1530 mapping = {'tr': 'top-right-rounded-corner-mid',
1528 1531 'tl': 'top-left-rounded-corner-mid',
1529 1532 'br': 'bottom-right-rounded-corner-mid',
1530 1533 'bl': 'bottom-left-rounded-corner-mid'}
1531 1534 map_getter = lambda x: mapping[x]
1532 1535
1533 1536 if l_type == 'a' and d_v:
1534 1537 #case when added and deleted are present
1535 1538 return ' '.join(map(map_getter, ['tl', 'bl']))
1536 1539
1537 1540 if l_type == 'a' and not d_v:
1538 1541 return ' '.join(map(map_getter, ['tr', 'br', 'tl', 'bl']))
1539 1542
1540 1543 if l_type == 'd' and a_v:
1541 1544 return ' '.join(map(map_getter, ['tr', 'br']))
1542 1545
1543 1546 if l_type == 'd' and not a_v:
1544 1547 return ' '.join(map(map_getter, ['tr', 'br', 'tl', 'bl']))
1545 1548
1546 1549 a, d = stats['added'], stats['deleted']
1547 1550 width = 100
1548 1551
1549 1552 if stats['binary']: # binary operations like chmod/rename etc
1550 1553 lbl = []
1551 1554 bin_op = 0 # undefined
1552 1555
1553 1556 # prefix with bin for binary files
1554 1557 if BIN_FILENODE in stats['ops']:
1555 1558 lbl += ['bin']
1556 1559
1557 1560 if NEW_FILENODE in stats['ops']:
1558 1561 lbl += [_('new file')]
1559 1562 bin_op = NEW_FILENODE
1560 1563 elif MOD_FILENODE in stats['ops']:
1561 1564 lbl += [_('mod')]
1562 1565 bin_op = MOD_FILENODE
1563 1566 elif DEL_FILENODE in stats['ops']:
1564 1567 lbl += [_('del')]
1565 1568 bin_op = DEL_FILENODE
1566 1569 elif RENAMED_FILENODE in stats['ops']:
1567 1570 lbl += [_('rename')]
1568 1571 bin_op = RENAMED_FILENODE
1569 1572
1570 1573 # chmod can go with other operations, so we add a + to lbl if needed
1571 1574 if CHMOD_FILENODE in stats['ops']:
1572 1575 lbl += [_('chmod')]
1573 1576 if bin_op == 0:
1574 1577 bin_op = CHMOD_FILENODE
1575 1578
1576 1579 lbl = '+'.join(lbl)
1577 1580 b_a = '<div class="bin bin%s %s" style="width:100%%">%s</div>' \
1578 1581 % (bin_op, cgen('a', a_v='', d_v=0), lbl)
1579 1582 b_d = '<div class="bin bin1" style="width:0%"></div>'  # plain string, not %-formatted
1580 1583 return literal('<div style="width:%spx">%s%s</div>' % (width, b_a, b_d))
1581 1584
1582 1585 t = stats['added'] + stats['deleted']
1583 1586 unit = float(width) / (t or 1)
1584 1587
1585 1588 # needs > 9% of width to be visible or 0 to be hidden
1586 1589 a_p = max(9, unit * a) if a > 0 else 0
1587 1590 d_p = max(9, unit * d) if d > 0 else 0
1588 1591 p_sum = a_p + d_p
1589 1592
1590 1593 if p_sum > width:
1591 1594 # adjust the percentages so they sum to 100%, since small values were bumped up to 9
1592 1595 if a_p > d_p:
1593 1596 a_p = a_p - (p_sum - width)
1594 1597 else:
1595 1598 d_p = d_p - (p_sum - width)
1596 1599
1597 1600 a_v = a if a > 0 else ''
1598 1601 d_v = d if d > 0 else ''
1599 1602
1600 1603 d_a = '<div class="added %s" style="width:%s%%">%s</div>' % (
1601 1604 cgen('a', a_v, d_v), a_p, a_v
1602 1605 )
1603 1606 d_d = '<div class="deleted %s" style="width:%s%%">%s</div>' % (
1604 1607 cgen('d', a_v, d_v), d_p, d_v
1605 1608 )
1606 1609 return literal('<div style="width:%spx">%s%s</div>' % (width, d_a, d_d))
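# Worked example of the width math above (a sketch, not from the original
# source): with stats = {'added': 3, 'deleted': 1, 'binary': False, 'ops': {}}
# the bar is 100px wide, unit = 100 / 4 = 25, so a_p = 75 and d_p = 25.
# Had either side fallen below 9, it would be bumped to 9 and the other side
# shrunk so that the two widths still sum to 100.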
1607 1610
1608 1611
1609 1612 def urlify_text(text_, safe=True):
1610 1613 """
1611 1614 Extract URLs from text and make HTML links out of them
1612 1615
1613 1616 :param text_:
1614 1617 """
1615 1618
1616 1619 url_pat = re.compile(r'''(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@#.&+]'''
1617 1620 '''|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)''')
1618 1621
1619 1622 def url_func(match_obj):
1620 1623 url_full = match_obj.groups()[0]
1621 1624 return '<a href="%(url)s">%(url)s</a>' % ({'url': url_full})
1622 1625 _newtext = url_pat.sub(url_func, text_)
1623 1626 if safe:
1624 1627 return literal(_newtext)
1625 1628 return _newtext
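# Illustrative sketch (not from the original module) of the expected
# transformation; the sample text is an assumption:
#
#     >>> urlify_text('docs at http://example.com here')
#     literal(u'docs at <a href="http://example.com">http://example.com</a> here')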
1626 1629
1627 1630
1628 1631 def urlify_commits(text_, repository):
1629 1632 """
1630 1633 Extract commit ids from text and make links out of them
1631 1634
1632 1635 :param text_:
1633 1636 :param repository: repo name to build the URL with
1634 1637 """
1635 1638 from pylons import url # doh, we need to re-import url to mock it later
1636 1639 URL_PAT = re.compile(r'(^|\s)([0-9a-fA-F]{12,40})($|\s)')
1637 1640
1638 1641 def url_func(match_obj):
1639 1642 commit_id = match_obj.groups()[1]
1640 1643 pref = match_obj.groups()[0]
1641 1644 suf = match_obj.groups()[2]
1642 1645
1643 1646 tmpl = (
1644 1647 '%(pref)s<a class="%(cls)s" href="%(url)s">'
1645 1648 '%(commit_id)s</a>%(suf)s'
1646 1649 )
1647 1650 return tmpl % {
1648 1651 'pref': pref,
1649 1652 'cls': 'revision-link',
1650 1653 'url': url('changeset_home', repo_name=repository,
1651 1654 revision=commit_id, qualified=True),
1652 1655 'commit_id': commit_id,
1653 1656 'suf': suf
1654 1657 }
1655 1658
1656 1659 newtext = URL_PAT.sub(url_func, text_)
1657 1660
1658 1661 return newtext
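# Illustrative sketch (not from the original module): a 12-40 character hex
# string surrounded by whitespace becomes a changeset link; the href below is
# only a placeholder for whatever url('changeset_home', ...) generates:
#
#     >>> urlify_commits(u'fixed in deadbeefcafe', 'my-repo')
#     u'fixed in <a class="revision-link" href="...">deadbeefcafe</a>'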
1659 1662
1660 1663
1661 1664 def _process_url_func(match_obj, repo_name, uid, entry,
1662 1665 return_raw_data=False):
1663 1666 pref = ''
1664 1667 if match_obj.group().startswith(' '):
1665 1668 pref = ' '
1666 1669
1667 1670 issue_id = ''.join(match_obj.groups())
1668 1671 tmpl = (
1669 1672 '%(pref)s<a class="%(cls)s" href="%(url)s">'
1670 1673 '%(issue-prefix)s%(id-repr)s'
1671 1674 '</a>')
1672 1675
1673 1676 (repo_name_cleaned,
1674 1677 parent_group_name) = RepoGroupModel().\
1675 1678 _get_group_name_and_parent(repo_name)
1676 1679
1677 1680 # variables replacement
1678 1681 named_vars = {
1679 1682 'id': issue_id,
1680 1683 'repo': repo_name,
1681 1684 'repo_name': repo_name_cleaned,
1682 1685 'group_name': parent_group_name
1683 1686 }
1684 1687 # named regex variables
1685 1688 named_vars.update(match_obj.groupdict())
1686 1689 _url = string.Template(entry['url']).safe_substitute(**named_vars)
1687 1690
1688 1691 data = {
1689 1692 'pref': pref,
1690 1693 'cls': 'issue-tracker-link',
1691 1694 'url': _url,
1692 1695 'id-repr': issue_id,
1693 1696 'issue-prefix': entry['pref'],
1694 1697 'serv': entry['url'],
1695 1698 }
1696 1699 if return_raw_data:
1697 1700 return {
1698 1701 'id': issue_id,
1699 1702 'url': _url
1700 1703 }
1701 1704 return tmpl % data
1702 1705
1703 1706
1704 1707 def process_patterns(text_string, repo_name, config=None):
1705 1708 repo = None
1706 1709 if repo_name:
1707 1710 # Retrieve the repo to avoid an invalid repo_name exploding in
1708 1711 # IssueTrackerSettingsModel, while still passing the (possibly invalid) name further down
1709 1712 repo = Repository.get_by_repo_name(repo_name, cache=True)
1710 1713
1711 1714 settings_model = IssueTrackerSettingsModel(repo=repo)
1712 1715 active_entries = settings_model.get_settings(cache=True)
1713 1716
1714 1717 issues_data = []
1715 1718 newtext = text_string
1716 1719 for uid, entry in active_entries.items():
1717 1720 log.debug('found issue tracker entry with uid %s' % (uid,))
1718 1721
1719 1722 if not (entry['pat'] and entry['url']):
1720 1723 log.debug('skipping due to missing data')
1721 1724 continue
1722 1725
1723 1726 log.debug('issue tracker entry: uid: `%s` PAT:%s URL:%s PREFIX:%s'
1724 1727 % (uid, entry['pat'], entry['url'], entry['pref']))
1725 1728
1726 1729 try:
1727 1730 pattern = re.compile(r'%s' % entry['pat'])
1728 1731 except re.error:
1729 1732 log.exception(
1730 1733 'issue tracker pattern: `%s` failed to compile',
1731 1734 entry['pat'])
1732 1735 continue
1733 1736
1734 1737 data_func = partial(
1735 1738 _process_url_func, repo_name=repo_name, entry=entry, uid=uid,
1736 1739 return_raw_data=True)
1737 1740
1738 1741 for match_obj in pattern.finditer(text_string):
1739 1742 issues_data.append(data_func(match_obj))
1740 1743
1741 1744 url_func = partial(
1742 1745 _process_url_func, repo_name=repo_name, entry=entry, uid=uid)
1743 1746
1744 1747 newtext = pattern.sub(url_func, newtext)
1745 1748 log.debug('processed prefix:uid `%s`' % (uid,))
1746 1749
1747 1750 return newtext, issues_data
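# Illustrative sketch (not from the original module): each active issue
# tracker entry carries 'pat', 'url' and 'pref' keys; the values below are
# assumptions used only to show the shape of the result:
#
#     entry = {'pat': r'#(?P<issue_id>\d+)',
#              'url': 'https://tracker.example.com/issue/${id}',
#              'pref': '#'}
#     # process_patterns('fixes #42', 'my-repo') would then return the text
#     # with '#42' wrapped in an <a class="issue-tracker-link"> tag, plus
#     # issues_data == [{'id': '42', 'url': 'https://tracker.example.com/issue/42'}]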
1748 1751
1749 1752
1750 1753 def urlify_commit_message(commit_text, repository=None):
1751 1754 """
1752 1755 Parses the given text message and makes proper links.
1753 1756 Issues are linked to the configured issue tracker, and the rest become commit links.
1754 1757
1755 1758 :param commit_text:
1756 1759 :param repository:
1757 1760 """
1758 1761 from pylons import url # doh, we need to re-import url to mock it later
1759 1762
1760 1763 def escaper(string):
1761 1764 return string.replace('<', '&lt;').replace('>', '&gt;')
1762 1765
1763 1766 newtext = escaper(commit_text)
1764 1767
1765 1768 # extract http/https links and make them real urls
1766 1769 newtext = urlify_text(newtext, safe=False)
1767 1770
1768 1771 # urlify commits - extract commit ids and make link out of them, if we have
1769 1772 # the scope of repository present.
1770 1773 if repository:
1771 1774 newtext = urlify_commits(newtext, repository)
1772 1775
1773 1776 # process issue tracker patterns
1774 1777 newtext, issues = process_patterns(newtext, repository or '')
1775 1778
1776 1779 return literal(newtext)
1777 1780
1778 1781
1779 1782 def rst(source, mentions=False):
1780 1783 return literal('<div class="rst-block">%s</div>' %
1781 1784 MarkupRenderer.rst(source, mentions=mentions))
1782 1785
1783 1786
1784 1787 def markdown(source, mentions=False):
1785 1788 return literal('<div class="markdown-block">%s</div>' %
1786 1789 MarkupRenderer.markdown(source, flavored=True,
1787 1790 mentions=mentions))
1788 1791
1789 1792 def renderer_from_filename(filename, exclude=None):
1790 1793 return MarkupRenderer.renderer_from_filename(filename, exclude=exclude)
1791 1794
1792 1795
1793 1796 def render(source, renderer='rst', mentions=False):
1794 1797 if renderer == 'rst':
1795 1798 return rst(source, mentions=mentions)
1796 1799 if renderer == 'markdown':
1797 1800 return markdown(source, mentions=mentions)
1798 1801
1799 1802
1800 1803 def commit_status(repo, commit_id):
1801 1804 return ChangesetStatusModel().get_status(repo, commit_id)
1802 1805
1803 1806
1804 1807 def commit_status_lbl(commit_status):
1805 1808 return dict(ChangesetStatus.STATUSES).get(commit_status)
1806 1809
1807 1810
1808 1811 def commit_time(repo_name, commit_id):
1809 1812 repo = Repository.get_by_repo_name(repo_name)
1810 1813 commit = repo.get_commit(commit_id=commit_id)
1811 1814 return commit.date
1812 1815
1813 1816
1814 1817 def get_permission_name(key):
1815 1818 return dict(Permission.PERMS).get(key)
1816 1819
1817 1820
1818 1821 def journal_filter_help():
1819 1822 return _(
1820 1823 'Example filter terms:\n' +
1821 1824 ' repository:vcs\n' +
1822 1825 ' username:marcin\n' +
1823 1826 ' action:*push*\n' +
1824 1827 ' ip:127.0.0.1\n' +
1825 1828 ' date:20120101\n' +
1826 1829 ' date:[20120101100000 TO 20120102]\n' +
1827 1830 '\n' +
1828 1831 'Generate wildcards using \'*\' character:\n' +
1829 1832 ' "repository:vcs*" - search everything starting with \'vcs\'\n' +
1830 1833 ' "repository:*vcs*" - search for repository containing \'vcs\'\n' +
1831 1834 '\n' +
1832 1835 'Optional AND / OR operators in queries\n' +
1833 1836 ' "repository:vcs OR repository:test"\n' +
1834 1837 ' "username:test AND repository:test*"\n'
1835 1838 )
1836 1839
1837 1840
1838 1841 def not_mapped_error(repo_name):
1839 1842 flash(_('%s repository is not mapped to db, perhaps'
1840 1843 ' it was created or renamed on the filesystem;'
1841 1844 ' please run the application again'
1842 1845 ' in order to rescan repositories') % repo_name, category='error')
1843 1846
1844 1847
1845 1848 def ip_range(ip_addr):
1846 1849 from rhodecode.model.db import UserIpMap
1847 1850 s, e = UserIpMap._get_ip_range(ip_addr)
1848 1851 return '%s - %s' % (s, e)
1849 1852
1850 1853
1851 1854 def form(url, method='post', needs_csrf_token=True, **attrs):
1852 1855 """Wrapper around webhelpers.tags.form to prevent CSRF attacks."""
1853 1856 if method.lower() != 'get' and needs_csrf_token:
1854 1857 raise Exception(
1855 1858 'Forms to POST/PUT/DELETE endpoints should have (in general) a ' +
1856 1859 'CSRF token. If the endpoint does not require such token you can ' +
1857 1860 'explicitly set the parameter needs_csrf_token to false.')
1858 1861
1859 1862 return wh_form(url, method=method, **attrs)
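# Illustrative sketch (not from the original module); the URLs are assumptions:
#
#     form('/search', method='get')                        # fine, GET needs no CSRF token
#     form('/user/delete', method='post')                   # raises Exception
#     form('/user/delete', method='post',
#          needs_csrf_token=False)                          # explicit opt-out
#     # protected POST forms should normally use secure_form() below instead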
1860 1863
1861 1864
1862 1865 def secure_form(url, method="POST", multipart=False, **attrs):
1863 1866 """Start a form tag that points the action to an url. This
1864 1867 form tag will also include the hidden field containing
1865 1868 the auth token.
1866 1869
1867 1870 The url options should be given either as a string, or as a
1868 1871 ``url()`` function. The method for the form defaults to POST.
1869 1872
1870 1873 Options:
1871 1874
1872 1875 ``multipart``
1873 1876 If set to True, the enctype is set to "multipart/form-data".
1874 1877 ``method``
1875 1878 The method to use when submitting the form, usually either
1876 1879 "GET" or "POST". If "PUT", "DELETE", or another verb is used, a
1877 1880 hidden input with name _method is added to simulate the verb
1878 1881 over POST.
1879 1882
1880 1883 """
1881 1884 from webhelpers.pylonslib.secure_form import insecure_form
1882 1885 form = insecure_form(url, method, multipart, **attrs)
1883 1886 token = csrf_input()
1884 1887 return literal("%s\n%s" % (form, token))
1885 1888
1886 1889 def csrf_input():
1887 1890 return literal(
1888 1891 '<input type="hidden" id="{}" name="{}" value="{}">'.format(
1889 1892 csrf_token_key, csrf_token_key, get_csrf_token()))
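# Illustrative sketch (not from the original module): secure_form() simply
# appends the hidden CSRF input to the plain form tag, so the rendered markup
# looks roughly like this (field name comes from csrf_token_key, the value is
# a placeholder):
#
#     <form action="/my-repo/settings" method="POST">
#     <input type="hidden" id="..." name="..." value="...">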
1890 1893
1891 1894 def dropdownmenu(name, selected, options, enable_filter=False, **attrs):
1892 1895 select_html = select(name, selected, options, **attrs)
1893 1896 select2 = """
1894 1897 <script>
1895 1898 $(document).ready(function() {
1896 1899 $('#%s').select2({
1897 1900 containerCssClass: 'drop-menu',
1898 1901 dropdownCssClass: 'drop-menu-dropdown',
1899 1902 dropdownAutoWidth: true%s
1900 1903 });
1901 1904 });
1902 1905 </script>
1903 1906 """
1904 1907 filter_option = """,
1905 1908 minimumResultsForSearch: -1
1906 1909 """
1907 1910 input_id = attrs.get('id') or name
1908 1911 filter_enabled = "" if enable_filter else filter_option
1909 1912 select_script = literal(select2 % (input_id, filter_enabled))
1910 1913
1911 1914 return literal(select_html+select_script)
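# Illustrative sketch (not from the original module): dropdownmenu() emits a
# regular <select> followed by a select2 initialiser keyed on its id; the
# option values are assumptions:
#
#     dropdownmenu('landing_rev', 'rev:tip',
#                  [('rev:tip', 'latest tip')], id='landing_rev')
#     # -> <select id="landing_rev" ...>...</select><script>...select2(...)...</script>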
1912 1915
1913 1916
1914 1917 def get_visual_attr(tmpl_context_var, attr_name):
1915 1918 """
1916 1919 A safe way to get an attribute from the `visual` variable of the template context
1917 1920
1918 1921 :param tmpl_context_var: instance of tmpl_context, usually present as `c`
1919 1922 :param attr_name: name of the attribute we fetch from the c.visual
1920 1923 """
1921 1924 visual = getattr(tmpl_context_var, 'visual', None)
1922 1925 if not visual:
1923 1926 return
1924 1927 else:
1925 1928 return getattr(visual, attr_name, None)
1926 1929
1927 1930
1928 1931 def get_last_path_part(file_node):
1929 1932 if not file_node.path:
1930 1933 return u''
1931 1934
1932 1935 path = safe_unicode(file_node.path.split('/')[-1])
1933 1936 return u'../' + path
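# Illustrative sketch (not from the original module); the path is an assumption:
#
#     >>> get_last_path_part(node)  # node.path == 'docs/README.rst'
#     u'../README.rst'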
1934 1937
1935 1938
1936 1939 def route_path(*args, **kwds):
1937 1940 """
1938 1941 Wrapper around Pyramid's `route_path` function. It is used to generate
1939 1942 URLs from within pylons views or templates. This will be removed when
1940 1943 the pyramid migration is finished.
1941 1944 """
1942 1945 req = get_current_request()
1943 1946 return req.route_path(*args, **kwds)
1944 1947
1945 1948
1946 1949 def static_url(*args, **kwds):
1947 1950 """
1948 1951 Wrapper around Pyramid's `static_url` function. It is used to generate
1949 1952 URLs from within pylons views or templates. This will be removed when
1950 1953 the pyramid migration is finished.
1951 1954 """
1952 1955 req = get_current_request()
1953 1956 return req.static_url(*args, **kwds)
1954 1957
1955 1958
1956 1959 def resource_path(*args, **kwds):
1957 1960 """
1958 1961 Wrapper around Pyramid's `resource_path` function. It is used to generate
1959 1962 URLs from within pylons views or templates. This will be removed when
1960 1963 the pyramid migration is finished.
1961 1964 """
1962 1965 req = get_current_request()
1963 1966 return req.resource_path(*args, **kwds)