comments: allow parsing the issue tracker patterns inside the markup...
marcink -
r1672:a43e4e95 default
@@ -1,1961 +1,1987 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2010-2017 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21 """
22 22 Helper functions
23 23
24 24 Consists of functions typically used within templates, but also
25 25 available to Controllers. This module is available to both as 'h'.
26 26 """
27 27
28 28 import random
29 29 import hashlib
30 30 import StringIO
31 31 import urllib
32 32 import math
33 33 import logging
34 34 import re
35 35 import urlparse
36 36 import time
37 37 import string
38 38 import hashlib
39 39 import pygments
40 40 import itertools
41 41 import fnmatch
42 42
43 43 from datetime import datetime
44 44 from functools import partial
45 45 from pygments.formatters.html import HtmlFormatter
46 46 from pygments import highlight as code_highlight
47 47 from pygments.lexers import (
48 48 get_lexer_by_name, get_lexer_for_filename, get_lexer_for_mimetype)
49 49 from pylons import url as pylons_url
50 50 from pylons.i18n.translation import _, ungettext
51 51 from pyramid.threadlocal import get_current_request
52 52
53 53 from webhelpers.html import literal, HTML, escape
54 54 from webhelpers.html.tools import *
55 55 from webhelpers.html.builder import make_tag
56 56 from webhelpers.html.tags import auto_discovery_link, checkbox, css_classes, \
57 57 end_form, file, form as wh_form, hidden, image, javascript_link, link_to, \
58 58 link_to_if, link_to_unless, ol, required_legend, select, stylesheet_link, \
59 59 submit, text, password, textarea, title, ul, xml_declaration, radio
60 60 from webhelpers.html.tools import auto_link, button_to, highlight, \
61 61 js_obfuscate, mail_to, strip_links, strip_tags, tag_re
62 62 from webhelpers.pylonslib import Flash as _Flash
63 63 from webhelpers.text import chop_at, collapse, convert_accented_entities, \
64 64 convert_misc_entities, lchop, plural, rchop, remove_formatting, \
65 65 replace_whitespace, urlify, truncate, wrap_paragraphs
66 66 from webhelpers.date import time_ago_in_words
67 67 from webhelpers.paginate import Page as _Page
68 68 from webhelpers.html.tags import _set_input_attrs, _set_id_attr, \
69 69 convert_boolean_attrs, NotGiven, _make_safe_id_component
70 70 from webhelpers2.number import format_byte_size
71 71
72 72 from rhodecode.lib.action_parser import action_parser
73 73 from rhodecode.lib.ext_json import json
74 74 from rhodecode.lib.utils import repo_name_slug, get_custom_lexer
75 75 from rhodecode.lib.utils2 import str2bool, safe_unicode, safe_str, \
76 76 get_commit_safe, datetime_to_time, time_to_datetime, time_to_utcdatetime, \
77 77 AttributeDict, safe_int, md5, md5_safe
78 78 from rhodecode.lib.markup_renderer import MarkupRenderer, relative_links
79 79 from rhodecode.lib.vcs.exceptions import CommitDoesNotExistError
80 80 from rhodecode.lib.vcs.backends.base import BaseChangeset, EmptyCommit
81 81 from rhodecode.config.conf import DATE_FORMAT, DATETIME_FORMAT
82 82 from rhodecode.model.changeset_status import ChangesetStatusModel
83 83 from rhodecode.model.db import Permission, User, Repository
84 84 from rhodecode.model.repo_group import RepoGroupModel
85 85 from rhodecode.model.settings import IssueTrackerSettingsModel
86 86
87 87 log = logging.getLogger(__name__)
88 88
89 89
90 90 DEFAULT_USER = User.DEFAULT_USER
91 91 DEFAULT_USER_EMAIL = User.DEFAULT_USER_EMAIL
92 92
93 93
94 94 def url(*args, **kw):
95 95 return pylons_url(*args, **kw)
96 96
97 97
98 98 def pylons_url_current(*args, **kw):
99 99 """
100 100 This function overrides pylons.url.current() which returns the current
101 101 path so that it will also work from a pyramid only context. This
102 102 should be removed once the port to pyramid is complete.
103 103 """
104 104 if not args and not kw:
105 105 request = get_current_request()
106 106 return request.path
107 107 return pylons_url.current(*args, **kw)
108 108
109 109 url.current = pylons_url_current
110 110
111 111
112 112 def url_replace(**qargs):
113 113 """ Returns the current request url while replacing query string args """
114 114
115 115 request = get_current_request()
116 116 new_args = request.GET.mixed()
117 117 new_args.update(qargs)
118 118 return url('', **new_args)
119 119
120 120
121 121 def asset(path, ver=None, **kwargs):
122 122 """
123 123 Helper to generate a static asset file path for rhodecode assets
124 124
125 125 eg. h.asset('images/image.png', ver='3923')
126 126
127 127 :param path: path of asset
128 128 :param ver: optional version query param to append as ?ver=
129 129 """
130 130 request = get_current_request()
131 131 query = {}
132 132 query.update(kwargs)
133 133 if ver:
134 134 query = {'ver': ver}
135 135 return request.static_path(
136 136 'rhodecode:public/{}'.format(path), _query=query)
137 137
138 138
139 139 default_html_escape_table = {
140 140 ord('&'): u'&amp;',
141 141 ord('<'): u'&lt;',
142 142 ord('>'): u'&gt;',
143 143 ord('"'): u'&quot;',
144 144 ord("'"): u'&#39;',
145 145 }
146 146
147 147
148 148 def html_escape(text, html_escape_table=default_html_escape_table):
149 149 """Produce entities within text."""
150 150 return text.translate(html_escape_table)
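
# A minimal usage sketch for html_escape(); assumes a unicode input, since a
# dict-based str.translate() only applies to unicode strings on Python 2.
assert html_escape(u'<tag attr="a&b">') == u'&lt;tag attr=&quot;a&amp;b&quot;&gt;'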
151 151
152 152
153 153 def chop_at_smart(s, sub, inclusive=False, suffix_if_chopped=None):
154 154 """
155 155 Truncate string ``s`` at the first occurrence of ``sub``.
156 156
157 157 If ``inclusive`` is true, truncate just after ``sub`` rather than at it.
158 158 """
159 159 suffix_if_chopped = suffix_if_chopped or ''
160 160 pos = s.find(sub)
161 161 if pos == -1:
162 162 return s
163 163
164 164 if inclusive:
165 165 pos += len(sub)
166 166
167 167 chopped = s[:pos]
168 168 left = s[pos:].strip()
169 169
170 170 if left and suffix_if_chopped:
171 171 chopped += suffix_if_chopped
172 172
173 173 return chopped
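
# Illustrative sketch of chop_at_smart(): truncation at the first occurrence
# of ``sub``, optionally keeping ``sub`` and appending a suffix.
assert chop_at_smart('some text here', 'text') == 'some '
assert chop_at_smart('some text here', 'text',
                     inclusive=True, suffix_if_chopped='...') == 'some text...'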
174 174
175 175
176 176 def shorter(text, size=20):
177 177 postfix = '...'
178 178 if len(text) > size:
179 179 return text[:size - len(postfix)] + postfix
180 180 return text
181 181
182 182
183 183 def _reset(name, value=None, id=NotGiven, type="reset", **attrs):
184 184 """
185 185 Reset button
186 186 """
187 187 _set_input_attrs(attrs, type, name, value)
188 188 _set_id_attr(attrs, id, name)
189 189 convert_boolean_attrs(attrs, ["disabled"])
190 190 return HTML.input(**attrs)
191 191
192 192 reset = _reset
193 193 safeid = _make_safe_id_component
194 194
195 195
196 196 def branding(name, length=40):
197 197 return truncate(name, length, indicator="")
198 198
199 199
200 200 def FID(raw_id, path):
201 201 """
202 202 Creates a unique ID for a filenode, based on the hash of its path and commit;
203 203 it's safe to use in urls
204 204
205 205 :param raw_id:
206 206 :param path:
207 207 """
208 208
209 209 return 'c-%s-%s' % (short_id(raw_id), md5_safe(path)[:12])
210 210
211 211
212 212 class _GetError(object):
213 213 """Get error from form_errors, and represent it as span wrapped error
214 214 message
215 215
216 216 :param field_name: field to fetch errors for
217 217 :param form_errors: form errors dict
218 218 """
219 219
220 220 def __call__(self, field_name, form_errors):
221 221 tmpl = """<span class="error_msg">%s</span>"""
222 222 if form_errors and field_name in form_errors:
223 223 return literal(tmpl % form_errors.get(field_name))
224 224
225 225 get_error = _GetError()
226 226
227 227
228 228 class _ToolTip(object):
229 229
230 230 def __call__(self, tooltip_title, trim_at=50):
231 231 """
232 232 Special function just to wrap our text into nicely formatted,
233 233 autowrapped text
234 234
235 235 :param tooltip_title:
236 236 """
237 237 tooltip_title = escape(tooltip_title)
238 238 tooltip_title = tooltip_title.replace('<', '&lt;').replace('>', '&gt;')
239 239 return tooltip_title
240 240 tooltip = _ToolTip()
241 241
242 242
243 243 def files_breadcrumbs(repo_name, commit_id, file_path):
244 244 if isinstance(file_path, str):
245 245 file_path = safe_unicode(file_path)
246 246
247 247 # TODO: johbo: Is this always a url like path, or is this operating
248 248 # system dependent?
249 249 path_segments = file_path.split('/')
250 250
251 251 repo_name_html = escape(repo_name)
252 252 if len(path_segments) == 1 and path_segments[0] == '':
253 253 url_segments = [repo_name_html]
254 254 else:
255 255 url_segments = [
256 256 link_to(
257 257 repo_name_html,
258 258 url('files_home',
259 259 repo_name=repo_name,
260 260 revision=commit_id,
261 261 f_path=''),
262 262 class_='pjax-link')]
263 263
264 264 last_cnt = len(path_segments) - 1
265 265 for cnt, segment in enumerate(path_segments):
266 266 if not segment:
267 267 continue
268 268 segment_html = escape(segment)
269 269
270 270 if cnt != last_cnt:
271 271 url_segments.append(
272 272 link_to(
273 273 segment_html,
274 274 url('files_home',
275 275 repo_name=repo_name,
276 276 revision=commit_id,
277 277 f_path='/'.join(path_segments[:cnt + 1])),
278 278 class_='pjax-link'))
279 279 else:
280 280 url_segments.append(segment_html)
281 281
282 282 return literal('/'.join(url_segments))
283 283
284 284
285 285 class CodeHtmlFormatter(HtmlFormatter):
286 286 """
287 287 My code Html Formatter for source codes
288 288 """
289 289
290 290 def wrap(self, source, outfile):
291 291 return self._wrap_div(self._wrap_pre(self._wrap_code(source)))
292 292
293 293 def _wrap_code(self, source):
294 294 for cnt, it in enumerate(source):
295 295 i, t = it
296 296 t = '<div id="L%s">%s</div>' % (cnt + 1, t)
297 297 yield i, t
298 298
299 299 def _wrap_tablelinenos(self, inner):
300 300 dummyoutfile = StringIO.StringIO()
301 301 lncount = 0
302 302 for t, line in inner:
303 303 if t:
304 304 lncount += 1
305 305 dummyoutfile.write(line)
306 306
307 307 fl = self.linenostart
308 308 mw = len(str(lncount + fl - 1))
309 309 sp = self.linenospecial
310 310 st = self.linenostep
311 311 la = self.lineanchors
312 312 aln = self.anchorlinenos
313 313 nocls = self.noclasses
314 314 if sp:
315 315 lines = []
316 316
317 317 for i in range(fl, fl + lncount):
318 318 if i % st == 0:
319 319 if i % sp == 0:
320 320 if aln:
321 321 lines.append('<a href="#%s%d" class="special">%*d</a>' %
322 322 (la, i, mw, i))
323 323 else:
324 324 lines.append('<span class="special">%*d</span>' % (mw, i))
325 325 else:
326 326 if aln:
327 327 lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
328 328 else:
329 329 lines.append('%*d' % (mw, i))
330 330 else:
331 331 lines.append('')
332 332 ls = '\n'.join(lines)
333 333 else:
334 334 lines = []
335 335 for i in range(fl, fl + lncount):
336 336 if i % st == 0:
337 337 if aln:
338 338 lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
339 339 else:
340 340 lines.append('%*d' % (mw, i))
341 341 else:
342 342 lines.append('')
343 343 ls = '\n'.join(lines)
344 344
345 345 # in case you wonder about the seemingly redundant <div> here: since the
346 346 # content in the other cell also is wrapped in a div, some browsers in
347 347 # some configurations seem to mess up the formatting...
348 348 if nocls:
349 349 yield 0, ('<table class="%stable">' % self.cssclass +
350 350 '<tr><td><div class="linenodiv" '
351 351 'style="background-color: #f0f0f0; padding-right: 10px">'
352 352 '<pre style="line-height: 125%">' +
353 353 ls + '</pre></div></td><td id="hlcode" class="code">')
354 354 else:
355 355 yield 0, ('<table class="%stable">' % self.cssclass +
356 356 '<tr><td class="linenos"><div class="linenodiv"><pre>' +
357 357 ls + '</pre></div></td><td id="hlcode" class="code">')
358 358 yield 0, dummyoutfile.getvalue()
359 359 yield 0, '</td></tr></table>'
360 360
361 361
362 362 class SearchContentCodeHtmlFormatter(CodeHtmlFormatter):
363 363 def __init__(self, **kw):
364 364 # only show these line numbers if set
365 365 self.only_lines = kw.pop('only_line_numbers', [])
366 366 self.query_terms = kw.pop('query_terms', [])
367 367 self.max_lines = kw.pop('max_lines', 5)
368 368 self.line_context = kw.pop('line_context', 3)
369 369 self.url = kw.pop('url', None)
370 370
371 371 super(CodeHtmlFormatter, self).__init__(**kw)
372 372
373 373 def _wrap_code(self, source):
374 374 for cnt, it in enumerate(source):
375 375 i, t = it
376 376 t = '<pre>%s</pre>' % t
377 377 yield i, t
378 378
379 379 def _wrap_tablelinenos(self, inner):
380 380 yield 0, '<table class="code-highlight %stable">' % self.cssclass
381 381
382 382 last_shown_line_number = 0
383 383 current_line_number = 1
384 384
385 385 for t, line in inner:
386 386 if not t:
387 387 yield t, line
388 388 continue
389 389
390 390 if current_line_number in self.only_lines:
391 391 if last_shown_line_number + 1 != current_line_number:
392 392 yield 0, '<tr>'
393 393 yield 0, '<td class="line">...</td>'
394 394 yield 0, '<td id="hlcode" class="code"></td>'
395 395 yield 0, '</tr>'
396 396
397 397 yield 0, '<tr>'
398 398 if self.url:
399 399 yield 0, '<td class="line"><a href="%s#L%i">%i</a></td>' % (
400 400 self.url, current_line_number, current_line_number)
401 401 else:
402 402 yield 0, '<td class="line"><a href="">%i</a></td>' % (
403 403 current_line_number)
404 404 yield 0, '<td id="hlcode" class="code">' + line + '</td>'
405 405 yield 0, '</tr>'
406 406
407 407 last_shown_line_number = current_line_number
408 408
409 409 current_line_number += 1
410 410
411 411
412 412 yield 0, '</table>'
413 413
414 414
415 415 def extract_phrases(text_query):
416 416 """
417 417 Extracts phrases from a search term string, making sure phrases
418 418 contained in double quotes are kept together, and discarding empty
419 419 or whitespace-only values, eg.
420 420
421 421 'some text "a phrase" more' => ['some', 'text', 'a phrase', 'more']
422 422
423 423 """
424 424
425 425 in_phrase = False
426 426 buf = ''
427 427 phrases = []
428 428 for char in text_query:
429 429 if in_phrase:
430 430 if char == '"': # end phrase
431 431 phrases.append(buf)
432 432 buf = ''
433 433 in_phrase = False
434 434 continue
435 435 else:
436 436 buf += char
437 437 continue
438 438 else:
439 439 if char == '"': # start phrase
440 440 in_phrase = True
441 441 phrases.append(buf)
442 442 buf = ''
443 443 continue
444 444 elif char == ' ':
445 445 phrases.append(buf)
446 446 buf = ''
447 447 continue
448 448 else:
449 449 buf += char
450 450
451 451 phrases.append(buf)
452 452 phrases = [phrase.strip() for phrase in phrases if phrase.strip()]
453 453 return phrases
454 454
455 455
456 456 def get_matching_offsets(text, phrases):
457 457 """
458 458 Returns a list of string offsets in `text` that the list of `phrases` match
459 459
460 460 >>> get_matching_offsets('some text here', ['some', 'here'])
461 461 [(0, 4), (10, 14)]
462 462
463 463 """
464 464 offsets = []
465 465 for phrase in phrases:
466 466 for match in re.finditer(phrase, text):
467 467 offsets.append((match.start(), match.end()))
468 468
469 469 return offsets
470 470
471 471
472 472 def normalize_text_for_matching(x):
473 473 """
474 474 Replaces all non-word characters with spaces and lowercases the string,
475 475 useful for comparing two text strings without punctuation
476 476 """
477 477 return re.sub(r'[^\w]', ' ', x.lower())
478 478
479 479
480 480 def get_matching_line_offsets(lines, terms):
481 481 """ Return a dict of `lines` indices (starting from 1) that match a
482 482 text search query, mapped to lists of matching offsets within each line
483 483
484 484 :param lines: list of strings representing lines
485 485 :param terms: search term string to match in lines eg. 'some text'
488 488 eg.
489 489
490 490 text = '''
491 491 words words words
492 492 words words words
493 493 some text some
494 494 words words words
495 495 words words words
496 496 text here what
497 497 '''
498 498 get_matching_line_offsets(text.splitlines(), 'text')
499 499 {4: [(5, 9)], 7: [(0, 4)]}
500 500
501 501 """
502 502 matching_lines = {}
503 503 phrases = [normalize_text_for_matching(phrase)
504 504 for phrase in extract_phrases(terms)]
505 505
506 506 for line_index, line in enumerate(lines, start=1):
507 507 match_offsets = get_matching_offsets(
508 508 normalize_text_for_matching(line), phrases)
509 509 if match_offsets:
510 510 matching_lines[line_index] = match_offsets
511 511
512 512 return matching_lines
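
# A small, self-contained sketch of the search-highlight pipeline above:
# phrases are extracted from the query, normalized, and matched per line.
# _example_lines is just illustrative data, not part of the module.
_example_lines = ['words words words', 'some text some', 'text here what']
assert get_matching_line_offsets(_example_lines, 'text') == {
    2: [(5, 9)], 3: [(0, 4)]}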
513 513
514 514
515 515 def hsv_to_rgb(h, s, v):
516 516 """ Convert hsv color values to rgb """
517 517
518 518 if s == 0.0:
519 519 return v, v, v
520 520 i = int(h * 6.0) # XXX assume int() truncates!
521 521 f = (h * 6.0) - i
522 522 p = v * (1.0 - s)
523 523 q = v * (1.0 - s * f)
524 524 t = v * (1.0 - s * (1.0 - f))
525 525 i = i % 6
526 526 if i == 0:
527 527 return v, t, p
528 528 if i == 1:
529 529 return q, v, p
530 530 if i == 2:
531 531 return p, v, t
532 532 if i == 3:
533 533 return p, q, v
534 534 if i == 4:
535 535 return t, p, v
536 536 if i == 5:
537 537 return v, p, q
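
# Quick sanity sketch for hsv_to_rgb(): zero saturation yields a grey of the
# given value, and hue 0.0 at full saturation/value maps to pure red.
assert hsv_to_rgb(0.5, 0.0, 0.75) == (0.75, 0.75, 0.75)
assert hsv_to_rgb(0.0, 1.0, 1.0) == (1.0, 0.0, 0.0)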
538 538
539 539
540 540 def unique_color_generator(n=10000, saturation=0.10, lightness=0.95):
541 541 """
542 542 Generator for getting n evenly distributed colors using
543 543 hsv color and the golden ratio. It always returns the same order of colors
544 544
545 545 :param n: number of colors to generate
546 546 :param saturation: saturation of returned colors
547 547 :param lightness: lightness of returned colors
548 548 :returns: RGB tuple
549 549 """
550 550
551 551 golden_ratio = 0.618033988749895
552 552 h = 0.22717784590367374
553 553
554 554 for _ in xrange(n):
555 555 h += golden_ratio
556 556 h %= 1
557 557 HSV_tuple = [h, saturation, lightness]
558 558 RGB_tuple = hsv_to_rgb(*HSV_tuple)
559 559 yield map(lambda x: str(int(x * 256)), RGB_tuple)
560 560
561 561
562 562 def color_hasher(n=10000, saturation=0.10, lightness=0.95):
563 563 """
564 564 Returns a function which when called with an argument returns a unique
565 565 color for that argument, eg.
566 566
567 567 :param n: number of colors to generate
568 568 :param saturation: saturation of returned colors
569 569 :param lightness: lightness of returned colors
570 570 :returns: css RGB string
571 571
572 572 >>> color_hash = color_hasher()
573 573 >>> color_hash('hello')
574 574 'rgb(34, 12, 59)'
575 575 >>> color_hash('hello')
576 576 'rgb(34, 12, 59)'
577 577 >>> color_hash('other')
578 578 'rgb(90, 224, 159)'
579 579 """
580 580
581 581 color_dict = {}
582 582 cgenerator = unique_color_generator(
583 583 saturation=saturation, lightness=lightness)
584 584
585 585 def get_color_string(thing):
586 586 if thing in color_dict:
587 587 col = color_dict[thing]
588 588 else:
589 589 col = color_dict[thing] = cgenerator.next()
590 590 return "rgb(%s)" % (', '.join(col))
591 591
592 592 return get_color_string
593 593
594 594
595 595 def get_lexer_safe(mimetype=None, filepath=None):
596 596 """
597 597 Tries to return a relevant pygments lexer using mimetype/filepath name,
598 598 defaulting to plain text if none could be found
599 599 """
600 600 lexer = None
601 601 try:
602 602 if mimetype:
603 603 lexer = get_lexer_for_mimetype(mimetype)
604 604 if not lexer:
605 605 lexer = get_lexer_for_filename(filepath)
606 606 except pygments.util.ClassNotFound:
607 607 pass
608 608
609 609 if not lexer:
610 610 lexer = get_lexer_by_name('text')
611 611
612 612 return lexer
613 613
614 614
615 615 def get_lexer_for_filenode(filenode):
616 616 lexer = get_custom_lexer(filenode.extension) or filenode.lexer
617 617 return lexer
618 618
619 619
620 620 def pygmentize(filenode, **kwargs):
621 621 """
622 622 pygmentize function using pygments
623 623
624 624 :param filenode:
625 625 """
626 626 lexer = get_lexer_for_filenode(filenode)
627 627 return literal(code_highlight(filenode.content, lexer,
628 628 CodeHtmlFormatter(**kwargs)))
629 629
630 630
631 631 def is_following_repo(repo_name, user_id):
632 632 from rhodecode.model.scm import ScmModel
633 633 return ScmModel().is_following_repo(repo_name, user_id)
634 634
635 635
636 636 class _Message(object):
637 637 """A message returned by ``Flash.pop_messages()``.
638 638
639 639 Converting the message to a string returns the message text. Instances
640 640 also have the following attributes:
641 641
642 642 * ``message``: the message text.
643 643 * ``category``: the category specified when the message was created.
644 644 """
645 645
646 646 def __init__(self, category, message):
647 647 self.category = category
648 648 self.message = message
649 649
650 650 def __str__(self):
651 651 return self.message
652 652
653 653 __unicode__ = __str__
654 654
655 655 def __html__(self):
656 656 return escape(safe_unicode(self.message))
657 657
658 658
659 659 class Flash(_Flash):
660 660
661 661 def pop_messages(self):
662 662 """Return all accumulated messages and delete them from the session.
663 663
664 664 The return value is a list of ``Message`` objects.
665 665 """
666 666 from pylons import session
667 667
668 668 messages = []
669 669
670 670 # Pop the 'old' pylons flash messages. They are tuples of the form
671 671 # (category, message)
672 672 for cat, msg in session.pop(self.session_key, []):
673 673 messages.append(_Message(cat, msg))
674 674
675 675 # Pop the 'new' pyramid flash messages for each category as list
676 676 # of strings.
677 677 for cat in self.categories:
678 678 for msg in session.pop_flash(queue=cat):
679 679 messages.append(_Message(cat, msg))
680 680 # Map messages from the default queue to the 'notice' category.
681 681 for msg in session.pop_flash():
682 682 messages.append(_Message('notice', msg))
683 683
684 684 session.save()
685 685 return messages
686 686
687 687 def json_alerts(self):
688 688 payloads = []
689 689 messages = flash.pop_messages()
690 690 if messages:
691 691 for message in messages:
692 692 subdata = {}
693 693 if hasattr(message.message, 'rsplit'):
694 694 flash_data = message.message.rsplit('|DELIM|', 1)
695 695 org_message = flash_data[0]
696 696 if len(flash_data) > 1:
697 697 subdata = json.loads(flash_data[1])
698 698 else:
699 699 org_message = message.message
700 700 payloads.append({
701 701 'message': {
702 702 'message': u'{}'.format(org_message),
703 703 'level': message.category,
704 704 'force': True,
705 705 'subdata': subdata
706 706 }
707 707 })
708 708 return json.dumps(payloads)
709 709
710 710 flash = Flash()
711 711
712 712 #==============================================================================
713 713 # SCM FILTERS available via h.
714 714 #==============================================================================
715 715 from rhodecode.lib.vcs.utils import author_name, author_email
716 716 from rhodecode.lib.utils2 import credentials_filter, age as _age
717 717 from rhodecode.model.db import User, ChangesetStatus
718 718
719 719 age = _age
720 720 capitalize = lambda x: x.capitalize()
721 721 email = author_email
722 722 short_id = lambda x: x[:12]
723 723 hide_credentials = lambda x: ''.join(credentials_filter(x))
724 724
725 725
726 726 def age_component(datetime_iso, value=None, time_is_local=False):
727 727 title = value or format_date(datetime_iso)
728 728 tzinfo = '+00:00'
729 729
730 730 # detect if we have a timezone info, otherwise, add it
731 731 if isinstance(datetime_iso, datetime) and not datetime_iso.tzinfo:
732 732 if time_is_local:
733 733 tzinfo = time.strftime("+%H:%M",
734 734 time.gmtime(
735 735 (datetime.now() - datetime.utcnow()).seconds + 1
736 736 )
737 737 )
738 738
739 739 return literal(
740 740 '<time class="timeago tooltip" '
741 741 'title="{1}{2}" datetime="{0}{2}">{1}</time>'.format(
742 742 datetime_iso, title, tzinfo))
743 743
744 744
745 745 def _shorten_commit_id(commit_id):
746 746 from rhodecode import CONFIG
747 747 def_len = safe_int(CONFIG.get('rhodecode_show_sha_length', 12))
748 748 return commit_id[:def_len]
749 749
750 750
751 751 def show_id(commit):
752 752 """
753 753 Configurable function that shows the commit ID;
754 754 by default it's r123:fffeeefffeee
755 755
756 756 :param commit: commit instance
757 757 """
758 758 from rhodecode import CONFIG
759 759 show_idx = str2bool(CONFIG.get('rhodecode_show_revision_number', True))
760 760
761 761 raw_id = _shorten_commit_id(commit.raw_id)
762 762 if show_idx:
763 763 return 'r%s:%s' % (commit.idx, raw_id)
764 764 else:
765 765 return '%s' % (raw_id, )
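
# Rough sketch of show_id() output, assuming the default settings
# (rhodecode_show_revision_number=True, rhodecode_show_sha_length=12);
# AttributeDict stands in for a real commit object here.
#
#   >>> show_id(AttributeDict(idx=123, raw_id='fffeeefffeee' * 4))
#   'r123:fffeeefffeee'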
766 766
767 767
768 768 def format_date(date):
769 769 """
770 770 use a standardized formatting for dates used in RhodeCode
771 771
772 772 :param date: date/datetime object
773 773 :return: formatted date
774 774 """
775 775
776 776 if date:
777 777 _fmt = "%a, %d %b %Y %H:%M:%S"
778 778 return safe_unicode(date.strftime(_fmt))
779 779
780 780 return u""
781 781
782 782
783 783 class _RepoChecker(object):
784 784
785 785 def __init__(self, backend_alias):
786 786 self._backend_alias = backend_alias
787 787
788 788 def __call__(self, repository):
789 789 if hasattr(repository, 'alias'):
790 790 _type = repository.alias
791 791 elif hasattr(repository, 'repo_type'):
792 792 _type = repository.repo_type
793 793 else:
794 794 _type = repository
795 795 return _type == self._backend_alias
796 796
797 797 is_git = _RepoChecker('git')
798 798 is_hg = _RepoChecker('hg')
799 799 is_svn = _RepoChecker('svn')
800 800
801 801
802 802 def get_repo_type_by_name(repo_name):
803 803 repo = Repository.get_by_repo_name(repo_name)
804 804 return repo.repo_type
805 805
806 806
807 807 def is_svn_without_proxy(repository):
808 808 if is_svn(repository):
809 809 from rhodecode.model.settings import VcsSettingsModel
810 810 conf = VcsSettingsModel().get_ui_settings_as_config_obj()
811 811 return not str2bool(conf.get('vcs_svn_proxy', 'http_requests_enabled'))
812 812 return False
813 813
814 814
815 815 def discover_user(author):
816 816 """
817 817 Tries to discover a RhodeCode User based on the author string. The author string
818 818 is typically `FirstName LastName <email@address.com>`
819 819 """
820 820
821 821 # if author is already an instance use it for extraction
822 822 if isinstance(author, User):
823 823 return author
824 824
825 825 # Valid email in the attribute passed, see if they're in the system
826 826 _email = author_email(author)
827 827 if _email != '':
828 828 user = User.get_by_email(_email, case_insensitive=True, cache=True)
829 829 if user is not None:
830 830 return user
831 831
832 832 # Maybe it's a username, we try to extract it and fetch by username ?
833 833 _author = author_name(author)
834 834 user = User.get_by_username(_author, case_insensitive=True, cache=True)
835 835 if user is not None:
836 836 return user
837 837
838 838 return None
839 839
840 840
841 841 def email_or_none(author):
842 842 # extract email from the commit string
843 843 _email = author_email(author)
844 844
845 845 # If we have an email, use it, otherwise
846 846 # see if it contains a username we can get an email from
847 847 if _email != '':
848 848 return _email
849 849 else:
850 850 user = User.get_by_username(
851 851 author_name(author), case_insensitive=True, cache=True)
852 852
853 853 if user is not None:
854 854 return user.email
855 855
856 856 # No valid email, not a valid user in the system, none!
857 857 return None
858 858
859 859
860 860 def link_to_user(author, length=0, **kwargs):
861 861 user = discover_user(author)
862 862 # user can be None, but if we have it already it means we can re-use it
863 863 # in the person() function, so we save 1 intensive-query
864 864 if user:
865 865 author = user
866 866
867 867 display_person = person(author, 'username_or_name_or_email')
868 868 if length:
869 869 display_person = shorter(display_person, length)
870 870
871 871 if user:
872 872 return link_to(
873 873 escape(display_person),
874 874 route_path('user_profile', username=user.username),
875 875 **kwargs)
876 876 else:
877 877 return escape(display_person)
878 878
879 879
880 880 def person(author, show_attr="username_and_name"):
881 881 user = discover_user(author)
882 882 if user:
883 883 return getattr(user, show_attr)
884 884 else:
885 885 _author = author_name(author)
886 886 _email = email(author)
887 887 return _author or _email
888 888
889 889
890 890 def author_string(email):
891 891 if email:
892 892 user = User.get_by_email(email, case_insensitive=True, cache=True)
893 893 if user:
894 894 if user.firstname or user.lastname:
895 895 return '%s %s &lt;%s&gt;' % (user.firstname, user.lastname, email)
896 896 else:
897 897 return email
898 898 else:
899 899 return email
900 900 else:
901 901 return None
902 902
903 903
904 904 def person_by_id(id_, show_attr="username_and_name"):
905 905 # attr to return from fetched user
906 906 person_getter = lambda usr: getattr(usr, show_attr)
907 907
908 908 #maybe it's an ID ?
909 909 if str(id_).isdigit() or isinstance(id_, int):
910 910 id_ = int(id_)
911 911 user = User.get(id_)
912 912 if user is not None:
913 913 return person_getter(user)
914 914 return id_
915 915
916 916
917 917 def gravatar_with_user(author, show_disabled=False):
918 918 from rhodecode.lib.utils import PartialRenderer
919 919 _render = PartialRenderer('base/base.mako')
920 920 return _render('gravatar_with_user', author, show_disabled=show_disabled)
921 921
922 922
923 923 def desc_stylize(value):
924 924 """
925 925 converts tags from value into html equivalent
926 926
927 927 :param value:
928 928 """
929 929 if not value:
930 930 return ''
931 931
932 932 value = re.sub(r'\[see\ \=\>\ *([a-zA-Z0-9\/\=\?\&\ \:\/\.\-]*)\]',
933 933 '<div class="metatag" tag="see">see =&gt; \\1 </div>', value)
934 934 value = re.sub(r'\[license\ \=\>\ *([a-zA-Z0-9\/\=\?\&\ \:\/\.\-]*)\]',
935 935 '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/\\1">\\1</a></div>', value)
936 936 value = re.sub(r'\[(requires|recommends|conflicts|base)\ \=\>\ *([a-zA-Z0-9\-\/]*)\]',
937 937 '<div class="metatag" tag="\\1">\\1 =&gt; <a href="/\\2">\\2</a></div>', value)
938 938 value = re.sub(r'\[(lang|language)\ \=\>\ *([a-zA-Z\-\/\#\+]*)\]',
939 939 '<div class="metatag" tag="lang">\\2</div>', value)
940 940 value = re.sub(r'\[([a-z]+)\]',
941 941 '<div class="metatag" tag="\\1">\\1</div>', value)
942 942
943 943 return value
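
# Illustrative sketch of the metatag markup desc_stylize() understands;
# the exact class names come from the regexes above.
assert desc_stylize('[stable]') == '<div class="metatag" tag="stable">stable</div>'
assert desc_stylize('[lang => python]') == '<div class="metatag" tag="lang">python</div>'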
944 944
945 945
946 946 def escaped_stylize(value):
947 947 """
948 948 converts tags from value into html equivalent, but escaping its value first
949 949 """
950 950 if not value:
951 951 return ''
952 952
953 953 # Using the default webhelpers escape method, but we have to force it to a
954 954 # plain unicode instead of a markup tag to be used in regex expressions
955 955 value = unicode(escape(safe_unicode(value)))
956 956
957 957 value = re.sub(r'\[see\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]',
958 958 '<div class="metatag" tag="see">see =&gt; \\1 </div>', value)
959 959 value = re.sub(r'\[license\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]',
960 960 '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/\\1">\\1</a></div>', value)
961 961 value = re.sub(r'\[(requires|recommends|conflicts|base)\ \=\&gt;\ *([a-zA-Z0-9\-\/]*)\]',
962 962 '<div class="metatag" tag="\\1">\\1 =&gt; <a href="/\\2">\\2</a></div>', value)
963 963 value = re.sub(r'\[(lang|language)\ \=\&gt;\ *([a-zA-Z\-\/\#\+]*)\]',
964 964 '<div class="metatag" tag="lang">\\2</div>', value)
965 965 value = re.sub(r'\[([a-z]+)\]',
966 966 '<div class="metatag" tag="\\1">\\1</div>', value)
967 967
968 968 return value
969 969
970 970
971 971 def bool2icon(value):
972 972 """
973 973 Returns the boolean value of a given value, represented as an html element
974 974 with classes that represent icons
975 975
976 976 :param value: given value to convert to html node
977 977 """
978 978
979 979 if value: # does bool conversion
980 980 return HTML.tag('i', class_="icon-true")
981 981 else: # not true as bool
982 982 return HTML.tag('i', class_="icon-false")
983 983
984 984
985 985 #==============================================================================
986 986 # PERMS
987 987 #==============================================================================
988 988 from rhodecode.lib.auth import HasPermissionAny, HasPermissionAll, \
989 989 HasRepoPermissionAny, HasRepoPermissionAll, HasRepoGroupPermissionAll, \
990 990 HasRepoGroupPermissionAny, HasRepoPermissionAnyApi, get_csrf_token, \
991 991 csrf_token_key
992 992
993 993
994 994 #==============================================================================
995 995 # GRAVATAR URL
996 996 #==============================================================================
997 997 class InitialsGravatar(object):
998 998 def __init__(self, email_address, first_name, last_name, size=30,
999 999 background=None, text_color='#fff'):
1000 1000 self.size = size
1001 1001 self.first_name = first_name
1002 1002 self.last_name = last_name
1003 1003 self.email_address = email_address
1004 1004 self.background = background or self.str2color(email_address)
1005 1005 self.text_color = text_color
1006 1006
1007 1007 def get_color_bank(self):
1008 1008 """
1009 1009 returns a predefined list of colors that gravatars can use.
1010 1010 Those are randomized distinct colors that guarantee readability and
1011 1011 uniqueness.
1012 1012
1013 1013 generated with: http://phrogz.net/css/distinct-colors.html
1014 1014 """
1015 1015 return [
1016 1016 '#bf3030', '#a67f53', '#00ff00', '#5989b3', '#392040', '#d90000',
1017 1017 '#402910', '#204020', '#79baf2', '#a700b3', '#bf6060', '#7f5320',
1018 1018 '#008000', '#003059', '#ee00ff', '#ff0000', '#8c4b00', '#007300',
1019 1019 '#005fb3', '#de73e6', '#ff4040', '#ffaa00', '#3df255', '#203140',
1020 1020 '#47004d', '#591616', '#664400', '#59b365', '#0d2133', '#83008c',
1021 1021 '#592d2d', '#bf9f60', '#73e682', '#1d3f73', '#73006b', '#402020',
1022 1022 '#b2862d', '#397341', '#597db3', '#e600d6', '#a60000', '#736039',
1023 1023 '#00b318', '#79aaf2', '#330d30', '#ff8080', '#403010', '#16591f',
1024 1024 '#002459', '#8c4688', '#e50000', '#ffbf40', '#00732e', '#102340',
1025 1025 '#bf60ac', '#8c4646', '#cc8800', '#00a642', '#1d3473', '#b32d98',
1026 1026 '#660e00', '#ffd580', '#80ffb2', '#7391e6', '#733967', '#d97b6c',
1027 1027 '#8c5e00', '#59b389', '#3967e6', '#590047', '#73281d', '#665200',
1028 1028 '#00e67a', '#2d50b3', '#8c2377', '#734139', '#b2982d', '#16593a',
1029 1029 '#001859', '#ff00aa', '#a65e53', '#ffcc00', '#0d3321', '#2d3959',
1030 1030 '#731d56', '#401610', '#4c3d00', '#468c6c', '#002ca6', '#d936a3',
1031 1031 '#d94c36', '#403920', '#36d9a3', '#0d1733', '#592d4a', '#993626',
1032 1032 '#cca300', '#00734d', '#46598c', '#8c005e', '#7f1100', '#8c7000',
1033 1033 '#00a66f', '#7382e6', '#b32d74', '#d9896c', '#ffe680', '#1d7362',
1034 1034 '#364cd9', '#73003d', '#d93a00', '#998a4d', '#59b3a1', '#5965b3',
1035 1035 '#e5007a', '#73341d', '#665f00', '#00b38f', '#0018b3', '#59163a',
1036 1036 '#b2502d', '#bfb960', '#00ffcc', '#23318c', '#a6537f', '#734939',
1037 1037 '#b2a700', '#104036', '#3d3df2', '#402031', '#e56739', '#736f39',
1038 1038 '#79f2ea', '#000059', '#401029', '#4c1400', '#ffee00', '#005953',
1039 1039 '#101040', '#990052', '#402820', '#403d10', '#00ffee', '#0000d9',
1040 1040 '#ff80c4', '#a66953', '#eeff00', '#00ccbe', '#8080ff', '#e673a1',
1041 1041 '#a62c00', '#474d00', '#1a3331', '#46468c', '#733950', '#662900',
1042 1042 '#858c23', '#238c85', '#0f0073', '#b20047', '#d9986c', '#becc00',
1043 1043 '#396f73', '#281d73', '#ff0066', '#ff6600', '#dee673', '#59adb3',
1044 1044 '#6559b3', '#590024', '#b2622d', '#98b32d', '#36ced9', '#332d59',
1045 1045 '#40001a', '#733f1d', '#526600', '#005359', '#242040', '#bf6079',
1046 1046 '#735039', '#cef23d', '#007780', '#5630bf', '#66001b', '#b24700',
1047 1047 '#acbf60', '#1d6273', '#25008c', '#731d34', '#a67453', '#50592d',
1048 1048 '#00ccff', '#6600ff', '#ff0044', '#4c1f00', '#8a994d', '#79daf2',
1049 1049 '#a173e6', '#d93662', '#402310', '#aaff00', '#2d98b3', '#8c40ff',
1050 1050 '#592d39', '#ff8c40', '#354020', '#103640', '#1a0040', '#331a20',
1051 1051 '#331400', '#334d00', '#1d5673', '#583973', '#7f0022', '#4c3626',
1052 1052 '#88cc00', '#36a3d9', '#3d0073', '#d9364c', '#33241a', '#698c23',
1053 1053 '#5995b3', '#300059', '#e57382', '#7f3300', '#366600', '#00aaff',
1054 1054 '#3a1659', '#733941', '#663600', '#74b32d', '#003c59', '#7f53a6',
1055 1055 '#73000f', '#ff8800', '#baf279', '#79caf2', '#291040', '#a6293a',
1056 1056 '#b2742d', '#587339', '#0077b3', '#632699', '#400009', '#d9a66c',
1057 1057 '#294010', '#2d4a59', '#aa00ff', '#4c131b', '#b25f00', '#5ce600',
1058 1058 '#267399', '#a336d9', '#990014', '#664e33', '#86bf60', '#0088ff',
1059 1059 '#7700b3', '#593a16', '#073300', '#1d4b73', '#ac60bf', '#e59539',
1060 1060 '#4f8c46', '#368dd9', '#5c0073'
1061 1061 ]
1062 1062
1063 1063 def rgb_to_hex_color(self, rgb_tuple):
1064 1064 """
1065 1065 Converts the passed rgb_tuple to a hex color.
1066 1066
1067 1067 :param rgb_tuple: tuple with 3 ints represents rgb color space
1068 1068 """
1069 1069 return '#' + ("".join(map(chr, rgb_tuple)).encode('hex'))
1070 1070
1071 1071 def email_to_int_list(self, email_str):
1072 1072 """
1073 1073 Get every byte of the hex digest value of the email and turn it into an integer.
1074 1074 It's always going to be between 0-255.
1075 1075 """
1076 1076 digest = md5_safe(email_str.lower())
1077 1077 return [int(digest[i * 2:i * 2 + 2], 16) for i in range(16)]
1078 1078
1079 1079 def pick_color_bank_index(self, email_str, color_bank):
1080 1080 return self.email_to_int_list(email_str)[0] % len(color_bank)
1081 1081
1082 1082 def str2color(self, email_str):
1083 1083 """
1084 1084 Tries to map an email to a color using a stable algorithm
1085 1085
1086 1086 :param email_str:
1087 1087 """
1088 1088 color_bank = self.get_color_bank()
1089 1089 # pick position (modulo its length so we always find it in the
1090 1090 # bank even if it's smaller than 256 values)
1091 1091 pos = self.pick_color_bank_index(email_str, color_bank)
1092 1092 return color_bank[pos]
1093 1093
1094 1094 def normalize_email(self, email_address):
1095 1095 import unicodedata
1096 1096 # default host used to fill in the fake/missing email
1097 1097 default_host = u'localhost'
1098 1098
1099 1099 if not email_address:
1100 1100 email_address = u'%s@%s' % (User.DEFAULT_USER, default_host)
1101 1101
1102 1102 email_address = safe_unicode(email_address)
1103 1103
1104 1104 if u'@' not in email_address:
1105 1105 email_address = u'%s@%s' % (email_address, default_host)
1106 1106
1107 1107 if email_address.endswith(u'@'):
1108 1108 email_address = u'%s%s' % (email_address, default_host)
1109 1109
1110 1110 email_address = unicodedata.normalize('NFKD', email_address)\
1111 1111 .encode('ascii', 'ignore')
1112 1112 return email_address
1113 1113
1114 1114 def get_initials(self):
1115 1115 """
1116 1116 Returns 2 letter initials calculated based on the input.
1117 1117 The algorithm picks the first given email address, takes the first letter
1118 1118 of the part before @, and then the first letter of the server name. In case
1119 1119 the part before @ is in the format `somestring.somestring2`, it replaces
1120 1120 the server letter with the first letter of somestring2.
1121 1121
1122 1122 In case the function was initialized with both first and last name, this
1123 1123 overrides the extraction from email with the first letters of the first and
1124 1124 last name. We add special logic to that functionality: in case the full name
1125 1125 is compound, like Guido Von Rossum, we use the last part of the last name
1126 1126 (Von Rossum), picking `R`.
1127 1127
1128 1128 The function also normalizes non-ascii characters to their ascii
1129 1129 representation, eg. Ą => A
1130 1130 """
1131 1131 import unicodedata
1132 1132 # replace non-ascii to ascii
1133 1133 first_name = unicodedata.normalize(
1134 1134 'NFKD', safe_unicode(self.first_name)).encode('ascii', 'ignore')
1135 1135 last_name = unicodedata.normalize(
1136 1136 'NFKD', safe_unicode(self.last_name)).encode('ascii', 'ignore')
1137 1137
1138 1138 # do NFKD encoding, and also make sure email has proper format
1139 1139 email_address = self.normalize_email(self.email_address)
1140 1140
1141 1141 # first push the email initials
1142 1142 prefix, server = email_address.split('@', 1)
1143 1143
1144 1144 # check if prefix is maybe a 'firstname.lastname' syntax
1145 1145 _dot_split = prefix.rsplit('.', 1)
1146 1146 if len(_dot_split) == 2:
1147 1147 initials = [_dot_split[0][0], _dot_split[1][0]]
1148 1148 else:
1149 1149 initials = [prefix[0], server[0]]
1150 1150
1151 1151 # then try to replace either firstname or lastname
1152 1152 fn_letter = (first_name or " ")[0].strip()
1153 1153 ln_letter = (last_name.split(' ', 1)[-1] or " ")[0].strip()
1154 1154
1155 1155 if fn_letter:
1156 1156 initials[0] = fn_letter
1157 1157
1158 1158 if ln_letter:
1159 1159 initials[1] = ln_letter
1160 1160
1161 1161 return ''.join(initials).upper()
1162 1162
1163 1163 def get_img_data_by_type(self, font_family, img_type):
1164 1164 default_user = """
1165 1165 <svg xmlns="http://www.w3.org/2000/svg"
1166 1166 version="1.1" x="0px" y="0px" width="{size}" height="{size}"
1167 1167 viewBox="-15 -10 439.165 429.164"
1168 1168
1169 1169 xml:space="preserve"
1170 1170 style="background:{background};" >
1171 1171
1172 1172 <path d="M204.583,216.671c50.664,0,91.74-48.075,
1173 1173 91.74-107.378c0-82.237-41.074-107.377-91.74-107.377
1174 1174 c-50.668,0-91.74,25.14-91.74,107.377C112.844,
1175 1175 168.596,153.916,216.671,
1176 1176 204.583,216.671z" fill="{text_color}"/>
1177 1177 <path d="M407.164,374.717L360.88,
1178 1178 270.454c-2.117-4.771-5.836-8.728-10.465-11.138l-71.83-37.392
1179 1179 c-1.584-0.823-3.502-0.663-4.926,0.415c-20.316,
1180 1180 15.366-44.203,23.488-69.076,23.488c-24.877,
1181 1181 0-48.762-8.122-69.078-23.488
1182 1182 c-1.428-1.078-3.346-1.238-4.93-0.415L58.75,
1183 1183 259.316c-4.631,2.41-8.346,6.365-10.465,11.138L2.001,374.717
1184 1184 c-3.191,7.188-2.537,15.412,1.75,22.005c4.285,
1185 1185 6.592,11.537,10.526,19.4,10.526h362.861c7.863,0,15.117-3.936,
1186 1186 19.402-10.527 C409.699,390.129,
1187 1187 410.355,381.902,407.164,374.717z" fill="{text_color}"/>
1188 1188 </svg>""".format(
1189 1189 size=self.size,
1190 1190 background='#979797', # @grey4
1191 1191 text_color=self.text_color,
1192 1192 font_family=font_family)
1193 1193
1194 1194 return {
1195 1195 "default_user": default_user
1196 1196 }[img_type]
1197 1197
1198 1198 def get_img_data(self, svg_type=None):
1199 1199 """
1200 1200 generates the svg metadata for image
1201 1201 """
1202 1202
1203 1203 font_family = ','.join([
1204 1204 'proximanovaregular',
1205 1205 'Proxima Nova Regular',
1206 1206 'Proxima Nova',
1207 1207 'Arial',
1208 1208 'Lucida Grande',
1209 1209 'sans-serif'
1210 1210 ])
1211 1211 if svg_type:
1212 1212 return self.get_img_data_by_type(font_family, svg_type)
1213 1213
1214 1214 initials = self.get_initials()
1215 1215 img_data = """
1216 1216 <svg xmlns="http://www.w3.org/2000/svg" pointer-events="none"
1217 1217 width="{size}" height="{size}"
1218 1218 style="width: 100%; height: 100%; background-color: {background}"
1219 1219 viewBox="0 0 {size} {size}">
1220 1220 <text text-anchor="middle" y="50%" x="50%" dy="0.35em"
1221 1221 pointer-events="auto" fill="{text_color}"
1222 1222 font-family="{font_family}"
1223 1223 style="font-weight: 400; font-size: {f_size}px;">{text}
1224 1224 </text>
1225 1225 </svg>""".format(
1226 1226 size=self.size,
1227 1227 f_size=self.size/1.85, # scale the text inside the box nicely
1228 1228 background=self.background,
1229 1229 text_color=self.text_color,
1230 1230 text=initials.upper(),
1231 1231 font_family=font_family)
1232 1232
1233 1233 return img_data
1234 1234
1235 1235 def generate_svg(self, svg_type=None):
1236 1236 img_data = self.get_img_data(svg_type)
1237 1237 return "data:image/svg+xml;base64,%s" % img_data.encode('base64')
1238 1238
1239 1239
1240 1240 def initials_gravatar(email_address, first_name, last_name, size=30):
1241 1241 svg_type = None
1242 1242 if email_address == User.DEFAULT_USER_EMAIL:
1243 1243 svg_type = 'default_user'
1244 1244 klass = InitialsGravatar(email_address, first_name, last_name, size)
1245 1245 return klass.generate_svg(svg_type=svg_type)
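
# A short sketch of the initials logic described in get_initials() above;
# the email addresses are just made-up examples.
assert InitialsGravatar('john.doe@example.com', '', '').get_initials() == 'JD'
assert InitialsGravatar('admin@server.com', 'Guido', 'Von Rossum').get_initials() == 'GR'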
1246 1246
1247 1247
1248 1248 def gravatar_url(email_address, size=30):
1249 1249 # doh, we need to re-import those to mock it later
1250 1250 from pylons import tmpl_context as c
1251 1251
1252 1252 _use_gravatar = c.visual.use_gravatar
1253 1253 _gravatar_url = c.visual.gravatar_url or User.DEFAULT_GRAVATAR_URL
1254 1254
1255 1255 email_address = email_address or User.DEFAULT_USER_EMAIL
1256 1256 if isinstance(email_address, unicode):
1257 1257 # hashlib crashes on unicode items
1258 1258 email_address = safe_str(email_address)
1259 1259
1260 1260 # empty email or default user
1261 1261 if not email_address or email_address == User.DEFAULT_USER_EMAIL:
1262 1262 return initials_gravatar(User.DEFAULT_USER_EMAIL, '', '', size=size)
1263 1263
1264 1264 if _use_gravatar:
1265 1265 # TODO: Disuse pyramid thread locals. Think about another solution to
1266 1266 # get the host and schema here.
1267 1267 request = get_current_request()
1268 1268 tmpl = safe_str(_gravatar_url)
1269 1269 tmpl = tmpl.replace('{email}', email_address)\
1270 1270 .replace('{md5email}', md5_safe(email_address.lower())) \
1271 1271 .replace('{netloc}', request.host)\
1272 1272 .replace('{scheme}', request.scheme)\
1273 1273 .replace('{size}', safe_str(size))
1274 1274 return tmpl
1275 1275 else:
1276 1276 return initials_gravatar(email_address, '', '', size=size)
1277 1277
1278 1278
1279 1279 class Page(_Page):
1280 1280 """
1281 1281 Custom pager to match rendering style with paginator
1282 1282 """
1283 1283
1284 1284 def _get_pos(self, cur_page, max_page, items):
1285 1285 edge = (items / 2) + 1
1286 1286 if (cur_page <= edge):
1287 1287 radius = max(items / 2, items - cur_page)
1288 1288 elif (max_page - cur_page) < edge:
1289 1289 radius = (items - 1) - (max_page - cur_page)
1290 1290 else:
1291 1291 radius = items / 2
1292 1292
1293 1293 left = max(1, (cur_page - (radius)))
1294 1294 right = min(max_page, cur_page + (radius))
1295 1295 return left, cur_page, right
1296 1296
1297 1297 def _range(self, regexp_match):
1298 1298 """
1299 1299 Return range of linked pages (e.g. '1 2 [3] 4 5 6 7 8').
1300 1300
1301 1301 Arguments:
1302 1302
1303 1303 regexp_match
1304 1304 A "re" (regular expressions) match object containing the
1305 1305 radius of linked pages around the current page in
1306 1306 regexp_match.group(1) as a string
1307 1307
1308 1308 This function is supposed to be called as a callable in
1309 1309 re.sub.
1310 1310
1311 1311 """
1312 1312 radius = int(regexp_match.group(1))
1313 1313
1314 1314 # Compute the first and last page number within the radius
1315 1315 # e.g. '1 .. 5 6 [7] 8 9 .. 12'
1316 1316 # -> leftmost_page = 5
1317 1317 # -> rightmost_page = 9
1318 1318 leftmost_page, _cur, rightmost_page = self._get_pos(self.page,
1319 1319 self.last_page,
1320 1320 (radius * 2) + 1)
1321 1321 nav_items = []
1322 1322
1323 1323 # Create a link to the first page (unless we are on the first page
1324 1324 # or there would be no need to insert '..' spacers)
1325 1325 if self.page != self.first_page and self.first_page < leftmost_page:
1326 1326 nav_items.append(self._pagerlink(self.first_page, self.first_page))
1327 1327
1328 1328 # Insert dots if there are pages between the first page
1329 1329 # and the currently displayed page range
1330 1330 if leftmost_page - self.first_page > 1:
1331 1331 # Wrap in a SPAN tag if nolink_attr is set
1332 1332 text = '..'
1333 1333 if self.dotdot_attr:
1334 1334 text = HTML.span(c=text, **self.dotdot_attr)
1335 1335 nav_items.append(text)
1336 1336
1337 1337 for thispage in xrange(leftmost_page, rightmost_page + 1):
1338 1338 # Highlight the current page number and do not use a link
1339 1339 if thispage == self.page:
1340 1340 text = '%s' % (thispage,)
1341 1341 # Wrap in a SPAN tag if nolink_attr is set
1342 1342 if self.curpage_attr:
1343 1343 text = HTML.span(c=text, **self.curpage_attr)
1344 1344 nav_items.append(text)
1345 1345 # Otherwise create just a link to that page
1346 1346 else:
1347 1347 text = '%s' % (thispage,)
1348 1348 nav_items.append(self._pagerlink(thispage, text))
1349 1349
1350 1350 # Insert dots if there are pages between the displayed
1351 1351 # page numbers and the end of the page range
1352 1352 if self.last_page - rightmost_page > 1:
1353 1353 text = '..'
1354 1354 # Wrap in a SPAN tag if nolink_attr is set
1355 1355 if self.dotdot_attr:
1356 1356 text = HTML.span(c=text, **self.dotdot_attr)
1357 1357 nav_items.append(text)
1358 1358
1359 1359 # Create a link to the very last page (unless we are on the last
1360 1360 # page or there would be no need to insert '..' spacers)
1361 1361 if self.page != self.last_page and rightmost_page < self.last_page:
1362 1362 nav_items.append(self._pagerlink(self.last_page, self.last_page))
1363 1363
1364 1364 ## prerender links
1365 1365 #_page_link = url.current()
1366 1366 #nav_items.append(literal('<link rel="prerender" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
1367 1367 #nav_items.append(literal('<link rel="prefetch" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
1368 1368 return self.separator.join(nav_items)
1369 1369
1370 1370 def pager(self, format='~2~', page_param='page', partial_param='partial',
1371 1371 show_if_single_page=False, separator=' ', onclick=None,
1372 1372 symbol_first='<<', symbol_last='>>',
1373 1373 symbol_previous='<', symbol_next='>',
1374 1374 link_attr={'class': 'pager_link', 'rel': 'prerender'},
1375 1375 curpage_attr={'class': 'pager_curpage'},
1376 1376 dotdot_attr={'class': 'pager_dotdot'}, **kwargs):
1377 1377
1378 1378 self.curpage_attr = curpage_attr
1379 1379 self.separator = separator
1380 1380 self.pager_kwargs = kwargs
1381 1381 self.page_param = page_param
1382 1382 self.partial_param = partial_param
1383 1383 self.onclick = onclick
1384 1384 self.link_attr = link_attr
1385 1385 self.dotdot_attr = dotdot_attr
1386 1386
1387 1387 # Don't show navigator if there is no more than one page
1388 1388 if self.page_count == 0 or (self.page_count == 1 and not show_if_single_page):
1389 1389 return ''
1390 1390
1391 1391 from string import Template
1392 1392 # Replace ~...~ in token format by range of pages
1393 1393 result = re.sub(r'~(\d+)~', self._range, format)
1394 1394
1395 1395 # Interpolate '%' variables
1396 1396 result = Template(result).safe_substitute({
1397 1397 'first_page': self.first_page,
1398 1398 'last_page': self.last_page,
1399 1399 'page': self.page,
1400 1400 'page_count': self.page_count,
1401 1401 'items_per_page': self.items_per_page,
1402 1402 'first_item': self.first_item,
1403 1403 'last_item': self.last_item,
1404 1404 'item_count': self.item_count,
1405 1405 'link_first': self.page > self.first_page and \
1406 1406 self._pagerlink(self.first_page, symbol_first) or '',
1407 1407 'link_last': self.page < self.last_page and \
1408 1408 self._pagerlink(self.last_page, symbol_last) or '',
1409 1409 'link_previous': self.previous_page and \
1410 1410 self._pagerlink(self.previous_page, symbol_previous) \
1411 1411 or HTML.span(symbol_previous, class_="pg-previous disabled"),
1412 1412 'link_next': self.next_page and \
1413 1413 self._pagerlink(self.next_page, symbol_next) \
1414 1414 or HTML.span(symbol_next, class_="pg-next disabled")
1415 1415 })
1416 1416
1417 1417 return literal(result)
1418 1418
1419 1419
1420 1420 #==============================================================================
1421 1421 # REPO PAGER, PAGER FOR REPOSITORY
1422 1422 #==============================================================================
1423 1423 class RepoPage(Page):
1424 1424
1425 1425 def __init__(self, collection, page=1, items_per_page=20,
1426 1426 item_count=None, url=None, **kwargs):
1427 1427
1428 1428 """Create a "RepoPage" instance, a special pager for paging a
1429 1429 repository
1430 1430 """
1431 1431 self._url_generator = url
1432 1432
1433 1433 # Save the kwargs class-wide so they can be used in the pager() method
1434 1434 self.kwargs = kwargs
1435 1435
1436 1436 # Save a reference to the collection
1437 1437 self.original_collection = collection
1438 1438
1439 1439 self.collection = collection
1440 1440
1441 1441 # The self.page is the number of the current page.
1442 1442 # The first page has the number 1!
1443 1443 try:
1444 1444 self.page = int(page) # make it int() if we get it as a string
1445 1445 except (ValueError, TypeError):
1446 1446 self.page = 1
1447 1447
1448 1448 self.items_per_page = items_per_page
1449 1449
1450 1450 # Unless the user tells us how many items the collections has
1451 1451 # we calculate that ourselves.
1452 1452 if item_count is not None:
1453 1453 self.item_count = item_count
1454 1454 else:
1455 1455 self.item_count = len(self.collection)
1456 1456
1457 1457 # Compute the number of the first and last available page
1458 1458 if self.item_count > 0:
1459 1459 self.first_page = 1
1460 1460 self.page_count = int(math.ceil(float(self.item_count) /
1461 1461 self.items_per_page))
1462 1462 self.last_page = self.first_page + self.page_count - 1
1463 1463
1464 1464 # Make sure that the requested page number is in the range of
1465 1465 # valid pages
1466 1466 if self.page > self.last_page:
1467 1467 self.page = self.last_page
1468 1468 elif self.page < self.first_page:
1469 1469 self.page = self.first_page
1470 1470
1471 1471 # Note: the number of items on this page can be less than
1472 1472 # items_per_page if the last page is not full
1473 1473 self.first_item = max(0, (self.item_count) - (self.page *
1474 1474 items_per_page))
1475 1475 self.last_item = ((self.item_count - 1) - items_per_page *
1476 1476 (self.page - 1))
1477 1477
1478 1478 self.items = list(self.collection[self.first_item:self.last_item + 1])
1479 1479
1480 1480 # Links to previous and next page
1481 1481 if self.page > self.first_page:
1482 1482 self.previous_page = self.page - 1
1483 1483 else:
1484 1484 self.previous_page = None
1485 1485
1486 1486 if self.page < self.last_page:
1487 1487 self.next_page = self.page + 1
1488 1488 else:
1489 1489 self.next_page = None
1490 1490
1491 1491 # No items available
1492 1492 else:
1493 1493 self.first_page = None
1494 1494 self.page_count = 0
1495 1495 self.last_page = None
1496 1496 self.first_item = None
1497 1497 self.last_item = None
1498 1498 self.previous_page = None
1499 1499 self.next_page = None
1500 1500 self.items = []
1501 1501
1502 1502 # This is a subclass of the 'list' type. Initialise the list now.
1503 1503 list.__init__(self, reversed(self.items))
1504 1504
1505 1505
1506 1506 def changed_tooltip(nodes):
1507 1507 """
1508 1508 Generates an html string for changed nodes on the commit page.
1509 1509 It limits the output to 30 entries
1510 1510
1511 1511 :param nodes: LazyNodesGenerator
1512 1512 """
1513 1513 if nodes:
1514 1514 pref = ': <br/> '
1515 1515 suf = ''
1516 1516 if len(nodes) > 30:
1517 1517 suf = '<br/>' + _(' and %s more') % (len(nodes) - 30)
1518 1518 return literal(pref + '<br/> '.join([safe_unicode(x.path)
1519 1519 for x in nodes[:30]]) + suf)
1520 1520 else:
1521 1521 return ': ' + _('No Files')
1522 1522
1523 1523
1524 1524 def breadcrumb_repo_link(repo):
1525 1525 """
1526 1526 Makes a breadcrumbs path link to repo
1527 1527
1528 1528 ex::
1529 1529 group >> subgroup >> repo
1530 1530
1531 1531 :param repo: a Repository instance
1532 1532 """
1533 1533
1534 1534 path = [
1535 1535 link_to(group.name, url('repo_group_home', group_name=group.group_name))
1536 1536 for group in repo.groups_with_parents
1537 1537 ] + [
1538 1538 link_to(repo.just_name, url('summary_home', repo_name=repo.repo_name))
1539 1539 ]
1540 1540
1541 1541 return literal(' &raquo; '.join(path))
1542 1542
1543 1543
1544 1544 def format_byte_size_binary(file_size):
1545 1545 """
1546 1546 Formats file/folder sizes to standard.
1547 1547 """
1548 1548 formatted_size = format_byte_size(file_size, binary=True)
1549 1549 return formatted_size
1550 1550
1551 1551
1552 1552 def urlify_text(text_, safe=True):
1553 1553 """
1554 1554 Extract urls from text and make html links out of them
1555 1555
1556 1556 :param text_:
1557 1557 """
1558 1558
1559 1559 url_pat = re.compile(r'''(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@#.&+]'''
1560 1560 '''|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)''')
1561 1561
1562 1562 def url_func(match_obj):
1563 1563 url_full = match_obj.groups()[0]
1564 1564 return '<a href="%(url)s">%(url)s</a>' % ({'url': url_full})
1565 1565 _newtext = url_pat.sub(url_func, text_)
1566 1566 if safe:
1567 1567 return literal(_newtext)
1568 1568 return _newtext
1569 1569
1570 1570
1571 1571 def urlify_commits(text_, repository):
1572 1572 """
1573 1573 Extract commit ids from text and make links from them
1574 1574
1575 1575 :param text_:
1576 1576 :param repository: repo name to build the URL with
1577 1577 """
1578 1578 from pylons import url # doh, we need to re-import url to mock it later
1579 1579 URL_PAT = re.compile(r'(^|\s)([0-9a-fA-F]{12,40})($|\s)')
1580 1580
1581 1581 def url_func(match_obj):
1582 1582 commit_id = match_obj.groups()[1]
1583 1583 pref = match_obj.groups()[0]
1584 1584 suf = match_obj.groups()[2]
1585 1585
1586 1586 tmpl = (
1587 1587 '%(pref)s<a class="%(cls)s" href="%(url)s">'
1588 1588 '%(commit_id)s</a>%(suf)s'
1589 1589 )
1590 1590 return tmpl % {
1591 1591 'pref': pref,
1592 1592 'cls': 'revision-link',
1593 1593 'url': url('changeset_home', repo_name=repository,
1594 1594 revision=commit_id, qualified=True),
1595 1595 'commit_id': commit_id,
1596 1596 'suf': suf
1597 1597 }
1598 1598
1599 1599 newtext = URL_PAT.sub(url_func, text_)
1600 1600
1601 1601 return newtext
1602 1602
1603 1603
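# A minimal sketch (editor's illustration, not part of this changeset) of the
# commit-id pattern used above: only bare 12-40 character hex strings bounded
# by whitespace or string edges are turned into changeset links.
import re

COMMIT_PAT = re.compile(r'(^|\s)([0-9a-fA-F]{12,40})($|\s)')

assert COMMIT_PAT.search('merged 68baee10e14f into default') is not None
assert COMMIT_PAT.search('release-1.2.3') is None  # too short / not hex
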
1604 1604 def _process_url_func(match_obj, repo_name, uid, entry,
1605 return_raw_data=False):
1605 return_raw_data=False, link_format='html'):
1606 1606 pref = ''
1607 1607 if match_obj.group().startswith(' '):
1608 1608 pref = ' '
1609 1609
1610 1610 issue_id = ''.join(match_obj.groups())
1611 tmpl = (
1612 '%(pref)s<a class="%(cls)s" href="%(url)s">'
1613 '%(issue-prefix)s%(id-repr)s'
1614 '</a>')
1611
1612 if link_format == 'html':
1613 tmpl = (
1614 '%(pref)s<a class="%(cls)s" href="%(url)s">'
1615 '%(issue-prefix)s%(id-repr)s'
1616 '</a>')
1617 elif link_format == 'rst':
1618 tmpl = '`%(issue-prefix)s%(id-repr)s <%(url)s>`_'
1619 elif link_format == 'markdown':
1620 tmpl = '[%(issue-prefix)s%(id-repr)s](%(url)s)'
1621 else:
1622 raise ValueError('Bad link_format:{}'.format(link_format))
1615 1623
1616 1624 (repo_name_cleaned,
1617 1625 parent_group_name) = RepoGroupModel().\
1618 1626 _get_group_name_and_parent(repo_name)
1619 1627
1620 1628 # variables replacement
1621 1629 named_vars = {
1622 1630 'id': issue_id,
1623 1631 'repo': repo_name,
1624 1632 'repo_name': repo_name_cleaned,
1625 1633 'group_name': parent_group_name
1626 1634 }
1627 1635 # named regex variables
1628 1636 named_vars.update(match_obj.groupdict())
1629 1637 _url = string.Template(entry['url']).safe_substitute(**named_vars)
1630 1638
1631 1639 data = {
1632 1640 'pref': pref,
1633 1641 'cls': 'issue-tracker-link',
1634 1642 'url': _url,
1635 1643 'id-repr': issue_id,
1636 1644 'issue-prefix': entry['pref'],
1637 1645 'serv': entry['url'],
1638 1646 }
1639 1647 if return_raw_data:
1640 1648 return {
1641 1649 'id': issue_id,
1642 1650 'url': _url
1643 1651 }
1644 1652 return tmpl % data
1645 1653
1646 1654
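# A minimal sketch (editor's illustration, not part of this changeset) of how
# the three link_format templates above render one matched issue id; the url
# and prefix values are taken from the parametrized tests further down.
issue = {'pref': '', 'cls': 'issue-tracker-link',
         'url': 'http://r.io/repo/i/42', 'id-repr': '42', 'issue-prefix': '#'}

html_tmpl = ('%(pref)s<a class="%(cls)s" href="%(url)s">'
             '%(issue-prefix)s%(id-repr)s</a>')
rst_tmpl = '`%(issue-prefix)s%(id-repr)s <%(url)s>`_'
markdown_tmpl = '[%(issue-prefix)s%(id-repr)s](%(url)s)'

assert html_tmpl % issue == ('<a class="issue-tracker-link" '
                             'href="http://r.io/repo/i/42">#42</a>')
assert rst_tmpl % issue == '`#42 <http://r.io/repo/i/42>`_'
assert markdown_tmpl % issue == '[#42](http://r.io/repo/i/42)'
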
1647 def process_patterns(text_string, repo_name, config=None):
1655 def process_patterns(text_string, repo_name, link_format='html'):
1656 allowed_formats = ['html', 'rst', 'markdown']
1657 if link_format not in allowed_formats:
1658 raise ValueError('Link format can be only one of:{} got {}'.format(
1659 allowed_formats, link_format))
1660
1648 1661 repo = None
1649 1662 if repo_name:
1650 1663 # Retrieving repo_name to avoid invalid repo_name to explode on
1651 1664 # IssueTrackerSettingsModel but still passing invalid name further down
1652 1665 repo = Repository.get_by_repo_name(repo_name, cache=True)
1653 1666
1654 1667 settings_model = IssueTrackerSettingsModel(repo=repo)
1655 1668 active_entries = settings_model.get_settings(cache=True)
1656 1669
1657 1670 issues_data = []
1658 1671 newtext = text_string
1672
1659 1673 for uid, entry in active_entries.items():
1660 1674 log.debug('found issue tracker entry with uid %s' % (uid,))
1661 1675
1662 1676 if not (entry['pat'] and entry['url']):
1663 1677 log.debug('skipping due to missing data')
1664 1678 continue
1665 1679
1666 1680 log.debug('issue tracker entry: uid: `%s` PAT:%s URL:%s PREFIX:%s'
1667 1681 % (uid, entry['pat'], entry['url'], entry['pref']))
1668 1682
1669 1683 try:
1670 1684 pattern = re.compile(r'%s' % entry['pat'])
1671 1685 except re.error:
1672 1686 log.exception(
1673 1687 'issue tracker pattern: `%s` failed to compile',
1674 1688 entry['pat'])
1675 1689 continue
1676 1690
1677 1691 data_func = partial(
1678 1692 _process_url_func, repo_name=repo_name, entry=entry, uid=uid,
1679 1693 return_raw_data=True)
1680 1694
1681 1695 for match_obj in pattern.finditer(text_string):
1682 1696 issues_data.append(data_func(match_obj))
1683 1697
1684 1698 url_func = partial(
1685 _process_url_func, repo_name=repo_name, entry=entry, uid=uid)
1699 _process_url_func, repo_name=repo_name, entry=entry, uid=uid,
1700 link_format=link_format)
1686 1701
1687 1702 newtext = pattern.sub(url_func, newtext)
1688 1703 log.debug('processed prefix:uid `%s`' % (uid,))
1689 1704
1690 1705 return newtext, issues_data
1691 1706
1692 1707
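# A minimal sketch (editor's illustration, not part of this changeset) of
# calling process_patterns() with the new link_format argument. It assumes an
# active issue-tracker entry of pat='(?:#)(?P<issue_id>\d+)',
# url='http://r.io/${repo}/i/${issue_id}' and pref='#', as configured in the
# tests further down.
#
#   text, issues = process_patterns('Fix #42', 'some-repo',
#                                   link_format='markdown')
#   # text   == 'Fix [#42](http://r.io/some-repo/i/42)'
#   # issues == [{'id': '42', 'url': 'http://r.io/some-repo/i/42'}]
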
1693 1708 def urlify_commit_message(commit_text, repository=None):
1694 1709 """
1695 1710 Parses given text message and makes proper links.
1696 1711 issues are linked to the configured issue tracker, and the rest become commit links
1697 1712
1698 1713 :param commit_text:
1699 1714 :param repository:
1700 1715 """
1701 1716 from pylons import url # doh, we need to re-import url to mock it later
1702 1717
1703 1718 def escaper(string):
1704 1719 return string.replace('<', '&lt;').replace('>', '&gt;')
1705 1720
1706 1721 newtext = escaper(commit_text)
1707 1722
1708 1723 # extract http/https links and make them real urls
1709 1724 newtext = urlify_text(newtext, safe=False)
1710 1725
1711 1726 # urlify commits - extract commit ids and make link out of them, if we have
1712 1727 # the scope of repository present.
1713 1728 if repository:
1714 1729 newtext = urlify_commits(newtext, repository)
1715 1730
1716 1731 # process issue tracker patterns
1717 1732 newtext, issues = process_patterns(newtext, repository or '')
1718 1733
1719 1734 return literal(newtext)
1720 1735
1721 1736
1722 1737 def render_binary(repo_name, file_obj):
1723 1738 """
1724 1739 Choose how to render a binary file
1725 1740 """
1726 1741 filename = file_obj.name
1727 1742
1728 1743 # images
1729 1744 for ext in ['*.png', '*.jpg', '*.ico', '*.gif']:
1730 1745 if fnmatch.fnmatch(filename, pat=ext):
1731 1746 alt = filename
1732 1747 src = url('files_raw_home', repo_name=repo_name,
1733 1748 revision=file_obj.commit.raw_id, f_path=file_obj.path)
1734 1749 return literal('<img class="rendered-binary" alt="{}" src="{}">'.format(alt, src))
1735 1750
1736 1751
1737 1752 def renderer_from_filename(filename, exclude=None):
1738 1753 """
1739 1754 Choose a renderer based on filename; this works only for text-based files.
1740 1755 """
1741 1756
1742 1757 # ipython
1743 1758 for ext in ['*.ipynb']:
1744 1759 if fnmatch.fnmatch(filename, pat=ext):
1745 1760 return 'jupyter'
1746 1761
1747 1762 is_markup = MarkupRenderer.renderer_from_filename(filename, exclude=exclude)
1748 1763 if is_markup:
1749 1764 return is_markup
1750 1765 return None
1751 1766
1752 1767
1753 def render(source, renderer='rst', mentions=False, relative_url=None):
1768 def render(source, renderer='rst', mentions=False, relative_url=None,
1769 repo_name=None):
1754 1770
1755 1771 def maybe_convert_relative_links(html_source):
1756 1772 if relative_url:
1757 1773 return relative_links(html_source, relative_url)
1758 1774 return html_source
1759 1775
1760 1776 if renderer == 'rst':
1777 if repo_name:
1778 # process patterns on comments if we pass in repo name
1779 source, issues = process_patterns(
1780 source, repo_name, link_format='rst')
1781
1761 1782 return literal(
1762 1783 '<div class="rst-block">%s</div>' %
1763 1784 maybe_convert_relative_links(
1764 1785 MarkupRenderer.rst(source, mentions=mentions)))
1765 1786 elif renderer == 'markdown':
1787 if repo_name:
1788 # process patterns on comments if we pass in repo name
1789 source, issues = process_patterns(
1790 source, repo_name, link_format='markdown')
1791
1766 1792 return literal(
1767 1793 '<div class="markdown-block">%s</div>' %
1768 1794 maybe_convert_relative_links(
1769 1795 MarkupRenderer.markdown(source, flavored=True,
1770 1796 mentions=mentions)))
1771 1797 elif renderer == 'jupyter':
1772 1798 return literal(
1773 1799 '<div class="ipynb">%s</div>' %
1774 1800 maybe_convert_relative_links(
1775 1801 MarkupRenderer.jupyter(source)))
1776 1802
1777 1803 # None means just show the file-source
1778 1804 return None
1779 1805
1780 1806
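# A minimal sketch (editor's illustration, not part of this changeset): when a
# repo_name is passed, render() expands issue-tracker patterns *before* the
# markup renderer runs, using the markup-native link format so the links
# survive rendering.
#
#   render('Fix #42', renderer='markdown', repo_name='some-repo')
#   # the source handed to MarkupRenderer.markdown() becomes
#   # 'Fix [#42](http://r.io/some-repo/i/42)', assuming a tracker entry that
#   # maps '#<id>' to http://r.io/${repo}/i/${issue_id}
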
1781 1807 def commit_status(repo, commit_id):
1782 1808 return ChangesetStatusModel().get_status(repo, commit_id)
1783 1809
1784 1810
1785 1811 def commit_status_lbl(commit_status):
1786 1812 return dict(ChangesetStatus.STATUSES).get(commit_status)
1787 1813
1788 1814
1789 1815 def commit_time(repo_name, commit_id):
1790 1816 repo = Repository.get_by_repo_name(repo_name)
1791 1817 commit = repo.get_commit(commit_id=commit_id)
1792 1818 return commit.date
1793 1819
1794 1820
1795 1821 def get_permission_name(key):
1796 1822 return dict(Permission.PERMS).get(key)
1797 1823
1798 1824
1799 1825 def journal_filter_help():
1800 1826 return _(
1801 1827 'Example filter terms:\n' +
1802 1828 ' repository:vcs\n' +
1803 1829 ' username:marcin\n' +
1804 1830 ' action:*push*\n' +
1805 1831 ' ip:127.0.0.1\n' +
1806 1832 ' date:20120101\n' +
1807 1833 ' date:[20120101100000 TO 20120102]\n' +
1808 1834 '\n' +
1809 1835 'Generate wildcards using \'*\' character:\n' +
1810 1836 ' "repository:vcs*" - search everything starting with \'vcs\'\n' +
1811 1837 ' "repository:*vcs*" - search for repository containing \'vcs\'\n' +
1812 1838 '\n' +
1813 1839 'Optional AND / OR operators in queries\n' +
1814 1840 ' "repository:vcs OR repository:test"\n' +
1815 1841 ' "username:test AND repository:test*"\n'
1816 1842 )
1817 1843
1818 1844
1819 1845 def not_mapped_error(repo_name):
1820 1846 flash(_('%s repository is not mapped to db; perhaps'
1821 1847 ' it was created or renamed from the filesystem.'
1822 1848 ' Please run the application again'
1823 1849 ' in order to rescan repositories') % repo_name, category='error')
1824 1850
1825 1851
1826 1852 def ip_range(ip_addr):
1827 1853 from rhodecode.model.db import UserIpMap
1828 1854 s, e = UserIpMap._get_ip_range(ip_addr)
1829 1855 return '%s - %s' % (s, e)
1830 1856
1831 1857
1832 1858 def form(url, method='post', needs_csrf_token=True, **attrs):
1833 1859 """Wrapper around webhelpers.tags.form to prevent CSRF attacks."""
1834 1860 if method.lower() != 'get' and needs_csrf_token:
1835 1861 raise Exception(
1836 1862 'Forms to POST/PUT/DELETE endpoints should have (in general) a ' +
1837 1863 'CSRF token. If the endpoint does not require such token you can ' +
1838 1864 'explicitly set the parameter needs_csrf_token to false.')
1839 1865
1840 1866 return wh_form(url, method=method, **attrs)
1841 1867
1842 1868
1843 1869 def secure_form(url, method="POST", multipart=False, **attrs):
1844 1870 """Start a form tag that points the action to an url. This
1845 1871 form tag will also include the hidden field containing
1846 1872 the auth token.
1847 1873
1848 1874 The url options should be given either as a string, or as a
1849 1875 ``url()`` function. The method for the form defaults to POST.
1850 1876
1851 1877 Options:
1852 1878
1853 1879 ``multipart``
1854 1880 If set to True, the enctype is set to "multipart/form-data".
1855 1881 ``method``
1856 1882 The method to use when submitting the form, usually either
1857 1883 "GET" or "POST". If "PUT", "DELETE", or another verb is used, a
1858 1884 hidden input with name _method is added to simulate the verb
1859 1885 over POST.
1860 1886
1861 1887 """
1862 1888 from webhelpers.pylonslib.secure_form import insecure_form
1863 1889 form = insecure_form(url, method, multipart, **attrs)
1864 1890 token = csrf_input()
1865 1891 return literal("%s\n%s" % (form, token))
1866 1892
1867 1893 def csrf_input():
1868 1894 return literal(
1869 1895 '<input type="hidden" id="{}" name="{}" value="{}">'.format(
1870 1896 csrf_token_key, csrf_token_key, get_csrf_token()))
1871 1897
1872 1898 def dropdownmenu(name, selected, options, enable_filter=False, **attrs):
1873 1899 select_html = select(name, selected, options, **attrs)
1874 1900 select2 = """
1875 1901 <script>
1876 1902 $(document).ready(function() {
1877 1903 $('#%s').select2({
1878 1904 containerCssClass: 'drop-menu',
1879 1905 dropdownCssClass: 'drop-menu-dropdown',
1880 1906 dropdownAutoWidth: true%s
1881 1907 });
1882 1908 });
1883 1909 </script>
1884 1910 """
1885 1911 filter_option = """,
1886 1912 minimumResultsForSearch: -1
1887 1913 """
1888 1914 input_id = attrs.get('id') or name
1889 1915 filter_enabled = "" if enable_filter else filter_option
1890 1916 select_script = literal(select2 % (input_id, filter_enabled))
1891 1917
1892 1918 return literal(select_html+select_script)
1893 1919
1894 1920
1895 1921 def get_visual_attr(tmpl_context_var, attr_name):
1896 1922 """
1897 1923 A safe way to get an attribute from the `visual` variable of the template context
1898 1924
1899 1925 :param tmpl_context_var: instance of tmpl_context, usually present as `c`
1900 1926 :param attr_name: name of the attribute we fetch from the c.visual
1901 1927 """
1902 1928 visual = getattr(tmpl_context_var, 'visual', None)
1903 1929 if not visual:
1904 1930 return
1905 1931 else:
1906 1932 return getattr(visual, attr_name, None)
1907 1933
1908 1934
1909 1935 def get_last_path_part(file_node):
1910 1936 if not file_node.path:
1911 1937 return u''
1912 1938
1913 1939 path = safe_unicode(file_node.path.split('/')[-1])
1914 1940 return u'../' + path
1915 1941
1916 1942
1917 1943 def route_url(*args, **kwds):
1918 1944 """
1919 1945 Wrapper around pyramid's `route_url` (fully qualified url) function.
1920 1946 It is used to generate URLs from within pylons views or templates.
1921 1947 This will be removed when the pyramid migration is finished.
1922 1948 """
1923 1949 req = get_current_request()
1924 1950 return req.route_url(*args, **kwds)
1925 1951
1926 1952
1927 1953 def route_path(*args, **kwds):
1928 1954 """
1929 1955 Wrapper around pyramids `route_path` function. It is used to generate
1930 1956 URLs from within pylons views or templates. This will be removed when
1931 1957 pyramid migration if finished.
1932 1958 """
1933 1959 req = get_current_request()
1934 1960 return req.route_path(*args, **kwds)
1935 1961
1936 1962
1937 1963 def route_path_or_none(*args, **kwargs):
1938 1964 try:
1939 1965 return route_path(*args, **kwargs)
1940 1966 except KeyError:
1941 1967 return None
1942 1968
1943 1969
1944 1970 def static_url(*args, **kwds):
1945 1971 """
1946 1972 Wrapper around pyramid's `static_url` function. It is used to generate
1947 1973 URLs from within pylons views or templates. This will be removed when
1948 1974 the pyramid migration is finished.
1949 1975 """
1950 1976 req = get_current_request()
1951 1977 return req.static_url(*args, **kwds)
1952 1978
1953 1979
1954 1980 def resource_path(*args, **kwds):
1955 1981 """
1956 1982 Wrapper around pyramid's `resource_path` function. It is used to generate
1957 1983 URLs from within pylons views or templates. This will be removed when
1958 1984 the pyramid migration is finished.
1959 1985 """
1960 1986 req = get_current_request()
1961 1987 return req.resource_path(*args, **kwds)
@@ -1,244 +1,251 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2010-2017 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21 import copy
22 22 import mock
23 23 import pytest
24 24
25 25 from pylons.util import ContextObj
26 26
27 27 from rhodecode.lib import helpers
28 28 from rhodecode.lib.utils2 import AttributeDict
29 29 from rhodecode.model.settings import IssueTrackerSettingsModel
30 30
31 31
32 32 @pytest.mark.parametrize('url, expected_url', [
33 33 ('http://rc.rc/test', '<a href="http://rc.rc/test">http://rc.rc/test</a>'),
34 34 ('http://rc.rc/@foo', '<a href="http://rc.rc/@foo">http://rc.rc/@foo</a>'),
35 35 ('http://rc.rc/!foo', '<a href="http://rc.rc/!foo">http://rc.rc/!foo</a>'),
36 36 ('http://rc.rc/&foo', '<a href="http://rc.rc/&foo">http://rc.rc/&foo</a>'),
37 37 ('http://rc.rc/#foo', '<a href="http://rc.rc/#foo">http://rc.rc/#foo</a>'),
38 38 ])
39 39 def test_urlify_text(url, expected_url):
40 40 assert helpers.urlify_text(url) == expected_url
41 41
42 42
43 43 @pytest.mark.parametrize('repo_name, commit_id, path, expected_result', [
44 44 ('rX<X', 'cX<X', 'pX<X/aX<X/bX<X',
45 45 '<a class="pjax-link" href="/rX%3CX/files/cX%3CX/">rX&lt;X</a>/'
46 46 '<a class="pjax-link" href="/rX%3CX/files/cX%3CX/pX%3CX">pX&lt;X</a>/'
47 47 '<a class="pjax-link" href="/rX%3CX/files/cX%3CX/pX%3CX/aX%3CX">aX&lt;X'
48 48 '</a>/bX&lt;X'),
49 49 # Path with only one segment
50 50 ('rX<X', 'cX<X', 'pX<X',
51 51 '<a class="pjax-link" href="/rX%3CX/files/cX%3CX/">rX&lt;X</a>/pX&lt;X'),
52 52 # Empty path
53 53 ('rX<X', 'cX<X', '', 'rX&lt;X'),
54 54 ('rX"X', 'cX"X', 'pX"X/aX"X/bX"X',
55 55 '<a class="pjax-link" href="/rX%22X/files/cX%22X/">rX&#34;X</a>/'
56 56 '<a class="pjax-link" href="/rX%22X/files/cX%22X/pX%22X">pX&#34;X</a>/'
57 57 '<a class="pjax-link" href="/rX%22X/files/cX%22X/pX%22X/aX%22X">aX&#34;X'
58 58 '</a>/bX&#34;X'),
59 59 ], ids=['simple', 'one_segment', 'empty_path', 'simple_quote'])
60 60 def test_files_breadcrumbs_xss(
61 61 repo_name, commit_id, path, pylonsapp, expected_result):
62 62 result = helpers.files_breadcrumbs(repo_name, commit_id, path)
63 63 # Expect it to encode all path fragments properly. This is important
64 64 # because it returns an instance of `literal`.
65 65 assert result == expected_result
66 66
67 67
68 68 def test_format_binary():
69 69 assert helpers.format_byte_size_binary(298489462784) == '278.0 GiB'
70 70
71 71
72 72 @pytest.mark.parametrize('text_string, pattern, expected', [
73 73 ('No issue here', '(?:#)(?P<issue_id>\d+)', []),
74 74 ('Fix #42', '(?:#)(?P<issue_id>\d+)',
75 75 [{'url': 'http://r.io/{repo}/i/42', 'id': '42'}]),
76 76 ('Fix #42, #53', '(?:#)(?P<issue_id>\d+)', [
77 77 {'url': 'http://r.io/{repo}/i/42', 'id': '42'},
78 78 {'url': 'http://r.io/{repo}/i/53', 'id': '53'}]),
79 79 ('Fix #42', '(?:#)?<issue_id>\d+)', []), # Broken regex
80 80 ])
81 81 def test_extract_issues(backend, text_string, pattern, expected):
82 82 repo = backend.create_repo()
83 83 config = {
84 84 '123': {
85 85 'uid': '123',
86 86 'pat': pattern,
87 87 'url': 'http://r.io/${repo}/i/${issue_id}',
88 88 'pref': '#',
89 89 }
90 90 }
91 91
92 92 def get_settings_mock(self, cache=True):
93 93 return config
94 94
95 95 with mock.patch.object(IssueTrackerSettingsModel,
96 96 'get_settings', get_settings_mock):
97 97 text, issues = helpers.process_patterns(text_string, repo.repo_name)
98 98
99 99 expected = copy.deepcopy(expected)
100 100 for item in expected:
101 101 item['url'] = item['url'].format(repo=repo.repo_name)
102 102
103 103 assert issues == expected
104 104
105 105
106 @pytest.mark.parametrize('text_string, pattern, expected_text', [
107 ('Fix #42', '(?:#)(?P<issue_id>\d+)',
108 'Fix <a class="issue-tracker-link" href="http://r.io/{repo}/i/42">#42</a>'
109 ),
110 ('Fix #42', '(?:#)?<issue_id>\d+)', 'Fix #42'), # Broken regex
106 @pytest.mark.parametrize('text_string, pattern, link_format, expected_text', [
107 ('Fix #42', '(?:#)(?P<issue_id>\d+)', 'html',
108 'Fix <a class="issue-tracker-link" href="http://r.io/{repo}/i/42">#42</a>'),
109
110 ('Fix #42', '(?:#)(?P<issue_id>\d+)', 'markdown',
111 'Fix [#42](http://r.io/{repo}/i/42)'),
112
113 ('Fix #42', '(?:#)(?P<issue_id>\d+)', 'rst',
114 'Fix `#42 <http://r.io/{repo}/i/42>`_'),
115
116 ('Fix #42', '(?:#)?<issue_id>\d+)', 'html',
117 'Fix #42'), # Broken regex
111 118 ])
112 def test_process_patterns_repo(backend, text_string, pattern, expected_text):
119 def test_process_patterns_repo(backend, text_string, pattern, expected_text, link_format):
113 120 repo = backend.create_repo()
114 config = {'123': {
115 'uid': '123',
116 'pat': pattern,
117 'url': 'http://r.io/${repo}/i/${issue_id}',
118 'pref': '#',
119 }
120 }
121 121
122 122 def get_settings_mock(self, cache=True):
123 return config
123 return {
124 '123': {
125 'uid': '123',
126 'pat': pattern,
127 'url': 'http://r.io/${repo}/i/${issue_id}',
128 'pref': '#',
129 }
130 }
124 131
125 132 with mock.patch.object(IssueTrackerSettingsModel,
126 133 'get_settings', get_settings_mock):
127 134 processed_text, issues = helpers.process_patterns(
128 text_string, repo.repo_name, config)
135 text_string, repo.repo_name, link_format)
129 136
130 137 assert processed_text == expected_text.format(repo=repo.repo_name)
131 138
132 139
133 140 @pytest.mark.parametrize('text_string, pattern, expected_text', [
134 141 ('Fix #42', '(?:#)(?P<issue_id>\d+)',
135 'Fix <a class="issue-tracker-link" href="http://r.io/i/42">#42</a>'
136 ),
137 ('Fix #42', '(?:#)?<issue_id>\d+)', 'Fix #42'), # Broken regex
142 'Fix <a class="issue-tracker-link" href="http://r.io/i/42">#42</a>'),
143 ('Fix #42', '(?:#)?<issue_id>\d+)',
144 'Fix #42'), # Broken regex
138 145 ])
139 146 def test_process_patterns_no_repo(text_string, pattern, expected_text):
140 config = {'123': {
141 'uid': '123',
142 'pat': pattern,
143 'url': 'http://r.io/i/${issue_id}',
144 'pref': '#',
145 }
146 }
147 147
148 148 def get_settings_mock(self, cache=True):
149 return config
149 return {
150 '123': {
151 'uid': '123',
152 'pat': pattern,
153 'url': 'http://r.io/i/${issue_id}',
154 'pref': '#',
155 }
156 }
150 157
151 158 with mock.patch.object(IssueTrackerSettingsModel,
152 159 'get_global_settings', get_settings_mock):
153 160 processed_text, issues = helpers.process_patterns(
154 text_string, '', config)
161 text_string, '')
155 162
156 163 assert processed_text == expected_text
157 164
158 165
159 166 def test_process_patterns_non_existent_repo_name(backend):
160 167 text_string = 'Fix #42'
161 168 pattern = '(?:#)(?P<issue_id>\d+)'
162 169 expected_text = ('Fix <a class="issue-tracker-link" '
163 170 'href="http://r.io/do-not-exist/i/42">#42</a>')
164 config = {'123': {
165 'uid': '123',
166 'pat': pattern,
167 'url': 'http://r.io/${repo}/i/${issue_id}',
168 'pref': '#',
169 }
170 }
171 171
172 172 def get_settings_mock(self, cache=True):
173 return config
173 return {
174 '123': {
175 'uid': '123',
176 'pat': pattern,
177 'url': 'http://r.io/${repo}/i/${issue_id}',
178 'pref': '#',
179 }
180 }
174 181
175 182 with mock.patch.object(IssueTrackerSettingsModel,
176 183 'get_global_settings', get_settings_mock):
177 184 processed_text, issues = helpers.process_patterns(
178 text_string, 'do-not-exist', config)
185 text_string, 'do-not-exist')
179 186
180 187 assert processed_text == expected_text
181 188
182 189
183 190 def test_get_visual_attr(pylonsapp):
184 191 c = ContextObj()
185 192 assert None is helpers.get_visual_attr(c, 'fakse')
186 193
187 194 # emulate the c.visual behaviour
188 195 c.visual = AttributeDict({})
189 196 assert None is helpers.get_visual_attr(c, 'some_var')
190 197
191 198 c.visual.some_var = 'foobar'
192 199 assert 'foobar' == helpers.get_visual_attr(c, 'some_var')
193 200
194 201
195 202 @pytest.mark.parametrize('test_text, inclusive, expected_text', [
196 203 ('just a string', False, 'just a string'),
197 204 ('just a string\n', False, 'just a string'),
198 205 ('just a string\n next line', False, 'just a string...'),
199 206 ('just a string\n next line', True, 'just a string\n...'),
200 207 ])
201 208 def test_chop_at(test_text, inclusive, expected_text):
202 209 assert helpers.chop_at_smart(
203 210 test_text, '\n', inclusive, '...') == expected_text
204 211
205 212
206 213 @pytest.mark.parametrize('test_text, expected_output', [
207 214 ('some text', ['some', 'text']),
208 215 ('some text', ['some', 'text']),
209 216 ('some text "with a phrase"', ['some', 'text', 'with a phrase']),
210 217 ('"a phrase" "another phrase"', ['a phrase', 'another phrase']),
211 218 ('"justphrase"', ['justphrase']),
212 219 ('""', []),
213 220 ('', []),
214 221 (' ', []),
215 222 ('" "', []),
216 223 ])
217 224 def test_extract_phrases(test_text, expected_output):
218 225 assert helpers.extract_phrases(test_text) == expected_output
219 226
220 227
221 228 @pytest.mark.parametrize('test_text, text_phrases, expected_output', [
222 229 ('some text here', ['some', 'here'], [(0, 4), (10, 14)]),
223 230 ('here here there', ['here'], [(0, 4), (5, 9), (11, 15)]),
224 231 ('irrelevant', ['not found'], []),
225 232 ('irrelevant', ['not found'], []),
226 233 ])
227 234 def test_get_matching_offsets(test_text, text_phrases, expected_output):
228 235 assert helpers.get_matching_offsets(
229 236 test_text, text_phrases) == expected_output
230 237
231 238
232 239 def test_normalize_text_for_matching():
233 240 assert helpers.normalize_text_for_matching(
234 241 'OJjfe)*#$*@)$JF*)3r2f80h') == 'ojjfe jf 3r2f80h'
235 242
236 243
237 244 def test_get_matching_line_offsets():
238 245 assert helpers.get_matching_line_offsets([
239 246 'words words words',
240 247 'words words words',
241 248 'some text some',
242 249 'words words words',
243 250 'words words words',
244 251 'text here what'], 'text') == {3: [(5, 9)], 6: [(0, 4)]}