metatags: limit the scope of url => metatag to http, https and / links....
marcink
r2381:39239f6c default
@@ -1,2064 +1,2064 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2010-2017 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21 """
22 22 Helper functions
23 23
24 24 Consists of functions typically used within templates, but also
25 25 available to Controllers. This module is available to both as 'h'.
26 26 """
27 27
28 28 import random
29 29 import hashlib
30 30 import StringIO
31 31 import urllib
32 32 import math
33 33 import logging
34 34 import re
35 35 import urlparse
36 36 import time
37 37 import string
38 38 import hashlib
39 39 from collections import OrderedDict
40 40
41 41 import pygments
42 42 import itertools
43 43 import fnmatch
44 44
45 45 from datetime import datetime
46 46 from functools import partial
47 47 from pygments.formatters.html import HtmlFormatter
48 48 from pygments import highlight as code_highlight
49 49 from pygments.lexers import (
50 50 get_lexer_by_name, get_lexer_for_filename, get_lexer_for_mimetype)
51 51
52 52 from pyramid.threadlocal import get_current_request
53 53
54 54 from webhelpers.html import literal, HTML, escape
55 55 from webhelpers.html.tools import *
56 56 from webhelpers.html.builder import make_tag
57 57 from webhelpers.html.tags import auto_discovery_link, checkbox, css_classes, \
58 58 end_form, file, form as wh_form, hidden, image, javascript_link, link_to, \
59 59 link_to_if, link_to_unless, ol, required_legend, select, stylesheet_link, \
60 60 submit, text, password, textarea, title, ul, xml_declaration, radio
61 61 from webhelpers.html.tools import auto_link, button_to, highlight, \
62 62 js_obfuscate, mail_to, strip_links, strip_tags, tag_re
63 63 from webhelpers.text import chop_at, collapse, convert_accented_entities, \
64 64 convert_misc_entities, lchop, plural, rchop, remove_formatting, \
65 65 replace_whitespace, urlify, truncate, wrap_paragraphs
66 66 from webhelpers.date import time_ago_in_words
67 67 from webhelpers.paginate import Page as _Page
68 68 from webhelpers.html.tags import _set_input_attrs, _set_id_attr, \
69 69 convert_boolean_attrs, NotGiven, _make_safe_id_component
70 70 from webhelpers2.number import format_byte_size
71 71
72 72 from rhodecode.lib.action_parser import action_parser
73 73 from rhodecode.lib.ext_json import json
74 74 from rhodecode.lib.utils import repo_name_slug, get_custom_lexer
75 75 from rhodecode.lib.utils2 import str2bool, safe_unicode, safe_str, \
76 76 get_commit_safe, datetime_to_time, time_to_datetime, time_to_utcdatetime, \
77 77 AttributeDict, safe_int, md5, md5_safe
78 78 from rhodecode.lib.markup_renderer import MarkupRenderer, relative_links
79 79 from rhodecode.lib.vcs.exceptions import CommitDoesNotExistError
80 80 from rhodecode.lib.vcs.backends.base import BaseChangeset, EmptyCommit
81 81 from rhodecode.config.conf import DATE_FORMAT, DATETIME_FORMAT
82 82 from rhodecode.model.changeset_status import ChangesetStatusModel
83 83 from rhodecode.model.db import Permission, User, Repository
84 84 from rhodecode.model.repo_group import RepoGroupModel
85 85 from rhodecode.model.settings import IssueTrackerSettingsModel
86 86
87 87 log = logging.getLogger(__name__)
88 88
89 89
90 90 DEFAULT_USER = User.DEFAULT_USER
91 91 DEFAULT_USER_EMAIL = User.DEFAULT_USER_EMAIL
92 92
93 93
94 94 def asset(path, ver=None, **kwargs):
95 95 """
96 96 Helper to generate a static asset file path for rhodecode assets
97 97
98 98 eg. h.asset('images/image.png', ver='3923')
99 99
100 100 :param path: path of asset
101 101 :param ver: optional version query param to append as ?ver=
102 102 """
103 103 request = get_current_request()
104 104 query = {}
105 105 query.update(kwargs)
106 106 if ver:
107 107 query = {'ver': ver}
108 108 return request.static_path(
109 109 'rhodecode:public/{}'.format(path), _query=query)
110 110
111 111
112 112 default_html_escape_table = {
113 113 ord('&'): u'&amp;',
114 114 ord('<'): u'&lt;',
115 115 ord('>'): u'&gt;',
116 116 ord('"'): u'&quot;',
117 117 ord("'"): u'&#39;',
118 118 }
119 119
120 120
121 121 def html_escape(text, html_escape_table=default_html_escape_table):
122 122 """Produce entities within text."""
123 123 return text.translate(html_escape_table)
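# Illustrative usage sketch (added note, not part of the original file):
# `html_escape` relies on unicode.translate with an ordinal -> entity mapping,
# so it expects a unicode string:
#
#   html_escape(u'<b>"Fish & Chips"</b>')
#   # -> u'&lt;b&gt;&quot;Fish &amp; Chips&quot;&lt;/b&gt;'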
124 124
125 125
126 126 def chop_at_smart(s, sub, inclusive=False, suffix_if_chopped=None):
127 127 """
128 128 Truncate string ``s`` at the first occurrence of ``sub``.
129 129
130 130 If ``inclusive`` is true, truncate just after ``sub`` rather than at it.
131 131 """
132 132 suffix_if_chopped = suffix_if_chopped or ''
133 133 pos = s.find(sub)
134 134 if pos == -1:
135 135 return s
136 136
137 137 if inclusive:
138 138 pos += len(sub)
139 139
140 140 chopped = s[:pos]
141 141 left = s[pos:].strip()
142 142
143 143 if left and suffix_if_chopped:
144 144 chopped += suffix_if_chopped
145 145
146 146 return chopped
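# Illustrative sketch (added note, not part of the original file):
#
#   chop_at_smart('foo-bar-baz', '-')                           # -> 'foo'
#   chop_at_smart('foo-bar-baz', '-', inclusive=True)           # -> 'foo-'
#   chop_at_smart('foo-bar-baz', '-', suffix_if_chopped='...')  # -> 'foo...'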
147 147
148 148
149 149 def shorter(text, size=20):
150 150 postfix = '...'
151 151 if len(text) > size:
152 152 return text[:size - len(postfix)] + postfix
153 153 return text
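# Illustrative sketch (added note, not part of the original file): the postfix
# counts towards `size`, so the result never exceeds `size` characters:
#
#   shorter('a quite long line of text', size=10)  # -> 'a quite...'
#   shorter('short', size=10)                      # -> 'short'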
154 154
155 155
156 156 def _reset(name, value=None, id=NotGiven, type="reset", **attrs):
157 157 """
158 158 Reset button
159 159 """
160 160 _set_input_attrs(attrs, type, name, value)
161 161 _set_id_attr(attrs, id, name)
162 162 convert_boolean_attrs(attrs, ["disabled"])
163 163 return HTML.input(**attrs)
164 164
165 165 reset = _reset
166 166 safeid = _make_safe_id_component
167 167
168 168
169 169 def branding(name, length=40):
170 170 return truncate(name, length, indicator="")
171 171
172 172
173 173 def FID(raw_id, path):
174 174 """
175 175 Creates a unique ID for a filenode, based on a hash of its path and commit;
176 176 it's safe to use in urls
177 177
178 178 :param raw_id:
179 179 :param path:
180 180 """
181 181
182 182 return 'c-%s-%s' % (short_id(raw_id), md5_safe(path)[:12])
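# Illustrative sketch (added note, not part of the original file): the hash
# below is a placeholder value, the second part is md5_safe(path)[:12]:
#
#   FID('8d2fab6f4f0c2e8a91b4', 'docs/index.rst')
#   # -> 'c-8d2fab6f4f0c-<first 12 hex chars of md5("docs/index.rst")>'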
183 183
184 184
185 185 class _GetError(object):
186 186 """Get error from form_errors, and represent it as span wrapped error
187 187 message
188 188
189 189 :param field_name: field to fetch errors for
190 190 :param form_errors: form errors dict
191 191 """
192 192
193 193 def __call__(self, field_name, form_errors):
194 194 tmpl = """<span class="error_msg">%s</span>"""
195 195 if form_errors and field_name in form_errors:
196 196 return literal(tmpl % form_errors.get(field_name))
197 197
198 198 get_error = _GetError()
199 199
200 200
201 201 class _ToolTip(object):
202 202
203 203 def __call__(self, tooltip_title, trim_at=50):
204 204 """
205 205 Special function just to wrap our text into nicely formatted,
206 206 auto-wrapped text
207 207
208 208 :param tooltip_title:
209 209 """
210 210 tooltip_title = escape(tooltip_title)
211 211 tooltip_title = tooltip_title.replace('<', '&lt;').replace('>', '&gt;')
212 212 return tooltip_title
213 213 tooltip = _ToolTip()
214 214
215 215
216 216 def files_breadcrumbs(repo_name, commit_id, file_path):
217 217 if isinstance(file_path, str):
218 218 file_path = safe_unicode(file_path)
219 219
220 220 # TODO: johbo: Is this always a url like path, or is this operating
221 221 # system dependent?
222 222 path_segments = file_path.split('/')
223 223
224 224 repo_name_html = escape(repo_name)
225 225 if len(path_segments) == 1 and path_segments[0] == '':
226 226 url_segments = [repo_name_html]
227 227 else:
228 228 url_segments = [
229 229 link_to(
230 230 repo_name_html,
231 231 route_path(
232 232 'repo_files',
233 233 repo_name=repo_name,
234 234 commit_id=commit_id,
235 235 f_path=''),
236 236 class_='pjax-link')]
237 237
238 238 last_cnt = len(path_segments) - 1
239 239 for cnt, segment in enumerate(path_segments):
240 240 if not segment:
241 241 continue
242 242 segment_html = escape(segment)
243 243
244 244 if cnt != last_cnt:
245 245 url_segments.append(
246 246 link_to(
247 247 segment_html,
248 248 route_path(
249 249 'repo_files',
250 250 repo_name=repo_name,
251 251 commit_id=commit_id,
252 252 f_path='/'.join(path_segments[:cnt + 1])),
253 253 class_='pjax-link'))
254 254 else:
255 255 url_segments.append(segment_html)
256 256
257 257 return literal('/'.join(url_segments))
258 258
259 259
260 260 class CodeHtmlFormatter(HtmlFormatter):
261 261 """
262 262 My code Html Formatter for source codes
263 263 """
264 264
265 265 def wrap(self, source, outfile):
266 266 return self._wrap_div(self._wrap_pre(self._wrap_code(source)))
267 267
268 268 def _wrap_code(self, source):
269 269 for cnt, it in enumerate(source):
270 270 i, t = it
271 271 t = '<div id="L%s">%s</div>' % (cnt + 1, t)
272 272 yield i, t
273 273
274 274 def _wrap_tablelinenos(self, inner):
275 275 dummyoutfile = StringIO.StringIO()
276 276 lncount = 0
277 277 for t, line in inner:
278 278 if t:
279 279 lncount += 1
280 280 dummyoutfile.write(line)
281 281
282 282 fl = self.linenostart
283 283 mw = len(str(lncount + fl - 1))
284 284 sp = self.linenospecial
285 285 st = self.linenostep
286 286 la = self.lineanchors
287 287 aln = self.anchorlinenos
288 288 nocls = self.noclasses
289 289 if sp:
290 290 lines = []
291 291
292 292 for i in range(fl, fl + lncount):
293 293 if i % st == 0:
294 294 if i % sp == 0:
295 295 if aln:
296 296 lines.append('<a href="#%s%d" class="special">%*d</a>' %
297 297 (la, i, mw, i))
298 298 else:
299 299 lines.append('<span class="special">%*d</span>' % (mw, i))
300 300 else:
301 301 if aln:
302 302 lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
303 303 else:
304 304 lines.append('%*d' % (mw, i))
305 305 else:
306 306 lines.append('')
307 307 ls = '\n'.join(lines)
308 308 else:
309 309 lines = []
310 310 for i in range(fl, fl + lncount):
311 311 if i % st == 0:
312 312 if aln:
313 313 lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
314 314 else:
315 315 lines.append('%*d' % (mw, i))
316 316 else:
317 317 lines.append('')
318 318 ls = '\n'.join(lines)
319 319
320 320 # in case you wonder about the seemingly redundant <div> here: since the
321 321 # content in the other cell also is wrapped in a div, some browsers in
322 322 # some configurations seem to mess up the formatting...
323 323 if nocls:
324 324 yield 0, ('<table class="%stable">' % self.cssclass +
325 325 '<tr><td><div class="linenodiv" '
326 326 'style="background-color: #f0f0f0; padding-right: 10px">'
327 327 '<pre style="line-height: 125%">' +
328 328 ls + '</pre></div></td><td id="hlcode" class="code">')
329 329 else:
330 330 yield 0, ('<table class="%stable">' % self.cssclass +
331 331 '<tr><td class="linenos"><div class="linenodiv"><pre>' +
332 332 ls + '</pre></div></td><td id="hlcode" class="code">')
333 333 yield 0, dummyoutfile.getvalue()
334 334 yield 0, '</td></tr></table>'
335 335
336 336
337 337 class SearchContentCodeHtmlFormatter(CodeHtmlFormatter):
338 338 def __init__(self, **kw):
339 339 # only show these line numbers if set
340 340 self.only_lines = kw.pop('only_line_numbers', [])
341 341 self.query_terms = kw.pop('query_terms', [])
342 342 self.max_lines = kw.pop('max_lines', 5)
343 343 self.line_context = kw.pop('line_context', 3)
344 344 self.url = kw.pop('url', None)
345 345
346 346 super(CodeHtmlFormatter, self).__init__(**kw)
347 347
348 348 def _wrap_code(self, source):
349 349 for cnt, it in enumerate(source):
350 350 i, t = it
351 351 t = '<pre>%s</pre>' % t
352 352 yield i, t
353 353
354 354 def _wrap_tablelinenos(self, inner):
355 355 yield 0, '<table class="code-highlight %stable">' % self.cssclass
356 356
357 357 last_shown_line_number = 0
358 358 current_line_number = 1
359 359
360 360 for t, line in inner:
361 361 if not t:
362 362 yield t, line
363 363 continue
364 364
365 365 if current_line_number in self.only_lines:
366 366 if last_shown_line_number + 1 != current_line_number:
367 367 yield 0, '<tr>'
368 368 yield 0, '<td class="line">...</td>'
369 369 yield 0, '<td id="hlcode" class="code"></td>'
370 370 yield 0, '</tr>'
371 371
372 372 yield 0, '<tr>'
373 373 if self.url:
374 374 yield 0, '<td class="line"><a href="%s#L%i">%i</a></td>' % (
375 375 self.url, current_line_number, current_line_number)
376 376 else:
377 377 yield 0, '<td class="line"><a href="">%i</a></td>' % (
378 378 current_line_number)
379 379 yield 0, '<td id="hlcode" class="code">' + line + '</td>'
380 380 yield 0, '</tr>'
381 381
382 382 last_shown_line_number = current_line_number
383 383
384 384 current_line_number += 1
385 385
386 386
387 387 yield 0, '</table>'
388 388
389 389
390 390 def extract_phrases(text_query):
391 391 """
392 392 Extracts phrases from a search term string, making sure phrases
393 393 contained in double quotes are kept together, and discarding empty
394 394 or whitespace-only values, eg.
395 395
396 396 'some text "a phrase" more' => ['some', 'text', 'a phrase', 'more']
397 397
398 398 """
399 399
400 400 in_phrase = False
401 401 buf = ''
402 402 phrases = []
403 403 for char in text_query:
404 404 if in_phrase:
405 405 if char == '"': # end phrase
406 406 phrases.append(buf)
407 407 buf = ''
408 408 in_phrase = False
409 409 continue
410 410 else:
411 411 buf += char
412 412 continue
413 413 else:
414 414 if char == '"': # start phrase
415 415 in_phrase = True
416 416 phrases.append(buf)
417 417 buf = ''
418 418 continue
419 419 elif char == ' ':
420 420 phrases.append(buf)
421 421 buf = ''
422 422 continue
423 423 else:
424 424 buf += char
425 425
426 426 phrases.append(buf)
427 427 phrases = [phrase.strip() for phrase in phrases if phrase.strip()]
428 428 return phrases
429 429
430 430
431 431 def get_matching_offsets(text, phrases):
432 432 """
433 433 Returns a list of string offsets in `text` that the list of `phrases` match
434 434
435 435 >>> get_matching_offsets('some text here', ['some', 'here'])
436 436 [(0, 4), (10, 14)]
437 437
438 438 """
439 439 offsets = []
440 440 for phrase in phrases:
441 441 for match in re.finditer(phrase, text):
442 442 offsets.append((match.start(), match.end()))
443 443
444 444 return offsets
445 445
446 446
447 447 def normalize_text_for_matching(x):
448 448 """
449 449 Replaces all non-alphanumeric characters with spaces and lower-cases the
450 450 string; useful for comparing two text strings without punctuation
451 451 """
452 452 return re.sub(r'[^\w]', ' ', x.lower())
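# Illustrative sketch (added note, not part of the original file): every
# non-word character (regex \W) becomes a single space, underscores are kept:
#
#   normalize_text_for_matching('Some-Text_Here!')  # -> 'some text_here '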
453 453
454 454
455 455 def get_matching_line_offsets(lines, terms):
456 456 """ Return a set of `lines` indices (starting from 1) matching a
457 457 text search query, along with `context` lines above/below matching lines
458 458
459 459 :param lines: list of strings representing lines
460 460 :param terms: search term string to match in lines eg. 'some text'
461 461 :param context: number of lines above/below a matching line to add to result
462 462 :param max_lines: cut off for lines of interest
463 463 eg.
464 464
465 465 text = '''
466 466 words words words
467 467 words words words
468 468 some text some
469 469 words words words
470 470 words words words
471 471 text here what
472 472 '''
473 473 get_matching_line_offsets(text, 'text', context=1)
474 474 {3: [(5, 9)], 6: [(0, 4)]}
475 475
476 476 """
477 477 matching_lines = {}
478 478 phrases = [normalize_text_for_matching(phrase)
479 479 for phrase in extract_phrases(terms)]
480 480
481 481 for line_index, line in enumerate(lines, start=1):
482 482 match_offsets = get_matching_offsets(
483 483 normalize_text_for_matching(line), phrases)
484 484 if match_offsets:
485 485 matching_lines[line_index] = match_offsets
486 486
487 487 return matching_lines
488 488
489 489
490 490 def hsv_to_rgb(h, s, v):
491 491 """ Convert hsv color values to rgb """
492 492
493 493 if s == 0.0:
494 494 return v, v, v
495 495 i = int(h * 6.0) # XXX assume int() truncates!
496 496 f = (h * 6.0) - i
497 497 p = v * (1.0 - s)
498 498 q = v * (1.0 - s * f)
499 499 t = v * (1.0 - s * (1.0 - f))
500 500 i = i % 6
501 501 if i == 0:
502 502 return v, t, p
503 503 if i == 1:
504 504 return q, v, p
505 505 if i == 2:
506 506 return p, v, t
507 507 if i == 3:
508 508 return p, q, v
509 509 if i == 4:
510 510 return t, p, v
511 511 if i == 5:
512 512 return v, p, q
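# Illustrative sketch (added note, not part of the original file), h/s/v in [0, 1]:
#
#   hsv_to_rgb(0.0, 1.0, 1.0)  # -> (1.0, 0.0, 0.0), pure red
#   hsv_to_rgb(0.5, 0.0, 0.8)  # -> (0.8, 0.8, 0.8), zero saturation gives grey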
513 513
514 514
515 515 def unique_color_generator(n=10000, saturation=0.10, lightness=0.95):
516 516 """
517 517 Generator for getting n evenly distributed colors using
518 518 hsv color and the golden ratio. It always returns the same order of colors
519 519
520 520 :param n: number of colors to generate
521 521 :param saturation: saturation of returned colors
522 522 :param lightness: lightness of returned colors
523 523 :returns: RGB tuple
524 524 """
525 525
526 526 golden_ratio = 0.618033988749895
527 527 h = 0.22717784590367374
528 528
529 529 for _ in xrange(n):
530 530 h += golden_ratio
531 531 h %= 1
532 532 HSV_tuple = [h, saturation, lightness]
533 533 RGB_tuple = hsv_to_rgb(*HSV_tuple)
534 534 yield map(lambda x: str(int(x * 256)), RGB_tuple)
535 535
536 536
537 537 def color_hasher(n=10000, saturation=0.10, lightness=0.95):
538 538 """
539 539 Returns a function which when called with an argument returns a unique
540 540 color for that argument, eg.
541 541
542 542 :param n: number of colors to generate
543 543 :param saturation: saturation of returned colors
544 544 :param lightness: lightness of returned colors
545 545 :returns: css RGB string
546 546
547 547 >>> color_hash = color_hasher()
548 548 >>> color_hash('hello')
549 549 'rgb(34, 12, 59)'
550 550 >>> color_hash('hello')
551 551 'rgb(34, 12, 59)'
552 552 >>> color_hash('other')
553 553 'rgb(90, 224, 159)'
554 554 """
555 555
556 556 color_dict = {}
557 557 cgenerator = unique_color_generator(
558 558 saturation=saturation, lightness=lightness)
559 559
560 560 def get_color_string(thing):
561 561 if thing in color_dict:
562 562 col = color_dict[thing]
563 563 else:
564 564 col = color_dict[thing] = cgenerator.next()
565 565 return "rgb(%s)" % (', '.join(col))
566 566
567 567 return get_color_string
568 568
569 569
570 570 def get_lexer_safe(mimetype=None, filepath=None):
571 571 """
572 572 Tries to return a relevant pygments lexer using mimetype/filepath name,
573 573 defaulting to plain text if none could be found
574 574 """
575 575 lexer = None
576 576 try:
577 577 if mimetype:
578 578 lexer = get_lexer_for_mimetype(mimetype)
579 579 if not lexer:
580 580 lexer = get_lexer_for_filename(filepath)
581 581 except pygments.util.ClassNotFound:
582 582 pass
583 583
584 584 if not lexer:
585 585 lexer = get_lexer_by_name('text')
586 586
587 587 return lexer
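# Illustrative sketch (added note, not part of the original file):
#
#   get_lexer_safe(mimetype='text/x-python')       # -> pygments Python lexer
#   get_lexer_safe(filepath='file.with-no-lexer')  # -> plain 'text' lexer fallback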
588 588
589 589
590 590 def get_lexer_for_filenode(filenode):
591 591 lexer = get_custom_lexer(filenode.extension) or filenode.lexer
592 592 return lexer
593 593
594 594
595 595 def pygmentize(filenode, **kwargs):
596 596 """
597 597 pygmentize function using pygments
598 598
599 599 :param filenode:
600 600 """
601 601 lexer = get_lexer_for_filenode(filenode)
602 602 return literal(code_highlight(filenode.content, lexer,
603 603 CodeHtmlFormatter(**kwargs)))
604 604
605 605
606 606 def is_following_repo(repo_name, user_id):
607 607 from rhodecode.model.scm import ScmModel
608 608 return ScmModel().is_following_repo(repo_name, user_id)
609 609
610 610
611 611 class _Message(object):
612 612 """A message returned by ``Flash.pop_messages()``.
613 613
614 614 Converting the message to a string returns the message text. Instances
615 615 also have the following attributes:
616 616
617 617 * ``message``: the message text.
618 618 * ``category``: the category specified when the message was created.
619 619 """
620 620
621 621 def __init__(self, category, message):
622 622 self.category = category
623 623 self.message = message
624 624
625 625 def __str__(self):
626 626 return self.message
627 627
628 628 __unicode__ = __str__
629 629
630 630 def __html__(self):
631 631 return escape(safe_unicode(self.message))
632 632
633 633
634 634 class Flash(object):
635 635 # List of allowed categories. If None, allow any category.
636 636 categories = ["warning", "notice", "error", "success"]
637 637
638 638 # Default category if none is specified.
639 639 default_category = "notice"
640 640
641 641 def __init__(self, session_key="flash", categories=None,
642 642 default_category=None):
643 643 """
644 644 Instantiate a ``Flash`` object.
645 645
646 646 ``session_key`` is the key to save the messages under in the user's
647 647 session.
648 648
649 649 ``categories`` is an optional list which overrides the default list
650 650 of categories.
651 651
652 652 ``default_category`` overrides the default category used for messages
653 653 when none is specified.
654 654 """
655 655 self.session_key = session_key
656 656 if categories is not None:
657 657 self.categories = categories
658 658 if default_category is not None:
659 659 self.default_category = default_category
660 660 if self.categories and self.default_category not in self.categories:
661 661 raise ValueError(
662 662 "unrecognized default category %r" % (self.default_category,))
663 663
664 664 def pop_messages(self, session=None, request=None):
665 665 """
666 666 Return all accumulated messages and delete them from the session.
667 667
668 668 The return value is a list of ``Message`` objects.
669 669 """
670 670 messages = []
671 671
672 672 if not session:
673 673 if not request:
674 674 request = get_current_request()
675 675 session = request.session
676 676
677 677 # Pop the 'old' pylons flash messages. They are tuples of the form
678 678 # (category, message)
679 679 for cat, msg in session.pop(self.session_key, []):
680 680 messages.append(_Message(cat, msg))
681 681
682 682 # Pop the 'new' pyramid flash messages for each category as list
683 683 # of strings.
684 684 for cat in self.categories:
685 685 for msg in session.pop_flash(queue=cat):
686 686 messages.append(_Message(cat, msg))
687 687 # Map messages from the default queue to the 'notice' category.
688 688 for msg in session.pop_flash():
689 689 messages.append(_Message('notice', msg))
690 690
691 691 session.save()
692 692 return messages
693 693
694 694 def json_alerts(self, session=None, request=None):
695 695 payloads = []
696 696 messages = flash.pop_messages(session=session, request=request)
697 697 if messages:
698 698 for message in messages:
699 699 subdata = {}
700 700 if hasattr(message.message, 'rsplit'):
701 701 flash_data = message.message.rsplit('|DELIM|', 1)
702 702 org_message = flash_data[0]
703 703 if len(flash_data) > 1:
704 704 subdata = json.loads(flash_data[1])
705 705 else:
706 706 org_message = message.message
707 707 payloads.append({
708 708 'message': {
709 709 'message': u'{}'.format(org_message),
710 710 'level': message.category,
711 711 'force': True,
712 712 'subdata': subdata
713 713 }
714 714 })
715 715 return json.dumps(payloads)
716 716
717 717 def __call__(self, message, category=None, ignore_duplicate=False,
718 718 session=None, request=None):
719 719
720 720 if not session:
721 721 if not request:
722 722 request = get_current_request()
723 723 session = request.session
724 724
725 725 session.flash(
726 726 message, queue=category, allow_duplicate=not ignore_duplicate)
727 727
728 728
729 729 flash = Flash()
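# Illustrative usage sketch (added note, not part of the original file):
#
#   flash(u'Repository created', category='success')
#   # ... later, typically when rendering the next page:
#   # for msg in flash.pop_messages(request=request):
#   #     msg.category, msg.message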
730 730
731 731 #==============================================================================
732 732 # SCM FILTERS available via h.
733 733 #==============================================================================
734 734 from rhodecode.lib.vcs.utils import author_name, author_email
735 735 from rhodecode.lib.utils2 import credentials_filter, age as _age
736 736 from rhodecode.model.db import User, ChangesetStatus
737 737
738 738 age = _age
739 739 capitalize = lambda x: x.capitalize()
740 740 email = author_email
741 741 short_id = lambda x: x[:12]
742 742 hide_credentials = lambda x: ''.join(credentials_filter(x))
743 743
744 744
745 745 def age_component(datetime_iso, value=None, time_is_local=False):
746 746 title = value or format_date(datetime_iso)
747 747 tzinfo = '+00:00'
748 748
749 749 # detect if we have a timezone info, otherwise, add it
750 750 if isinstance(datetime_iso, datetime) and not datetime_iso.tzinfo:
751 751 if time_is_local:
752 752 tzinfo = time.strftime("+%H:%M",
753 753 time.gmtime(
754 754 (datetime.now() - datetime.utcnow()).seconds + 1
755 755 )
756 756 )
757 757
758 758 return literal(
759 759 '<time class="timeago tooltip" '
760 760 'title="{1}{2}" datetime="{0}{2}">{1}</time>'.format(
761 761 datetime_iso, title, tzinfo))
762 762
763 763
764 764 def _shorten_commit_id(commit_id):
765 765 from rhodecode import CONFIG
766 766 def_len = safe_int(CONFIG.get('rhodecode_show_sha_length', 12))
767 767 return commit_id[:def_len]
768 768
769 769
770 770 def show_id(commit):
771 771 """
772 772 Configurable function that shows the ID;
773 773 by default it's r123:fffeeefffeee
774 774
775 775 :param commit: commit instance
776 776 """
777 777 from rhodecode import CONFIG
778 778 show_idx = str2bool(CONFIG.get('rhodecode_show_revision_number', True))
779 779
780 780 raw_id = _shorten_commit_id(commit.raw_id)
781 781 if show_idx:
782 782 return 'r%s:%s' % (commit.idx, raw_id)
783 783 else:
784 784 return '%s' % (raw_id, )
785 785
786 786
787 787 def format_date(date):
788 788 """
789 789 use a standardized formatting for dates used in RhodeCode
790 790
791 791 :param date: date/datetime object
792 792 :return: formatted date
793 793 """
794 794
795 795 if date:
796 796 _fmt = "%a, %d %b %Y %H:%M:%S"
797 797 return safe_unicode(date.strftime(_fmt))
798 798
799 799 return u""
800 800
801 801
802 802 class _RepoChecker(object):
803 803
804 804 def __init__(self, backend_alias):
805 805 self._backend_alias = backend_alias
806 806
807 807 def __call__(self, repository):
808 808 if hasattr(repository, 'alias'):
809 809 _type = repository.alias
810 810 elif hasattr(repository, 'repo_type'):
811 811 _type = repository.repo_type
812 812 else:
813 813 _type = repository
814 814 return _type == self._backend_alias
815 815
816 816 is_git = _RepoChecker('git')
817 817 is_hg = _RepoChecker('hg')
818 818 is_svn = _RepoChecker('svn')
819 819
820 820
821 821 def get_repo_type_by_name(repo_name):
822 822 repo = Repository.get_by_repo_name(repo_name)
823 823 return repo.repo_type
824 824
825 825
826 826 def is_svn_without_proxy(repository):
827 827 if is_svn(repository):
828 828 from rhodecode.model.settings import VcsSettingsModel
829 829 conf = VcsSettingsModel().get_ui_settings_as_config_obj()
830 830 return not str2bool(conf.get('vcs_svn_proxy', 'http_requests_enabled'))
831 831 return False
832 832
833 833
834 834 def discover_user(author):
835 835 """
836 836 Tries to discover a RhodeCode User based on the author string. The author
837 837 string is typically `FirstName LastName <email@address.com>`
838 838 """
839 839
840 840 # if author is already an instance use it for extraction
841 841 if isinstance(author, User):
842 842 return author
843 843
844 844 # Valid email in the attribute passed, see if they're in the system
845 845 _email = author_email(author)
846 846 if _email != '':
847 847 user = User.get_by_email(_email, case_insensitive=True, cache=True)
848 848 if user is not None:
849 849 return user
850 850
851 851 # Maybe it's a username? We try to extract it and fetch it by username.
852 852 _author = author_name(author)
853 853 user = User.get_by_username(_author, case_insensitive=True, cache=True)
854 854 if user is not None:
855 855 return user
856 856
857 857 return None
858 858
859 859
860 860 def email_or_none(author):
861 861 # extract email from the commit string
862 862 _email = author_email(author)
863 863
864 864 # If we have an email, use it, otherwise
865 865 # see if it contains a username we can get an email from
866 866 if _email != '':
867 867 return _email
868 868 else:
869 869 user = User.get_by_username(
870 870 author_name(author), case_insensitive=True, cache=True)
871 871
872 872 if user is not None:
873 873 return user.email
874 874
875 875 # No valid email, not a valid user in the system, none!
876 876 return None
877 877
878 878
879 879 def link_to_user(author, length=0, **kwargs):
880 880 user = discover_user(author)
881 881 # user can be None, but if we already have it, we can re-use it
882 882 # in the person() function, and so save one expensive query
883 883 if user:
884 884 author = user
885 885
886 886 display_person = person(author, 'username_or_name_or_email')
887 887 if length:
888 888 display_person = shorter(display_person, length)
889 889
890 890 if user:
891 891 return link_to(
892 892 escape(display_person),
893 893 route_path('user_profile', username=user.username),
894 894 **kwargs)
895 895 else:
896 896 return escape(display_person)
897 897
898 898
899 899 def person(author, show_attr="username_and_name"):
900 900 user = discover_user(author)
901 901 if user:
902 902 return getattr(user, show_attr)
903 903 else:
904 904 _author = author_name(author)
905 905 _email = email(author)
906 906 return _author or _email
907 907
908 908
909 909 def author_string(email):
910 910 if email:
911 911 user = User.get_by_email(email, case_insensitive=True, cache=True)
912 912 if user:
913 913 if user.first_name or user.last_name:
914 914 return '%s %s &lt;%s&gt;' % (
915 915 user.first_name, user.last_name, email)
916 916 else:
917 917 return email
918 918 else:
919 919 return email
920 920 else:
921 921 return None
922 922
923 923
924 924 def person_by_id(id_, show_attr="username_and_name"):
925 925 # attr to return from fetched user
926 926 person_getter = lambda usr: getattr(usr, show_attr)
927 927
928 928 # maybe it's an ID?
929 929 if str(id_).isdigit() or isinstance(id_, int):
930 930 id_ = int(id_)
931 931 user = User.get(id_)
932 932 if user is not None:
933 933 return person_getter(user)
934 934 return id_
935 935
936 936
937 937 def gravatar_with_user(request, author, show_disabled=False):
938 938 _render = request.get_partial_renderer(
939 939 'rhodecode:templates/base/base.mako')
940 940 return _render('gravatar_with_user', author, show_disabled=show_disabled)
941 941
942 942
943 943 tags_paterns = OrderedDict((
944 944 ('lang', (re.compile(r'\[(lang|language)\ \=\&gt;\ *([a-zA-Z\-\/\#\+\.]*)\]'),
945 945 '<div class="metatag" tag="lang">\\2</div>')),
946 946
947 947 ('see', (re.compile(r'\[see\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]'),
948 948 '<div class="metatag" tag="see">see: \\1 </div>')),
949 949
950 ('url', (re.compile(r'\[url\ \=\&gt;\ \[([a-zA-Z0-9\ \.\-\_]+)\]\((.*?)\)\]'),
951 '<div class="metatag" tag="url"> <a href="\\2">\\1</a> </div>')),
950 ('url', (re.compile(r'\[url\ \=\&gt;\ \[([a-zA-Z0-9\ \.\-\_]+)\]\((http://|https://|/)(.*?)\)\]'),
951 '<div class="metatag" tag="url"> <a href="\\2\\3">\\1</a> </div>')),
952 952
953 953 ('license', (re.compile(r'\[license\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]'),
954 954 '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/\\1">\\1</a></div>')),
955 955
956 956 ('ref', (re.compile(r'\[(requires|recommends|conflicts|base)\ \=\&gt;\ *([a-zA-Z0-9\-\/]*)\]'),
957 957 '<div class="metatag" tag="ref \\1">\\1: <a href="/\\2">\\2</a></div>')),
958 958
959 959 ('state', (re.compile(r'\[(stable|featured|stale|dead|dev|deprecated)\]'),
960 960 '<div class="metatag" tag="state \\1">\\1</div>')),
961 961
962 962 # label in grey
963 963 ('label', (re.compile(r'\[([a-z]+)\]'),
964 964 '<div class="metatag" tag="label">\\1</div>')),
965 965
966 966 # generic catch all in grey
967 967 ('generic', (re.compile(r'\[([a-zA-Z0-9\.\-\_]+)\]'),
968 968 '<div class="metatag" tag="generic">\\1</div>')),
969 969 ))
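# Note on the 'url' metatag above (added note, not part of the changeset):
# after this change the link target must start with http://, https:// or /,
# so e.g. javascript: URIs are no longer turned into links. The input is the
# already html-escaped description, hence the literal '=&gt;':
#
#   style_metatag('url', '[url =&gt; [docs](https://example.com/docs)]')
#   # -> '<div class="metatag" tag="url"> <a href="https://example.com/docs">docs</a> </div>'
#   style_metatag('url', '[url =&gt; [evil](javascript:alert(1))]')
#   # -> value returned unchanged, the pattern no longer matches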
970 970
971 971
972 972 def extract_metatags(value):
973 973 """
974 974 Extract supported meta-tags from given text value
975 975 """
976 976 tags = []
977 977 if not value:
978 978 return tags, ''
979 979
980 980 for key, val in tags_paterns.items():
981 981 pat, replace_html = val
982 982 tags.extend([(key, x.group()) for x in pat.finditer(value)])
983 983 value = pat.sub('', value)
984 984
985 985 return tags, value
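# Illustrative sketch (added note, not part of the original file): matched
# tags are returned together with the remaining text (whitespace kept as-is):
#
#   extract_metatags('[stable] my repo')
#   # -> ([('state', '[stable]')], ' my repo')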
986 986
987 987
988 988 def style_metatag(tag_type, value):
989 989 """
990 990 converts tags from value into html equivalent
991 991 """
992 992 if not value:
993 993 return ''
994 994
995 995 html_value = value
996 996 tag_data = tags_paterns.get(tag_type)
997 997 if tag_data:
998 998 pat, replace_html = tag_data
999 999 # convert to plain `unicode` instead of a markup tag to be used in
1000 1000 # regex expressions. safe_unicode doesn't work here
1001 1001 html_value = pat.sub(replace_html, unicode(value))
1002 1002
1003 1003 return html_value
1004 1004
1005 1005
1006 1006 def bool2icon(value):
1007 1007 """
1008 1008 Returns the boolean value of a given value, represented as an html element
1009 1009 with classes that render icons
1010 1010
1011 1011 :param value: given value to convert to html node
1012 1012 """
1013 1013
1014 1014 if value: # does bool conversion
1015 1015 return HTML.tag('i', class_="icon-true")
1016 1016 else: # not true as bool
1017 1017 return HTML.tag('i', class_="icon-false")
1018 1018
1019 1019
1020 1020 #==============================================================================
1021 1021 # PERMS
1022 1022 #==============================================================================
1023 1023 from rhodecode.lib.auth import HasPermissionAny, HasPermissionAll, \
1024 1024 HasRepoPermissionAny, HasRepoPermissionAll, HasRepoGroupPermissionAll, \
1025 1025 HasRepoGroupPermissionAny, HasRepoPermissionAnyApi, get_csrf_token, \
1026 1026 csrf_token_key
1027 1027
1028 1028
1029 1029 #==============================================================================
1030 1030 # GRAVATAR URL
1031 1031 #==============================================================================
1032 1032 class InitialsGravatar(object):
1033 1033 def __init__(self, email_address, first_name, last_name, size=30,
1034 1034 background=None, text_color='#fff'):
1035 1035 self.size = size
1036 1036 self.first_name = first_name
1037 1037 self.last_name = last_name
1038 1038 self.email_address = email_address
1039 1039 self.background = background or self.str2color(email_address)
1040 1040 self.text_color = text_color
1041 1041
1042 1042 def get_color_bank(self):
1043 1043 """
1044 1044 returns a predefined list of colors that gravatars can use.
1045 1045 Those are randomized distinct colors that guarantee readability and
1046 1046 uniqueness.
1047 1047
1048 1048 generated with: http://phrogz.net/css/distinct-colors.html
1049 1049 """
1050 1050 return [
1051 1051 '#bf3030', '#a67f53', '#00ff00', '#5989b3', '#392040', '#d90000',
1052 1052 '#402910', '#204020', '#79baf2', '#a700b3', '#bf6060', '#7f5320',
1053 1053 '#008000', '#003059', '#ee00ff', '#ff0000', '#8c4b00', '#007300',
1054 1054 '#005fb3', '#de73e6', '#ff4040', '#ffaa00', '#3df255', '#203140',
1055 1055 '#47004d', '#591616', '#664400', '#59b365', '#0d2133', '#83008c',
1056 1056 '#592d2d', '#bf9f60', '#73e682', '#1d3f73', '#73006b', '#402020',
1057 1057 '#b2862d', '#397341', '#597db3', '#e600d6', '#a60000', '#736039',
1058 1058 '#00b318', '#79aaf2', '#330d30', '#ff8080', '#403010', '#16591f',
1059 1059 '#002459', '#8c4688', '#e50000', '#ffbf40', '#00732e', '#102340',
1060 1060 '#bf60ac', '#8c4646', '#cc8800', '#00a642', '#1d3473', '#b32d98',
1061 1061 '#660e00', '#ffd580', '#80ffb2', '#7391e6', '#733967', '#d97b6c',
1062 1062 '#8c5e00', '#59b389', '#3967e6', '#590047', '#73281d', '#665200',
1063 1063 '#00e67a', '#2d50b3', '#8c2377', '#734139', '#b2982d', '#16593a',
1064 1064 '#001859', '#ff00aa', '#a65e53', '#ffcc00', '#0d3321', '#2d3959',
1065 1065 '#731d56', '#401610', '#4c3d00', '#468c6c', '#002ca6', '#d936a3',
1066 1066 '#d94c36', '#403920', '#36d9a3', '#0d1733', '#592d4a', '#993626',
1067 1067 '#cca300', '#00734d', '#46598c', '#8c005e', '#7f1100', '#8c7000',
1068 1068 '#00a66f', '#7382e6', '#b32d74', '#d9896c', '#ffe680', '#1d7362',
1069 1069 '#364cd9', '#73003d', '#d93a00', '#998a4d', '#59b3a1', '#5965b3',
1070 1070 '#e5007a', '#73341d', '#665f00', '#00b38f', '#0018b3', '#59163a',
1071 1071 '#b2502d', '#bfb960', '#00ffcc', '#23318c', '#a6537f', '#734939',
1072 1072 '#b2a700', '#104036', '#3d3df2', '#402031', '#e56739', '#736f39',
1073 1073 '#79f2ea', '#000059', '#401029', '#4c1400', '#ffee00', '#005953',
1074 1074 '#101040', '#990052', '#402820', '#403d10', '#00ffee', '#0000d9',
1075 1075 '#ff80c4', '#a66953', '#eeff00', '#00ccbe', '#8080ff', '#e673a1',
1076 1076 '#a62c00', '#474d00', '#1a3331', '#46468c', '#733950', '#662900',
1077 1077 '#858c23', '#238c85', '#0f0073', '#b20047', '#d9986c', '#becc00',
1078 1078 '#396f73', '#281d73', '#ff0066', '#ff6600', '#dee673', '#59adb3',
1079 1079 '#6559b3', '#590024', '#b2622d', '#98b32d', '#36ced9', '#332d59',
1080 1080 '#40001a', '#733f1d', '#526600', '#005359', '#242040', '#bf6079',
1081 1081 '#735039', '#cef23d', '#007780', '#5630bf', '#66001b', '#b24700',
1082 1082 '#acbf60', '#1d6273', '#25008c', '#731d34', '#a67453', '#50592d',
1083 1083 '#00ccff', '#6600ff', '#ff0044', '#4c1f00', '#8a994d', '#79daf2',
1084 1084 '#a173e6', '#d93662', '#402310', '#aaff00', '#2d98b3', '#8c40ff',
1085 1085 '#592d39', '#ff8c40', '#354020', '#103640', '#1a0040', '#331a20',
1086 1086 '#331400', '#334d00', '#1d5673', '#583973', '#7f0022', '#4c3626',
1087 1087 '#88cc00', '#36a3d9', '#3d0073', '#d9364c', '#33241a', '#698c23',
1088 1088 '#5995b3', '#300059', '#e57382', '#7f3300', '#366600', '#00aaff',
1089 1089 '#3a1659', '#733941', '#663600', '#74b32d', '#003c59', '#7f53a6',
1090 1090 '#73000f', '#ff8800', '#baf279', '#79caf2', '#291040', '#a6293a',
1091 1091 '#b2742d', '#587339', '#0077b3', '#632699', '#400009', '#d9a66c',
1092 1092 '#294010', '#2d4a59', '#aa00ff', '#4c131b', '#b25f00', '#5ce600',
1093 1093 '#267399', '#a336d9', '#990014', '#664e33', '#86bf60', '#0088ff',
1094 1094 '#7700b3', '#593a16', '#073300', '#1d4b73', '#ac60bf', '#e59539',
1095 1095 '#4f8c46', '#368dd9', '#5c0073'
1096 1096 ]
1097 1097
1098 1098 def rgb_to_hex_color(self, rgb_tuple):
1099 1099 """
1100 1100 Converts a passed rgb_tuple to a hex color.
1101 1101
1102 1102 :param rgb_tuple: tuple with 3 ints represents rgb color space
1103 1103 """
1104 1104 return '#' + ("".join(map(chr, rgb_tuple)).encode('hex'))
1105 1105
1106 1106 def email_to_int_list(self, email_str):
1107 1107 """
1108 1108 Get every byte of the hex digest value of the email and turn it into an integer.
1109 1109 It will always be between 0-255
1110 1110 """
1111 1111 digest = md5_safe(email_str.lower())
1112 1112 return [int(digest[i * 2:i * 2 + 2], 16) for i in range(16)]
1113 1113
1114 1114 def pick_color_bank_index(self, email_str, color_bank):
1115 1115 return self.email_to_int_list(email_str)[0] % len(color_bank)
1116 1116
1117 1117 def str2color(self, email_str):
1118 1118 """
1119 1119 Tries to map an email to a color using a stable algorithm
1120 1120
1121 1121 :param email_str:
1122 1122 """
1123 1123 color_bank = self.get_color_bank()
1124 1124 # pick position (modulo its length so we always find it in the
1125 1125 # bank even if it's smaller than 256 values)
1126 1126 pos = self.pick_color_bank_index(email_str, color_bank)
1127 1127 return color_bank[pos]
1128 1128
1129 1129 def normalize_email(self, email_address):
1130 1130 import unicodedata
1131 1131 # default host used to fill in the fake/missing email
1132 1132 default_host = u'localhost'
1133 1133
1134 1134 if not email_address:
1135 1135 email_address = u'%s@%s' % (User.DEFAULT_USER, default_host)
1136 1136
1137 1137 email_address = safe_unicode(email_address)
1138 1138
1139 1139 if u'@' not in email_address:
1140 1140 email_address = u'%s@%s' % (email_address, default_host)
1141 1141
1142 1142 if email_address.endswith(u'@'):
1143 1143 email_address = u'%s%s' % (email_address, default_host)
1144 1144
1145 1145 email_address = unicodedata.normalize('NFKD', email_address)\
1146 1146 .encode('ascii', 'ignore')
1147 1147 return email_address
1148 1148
1149 1149 def get_initials(self):
1150 1150 """
1151 1151 Returns 2-letter initials calculated based on the input.
1152 1152 The algorithm picks the given email address, and takes the first letter
1153 1153 of the part before @, and then the first letter of the server name. In case
1154 1154 the part before @ is in the format `somestring.somestring2`, it replaces
1155 1155 the server letter with the first letter of somestring2
1156 1156
1157 1157 In case the function was initialized with both first and last name, this
1158 1158 overrides the extraction from email, using the first letter of the first and
1159 1159 last name. We add special logic to that functionality: in case the full name
1160 1160 is compound, like Guido Von Rossum, we use the last part of the last name
1161 1161 (Von Rossum), picking `R`.
1162 1162
1163 1163 The function also normalizes non-ascii characters to their ascii
1164 1164 representation, eg. Ą => A
1165 1165 """
1166 1166 import unicodedata
1167 1167 # replace non-ascii to ascii
1168 1168 first_name = unicodedata.normalize(
1169 1169 'NFKD', safe_unicode(self.first_name)).encode('ascii', 'ignore')
1170 1170 last_name = unicodedata.normalize(
1171 1171 'NFKD', safe_unicode(self.last_name)).encode('ascii', 'ignore')
1172 1172
1173 1173 # do NFKD encoding, and also make sure email has proper format
1174 1174 email_address = self.normalize_email(self.email_address)
1175 1175
1176 1176 # first push the email initials
1177 1177 prefix, server = email_address.split('@', 1)
1178 1178
1179 1179 # check if prefix is maybe a 'first_name.last_name' syntax
1180 1180 _dot_split = prefix.rsplit('.', 1)
1181 1181 if len(_dot_split) == 2 and _dot_split[1]:
1182 1182 initials = [_dot_split[0][0], _dot_split[1][0]]
1183 1183 else:
1184 1184 initials = [prefix[0], server[0]]
1185 1185
1186 1186 # then try to replace either first_name or last_name
1187 1187 fn_letter = (first_name or " ")[0].strip()
1188 1188 ln_letter = (last_name.split(' ', 1)[-1] or " ")[0].strip()
1189 1189
1190 1190 if fn_letter:
1191 1191 initials[0] = fn_letter
1192 1192
1193 1193 if ln_letter:
1194 1194 initials[1] = ln_letter
1195 1195
1196 1196 return ''.join(initials).upper()
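# Illustrative sketch (added note, not part of the original file):
#
#   InitialsGravatar('john.doe@example.com', '', '').get_initials()    # -> 'JD'
#   InitialsGravatar('x@y.com', 'Guido', 'Von Rossum').get_initials()  # -> 'GR'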
1197 1197
1198 1198 def get_img_data_by_type(self, font_family, img_type):
1199 1199 default_user = """
1200 1200 <svg xmlns="http://www.w3.org/2000/svg"
1201 1201 version="1.1" x="0px" y="0px" width="{size}" height="{size}"
1202 1202 viewBox="-15 -10 439.165 429.164"
1203 1203
1204 1204 xml:space="preserve"
1205 1205 style="background:{background};" >
1206 1206
1207 1207 <path d="M204.583,216.671c50.664,0,91.74-48.075,
1208 1208 91.74-107.378c0-82.237-41.074-107.377-91.74-107.377
1209 1209 c-50.668,0-91.74,25.14-91.74,107.377C112.844,
1210 1210 168.596,153.916,216.671,
1211 1211 204.583,216.671z" fill="{text_color}"/>
1212 1212 <path d="M407.164,374.717L360.88,
1213 1213 270.454c-2.117-4.771-5.836-8.728-10.465-11.138l-71.83-37.392
1214 1214 c-1.584-0.823-3.502-0.663-4.926,0.415c-20.316,
1215 1215 15.366-44.203,23.488-69.076,23.488c-24.877,
1216 1216 0-48.762-8.122-69.078-23.488
1217 1217 c-1.428-1.078-3.346-1.238-4.93-0.415L58.75,
1218 1218 259.316c-4.631,2.41-8.346,6.365-10.465,11.138L2.001,374.717
1219 1219 c-3.191,7.188-2.537,15.412,1.75,22.005c4.285,
1220 1220 6.592,11.537,10.526,19.4,10.526h362.861c7.863,0,15.117-3.936,
1221 1221 19.402-10.527 C409.699,390.129,
1222 1222 410.355,381.902,407.164,374.717z" fill="{text_color}"/>
1223 1223 </svg>""".format(
1224 1224 size=self.size,
1225 1225 background='#979797', # @grey4
1226 1226 text_color=self.text_color,
1227 1227 font_family=font_family)
1228 1228
1229 1229 return {
1230 1230 "default_user": default_user
1231 1231 }[img_type]
1232 1232
1233 1233 def get_img_data(self, svg_type=None):
1234 1234 """
1235 1235 generates the svg metadata for image
1236 1236 """
1237 1237
1238 1238 font_family = ','.join([
1239 1239 'proximanovaregular',
1240 1240 'Proxima Nova Regular',
1241 1241 'Proxima Nova',
1242 1242 'Arial',
1243 1243 'Lucida Grande',
1244 1244 'sans-serif'
1245 1245 ])
1246 1246 if svg_type:
1247 1247 return self.get_img_data_by_type(font_family, svg_type)
1248 1248
1249 1249 initials = self.get_initials()
1250 1250 img_data = """
1251 1251 <svg xmlns="http://www.w3.org/2000/svg" pointer-events="none"
1252 1252 width="{size}" height="{size}"
1253 1253 style="width: 100%; height: 100%; background-color: {background}"
1254 1254 viewBox="0 0 {size} {size}">
1255 1255 <text text-anchor="middle" y="50%" x="50%" dy="0.35em"
1256 1256 pointer-events="auto" fill="{text_color}"
1257 1257 font-family="{font_family}"
1258 1258 style="font-weight: 400; font-size: {f_size}px;">{text}
1259 1259 </text>
1260 1260 </svg>""".format(
1261 1261 size=self.size,
1262 1262 f_size=self.size/1.85, # scale the text inside the box nicely
1263 1263 background=self.background,
1264 1264 text_color=self.text_color,
1265 1265 text=initials.upper(),
1266 1266 font_family=font_family)
1267 1267
1268 1268 return img_data
1269 1269
1270 1270 def generate_svg(self, svg_type=None):
1271 1271 img_data = self.get_img_data(svg_type)
1272 1272 return "data:image/svg+xml;base64,%s" % img_data.encode('base64')
1273 1273
1274 1274
1275 1275 def initials_gravatar(email_address, first_name, last_name, size=30):
1276 1276 svg_type = None
1277 1277 if email_address == User.DEFAULT_USER_EMAIL:
1278 1278 svg_type = 'default_user'
1279 1279 klass = InitialsGravatar(email_address, first_name, last_name, size)
1280 1280 return klass.generate_svg(svg_type=svg_type)
1281 1281
1282 1282
1283 1283 def gravatar_url(email_address, size=30, request=None):
1284 1284 request = get_current_request()
1285 1285 _use_gravatar = request.call_context.visual.use_gravatar
1286 1286 _gravatar_url = request.call_context.visual.gravatar_url
1287 1287
1288 1288 _gravatar_url = _gravatar_url or User.DEFAULT_GRAVATAR_URL
1289 1289
1290 1290 email_address = email_address or User.DEFAULT_USER_EMAIL
1291 1291 if isinstance(email_address, unicode):
1292 1292 # hashlib crashes on unicode items
1293 1293 email_address = safe_str(email_address)
1294 1294
1295 1295 # empty email or default user
1296 1296 if not email_address or email_address == User.DEFAULT_USER_EMAIL:
1297 1297 return initials_gravatar(User.DEFAULT_USER_EMAIL, '', '', size=size)
1298 1298
1299 1299 if _use_gravatar:
1300 1300 # TODO: Disuse pyramid thread locals. Think about another solution to
1301 1301 # get the host and schema here.
1302 1302 request = get_current_request()
1303 1303 tmpl = safe_str(_gravatar_url)
1304 1304 tmpl = tmpl.replace('{email}', email_address)\
1305 1305 .replace('{md5email}', md5_safe(email_address.lower())) \
1306 1306 .replace('{netloc}', request.host)\
1307 1307 .replace('{scheme}', request.scheme)\
1308 1308 .replace('{size}', safe_str(size))
1309 1309 return tmpl
1310 1310 else:
1311 1311 return initials_gravatar(email_address, '', '', size=size)
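# Illustrative sketch (added note, not part of the original file): with
# use_gravatar enabled and a hypothetical template like
# 'https://secure.gravatar.com/avatar/{md5email}?s={size}', a call such as
# gravatar_url('jane@example.com', size=30) fills in {email}, {md5email},
# {netloc}, {scheme} and {size} from the email and the current request;
# otherwise an initials SVG data-uri is returned.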
1312 1312
1313 1313
1314 1314 class Page(_Page):
1315 1315 """
1316 1316 Custom pager to match rendering style with paginator
1317 1317 """
1318 1318
1319 1319 def _get_pos(self, cur_page, max_page, items):
1320 1320 edge = (items / 2) + 1
1321 1321 if (cur_page <= edge):
1322 1322 radius = max(items / 2, items - cur_page)
1323 1323 elif (max_page - cur_page) < edge:
1324 1324 radius = (items - 1) - (max_page - cur_page)
1325 1325 else:
1326 1326 radius = items / 2
1327 1327
1328 1328 left = max(1, (cur_page - (radius)))
1329 1329 right = min(max_page, cur_page + (radius))
1330 1330 return left, cur_page, right
1331 1331
1332 1332 def _range(self, regexp_match):
1333 1333 """
1334 1334 Return range of linked pages (e.g. '1 2 [3] 4 5 6 7 8').
1335 1335
1336 1336 Arguments:
1337 1337
1338 1338 regexp_match
1339 1339 A "re" (regular expressions) match object containing the
1340 1340 radius of linked pages around the current page in
1341 1341 regexp_match.group(1) as a string
1342 1342
1343 1343 This function is supposed to be called as a callable in
1344 1344 re.sub.
1345 1345
1346 1346 """
1347 1347 radius = int(regexp_match.group(1))
1348 1348
1349 1349 # Compute the first and last page number within the radius
1350 1350 # e.g. '1 .. 5 6 [7] 8 9 .. 12'
1351 1351 # -> leftmost_page = 5
1352 1352 # -> rightmost_page = 9
1353 1353 leftmost_page, _cur, rightmost_page = self._get_pos(self.page,
1354 1354 self.last_page,
1355 1355 (radius * 2) + 1)
1356 1356 nav_items = []
1357 1357
1358 1358 # Create a link to the first page (unless we are on the first page
1359 1359 # or there would be no need to insert '..' spacers)
1360 1360 if self.page != self.first_page and self.first_page < leftmost_page:
1361 1361 nav_items.append(self._pagerlink(self.first_page, self.first_page))
1362 1362
1363 1363 # Insert dots if there are pages between the first page
1364 1364 # and the currently displayed page range
1365 1365 if leftmost_page - self.first_page > 1:
1366 1366 # Wrap in a SPAN tag if dotdot_attr is set
1367 1367 text = '..'
1368 1368 if self.dotdot_attr:
1369 1369 text = HTML.span(c=text, **self.dotdot_attr)
1370 1370 nav_items.append(text)
1371 1371
1372 1372 for thispage in xrange(leftmost_page, rightmost_page + 1):
1373 1373 # Highlight the current page number and do not use a link
1374 1374 if thispage == self.page:
1375 1375 text = '%s' % (thispage,)
1376 1376 # Wrap in a SPAN tag if curpage_attr is set
1377 1377 if self.curpage_attr:
1378 1378 text = HTML.span(c=text, **self.curpage_attr)
1379 1379 nav_items.append(text)
1380 1380 # Otherwise create just a link to that page
1381 1381 else:
1382 1382 text = '%s' % (thispage,)
1383 1383 nav_items.append(self._pagerlink(thispage, text))
1384 1384
1385 1385 # Insert dots if there are pages between the displayed
1386 1386 # page numbers and the end of the page range
1387 1387 if self.last_page - rightmost_page > 1:
1388 1388 text = '..'
1389 1389 # Wrap in a SPAN tag if dotdot_attr is set
1390 1390 if self.dotdot_attr:
1391 1391 text = HTML.span(c=text, **self.dotdot_attr)
1392 1392 nav_items.append(text)
1393 1393
1394 1394 # Create a link to the very last page (unless we are on the last
1395 1395 # page or there would be no need to insert '..' spacers)
1396 1396 if self.page != self.last_page and rightmost_page < self.last_page:
1397 1397 nav_items.append(self._pagerlink(self.last_page, self.last_page))
1398 1398
1399 1399 ## prerender links
1400 1400 #_page_link = url.current()
1401 1401 #nav_items.append(literal('<link rel="prerender" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
1402 1402 #nav_items.append(literal('<link rel="prefetch" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
1403 1403 return self.separator.join(nav_items)
1404 1404
1405 1405 def pager(self, format='~2~', page_param='page', partial_param='partial',
1406 1406 show_if_single_page=False, separator=' ', onclick=None,
1407 1407 symbol_first='<<', symbol_last='>>',
1408 1408 symbol_previous='<', symbol_next='>',
1409 1409 link_attr={'class': 'pager_link', 'rel': 'prerender'},
1410 1410 curpage_attr={'class': 'pager_curpage'},
1411 1411 dotdot_attr={'class': 'pager_dotdot'}, **kwargs):
1412 1412
1413 1413 self.curpage_attr = curpage_attr
1414 1414 self.separator = separator
1415 1415 self.pager_kwargs = kwargs
1416 1416 self.page_param = page_param
1417 1417 self.partial_param = partial_param
1418 1418 self.onclick = onclick
1419 1419 self.link_attr = link_attr
1420 1420 self.dotdot_attr = dotdot_attr
1421 1421
1422 1422 # Don't show navigator if there is no more than one page
1423 1423 if self.page_count == 0 or (self.page_count == 1 and not show_if_single_page):
1424 1424 return ''
1425 1425
1426 1426 from string import Template
1427 1427 # Replace ~...~ in token format by range of pages
1428 1428 result = re.sub(r'~(\d+)~', self._range, format)
1429 1429
1430 1430 # Interpolate '%' variables
1431 1431 result = Template(result).safe_substitute({
1432 1432 'first_page': self.first_page,
1433 1433 'last_page': self.last_page,
1434 1434 'page': self.page,
1435 1435 'page_count': self.page_count,
1436 1436 'items_per_page': self.items_per_page,
1437 1437 'first_item': self.first_item,
1438 1438 'last_item': self.last_item,
1439 1439 'item_count': self.item_count,
1440 1440 'link_first': self.page > self.first_page and \
1441 1441 self._pagerlink(self.first_page, symbol_first) or '',
1442 1442 'link_last': self.page < self.last_page and \
1443 1443 self._pagerlink(self.last_page, symbol_last) or '',
1444 1444 'link_previous': self.previous_page and \
1445 1445 self._pagerlink(self.previous_page, symbol_previous) \
1446 1446 or HTML.span(symbol_previous, class_="pg-previous disabled"),
1447 1447 'link_next': self.next_page and \
1448 1448 self._pagerlink(self.next_page, symbol_next) \
1449 1449 or HTML.span(symbol_next, class_="pg-next disabled")
1450 1450 })
1451 1451
1452 1452 return literal(result)
1453 1453
1454 1454
1455 1455 #==============================================================================
1456 1456 # REPO PAGER, PAGER FOR REPOSITORY
1457 1457 #==============================================================================
1458 1458 class RepoPage(Page):
1459 1459
1460 1460 def __init__(self, collection, page=1, items_per_page=20,
1461 1461 item_count=None, url=None, **kwargs):
1462 1462
1463 1463 """Create a "RepoPage" instance. special pager for paging
1464 1464 repository
1465 1465 """
1466 1466 self._url_generator = url
1467 1467
1468 1468 # Save the kwargs class-wide so they can be used in the pager() method
1469 1469 self.kwargs = kwargs
1470 1470
1471 1471 # Save a reference to the collection
1472 1472 self.original_collection = collection
1473 1473
1474 1474 self.collection = collection
1475 1475
1476 1476 # The self.page is the number of the current page.
1477 1477 # The first page has the number 1!
1478 1478 try:
1479 1479 self.page = int(page) # make it int() if we get it as a string
1480 1480 except (ValueError, TypeError):
1481 1481 self.page = 1
1482 1482
1483 1483 self.items_per_page = items_per_page
1484 1484
1485 1485 # Unless the user tells us how many items the collection has
1486 1486 # we calculate that ourselves.
1487 1487 if item_count is not None:
1488 1488 self.item_count = item_count
1489 1489 else:
1490 1490 self.item_count = len(self.collection)
1491 1491
1492 1492 # Compute the number of the first and last available page
1493 1493 if self.item_count > 0:
1494 1494 self.first_page = 1
1495 1495 self.page_count = int(math.ceil(float(self.item_count) /
1496 1496 self.items_per_page))
1497 1497 self.last_page = self.first_page + self.page_count - 1
1498 1498
1499 1499 # Make sure that the requested page number is in the range of
1500 1500 # valid pages
1501 1501 if self.page > self.last_page:
1502 1502 self.page = self.last_page
1503 1503 elif self.page < self.first_page:
1504 1504 self.page = self.first_page
1505 1505
1506 1506 # Note: the number of items on this page can be less than
1507 1507 # items_per_page if the last page is not full
1508 1508 self.first_item = max(0, (self.item_count) - (self.page *
1509 1509 items_per_page))
1510 1510 self.last_item = ((self.item_count - 1) - items_per_page *
1511 1511 (self.page - 1))
1512 1512
1513 1513 self.items = list(self.collection[self.first_item:self.last_item + 1])
1514 1514
1515 1515 # Links to previous and next page
1516 1516 if self.page > self.first_page:
1517 1517 self.previous_page = self.page - 1
1518 1518 else:
1519 1519 self.previous_page = None
1520 1520
1521 1521 if self.page < self.last_page:
1522 1522 self.next_page = self.page + 1
1523 1523 else:
1524 1524 self.next_page = None
1525 1525
1526 1526 # No items available
1527 1527 else:
1528 1528 self.first_page = None
1529 1529 self.page_count = 0
1530 1530 self.last_page = None
1531 1531 self.first_item = None
1532 1532 self.last_item = None
1533 1533 self.previous_page = None
1534 1534 self.next_page = None
1535 1535 self.items = []
1536 1536
1537 1537 # This is a subclass of the 'list' type. Initialise the list now.
1538 1538 list.__init__(self, reversed(self.items))
1539 1539
1540 1540
1541 1541 def breadcrumb_repo_link(repo):
1542 1542 """
1543 1543 Makes a breadcrumbs path link to repo
1544 1544
1545 1545 ex::
1546 1546 group >> subgroup >> repo
1547 1547
1548 1548 :param repo: a Repository instance
1549 1549 """
1550 1550
1551 1551 path = [
1552 1552 link_to(group.name, route_path('repo_group_home', repo_group_name=group.group_name))
1553 1553 for group in repo.groups_with_parents
1554 1554 ] + [
1555 1555 link_to(repo.just_name, route_path('repo_summary', repo_name=repo.repo_name))
1556 1556 ]
1557 1557
1558 1558 return literal(' &raquo; '.join(path))
1559 1559
1560 1560
1561 1561 def format_byte_size_binary(file_size):
1562 1562 """
1563 1563 Formats file/folder sizes using binary (base 2) units.
1564 1564 """
1565 1565 if file_size is None:
1566 1566 file_size = 0
1567 1567
1568 1568 formatted_size = format_byte_size(file_size, binary=True)
1569 1569 return formatted_size
1570 1570
1571 1571
1572 1572 def urlify_text(text_, safe=True):
1573 1573 """
1574 1574 Extract URLs from text and make HTML links out of them
1575 1575
1576 1576 :param text_:
1577 1577 """
1578 1578
1579 1579 url_pat = re.compile(r'''(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@#.&+]'''
1580 1580 '''|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)''')
1581 1581
1582 1582 def url_func(match_obj):
1583 1583 url_full = match_obj.groups()[0]
1584 1584 return '<a href="%(url)s">%(url)s</a>' % ({'url': url_full})
1585 1585 _newtext = url_pat.sub(url_func, text_)
1586 1586 if safe:
1587 1587 return literal(_newtext)
1588 1588 return _newtext
1589 1589
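# A minimal usage sketch for urlify_text (the input text is illustrative):
#
#   urlify_text('see https://example.com for details')
#   # -> literal('see <a href="https://example.com">https://example.com</a> for details')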
1590 1590
1591 1591 def urlify_commits(text_, repository):
1592 1592 """
1593 1593 Extract commit ids from text and make links out of them
1594 1594
1595 1595 :param text_:
1596 1596 :param repository: repo name to build the URL with
1597 1597 """
1598 1598
1599 1599 URL_PAT = re.compile(r'(^|\s)([0-9a-fA-F]{12,40})($|\s)')
1600 1600
1601 1601 def url_func(match_obj):
1602 1602 commit_id = match_obj.groups()[1]
1603 1603 pref = match_obj.groups()[0]
1604 1604 suf = match_obj.groups()[2]
1605 1605
1606 1606 tmpl = (
1607 1607 '%(pref)s<a class="%(cls)s" href="%(url)s">'
1608 1608 '%(commit_id)s</a>%(suf)s'
1609 1609 )
1610 1610 return tmpl % {
1611 1611 'pref': pref,
1612 1612 'cls': 'revision-link',
1613 1613 'url': route_url('repo_commit', repo_name=repository,
1614 1614 commit_id=commit_id),
1615 1615 'commit_id': commit_id,
1616 1616 'suf': suf
1617 1617 }
1618 1618
1619 1619 newtext = URL_PAT.sub(url_func, text_)
1620 1620
1621 1621 return newtext
1622 1622
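# A minimal usage sketch for urlify_commits; the href is left out because it
# depends on how the 'repo_commit' route is mounted:
#
#   urlify_commits('fixed in deadbeefcafe', 'some-repo')
#   # -> 'fixed in <a class="revision-link" href="...">deadbeefcafe</a>'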
1623 1623
1624 1624 def _process_url_func(match_obj, repo_name, uid, entry,
1625 1625 return_raw_data=False, link_format='html'):
1626 1626 pref = ''
1627 1627 if match_obj.group().startswith(' '):
1628 1628 pref = ' '
1629 1629
1630 1630 issue_id = ''.join(match_obj.groups())
1631 1631
1632 1632 if link_format == 'html':
1633 1633 tmpl = (
1634 1634 '%(pref)s<a class="%(cls)s" href="%(url)s">'
1635 1635 '%(issue-prefix)s%(id-repr)s'
1636 1636 '</a>')
1637 1637 elif link_format == 'rst':
1638 1638 tmpl = '`%(issue-prefix)s%(id-repr)s <%(url)s>`_'
1639 1639 elif link_format == 'markdown':
1640 1640 tmpl = '[%(issue-prefix)s%(id-repr)s](%(url)s)'
1641 1641 else:
1642 1642 raise ValueError('Bad link_format:{}'.format(link_format))
1643 1643
1644 1644 (repo_name_cleaned,
1645 1645 parent_group_name) = RepoGroupModel().\
1646 1646 _get_group_name_and_parent(repo_name)
1647 1647
1648 1648 # variables replacement
1649 1649 named_vars = {
1650 1650 'id': issue_id,
1651 1651 'repo': repo_name,
1652 1652 'repo_name': repo_name_cleaned,
1653 1653 'group_name': parent_group_name
1654 1654 }
1655 1655 # named regex variables
1656 1656 named_vars.update(match_obj.groupdict())
1657 1657 _url = string.Template(entry['url']).safe_substitute(**named_vars)
1658 1658
1659 1659 data = {
1660 1660 'pref': pref,
1661 1661 'cls': 'issue-tracker-link',
1662 1662 'url': _url,
1663 1663 'id-repr': issue_id,
1664 1664 'issue-prefix': entry['pref'],
1665 1665 'serv': entry['url'],
1666 1666 }
1667 1667 if return_raw_data:
1668 1668 return {
1669 1669 'id': issue_id,
1670 1670 'url': _url
1671 1671 }
1672 1672 return tmpl % data
1673 1673
1674 1674
1675 1675 def process_patterns(text_string, repo_name, link_format='html'):
1676 1676 allowed_formats = ['html', 'rst', 'markdown']
1677 1677 if link_format not in allowed_formats:
1678 1678 raise ValueError('Link format can only be one of: {}, got: {}'.format(
1679 1679 allowed_formats, link_format))
1680 1680
1681 1681 repo = None
1682 1682 if repo_name:
1683 1683 # Retrieve the repo so an invalid repo_name does not explode inside
1684 1684 # IssueTrackerSettingsModel, while still passing the invalid name further down
1685 1685 repo = Repository.get_by_repo_name(repo_name, cache=True)
1686 1686
1687 1687 settings_model = IssueTrackerSettingsModel(repo=repo)
1688 1688 active_entries = settings_model.get_settings(cache=True)
1689 1689
1690 1690 issues_data = []
1691 1691 newtext = text_string
1692 1692
1693 1693 for uid, entry in active_entries.items():
1694 1694 log.debug('found issue tracker entry with uid %s' % (uid,))
1695 1695
1696 1696 if not (entry['pat'] and entry['url']):
1697 1697 log.debug('skipping due to missing data')
1698 1698 continue
1699 1699
1700 1700 log.debug('issue tracker entry: uid: `%s` PAT:%s URL:%s PREFIX:%s'
1701 1701 % (uid, entry['pat'], entry['url'], entry['pref']))
1702 1702
1703 1703 try:
1704 1704 pattern = re.compile(r'%s' % entry['pat'])
1705 1705 except re.error:
1706 1706 log.exception(
1707 1707 'issue tracker pattern: `%s` failed to compile',
1708 1708 entry['pat'])
1709 1709 continue
1710 1710
1711 1711 data_func = partial(
1712 1712 _process_url_func, repo_name=repo_name, entry=entry, uid=uid,
1713 1713 return_raw_data=True)
1714 1714
1715 1715 for match_obj in pattern.finditer(text_string):
1716 1716 issues_data.append(data_func(match_obj))
1717 1717
1718 1718 url_func = partial(
1719 1719 _process_url_func, repo_name=repo_name, entry=entry, uid=uid,
1720 1720 link_format=link_format)
1721 1721
1722 1722 newtext = pattern.sub(url_func, newtext)
1723 1723 log.debug('processed prefix:uid `%s`' % (uid,))
1724 1724
1725 1725 return newtext, issues_data
1726 1726
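# A minimal sketch of what process_patterns consumes and returns, assuming a
# single, hypothetical issue tracker entry is configured in the settings:
#
#   entry = {'pat': r'#(?P<issue_id>\d+)',
#            'url': 'https://bugs.example.com/${id}',
#            'pref': '#'}
#   # with such an entry, process_patterns('fixes #42', 'repo') would yield:
#   #   newtext     -> 'fixes <a class="issue-tracker-link"
#   #                   href="https://bugs.example.com/42">#42</a>'
#   #   issues_data -> [{'id': '42', 'url': 'https://bugs.example.com/42'}]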
1727 1727
1728 1728 def urlify_commit_message(commit_text, repository=None):
1729 1729 """
1730 1730 Parses given text message and makes proper links.
1731 1731 issues are linked to given issue-server, and rest is a commit link
1732 1732
1733 1733 :param commit_text:
1734 1734 :param repository:
1735 1735 """
1736 1736 def escaper(string):
1737 1737 return string.replace('<', '&lt;').replace('>', '&gt;')
1738 1738
1739 1739 newtext = escaper(commit_text)
1740 1740
1741 1741 # extract http/https links and make them real urls
1742 1742 newtext = urlify_text(newtext, safe=False)
1743 1743
1744 1744 # urlify commits - extract commit ids and make links out of them, if we
1745 1745 # have the repository scope present.
1746 1746 if repository:
1747 1747 newtext = urlify_commits(newtext, repository)
1748 1748
1749 1749 # process issue tracker patterns
1750 1750 newtext, issues = process_patterns(newtext, repository or '')
1751 1751
1752 1752 return literal(newtext)
1753 1753
1754 1754
1755 1755 def render_binary(repo_name, file_obj):
1756 1756 """
1757 1757 Choose how to render a binary file
1758 1758 """
1759 1759 filename = file_obj.name
1760 1760
1761 1761 # images
1762 1762 for ext in ['*.png', '*.jpg', '*.ico', '*.gif']:
1763 1763 if fnmatch.fnmatch(filename, pat=ext):
1764 1764 alt = filename
1765 1765 src = route_path(
1766 1766 'repo_file_raw', repo_name=repo_name,
1767 1767 commit_id=file_obj.commit.raw_id, f_path=file_obj.path)
1768 1768 return literal('<img class="rendered-binary" alt="{}" src="{}">'.format(alt, src))
1769 1769
1770 1770
1771 1771 def renderer_from_filename(filename, exclude=None):
1772 1772 """
1773 1773 Choose a renderer based on filename; this works only for text-based files
1774 1774 """
1775 1775
1776 1776 # ipython
1777 1777 for ext in ['*.ipynb']:
1778 1778 if fnmatch.fnmatch(filename, pat=ext):
1779 1779 return 'jupyter'
1780 1780
1781 1781 is_markup = MarkupRenderer.renderer_from_filename(filename, exclude=exclude)
1782 1782 if is_markup:
1783 1783 return is_markup
1784 1784 return None
1785 1785
1786 1786
1787 1787 def render(source, renderer='rst', mentions=False, relative_urls=None,
1788 1788 repo_name=None):
1789 1789
1790 1790 def maybe_convert_relative_links(html_source):
1791 1791 if relative_urls:
1792 1792 return relative_links(html_source, relative_urls)
1793 1793 return html_source
1794 1794
1795 1795 if renderer == 'rst':
1796 1796 if repo_name:
1797 1797 # process patterns on comments if we pass in repo name
1798 1798 source, issues = process_patterns(
1799 1799 source, repo_name, link_format='rst')
1800 1800
1801 1801 return literal(
1802 1802 '<div class="rst-block">%s</div>' %
1803 1803 maybe_convert_relative_links(
1804 1804 MarkupRenderer.rst(source, mentions=mentions)))
1805 1805 elif renderer == 'markdown':
1806 1806 if repo_name:
1807 1807 # process patterns on comments if we pass in repo name
1808 1808 source, issues = process_patterns(
1809 1809 source, repo_name, link_format='markdown')
1810 1810
1811 1811 return literal(
1812 1812 '<div class="markdown-block">%s</div>' %
1813 1813 maybe_convert_relative_links(
1814 1814 MarkupRenderer.markdown(source, flavored=True,
1815 1815 mentions=mentions)))
1816 1816 elif renderer == 'jupyter':
1817 1817 return literal(
1818 1818 '<div class="ipynb">%s</div>' %
1819 1819 maybe_convert_relative_links(
1820 1820 MarkupRenderer.jupyter(source)))
1821 1821
1822 1822 # None means just show the file-source
1823 1823 return None
1824 1824
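# A minimal usage sketch for render (output bodies are abbreviated):
#
#   render('# Hello', renderer='markdown')
#   # -> literal('<div class="markdown-block">...rendered markdown...</div>')
#   render(source, renderer='unknown')
#   # -> None, meaning the caller should just show the raw file source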
1825 1825
1826 1826 def commit_status(repo, commit_id):
1827 1827 return ChangesetStatusModel().get_status(repo, commit_id)
1828 1828
1829 1829
1830 1830 def commit_status_lbl(commit_status):
1831 1831 return dict(ChangesetStatus.STATUSES).get(commit_status)
1832 1832
1833 1833
1834 1834 def commit_time(repo_name, commit_id):
1835 1835 repo = Repository.get_by_repo_name(repo_name)
1836 1836 commit = repo.get_commit(commit_id=commit_id)
1837 1837 return commit.date
1838 1838
1839 1839
1840 1840 def get_permission_name(key):
1841 1841 return dict(Permission.PERMS).get(key)
1842 1842
1843 1843
1844 1844 def journal_filter_help(request):
1845 1845 _ = request.translate
1846 1846
1847 1847 return _(
1848 1848 'Example filter terms:\n' +
1849 1849 ' repository:vcs\n' +
1850 1850 ' username:marcin\n' +
1851 1851 ' username:(NOT marcin)\n' +
1852 1852 ' action:*push*\n' +
1853 1853 ' ip:127.0.0.1\n' +
1854 1854 ' date:20120101\n' +
1855 1855 ' date:[20120101100000 TO 20120102]\n' +
1856 1856 '\n' +
1857 1857 'Generate wildcards using \'*\' character:\n' +
1858 1858 ' "repository:vcs*" - search everything starting with \'vcs\'\n' +
1859 1859 ' "repository:*vcs*" - search for repository containing \'vcs\'\n' +
1860 1860 '\n' +
1861 1861 'Optional AND / OR operators in queries\n' +
1862 1862 ' "repository:vcs OR repository:test"\n' +
1863 1863 ' "username:test AND repository:test*"\n'
1864 1864 )
1865 1865
1866 1866
1867 1867 def search_filter_help(searcher, request):
1868 1868 _ = request.translate
1869 1869
1870 1870 terms = ''
1871 1871 return _(
1872 1872 'Example filter terms for `{searcher}` search:\n' +
1873 1873 '{terms}\n' +
1874 1874 'Generate wildcards using \'*\' character:\n' +
1875 1875 ' "repo_name:vcs*" - search everything starting with \'vcs\'\n' +
1876 1876 ' "repo_name:*vcs*" - search for repository containing \'vcs\'\n' +
1877 1877 '\n' +
1878 1878 'Optional AND / OR operators in queries\n' +
1879 1879 ' "repo_name:vcs OR repo_name:test"\n' +
1880 1880 ' "owner:test AND repo_name:test*"\n' +
1881 1881 'More: {search_doc}'
1882 1882 ).format(searcher=searcher.name,
1883 1883 terms=terms, search_doc=searcher.query_lang_doc)
1884 1884
1885 1885
1886 1886 def not_mapped_error(repo_name):
1887 1887 from rhodecode.translation import _
1888 1888 flash(_('%s repository is not mapped to db; perhaps'
1889 1889 ' it was created or renamed on the filesystem.'
1890 1890 ' Please run the application again'
1891 1891 ' in order to rescan repositories') % repo_name, category='error')
1892 1892
1893 1893
1894 1894 def ip_range(ip_addr):
1895 1895 from rhodecode.model.db import UserIpMap
1896 1896 s, e = UserIpMap._get_ip_range(ip_addr)
1897 1897 return '%s - %s' % (s, e)
1898 1898
1899 1899
1900 1900 def form(url, method='post', needs_csrf_token=True, **attrs):
1901 1901 """Wrapper around webhelpers.tags.form to prevent CSRF attacks."""
1902 1902 if method.lower() != 'get' and needs_csrf_token:
1903 1903 raise Exception(
1904 1904 'Forms to POST/PUT/DELETE endpoints should have (in general) a ' +
1905 1905 'CSRF token. If the endpoint does not require such a token you can ' +
1906 1906 'explicitly set the parameter needs_csrf_token to false.')
1907 1907
1908 1908 return wh_form(url, method=method, **attrs)
1909 1909
1910 1910
1911 1911 def secure_form(form_url, method="POST", multipart=False, **attrs):
1912 1912 """Start a form tag that points the action to an url. This
1913 1913 form tag will also include the hidden field containing
1914 1914 the auth token.
1915 1915
1916 1916 The URL option should be given either as a string, or as a
1917 1917 ``url()`` function. The method for the form defaults to POST.
1918 1918
1919 1919 Options:
1920 1920
1921 1921 ``multipart``
1922 1922 If set to True, the enctype is set to "multipart/form-data".
1923 1923 ``method``
1924 1924 The method to use when submitting the form, usually either
1925 1925 "GET" or "POST". If "PUT", "DELETE", or another verb is used, a
1926 1926 hidden input with name _method is added to simulate the verb
1927 1927 over POST.
1928 1928
1929 1929 """
1930 1930 from webhelpers.pylonslib.secure_form import insecure_form
1931 1931
1932 1932 if 'request' in attrs:
1933 1933 session = attrs['request'].session
1934 1934 del attrs['request']
1935 1935 else:
1936 1936 raise ValueError(
1937 1937 'Calling this form requires request= to be passed as argument')
1938 1938
1939 1939 form = insecure_form(form_url, method, multipart, **attrs)
1940 1940 token = literal(
1941 1941 '<input type="hidden" id="{}" name="{}" value="{}">'.format(
1942 1942 csrf_token_key, csrf_token_key, get_csrf_token(session)))
1943 1943
1944 1944 return literal("%s\n%s" % (form, token))
1945 1945
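# A minimal usage sketch for secure_form; the 'login' route name is only an
# example, and request= is mandatory so the CSRF token can be read from the
# session:
#
#   secure_form(route_path('login'), method='POST', request=request)
#   # -> literal('<form action="..." method="POST">\n'
#   #            '<input type="hidden" id="..." name="..." value="...">')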
1946 1946
1947 1947 def dropdownmenu(name, selected, options, enable_filter=False, **attrs):
1948 1948 select_html = select(name, selected, options, **attrs)
1949 1949 select2 = """
1950 1950 <script>
1951 1951 $(document).ready(function() {
1952 1952 $('#%s').select2({
1953 1953 containerCssClass: 'drop-menu',
1954 1954 dropdownCssClass: 'drop-menu-dropdown',
1955 1955 dropdownAutoWidth: true%s
1956 1956 });
1957 1957 });
1958 1958 </script>
1959 1959 """
1960 1960 filter_option = """,
1961 1961 minimumResultsForSearch: -1
1962 1962 """
1963 1963 input_id = attrs.get('id') or name
1964 1964 filter_enabled = "" if enable_filter else filter_option
1965 1965 select_script = literal(select2 % (input_id, filter_enabled))
1966 1966
1967 1967 return literal(select_html+select_script)
1968 1968
1969 1969
1970 1970 def get_visual_attr(tmpl_context_var, attr_name):
1971 1971 """
1972 1972 A safe way to get an attribute from the `visual` variable of the template context
1973 1973
1974 1974 :param tmpl_context_var: instance of tmpl_context, usually present as `c`
1975 1975 :param attr_name: name of the attribute we fetch from the c.visual
1976 1976 """
1977 1977 visual = getattr(tmpl_context_var, 'visual', None)
1978 1978 if not visual:
1979 1979 return
1980 1980 else:
1981 1981 return getattr(visual, attr_name, None)
1982 1982
1983 1983
1984 1984 def get_last_path_part(file_node):
1985 1985 if not file_node.path:
1986 1986 return u''
1987 1987
1988 1988 path = safe_unicode(file_node.path.split('/')[-1])
1989 1989 return u'../' + path
1990 1990
1991 1991
1992 1992 def route_url(*args, **kwargs):
1993 1993 """
1994 1994 Wrapper around Pyramid's `route_url` (fully qualified URL) function.
1995 1995 """
1996 1996 req = get_current_request()
1997 1997 return req.route_url(*args, **kwargs)
1998 1998
1999 1999
2000 2000 def route_path(*args, **kwargs):
2001 2001 """
2002 2002 Wrapper around Pyramid's `route_path` function.
2003 2003 """
2004 2004 req = get_current_request()
2005 2005 return req.route_path(*args, **kwargs)
2006 2006
2007 2007
2008 2008 def route_path_or_none(*args, **kwargs):
2009 2009 try:
2010 2010 return route_path(*args, **kwargs)
2011 2011 except KeyError:
2012 2012 return None
2013 2013
2014 2014
2015 2015 def current_route_path(request, **kw):
2016 2016 new_args = request.GET.mixed()
2017 2017 new_args.update(kw)
2018 2018 return request.current_route_path(_query=new_args)
2019 2019
2020 2020
2021 2021 def api_call_example(method, args):
2022 2022 """
2023 2023 Generates an API call example using curl
2024 2024 """
2025 2025 args_json = json.dumps(OrderedDict([
2026 2026 ('id', 1),
2027 2027 ('auth_token', 'SECRET'),
2028 2028 ('method', method),
2029 2029 ('args', args)
2030 2030 ]))
2031 2031 return literal(
2032 2032 "curl {api_url} -X POST -H 'content-type:text/plain' --data-binary '{data}'"
2033 2033 "<br/><br/>SECRET can be found in <a href=\"{token_url}\">auth-tokens</a> page, "
2034 2034 "and needs to be of `api calls` role."
2035 2035 .format(
2036 2036 api_url=route_url('apiv2'),
2037 2037 token_url=route_url('my_account_auth_tokens'),
2038 2038 data=args_json))
2039 2039
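# A rough sketch of what api_call_example produces; the URLs depend on how
# the 'apiv2' and 'my_account_auth_tokens' routes are mounted:
#
#   api_call_example('get_repo', {'repoid': 'my-repo'})
#   # -> curl <api_url> -X POST -H 'content-type:text/plain' --data-binary
#   #    '{"id": 1, "auth_token": "SECRET", "method": "get_repo",
#   #      "args": {"repoid": "my-repo"}}'
#   #    ...followed by a short HTML hint on where to find SECRET.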
2040 2040
2041 2041 def notification_description(notification, request):
2042 2042 """
2043 2043 Generate a human-readable notification description based on the notification type
2044 2044 """
2045 2045 from rhodecode.model.notification import NotificationModel
2046 2046 return NotificationModel().make_description(
2047 2047 notification, translate=request.translate)
2048 2048
2049 2049
2050 2050 def go_import_header(request, db_repo=None):
2051 2051 """
2052 2052 Creates a meta header for the go-import functionality used by Go tooling
2053 2053 """
2054 2054
2055 2055 if not db_repo:
2056 2056 return
2057 2057 if 'go-get' not in request.GET:
2058 2058 return
2059 2059
2060 2060 clone_url = db_repo.clone_url()
2061 2061 prefix = re.split(r'^https?:\/\/', clone_url)[-1]
2062 2062 # we have a repo and the go-get flag, return the go-import meta tag
2063 2063 return literal('<meta name="go-import" content="{} {} {}">'.format(
2064 2064 prefix, db_repo.repo_type, clone_url))
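# A rough sketch of the meta tag go_import_header emits when the request has
# the go-get flag; host and repo are illustrative:
#
#   <meta name="go-import" content="rc.example.com/group/repo git http://rc.example.com/group/repo">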
@@ -1,631 +1,644 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2010-2017 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21
22 22 """
23 23 Package for testing various lib/helper functions in rhodecode
24 24 """
25 25
26 26 import datetime
27 27 import string
28 28 import mock
29 29 import pytest
30 30
31 31 from rhodecode.tests import no_newline_id_generator
32 32 from rhodecode.tests.utils import run_test_concurrently
33 33 from rhodecode.lib.helpers import InitialsGravatar
34 34
35 35 from rhodecode.lib.utils2 import AttributeDict
36 36 from rhodecode.model.db import Repository
37 37
38 38
39 39 def _urls_for_proto(proto):
40 40 return [
41 41 ('%s://127.0.0.1' % proto, ['%s://' % proto, '127.0.0.1'],
42 42 '%s://127.0.0.1' % proto),
43 43 ('%s://marcink@127.0.0.1' % proto, ['%s://' % proto, '127.0.0.1'],
44 44 '%s://127.0.0.1' % proto),
45 45 ('%s://marcink:pass@127.0.0.1' % proto, ['%s://' % proto, '127.0.0.1'],
46 46 '%s://127.0.0.1' % proto),
47 47 ('%s://127.0.0.1:8080' % proto, ['%s://' % proto, '127.0.0.1', '8080'],
48 48 '%s://127.0.0.1:8080' % proto),
49 49 ('%s://domain.org' % proto, ['%s://' % proto, 'domain.org'],
50 50 '%s://domain.org' % proto),
51 51 ('%s://user:pass@domain.org:8080' % proto,
52 52 ['%s://' % proto, 'domain.org', '8080'],
53 53 '%s://domain.org:8080' % proto),
54 54 ]
55 55
56 56 TEST_URLS = _urls_for_proto('http') + _urls_for_proto('https')
57 57
58 58
59 59 @pytest.mark.parametrize("test_url, expected, expected_creds", TEST_URLS)
60 60 def test_uri_filter(test_url, expected, expected_creds):
61 61 from rhodecode.lib.utils2 import uri_filter
62 62 assert uri_filter(test_url) == expected
63 63
64 64
65 65 @pytest.mark.parametrize("test_url, expected, expected_creds", TEST_URLS)
66 66 def test_credentials_filter(test_url, expected, expected_creds):
67 67 from rhodecode.lib.utils2 import credentials_filter
68 68 assert credentials_filter(test_url) == expected_creds
69 69
70 70
71 71 @pytest.mark.parametrize("str_bool, expected", [
72 72 ('t', True),
73 73 ('true', True),
74 74 ('y', True),
75 75 ('yes', True),
76 76 ('on', True),
77 77 ('1', True),
78 78 ('Y', True),
79 79 ('yeS', True),
80 80 ('Y', True),
81 81 ('TRUE', True),
82 82 ('T', True),
83 83 ('False', False),
84 84 ('F', False),
85 85 ('FALSE', False),
86 86 ('0', False),
87 87 ('-1', False),
88 88 ('', False)
89 89 ])
90 90 def test_str2bool(str_bool, expected):
91 91 from rhodecode.lib.utils2 import str2bool
92 92 assert str2bool(str_bool) == expected
93 93
94 94
95 95 @pytest.mark.parametrize("text, expected", reduce(lambda a1,a2:a1+a2, [
96 96 [
97 97 (pref+"", []),
98 98 (pref+"Hi there @marcink", ['marcink']),
99 99 (pref+"Hi there @marcink and @bob", ['bob', 'marcink']),
100 100 (pref+"Hi there @marcink\n", ['marcink']),
101 101 (pref+"Hi there @marcink and @bob\n", ['bob', 'marcink']),
102 102 (pref+"Hi there marcin@rhodecode.com", []),
103 103 (pref+"Hi there @john.malcovic and @bob\n", ['bob', 'john.malcovic']),
104 104 (pref+"This needs to be reviewed: (@marcink,@john)", ["john", "marcink"]),
105 105 (pref+"This needs to be reviewed: (@marcink, @john)", ["john", "marcink"]),
106 106 (pref+"This needs to be reviewed: [@marcink,@john]", ["john", "marcink"]),
107 107 (pref+"This needs to be reviewed: (@marcink @john)", ["john", "marcink"]),
108 108 (pref+"@john @mary, please review", ["john", "mary"]),
109 109 (pref+"@john,@mary, please review", ["john", "mary"]),
110 110 (pref+"Hej @123, @22john,@mary, please review", ['123', '22john', 'mary']),
111 111 (pref+"@first hi there @marcink here's my email marcin@email.com "
112 112 "@lukaszb check @one_more22 it pls @ ttwelve @D[] @one@two@three ", ['first', 'lukaszb', 'marcink', 'one', 'one_more22']),
113 113 (pref+"@MARCIN @maRCiN @2one_more22 @john please see this http://org.pl", ['2one_more22', 'john', 'MARCIN', 'maRCiN']),
114 114 (pref+"@marian.user just do it @marco-polo and next extract @marco_polo", ['marco-polo', 'marco_polo', 'marian.user']),
115 115 (pref+"user.dot hej ! not-needed maril@domain.org", []),
116 116 (pref+"\n@marcin", ['marcin']),
117 117 ]
118 118 for pref in ['', '\n', 'hi !', '\t', '\n\n']]), ids=no_newline_id_generator)
119 119 def test_mention_extractor(text, expected):
120 120 from rhodecode.lib.utils2 import extract_mentioned_users
121 121 got = extract_mentioned_users(text)
122 122 assert sorted(got, key=lambda x: x.lower()) == got
123 123 assert set(expected) == set(got)
124 124
125 125 @pytest.mark.parametrize("age_args, expected, kw", [
126 126 ({}, u'just now', {}),
127 127 ({'seconds': -1}, u'1 second ago', {}),
128 128 ({'seconds': -60 * 2}, u'2 minutes ago', {}),
129 129 ({'hours': -1}, u'1 hour ago', {}),
130 130 ({'hours': -24}, u'1 day ago', {}),
131 131 ({'hours': -24 * 5}, u'5 days ago', {}),
132 132 ({'months': -1}, u'1 month ago', {}),
133 133 ({'months': -1, 'days': -2}, u'1 month and 2 days ago', {}),
134 134 ({'years': -1, 'months': -1}, u'1 year and 1 month ago', {}),
135 135 ({}, u'just now', {'short_format': True}),
136 136 ({'seconds': -1}, u'1sec ago', {'short_format': True}),
137 137 ({'seconds': -60 * 2}, u'2min ago', {'short_format': True}),
138 138 ({'hours': -1}, u'1h ago', {'short_format': True}),
139 139 ({'hours': -24}, u'1d ago', {'short_format': True}),
140 140 ({'hours': -24 * 5}, u'5d ago', {'short_format': True}),
141 141 ({'months': -1}, u'1m ago', {'short_format': True}),
142 142 ({'months': -1, 'days': -2}, u'1m, 2d ago', {'short_format': True}),
143 143 ({'years': -1, 'months': -1}, u'1y, 1m ago', {'short_format': True}),
144 144 ])
145 145 def test_age(age_args, expected, kw, baseapp):
146 146 from rhodecode.lib.utils2 import age
147 147 from dateutil import relativedelta
148 148 n = datetime.datetime(year=2012, month=5, day=17)
149 149 delt = lambda *args, **kwargs: relativedelta.relativedelta(*args, **kwargs)
150 150
151 151 def translate(elem):
152 152 return elem.interpolate()
153 153
154 154 assert translate(age(n + delt(**age_args), now=n, **kw)) == expected
155 155
156 156
157 157 @pytest.mark.parametrize("age_args, expected, kw", [
158 158 ({}, u'just now', {}),
159 159 ({'seconds': 1}, u'in 1 second', {}),
160 160 ({'seconds': 60 * 2}, u'in 2 minutes', {}),
161 161 ({'hours': 1}, u'in 1 hour', {}),
162 162 ({'hours': 24}, u'in 1 day', {}),
163 163 ({'hours': 24 * 5}, u'in 5 days', {}),
164 164 ({'months': 1}, u'in 1 month', {}),
165 165 ({'months': 1, 'days': 1}, u'in 1 month and 1 day', {}),
166 166 ({'years': 1, 'months': 1}, u'in 1 year and 1 month', {}),
167 167 ({}, u'just now', {'short_format': True}),
168 168 ({'seconds': 1}, u'in 1sec', {'short_format': True}),
169 169 ({'seconds': 60 * 2}, u'in 2min', {'short_format': True}),
170 170 ({'hours': 1}, u'in 1h', {'short_format': True}),
171 171 ({'hours': 24}, u'in 1d', {'short_format': True}),
172 172 ({'hours': 24 * 5}, u'in 5d', {'short_format': True}),
173 173 ({'months': 1}, u'in 1m', {'short_format': True}),
174 174 ({'months': 1, 'days': 1}, u'in 1m, 1d', {'short_format': True}),
175 175 ({'years': 1, 'months': 1}, u'in 1y, 1m', {'short_format': True}),
176 176 ])
177 177 def test_age_in_future(age_args, expected, kw, baseapp):
178 178 from rhodecode.lib.utils2 import age
179 179 from dateutil import relativedelta
180 180 n = datetime.datetime(year=2012, month=5, day=17)
181 181 delt = lambda *args, **kwargs: relativedelta.relativedelta(*args, **kwargs)
182 182
183 183 def translate(elem):
184 184 return elem.interpolate()
185 185
186 186 assert translate(age(n + delt(**age_args), now=n, **kw)) == expected
187 187
188 188
189 189 @pytest.mark.parametrize("sample, expected_tags", [
190 190 # entry
191 191 ((
192 192 ""
193 193 ),
194 194 [
195 195
196 196 ]),
197 197 # entry
198 198 ((
199 199 "hello world [stale]"
200 200 ),
201 201 [
202 202 ('state', '[stale]'),
203 203 ]),
204 204 # entry
205 205 ((
206 206 "hello world [v2.0.0] [v1.0.0]"
207 207 ),
208 208 [
209 209 ('generic', '[v2.0.0]'),
210 210 ('generic', '[v1.0.0]'),
211 211 ]),
212 212 # entry
213 213 ((
214 214 "he[ll]o wo[rl]d"
215 215 ),
216 216 [
217 217 ('label', '[ll]'),
218 218 ('label', '[rl]'),
219 219 ]),
220 220 # entry
221 221 ((
222 222 "hello world [stale]\n[featured]\n[stale] [dead] [dev]"
223 223 ),
224 224 [
225 225 ('state', '[stale]'),
226 226 ('state', '[featured]'),
227 227 ('state', '[stale]'),
228 228 ('state', '[dead]'),
229 229 ('state', '[dev]'),
230 230 ]),
231 231 # entry
232 232 ((
233 233 "hello world \n\n [stale] \n [url =&gt; [name](http://rc.com)]"
234 234 ),
235 235 [
236 236 ('state', '[stale]'),
237 237 ('url', '[url =&gt; [name](http://rc.com)]'),
238 238 ]),
239 239 # entry
240 240 ((
241 "[url =&gt; [linkNameJS](javascript:alert(document.domain))]\n"
242 "[url =&gt; [linkNameHTTP](http://rhodecode.com)]\n"
243 "[url =&gt; [linkNameHTTPS](https://rhodecode.com)]\n"
244 "[url =&gt; [linkNamePath](/repo_group)]\n"
245 ),
246 [
247 ('generic', '[linkNameJS]'),
248 ('url', '[url =&gt; [linkNameHTTP](http://rhodecode.com)]'),
249 ('url', '[url =&gt; [linkNameHTTPS](https://rhodecode.com)]'),
250 ('url', '[url =&gt; [linkNamePath](/repo_group)]'),
251 ]),
252 # entry
253 ((
241 254 "hello pta[tag] gog [[]] [[] sda ero[or]d [me =&gt;>< sa]"
242 255 "[requires] [stale] [see<>=&gt;] [see =&gt; http://url.com]"
243 256 "[requires =&gt; url] [lang =&gt; python] [just a tag] "
244 257 "<html_tag first='abc' attr=\"my.url?attr=&another=\"></html_tag>"
245 258 "[,d] [ =&gt; ULR ] [obsolete] [desc]]"
246 259 ),
247 260 [
248 261 ('label', '[desc]'),
249 262 ('label', '[obsolete]'),
250 263 ('label', '[or]'),
251 264 ('label', '[requires]'),
252 265 ('label', '[tag]'),
253 266 ('state', '[stale]'),
254 267 ('lang', '[lang =&gt; python]'),
255 268 ('ref', '[requires =&gt; url]'),
256 269 ('see', '[see =&gt; http://url.com]'),
257 270
258 271 ]),
259 272
260 273 ], ids=no_newline_id_generator)
261 274 def test_metatag_extraction(sample, expected_tags):
262 275 from rhodecode.lib.helpers import extract_metatags
263 276 tags, value = extract_metatags(sample)
264 277 assert sorted(tags) == sorted(expected_tags)
265 278
266 279
267 280 @pytest.mark.parametrize("tag_data, expected_html", [
268 281
269 282 (('state', '[stable]'), '<div class="metatag" tag="state stable">stable</div>'),
270 283 (('state', '[stale]'), '<div class="metatag" tag="state stale">stale</div>'),
271 284 (('state', '[featured]'), '<div class="metatag" tag="state featured">featured</div>'),
272 285 (('state', '[dev]'), '<div class="metatag" tag="state dev">dev</div>'),
273 286 (('state', '[dead]'), '<div class="metatag" tag="state dead">dead</div>'),
274 287
275 288 (('label', '[personal]'), '<div class="metatag" tag="label">personal</div>'),
276 289 (('generic', '[v2.0.0]'), '<div class="metatag" tag="generic">v2.0.0</div>'),
277 290
278 291 (('lang', '[lang =&gt; JavaScript]'), '<div class="metatag" tag="lang">JavaScript</div>'),
279 292 (('lang', '[lang =&gt; C++]'), '<div class="metatag" tag="lang">C++</div>'),
280 293 (('lang', '[lang =&gt; C#]'), '<div class="metatag" tag="lang">C#</div>'),
281 294 (('lang', '[lang =&gt; Delphi/Object]'), '<div class="metatag" tag="lang">Delphi/Object</div>'),
282 295 (('lang', '[lang =&gt; Objective-C]'), '<div class="metatag" tag="lang">Objective-C</div>'),
283 296 (('lang', '[lang =&gt; .NET]'), '<div class="metatag" tag="lang">.NET</div>'),
284 297
285 298 (('license', '[license =&gt; BSD 3-clause]'), '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/BSD 3-clause">BSD 3-clause</a></div>'),
286 299 (('license', '[license =&gt; GPLv3]'), '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/GPLv3">GPLv3</a></div>'),
287 300 (('license', '[license =&gt; MIT]'), '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/MIT">MIT</a></div>'),
288 301 (('license', '[license =&gt; AGPLv3]'), '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/AGPLv3">AGPLv3</a></div>'),
289 302
290 303 (('ref', '[requires =&gt; RepoName]'), '<div class="metatag" tag="ref requires">requires: <a href="/RepoName">RepoName</a></div>'),
291 304 (('ref', '[recommends =&gt; GroupName]'), '<div class="metatag" tag="ref recommends">recommends: <a href="/GroupName">GroupName</a></div>'),
292 305 (('ref', '[conflicts =&gt; SomeName]'), '<div class="metatag" tag="ref conflicts">conflicts: <a href="/SomeName">SomeName</a></div>'),
293 306 (('ref', '[base =&gt; SomeName]'), '<div class="metatag" tag="ref base">base: <a href="/SomeName">SomeName</a></div>'),
294 307
295 308 (('see', '[see =&gt; http://rhodecode.com]'), '<div class="metatag" tag="see">see: http://rhodecode.com </div>'),
296 309
297 310 (('url', '[url =&gt; [linkName](https://rhodecode.com)]'), '<div class="metatag" tag="url"> <a href="https://rhodecode.com">linkName</a> </div>'),
298 311 (('url', '[url =&gt; [example link](https://rhodecode.com)]'), '<div class="metatag" tag="url"> <a href="https://rhodecode.com">example link</a> </div>'),
299 312 (('url', '[url =&gt; [v1.0.0](https://rhodecode.com)]'), '<div class="metatag" tag="url"> <a href="https://rhodecode.com">v1.0.0</a> </div>'),
300 313
301 314 ])
302 315 def test_metatags_stylize(tag_data, expected_html):
303 316 from rhodecode.lib.helpers import style_metatag
304 317 tag_type,value = tag_data
305 318 assert style_metatag(tag_type, value) == expected_html
306 319
307 320
308 321 @pytest.mark.parametrize("tmpl_url, email, expected", [
309 322 ('http://test.com/{email}', 'test@foo.com', 'http://test.com/test@foo.com'),
310 323
311 324 ('http://test.com/{md5email}', 'test@foo.com', 'http://test.com/3cb7232fcc48743000cb86d0d5022bd9'),
312 325 ('http://test.com/{md5email}', 'testąć@foo.com', 'http://test.com/978debb907a3c55cd741872ab293ef30'),
313 326
314 327 ('http://testX.com/{md5email}?s={size}', 'test@foo.com', 'http://testX.com/3cb7232fcc48743000cb86d0d5022bd9?s=24'),
315 328 ('http://testX.com/{md5email}?s={size}', 'testąć@foo.com', 'http://testX.com/978debb907a3c55cd741872ab293ef30?s=24'),
316 329
317 330 ('{scheme}://{netloc}/{md5email}/{size}', 'test@foo.com', 'https://server.com/3cb7232fcc48743000cb86d0d5022bd9/24'),
318 331 ('{scheme}://{netloc}/{md5email}/{size}', 'testąć@foo.com', 'https://server.com/978debb907a3c55cd741872ab293ef30/24'),
319 332
320 333 ('http://test.com/{email}', 'testąć@foo.com', 'http://test.com/testąć@foo.com'),
321 334 ('http://test.com/{email}?size={size}', 'test@foo.com', 'http://test.com/test@foo.com?size=24'),
322 335 ('http://test.com/{email}?size={size}', 'testąć@foo.com', 'http://test.com/testąć@foo.com?size=24'),
323 336 ])
324 337 def test_gravatar_url_builder(tmpl_url, email, expected, request_stub):
325 338 from rhodecode.lib.helpers import gravatar_url
326 339
327 340 def fake_tmpl_context(_url):
328 341 _c = AttributeDict()
329 342 _c.visual = AttributeDict()
330 343 _c.visual.use_gravatar = True
331 344 _c.visual.gravatar_url = _url
332 345 return _c
333 346
334 347 # mock pyramid.threadlocals
335 348 def fake_get_current_request():
336 349 request_stub.scheme = 'https'
337 350 request_stub.host = 'server.com'
338 351
339 352 request_stub._call_context = fake_tmpl_context(tmpl_url)
340 353 return request_stub
341 354
342 355 with mock.patch('rhodecode.lib.helpers.get_current_request',
343 356 fake_get_current_request):
344 357
345 358 grav = gravatar_url(email_address=email, size=24)
346 359 assert grav == expected
347 360
348 361
349 362 @pytest.mark.parametrize(
350 363 "email, first_name, last_name, expected_initials, expected_color", [
351 364
352 365 ('test@rhodecode.com', '', '', 'TR', '#8a994d'),
353 366 ('marcin.kuzminski@rhodecode.com', '', '', 'MK', '#6559b3'),
354 367 # special cases of email
355 368 ('john.van.dam@rhodecode.com', '', '', 'JD', '#526600'),
356 369 ('Guido.van.Rossum@rhodecode.com', '', '', 'GR', '#990052'),
357 370 ('Guido.van.Rossum@rhodecode.com', 'Guido', 'Van Rossum', 'GR', '#990052'),
358 371
359 372 ('rhodecode+Guido.van.Rossum@rhodecode.com', '', '', 'RR', '#46598c'),
360 373 ('pclouds@rhodecode.com', 'Nguyễn Thái', 'Tgọc Duy', 'ND', '#665200'),
361 374
362 375 ('john-brown@foo.com', '', '', 'JF', '#73006b'),
363 376 ('admin@rhodecode.com', 'Marcin', 'Kuzminski', 'MK', '#104036'),
364 377 # partials
365 378 ('admin@rhodecode.com', 'Marcin', '', 'MR', '#104036'), # fn+email
366 379 ('admin@rhodecode.com', '', 'Kuzminski', 'AK', '#104036'), # em+ln
367 380 # non-ascii
368 381 ('admin@rhodecode.com', 'Marcin', 'Śuzminski', 'MS', '#104036'),
369 382 ('marcin.śuzminski@rhodecode.com', '', '', 'MS', '#73000f'),
370 383
371 384 # special cases, LDAP can provide those...
372 385 ('admin@', 'Marcin', 'Śuzminski', 'MS', '#aa00ff'),
373 386 ('marcin.śuzminski', '', '', 'MS', '#402020'),
374 387 ('null', '', '', 'NL', '#8c4646'),
375 388 ('some.@abc.com', 'some', '', 'SA', '#664e33')
376 389 ])
377 390 def test_initials_gravatar_pick_of_initials_and_color_algo(
378 391 email, first_name, last_name, expected_initials, expected_color):
379 392 instance = InitialsGravatar(email, first_name, last_name)
380 393 assert instance.get_initials() == expected_initials
381 394 assert instance.str2color(email) == expected_color
382 395
383 396
384 397 def test_initials_gravatar_mapping_algo():
385 398 pos = set()
386 399 instance = InitialsGravatar('', '', '')
387 400 iterations = 0
388 401
389 402 variations = []
390 403 for letter1 in string.ascii_letters:
391 404 for letter2 in string.ascii_letters[::-1][:10]:
392 405 for letter3 in string.ascii_letters[:10]:
393 406 variations.append(
394 407 '%s@rhodecode.com' % (letter1+letter2+letter3))
395 408
396 409 max_variations = 4096
397 410 for email in variations[:max_variations]:
398 411 iterations += 1
399 412 pos.add(
400 413 instance.pick_color_bank_index(email,
401 414 instance.get_color_bank()))
402 415
403 416 # we assume that we have matched all 256 possible positions,
404 417 # in a reasonable amount of different email addresses
405 418 assert len(pos) == 256
406 419 assert iterations == max_variations
407 420
408 421
409 422 @pytest.mark.parametrize("tmpl, repo_name, overrides, prefix, expected", [
410 423 (Repository.DEFAULT_CLONE_URI, 'group/repo1', {}, '', 'http://vps1:8000/group/repo1'),
411 424 (Repository.DEFAULT_CLONE_URI, 'group/repo1', {'user': 'marcink'}, '', 'http://marcink@vps1:8000/group/repo1'),
412 425 (Repository.DEFAULT_CLONE_URI, 'group/repo1', {}, '/rc', 'http://vps1:8000/rc/group/repo1'),
413 426 (Repository.DEFAULT_CLONE_URI, 'group/repo1', {'user': 'user'}, '/rc', 'http://user@vps1:8000/rc/group/repo1'),
414 427 (Repository.DEFAULT_CLONE_URI, 'group/repo1', {'user': 'marcink'}, '/rc', 'http://marcink@vps1:8000/rc/group/repo1'),
415 428 (Repository.DEFAULT_CLONE_URI, 'group/repo1', {'user': 'user'}, '/rc/', 'http://user@vps1:8000/rc/group/repo1'),
416 429 (Repository.DEFAULT_CLONE_URI, 'group/repo1', {'user': 'marcink'}, '/rc/', 'http://marcink@vps1:8000/rc/group/repo1'),
417 430 ('{scheme}://{user}@{netloc}/_{repoid}', 'group/repo1', {}, '', 'http://vps1:8000/_23'),
418 431 ('{scheme}://{user}@{netloc}/_{repoid}', 'group/repo1', {'user': 'marcink'}, '', 'http://marcink@vps1:8000/_23'),
419 432 ('http://{user}@{netloc}/_{repoid}', 'group/repo1', {'user': 'marcink'}, '', 'http://marcink@vps1:8000/_23'),
420 433 ('http://{netloc}/_{repoid}', 'group/repo1', {'user': 'marcink'}, '', 'http://vps1:8000/_23'),
421 434 ('https://{user}@proxy1.server.com/{repo}', 'group/repo1', {'user': 'marcink'}, '', 'https://marcink@proxy1.server.com/group/repo1'),
422 435 ('https://{user}@proxy1.server.com/{repo}', 'group/repo1', {}, '', 'https://proxy1.server.com/group/repo1'),
423 436 ('https://proxy1.server.com/{user}/{repo}', 'group/repo1', {'user': 'marcink'}, '', 'https://proxy1.server.com/marcink/group/repo1'),
424 437 ])
425 438 def test_clone_url_generator(tmpl, repo_name, overrides, prefix, expected):
426 439 from rhodecode.lib.utils2 import get_clone_url
427 440
428 441 class RequestStub(object):
429 442 def request_url(self, name):
430 443 return 'http://vps1:8000' + prefix
431 444
432 445 def route_url(self, name):
433 446 return self.request_url(name)
434 447
435 448 clone_url = get_clone_url(
436 449 request=RequestStub(),
437 450 uri_tmpl=tmpl,
438 451 repo_name=repo_name, repo_id=23, **overrides)
439 452 assert clone_url == expected
440 453
441 454
442 455 def _quick_url(text, tmpl="""<a class="revision-link" href="%s">%s</a>""", url_=None):
443 456 """
444 457 Changes `some text url[foo]` => `some text <a href="/">foo</a>`
445 458
446 459 :param text:
447 460 """
448 461 import re
449 462 # quickly change expected url[] into a link
450 463 URL_PAT = re.compile(r'(?:url\[)(.+?)(?:\])')
451 464
452 465 def url_func(match_obj):
453 466 _url = match_obj.groups()[0]
454 467 return tmpl % (url_ or '/some-url', _url)
455 468 return URL_PAT.sub(url_func, text)
456 469
457 470
458 471 @pytest.mark.parametrize("sample, expected", [
459 472 ("",
460 473 ""),
461 474 ("git-svn-id: https://svn.apache.org/repos/asf/libcloud/trunk@1441655 13f79535-47bb-0310-9956-ffa450edef68",
462 475 "git-svn-id: https://svn.apache.org/repos/asf/libcloud/trunk@1441655 13f79535-47bb-0310-9956-ffa450edef68"),
463 476 ("from rev 000000000000",
464 477 "from rev url[000000000000]"),
465 478 ("from rev 000000000000123123 also rev 000000000000",
466 479 "from rev url[000000000000123123] also rev url[000000000000]"),
467 480 ("this should-000 00",
468 481 "this should-000 00"),
469 482 ("longtextffffffffff rev 123123123123",
470 483 "longtextffffffffff rev url[123123123123]"),
471 484 ("rev ffffffffffffffffffffffffffffffffffffffffffffffffff",
472 485 "rev ffffffffffffffffffffffffffffffffffffffffffffffffff"),
473 486 ("ffffffffffff some text traalaa",
474 487 "url[ffffffffffff] some text traalaa"),
475 488 ("""Multi line
476 489 123123123123
477 490 some text 123123123123
478 491 sometimes !
479 492 """,
480 493 """Multi line
481 494 url[123123123123]
482 495 some text url[123123123123]
483 496 sometimes !
484 497 """)
485 498 ], ids=no_newline_id_generator)
486 499 def test_urlify_commits(sample, expected):
487 500 def fake_url(self, *args, **kwargs):
488 501 return '/some-url'
489 502
490 503 expected = _quick_url(expected)
491 504
492 505 with mock.patch('rhodecode.lib.helpers.route_url', fake_url):
493 506 from rhodecode.lib.helpers import urlify_commits
494 507 assert urlify_commits(sample, 'repo_name') == expected
495 508
496 509
497 510 @pytest.mark.parametrize("sample, expected, url_", [
498 511 ("",
499 512 "",
500 513 ""),
501 514 ("https://svn.apache.org/repos",
502 515 "url[https://svn.apache.org/repos]",
503 516 "https://svn.apache.org/repos"),
504 517 ("http://svn.apache.org/repos",
505 518 "url[http://svn.apache.org/repos]",
506 519 "http://svn.apache.org/repos"),
507 520 ("from rev a also rev http://google.com",
508 521 "from rev a also rev url[http://google.com]",
509 522 "http://google.com"),
510 523 ("""Multi line
511 524 https://foo.bar.com
512 525 some text lalala""",
513 526 """Multi line
514 527 url[https://foo.bar.com]
515 528 some text lalala""",
516 529 "https://foo.bar.com")
517 530 ], ids=no_newline_id_generator)
518 531 def test_urlify_test(sample, expected, url_):
519 532 from rhodecode.lib.helpers import urlify_text
520 533 expected = _quick_url(expected, tmpl="""<a href="%s">%s</a>""", url_=url_)
521 534 assert urlify_text(sample) == expected
522 535
523 536
524 537 @pytest.mark.parametrize("test, expected", [
525 538 ("", None),
526 539 ("/_2", '2'),
527 540 ("_2", '2'),
528 541 ("/_2/", '2'),
529 542 ("_2/", '2'),
530 543
531 544 ("/_21", '21'),
532 545 ("_21", '21'),
533 546 ("/_21/", '21'),
534 547 ("_21/", '21'),
535 548
536 549 ("/_21/foobar", '21'),
537 550 ("_21/121", '21'),
538 551 ("/_21/_12", '21'),
539 552 ("_21/rc/foo", '21'),
540 553
541 554 ])
542 555 def test_get_repo_by_id(test, expected):
543 556 from rhodecode.model.repo import RepoModel
544 557 _test = RepoModel()._extract_id_from_repo_name(test)
545 558 assert _test == expected
546 559
547 560
548 561 @pytest.mark.parametrize("test_repo_name, repo_type", [
549 562 ("test_repo_1", None),
550 563 ("repo_group/foobar", None),
551 564 ("test_non_asci_Δ…Δ‡Δ™", None),
552 565 (u"test_non_asci_unicode_Δ…Δ‡Δ™", None),
553 566 ])
554 567 def test_invalidation_context(baseapp, test_repo_name, repo_type):
555 568 from beaker.cache import cache_region
556 569 from rhodecode.lib import caches
557 570 from rhodecode.model.db import CacheKey
558 571
559 572 @cache_region('long_term')
560 573 def _dummy_func(cache_key):
561 574 return 'result'
562 575
563 576 invalidator_context = CacheKey.repo_context_cache(
564 577 _dummy_func, test_repo_name, 'repo')
565 578
566 579 with invalidator_context as context:
567 580 invalidated = context.invalidate()
568 581 result = context.compute()
569 582
570 583 assert invalidated == True
571 584 assert 'result' == result
572 585 assert isinstance(context, caches.FreshRegionCache)
573 586
574 587 assert 'InvalidationContext' in repr(invalidator_context)
575 588
576 589 with invalidator_context as context:
577 590 context.invalidate()
578 591 result = context.compute()
579 592
580 593 assert 'result' == result
581 594 assert isinstance(context, caches.ActiveRegionCache)
582 595
583 596
584 597 def test_invalidation_context_exception_in_compute(baseapp):
585 598 from rhodecode.model.db import CacheKey
586 599 from beaker.cache import cache_region
587 600
588 601 @cache_region('long_term')
589 602 def _dummy_func(cache_key):
590 603 # this deliberately raises an error when the cached value is computed
591 604 raise Exception('ups')
592 605
593 606 invalidator_context = CacheKey.repo_context_cache(
594 607 _dummy_func, 'test_repo_2', 'repo')
595 608
596 609 with pytest.raises(Exception):
597 610 with invalidator_context as context:
598 611 context.invalidate()
599 612 context.compute()
600 613
601 614
602 615 @pytest.mark.parametrize('execution_number', range(5))
603 616 def test_cache_invalidation_race_condition(execution_number, baseapp):
604 617 import time
605 618 from beaker.cache import cache_region
606 619 from rhodecode.model.db import CacheKey
607 620
608 621 if CacheKey.metadata.bind.url.get_backend_name() == "mysql":
609 622 reason = (
610 623 'Fails on MariaDB due to some locking issues. Investigation'
611 624 ' needed')
612 625 pytest.xfail(reason=reason)
613 626
614 627 @run_test_concurrently(25)
615 628 def test_create_and_delete_cache_keys():
616 629 time.sleep(0.2)
617 630
618 631 @cache_region('long_term')
619 632 def _dummy_func(cache_key):
620 633 return 'result'
621 634
622 635 invalidator_context = CacheKey.repo_context_cache(
623 636 _dummy_func, 'test_repo_1', 'repo')
624 637
625 638 with invalidator_context as context:
626 639 context.invalidate()
627 640 context.compute()
628 641
629 642 CacheKey.set_invalidate('test_repo_1', delete=True)
630 643
631 644 test_create_and_delete_cache_keys()