audit-logs: added *basic* support for NOT query term in audit logs.
marcink -
r1824:fdf0761c default
@@ -1,2035 +1,2036 @@
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2010-2017 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21 """
22 22 Helper functions
23 23
24 24 Consists of functions typically used within templates, but also
25 25 available to Controllers. This module is available to both as 'h'.
26 26 """
27 27
28 28 import random
29 29 import hashlib
30 30 import StringIO
31 31 import urllib
32 32 import math
33 33 import logging
34 34 import re
35 35 import urlparse
36 36 import time
37 37 import string
38 38 import hashlib
39 39 from collections import OrderedDict
40 40
41 41 import pygments
42 42 import itertools
43 43 import fnmatch
44 44
45 45 from datetime import datetime
46 46 from functools import partial
47 47 from pygments.formatters.html import HtmlFormatter
48 48 from pygments import highlight as code_highlight
49 49 from pygments.lexers import (
50 50 get_lexer_by_name, get_lexer_for_filename, get_lexer_for_mimetype)
51 51 from pylons import url as pylons_url
52 52 from pylons.i18n.translation import _, ungettext
53 53 from pyramid.threadlocal import get_current_request
54 54
55 55 from webhelpers.html import literal, HTML, escape
56 56 from webhelpers.html.tools import *
57 57 from webhelpers.html.builder import make_tag
58 58 from webhelpers.html.tags import auto_discovery_link, checkbox, css_classes, \
59 59 end_form, file, form as wh_form, hidden, image, javascript_link, link_to, \
60 60 link_to_if, link_to_unless, ol, required_legend, select, stylesheet_link, \
61 61 submit, text, password, textarea, title, ul, xml_declaration, radio
62 62 from webhelpers.html.tools import auto_link, button_to, highlight, \
63 63 js_obfuscate, mail_to, strip_links, strip_tags, tag_re
64 64 from webhelpers.pylonslib import Flash as _Flash
65 65 from webhelpers.text import chop_at, collapse, convert_accented_entities, \
66 66 convert_misc_entities, lchop, plural, rchop, remove_formatting, \
67 67 replace_whitespace, urlify, truncate, wrap_paragraphs
68 68 from webhelpers.date import time_ago_in_words
69 69 from webhelpers.paginate import Page as _Page
70 70 from webhelpers.html.tags import _set_input_attrs, _set_id_attr, \
71 71 convert_boolean_attrs, NotGiven, _make_safe_id_component
72 72 from webhelpers2.number import format_byte_size
73 73
74 74 from rhodecode.lib.action_parser import action_parser
75 75 from rhodecode.lib.ext_json import json
76 76 from rhodecode.lib.utils import repo_name_slug, get_custom_lexer
77 77 from rhodecode.lib.utils2 import str2bool, safe_unicode, safe_str, \
78 78 get_commit_safe, datetime_to_time, time_to_datetime, time_to_utcdatetime, \
79 79 AttributeDict, safe_int, md5, md5_safe
80 80 from rhodecode.lib.markup_renderer import MarkupRenderer, relative_links
81 81 from rhodecode.lib.vcs.exceptions import CommitDoesNotExistError
82 82 from rhodecode.lib.vcs.backends.base import BaseChangeset, EmptyCommit
83 83 from rhodecode.config.conf import DATE_FORMAT, DATETIME_FORMAT
84 84 from rhodecode.model.changeset_status import ChangesetStatusModel
85 85 from rhodecode.model.db import Permission, User, Repository
86 86 from rhodecode.model.repo_group import RepoGroupModel
87 87 from rhodecode.model.settings import IssueTrackerSettingsModel
88 88
89 89 log = logging.getLogger(__name__)
90 90
91 91
92 92 DEFAULT_USER = User.DEFAULT_USER
93 93 DEFAULT_USER_EMAIL = User.DEFAULT_USER_EMAIL
94 94
95 95
96 96 def url(*args, **kw):
97 97 return pylons_url(*args, **kw)
98 98
99 99
100 100 def pylons_url_current(*args, **kw):
101 101 """
102 102 This function overrides pylons.url.current() which returns the current
103 103 path, so that it will also work from a pyramid-only context. This
104 104 should be removed once the port to pyramid is complete.
105 105 """
106 106 if not args and not kw:
107 107 request = get_current_request()
108 108 return request.path
109 109 return pylons_url.current(*args, **kw)
110 110
111 111 url.current = pylons_url_current
112 112
113 113
114 114 def url_replace(**qargs):
115 115 """ Returns the current request url while replacing query string args """
116 116
117 117 request = get_current_request()
118 118 new_args = request.GET.mixed()
119 119 new_args.update(qargs)
120 120 return url('', **new_args)
121 121
122 122
123 123 def asset(path, ver=None, **kwargs):
124 124 """
125 125 Helper to generate a static asset file path for rhodecode assets
126 126
127 127 eg. h.asset('images/image.png', ver='3923')
128 128
129 129 :param path: path of asset
130 130 :param ver: optional version query param to append as ?ver=
131 131 """
132 132 request = get_current_request()
133 133 query = {}
134 134 query.update(kwargs)
135 135 if ver:
136 136 query = {'ver': ver}
137 137 return request.static_path(
138 138 'rhodecode:public/{}'.format(path), _query=query)
139 139
140 140
141 141 default_html_escape_table = {
142 142 ord('&'): u'&amp;',
143 143 ord('<'): u'&lt;',
144 144 ord('>'): u'&gt;',
145 145 ord('"'): u'&quot;',
146 146 ord("'"): u'&#39;',
147 147 }
148 148
149 149
150 150 def html_escape(text, html_escape_table=default_html_escape_table):
151 151 """Produce entities within text."""
152 152 return text.translate(html_escape_table)
153 153
154 154
155 155 def chop_at_smart(s, sub, inclusive=False, suffix_if_chopped=None):
156 156 """
157 157 Truncate string ``s`` at the first occurrence of ``sub``.
158 158
159 159 If ``inclusive`` is true, truncate just after ``sub`` rather than at it.
160 160 """
161 161 suffix_if_chopped = suffix_if_chopped or ''
162 162 pos = s.find(sub)
163 163 if pos == -1:
164 164 return s
165 165
166 166 if inclusive:
167 167 pos += len(sub)
168 168
169 169 chopped = s[:pos]
170 170 left = s[pos:].strip()
171 171
172 172 if left and suffix_if_chopped:
173 173 chopped += suffix_if_chopped
174 174
175 175 return chopped
176 176
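# Illustrative usage of chop_at_smart() above (a doctest-style sketch added for
# clarity; the sample strings are hypothetical, not from the original module):
# >>> chop_at_smart('get out of my lane', 'out', suffix_if_chopped='...')
# 'get ...'
# >>> chop_at_smart('get out of my lane', 'out', inclusive=True, suffix_if_chopped='...')
# 'get out...'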
177 177
178 178 def shorter(text, size=20):
179 179 postfix = '...'
180 180 if len(text) > size:
181 181 return text[:size - len(postfix)] + postfix
182 182 return text
183 183
184 184
185 185 def _reset(name, value=None, id=NotGiven, type="reset", **attrs):
186 186 """
187 187 Reset button
188 188 """
189 189 _set_input_attrs(attrs, type, name, value)
190 190 _set_id_attr(attrs, id, name)
191 191 convert_boolean_attrs(attrs, ["disabled"])
192 192 return HTML.input(**attrs)
193 193
194 194 reset = _reset
195 195 safeid = _make_safe_id_component
196 196
197 197
198 198 def branding(name, length=40):
199 199 return truncate(name, length, indicator="")
200 200
201 201
202 202 def FID(raw_id, path):
203 203 """
204 204 Creates a unique ID for a filenode based on a hash of its path and commit;
205 205 it's safe to use in urls
206 206
207 207 :param raw_id:
208 208 :param path:
209 209 """
210 210
211 211 return 'c-%s-%s' % (short_id(raw_id), md5_safe(path)[:12])
212 212
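# Illustrative shape of the FID() output above (values are hypothetical): for a
# commit id and a file path it yields 'c-<12-char short id>-<first 12 chars of
# the md5 of the path>', e.g. something like 'c-1b2a3c4d5e6f-0f1e2d3c4b5a'.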
213 213
214 214 class _GetError(object):
215 215 """Get error from form_errors, and represent it as span wrapped error
216 216 message
217 217
218 218 :param field_name: field to fetch errors for
219 219 :param form_errors: form errors dict
220 220 """
221 221
222 222 def __call__(self, field_name, form_errors):
223 223 tmpl = """<span class="error_msg">%s</span>"""
224 224 if form_errors and field_name in form_errors:
225 225 return literal(tmpl % form_errors.get(field_name))
226 226
227 227 get_error = _GetError()
228 228
229 229
230 230 class _ToolTip(object):
231 231
232 232 def __call__(self, tooltip_title, trim_at=50):
233 233 """
234 234 Special function just to wrap our text into nicely formatted,
235 235 autowrapped text
236 236
237 237 :param tooltip_title:
238 238 """
239 239 tooltip_title = escape(tooltip_title)
240 240 tooltip_title = tooltip_title.replace('<', '&lt;').replace('>', '&gt;')
241 241 return tooltip_title
242 242 tooltip = _ToolTip()
243 243
244 244
245 245 def files_breadcrumbs(repo_name, commit_id, file_path):
246 246 if isinstance(file_path, str):
247 247 file_path = safe_unicode(file_path)
248 248
249 249 # TODO: johbo: Is this always a url like path, or is this operating
250 250 # system dependent?
251 251 path_segments = file_path.split('/')
252 252
253 253 repo_name_html = escape(repo_name)
254 254 if len(path_segments) == 1 and path_segments[0] == '':
255 255 url_segments = [repo_name_html]
256 256 else:
257 257 url_segments = [
258 258 link_to(
259 259 repo_name_html,
260 260 url('files_home',
261 261 repo_name=repo_name,
262 262 revision=commit_id,
263 263 f_path=''),
264 264 class_='pjax-link')]
265 265
266 266 last_cnt = len(path_segments) - 1
267 267 for cnt, segment in enumerate(path_segments):
268 268 if not segment:
269 269 continue
270 270 segment_html = escape(segment)
271 271
272 272 if cnt != last_cnt:
273 273 url_segments.append(
274 274 link_to(
275 275 segment_html,
276 276 url('files_home',
277 277 repo_name=repo_name,
278 278 revision=commit_id,
279 279 f_path='/'.join(path_segments[:cnt + 1])),
280 280 class_='pjax-link'))
281 281 else:
282 282 url_segments.append(segment_html)
283 283
284 284 return literal('/'.join(url_segments))
285 285
286 286
287 287 class CodeHtmlFormatter(HtmlFormatter):
288 288 """
289 289 Custom Html Formatter for source code
290 290 """
291 291
292 292 def wrap(self, source, outfile):
293 293 return self._wrap_div(self._wrap_pre(self._wrap_code(source)))
294 294
295 295 def _wrap_code(self, source):
296 296 for cnt, it in enumerate(source):
297 297 i, t = it
298 298 t = '<div id="L%s">%s</div>' % (cnt + 1, t)
299 299 yield i, t
300 300
301 301 def _wrap_tablelinenos(self, inner):
302 302 dummyoutfile = StringIO.StringIO()
303 303 lncount = 0
304 304 for t, line in inner:
305 305 if t:
306 306 lncount += 1
307 307 dummyoutfile.write(line)
308 308
309 309 fl = self.linenostart
310 310 mw = len(str(lncount + fl - 1))
311 311 sp = self.linenospecial
312 312 st = self.linenostep
313 313 la = self.lineanchors
314 314 aln = self.anchorlinenos
315 315 nocls = self.noclasses
316 316 if sp:
317 317 lines = []
318 318
319 319 for i in range(fl, fl + lncount):
320 320 if i % st == 0:
321 321 if i % sp == 0:
322 322 if aln:
323 323 lines.append('<a href="#%s%d" class="special">%*d</a>' %
324 324 (la, i, mw, i))
325 325 else:
326 326 lines.append('<span class="special">%*d</span>' % (mw, i))
327 327 else:
328 328 if aln:
329 329 lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
330 330 else:
331 331 lines.append('%*d' % (mw, i))
332 332 else:
333 333 lines.append('')
334 334 ls = '\n'.join(lines)
335 335 else:
336 336 lines = []
337 337 for i in range(fl, fl + lncount):
338 338 if i % st == 0:
339 339 if aln:
340 340 lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
341 341 else:
342 342 lines.append('%*d' % (mw, i))
343 343 else:
344 344 lines.append('')
345 345 ls = '\n'.join(lines)
346 346
347 347 # in case you wonder about the seemingly redundant <div> here: since the
348 348 # content in the other cell also is wrapped in a div, some browsers in
349 349 # some configurations seem to mess up the formatting...
350 350 if nocls:
351 351 yield 0, ('<table class="%stable">' % self.cssclass +
352 352 '<tr><td><div class="linenodiv" '
353 353 'style="background-color: #f0f0f0; padding-right: 10px">'
354 354 '<pre style="line-height: 125%">' +
355 355 ls + '</pre></div></td><td id="hlcode" class="code">')
356 356 else:
357 357 yield 0, ('<table class="%stable">' % self.cssclass +
358 358 '<tr><td class="linenos"><div class="linenodiv"><pre>' +
359 359 ls + '</pre></div></td><td id="hlcode" class="code">')
360 360 yield 0, dummyoutfile.getvalue()
361 361 yield 0, '</td></tr></table>'
362 362
363 363
364 364 class SearchContentCodeHtmlFormatter(CodeHtmlFormatter):
365 365 def __init__(self, **kw):
366 366 # only show these line numbers if set
367 367 self.only_lines = kw.pop('only_line_numbers', [])
368 368 self.query_terms = kw.pop('query_terms', [])
369 369 self.max_lines = kw.pop('max_lines', 5)
370 370 self.line_context = kw.pop('line_context', 3)
371 371 self.url = kw.pop('url', None)
372 372
373 373 super(CodeHtmlFormatter, self).__init__(**kw)
374 374
375 375 def _wrap_code(self, source):
376 376 for cnt, it in enumerate(source):
377 377 i, t = it
378 378 t = '<pre>%s</pre>' % t
379 379 yield i, t
380 380
381 381 def _wrap_tablelinenos(self, inner):
382 382 yield 0, '<table class="code-highlight %stable">' % self.cssclass
383 383
384 384 last_shown_line_number = 0
385 385 current_line_number = 1
386 386
387 387 for t, line in inner:
388 388 if not t:
389 389 yield t, line
390 390 continue
391 391
392 392 if current_line_number in self.only_lines:
393 393 if last_shown_line_number + 1 != current_line_number:
394 394 yield 0, '<tr>'
395 395 yield 0, '<td class="line">...</td>'
396 396 yield 0, '<td id="hlcode" class="code"></td>'
397 397 yield 0, '</tr>'
398 398
399 399 yield 0, '<tr>'
400 400 if self.url:
401 401 yield 0, '<td class="line"><a href="%s#L%i">%i</a></td>' % (
402 402 self.url, current_line_number, current_line_number)
403 403 else:
404 404 yield 0, '<td class="line"><a href="">%i</a></td>' % (
405 405 current_line_number)
406 406 yield 0, '<td id="hlcode" class="code">' + line + '</td>'
407 407 yield 0, '</tr>'
408 408
409 409 last_shown_line_number = current_line_number
410 410
411 411 current_line_number += 1
412 412
413 413
414 414 yield 0, '</table>'
415 415
416 416
417 417 def extract_phrases(text_query):
418 418 """
419 419 Extracts phrases from a search term string, making sure phrases
420 420 contained in double quotes are kept together, and discarding empty
421 421 or whitespace-only values, eg.
422 422
423 423 'some text "a phrase" more' => ['some', 'text', 'a phrase', 'more']
424 424
425 425 """
426 426
427 427 in_phrase = False
428 428 buf = ''
429 429 phrases = []
430 430 for char in text_query:
431 431 if in_phrase:
432 432 if char == '"': # end phrase
433 433 phrases.append(buf)
434 434 buf = ''
435 435 in_phrase = False
436 436 continue
437 437 else:
438 438 buf += char
439 439 continue
440 440 else:
441 441 if char == '"': # start phrase
442 442 in_phrase = True
443 443 phrases.append(buf)
444 444 buf = ''
445 445 continue
446 446 elif char == ' ':
447 447 phrases.append(buf)
448 448 buf = ''
449 449 continue
450 450 else:
451 451 buf += char
452 452
453 453 phrases.append(buf)
454 454 phrases = [phrase.strip() for phrase in phrases if phrase.strip()]
455 455 return phrases
456 456
457 457
458 458 def get_matching_offsets(text, phrases):
459 459 """
460 460 Returns a list of string offsets in `text` at which the given `phrases` match
461 461
462 462 >>> get_matching_offsets('some text here', ['some', 'here'])
463 463 [(0, 4), (10, 14)]
464 464
465 465 """
466 466 offsets = []
467 467 for phrase in phrases:
468 468 for match in re.finditer(phrase, text):
469 469 offsets.append((match.start(), match.end()))
470 470
471 471 return offsets
472 472
473 473
474 474 def normalize_text_for_matching(x):
475 475 """
476 476 Replaces all non-alphanumeric characters with spaces and lowercases the
477 477 string, useful for comparing two text strings without punctuation
478 478 """
479 479 return re.sub(r'[^\w]', ' ', x.lower())
480 480
481 481
482 482 def get_matching_line_offsets(lines, terms):
483 483 """ Return a set of `lines` indices (starting from 1) matching a
484 484 text search query, along with `context` lines above/below matching lines
485 485
486 486 :param lines: list of strings representing lines
487 487 :param terms: search term string to match in lines eg. 'some text'
488 488 :param context: number of lines above/below a matching line to add to result
489 489 :param max_lines: cut off for lines of interest
490 490 eg.
491 491
492 492 text = '''
493 493 words words words
494 494 words words words
495 495 some text some
496 496 words words words
497 497 words words words
498 498 text here what
499 499 '''
500 500 get_matching_line_offsets(text, 'text', context=1)
501 501 {3: [(5, 9)], 6: [(0, 4)]}
502 502
503 503 """
504 504 matching_lines = {}
505 505 phrases = [normalize_text_for_matching(phrase)
506 506 for phrase in extract_phrases(terms)]
507 507
508 508 for line_index, line in enumerate(lines, start=1):
509 509 match_offsets = get_matching_offsets(
510 510 normalize_text_for_matching(line), phrases)
511 511 if match_offsets:
512 512 matching_lines[line_index] = match_offsets
513 513
514 514 return matching_lines
515 515
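# Illustrative usage of get_matching_line_offsets() above (a doctest-style
# sketch; the sample lines are hypothetical). Line numbering starts at 1 and the
# offsets refer to the normalized (lowercased, punctuation-stripped) text:
# >>> lines = ['words words words', 'some text some', 'text here what']
# >>> get_matching_line_offsets(lines, 'text')
# {2: [(5, 9)], 3: [(0, 4)]}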
516 516
517 517 def hsv_to_rgb(h, s, v):
518 518 """ Convert hsv color values to rgb """
519 519
520 520 if s == 0.0:
521 521 return v, v, v
522 522 i = int(h * 6.0) # XXX assume int() truncates!
523 523 f = (h * 6.0) - i
524 524 p = v * (1.0 - s)
525 525 q = v * (1.0 - s * f)
526 526 t = v * (1.0 - s * (1.0 - f))
527 527 i = i % 6
528 528 if i == 0:
529 529 return v, t, p
530 530 if i == 1:
531 531 return q, v, p
532 532 if i == 2:
533 533 return p, v, t
534 534 if i == 3:
535 535 return p, q, v
536 536 if i == 4:
537 537 return t, p, v
538 538 if i == 5:
539 539 return v, p, q
540 540
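# Illustrative values for hsv_to_rgb() above (a doctest-style sketch): hue 0.0
# is red, hue 0.5 is cyan, and zero saturation collapses to a grey of the value:
# >>> hsv_to_rgb(0.0, 1.0, 1.0)
# (1.0, 0.0, 0.0)
# >>> hsv_to_rgb(0.5, 1.0, 1.0)
# (0.0, 1.0, 1.0)
# >>> hsv_to_rgb(0.3, 0.0, 0.75)
# (0.75, 0.75, 0.75)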
541 541
542 542 def unique_color_generator(n=10000, saturation=0.10, lightness=0.95):
543 543 """
544 544 Generator for getting n of evenly distributed colors using
545 545 hsv color and golden ratio. It always returns the same order of colors
546 546
547 547 :param n: number of colors to generate
548 548 :param saturation: saturation of returned colors
549 549 :param lightness: lightness of returned colors
550 550 :returns: RGB tuple
551 551 """
552 552
553 553 golden_ratio = 0.618033988749895
554 554 h = 0.22717784590367374
555 555
556 556 for _ in xrange(n):
557 557 h += golden_ratio
558 558 h %= 1
559 559 HSV_tuple = [h, saturation, lightness]
560 560 RGB_tuple = hsv_to_rgb(*HSV_tuple)
561 561 yield map(lambda x: str(int(x * 256)), RGB_tuple)
562 562
563 563
564 564 def color_hasher(n=10000, saturation=0.10, lightness=0.95):
565 565 """
566 566 Returns a function which when called with an argument returns a unique
567 567 color for that argument, eg.
568 568
569 569 :param n: number of colors to generate
570 570 :param saturation: saturation of returned colors
571 571 :param lightness: lightness of returned colors
572 572 :returns: css RGB string
573 573
574 574 >>> color_hash = color_hasher()
575 575 >>> color_hash('hello')
576 576 'rgb(34, 12, 59)'
577 577 >>> color_hash('hello')
578 578 'rgb(34, 12, 59)'
579 579 >>> color_hash('other')
580 580 'rgb(90, 224, 159)'
581 581 """
582 582
583 583 color_dict = {}
584 584 cgenerator = unique_color_generator(
585 585 saturation=saturation, lightness=lightness)
586 586
587 587 def get_color_string(thing):
588 588 if thing in color_dict:
589 589 col = color_dict[thing]
590 590 else:
591 591 col = color_dict[thing] = cgenerator.next()
592 592 return "rgb(%s)" % (', '.join(col))
593 593
594 594 return get_color_string
595 595
596 596
597 597 def get_lexer_safe(mimetype=None, filepath=None):
598 598 """
599 599 Tries to return a relevant pygments lexer using mimetype/filepath name,
600 600 defaulting to plain text if none could be found
601 601 """
602 602 lexer = None
603 603 try:
604 604 if mimetype:
605 605 lexer = get_lexer_for_mimetype(mimetype)
606 606 if not lexer:
607 607 lexer = get_lexer_for_filename(filepath)
608 608 except pygments.util.ClassNotFound:
609 609 pass
610 610
611 611 if not lexer:
612 612 lexer = get_lexer_by_name('text')
613 613
614 614 return lexer
615 615
616 616
617 617 def get_lexer_for_filenode(filenode):
618 618 lexer = get_custom_lexer(filenode.extension) or filenode.lexer
619 619 return lexer
620 620
621 621
622 622 def pygmentize(filenode, **kwargs):
623 623 """
624 624 pygmentize function using pygments
625 625
626 626 :param filenode:
627 627 """
628 628 lexer = get_lexer_for_filenode(filenode)
629 629 return literal(code_highlight(filenode.content, lexer,
630 630 CodeHtmlFormatter(**kwargs)))
631 631
632 632
633 633 def is_following_repo(repo_name, user_id):
634 634 from rhodecode.model.scm import ScmModel
635 635 return ScmModel().is_following_repo(repo_name, user_id)
636 636
637 637
638 638 class _Message(object):
639 639 """A message returned by ``Flash.pop_messages()``.
640 640
641 641 Converting the message to a string returns the message text. Instances
642 642 also have the following attributes:
643 643
644 644 * ``message``: the message text.
645 645 * ``category``: the category specified when the message was created.
646 646 """
647 647
648 648 def __init__(self, category, message):
649 649 self.category = category
650 650 self.message = message
651 651
652 652 def __str__(self):
653 653 return self.message
654 654
655 655 __unicode__ = __str__
656 656
657 657 def __html__(self):
658 658 return escape(safe_unicode(self.message))
659 659
660 660
661 661 class Flash(_Flash):
662 662
663 663 def pop_messages(self):
664 664 """Return all accumulated messages and delete them from the session.
665 665
666 666 The return value is a list of ``Message`` objects.
667 667 """
668 668 from pylons import session
669 669
670 670 messages = []
671 671
672 672 # Pop the 'old' pylons flash messages. They are tuples of the form
673 673 # (category, message)
674 674 for cat, msg in session.pop(self.session_key, []):
675 675 messages.append(_Message(cat, msg))
676 676
677 677 # Pop the 'new' pyramid flash messages for each category as list
678 678 # of strings.
679 679 for cat in self.categories:
680 680 for msg in session.pop_flash(queue=cat):
681 681 messages.append(_Message(cat, msg))
682 682 # Map messages from the default queue to the 'notice' category.
683 683 for msg in session.pop_flash():
684 684 messages.append(_Message('notice', msg))
685 685
686 686 session.save()
687 687 return messages
688 688
689 689 def json_alerts(self):
690 690 payloads = []
691 691 messages = flash.pop_messages()
692 692 if messages:
693 693 for message in messages:
694 694 subdata = {}
695 695 if hasattr(message.message, 'rsplit'):
696 696 flash_data = message.message.rsplit('|DELIM|', 1)
697 697 org_message = flash_data[0]
698 698 if len(flash_data) > 1:
699 699 subdata = json.loads(flash_data[1])
700 700 else:
701 701 org_message = message.message
702 702 payloads.append({
703 703 'message': {
704 704 'message': u'{}'.format(org_message),
705 705 'level': message.category,
706 706 'force': True,
707 707 'subdata': subdata
708 708 }
709 709 })
710 710 return json.dumps(payloads)
711 711
712 712 flash = Flash()
713 713
714 714 #==============================================================================
715 715 # SCM FILTERS available via h.
716 716 #==============================================================================
717 717 from rhodecode.lib.vcs.utils import author_name, author_email
718 718 from rhodecode.lib.utils2 import credentials_filter, age as _age
719 719 from rhodecode.model.db import User, ChangesetStatus
720 720
721 721 age = _age
722 722 capitalize = lambda x: x.capitalize()
723 723 email = author_email
724 724 short_id = lambda x: x[:12]
725 725 hide_credentials = lambda x: ''.join(credentials_filter(x))
726 726
727 727
728 728 def age_component(datetime_iso, value=None, time_is_local=False):
729 729 title = value or format_date(datetime_iso)
730 730 tzinfo = '+00:00'
731 731
732 732 # detect if we have a timezone info, otherwise, add it
733 733 if isinstance(datetime_iso, datetime) and not datetime_iso.tzinfo:
734 734 if time_is_local:
735 735 tzinfo = time.strftime("+%H:%M",
736 736 time.gmtime(
737 737 (datetime.now() - datetime.utcnow()).seconds + 1
738 738 )
739 739 )
740 740
741 741 return literal(
742 742 '<time class="timeago tooltip" '
743 743 'title="{1}{2}" datetime="{0}{2}">{1}</time>'.format(
744 744 datetime_iso, title, tzinfo))
745 745
746 746
747 747 def _shorten_commit_id(commit_id):
748 748 from rhodecode import CONFIG
749 749 def_len = safe_int(CONFIG.get('rhodecode_show_sha_length', 12))
750 750 return commit_id[:def_len]
751 751
752 752
753 753 def show_id(commit):
754 754 """
755 755 Configurable function that shows ID
756 756 by default it's r123:fffeeefffeee
757 757
758 758 :param commit: commit instance
759 759 """
760 760 from rhodecode import CONFIG
761 761 show_idx = str2bool(CONFIG.get('rhodecode_show_revision_number', True))
762 762
763 763 raw_id = _shorten_commit_id(commit.raw_id)
764 764 if show_idx:
765 765 return 'r%s:%s' % (commit.idx, raw_id)
766 766 else:
767 767 return '%s' % (raw_id, )
768 768
769 769
770 770 def format_date(date):
771 771 """
772 772 use a standardized formatting for dates used in RhodeCode
773 773
774 774 :param date: date/datetime object
775 775 :return: formatted date
776 776 """
777 777
778 778 if date:
779 779 _fmt = "%a, %d %b %Y %H:%M:%S"
780 780 return safe_unicode(date.strftime(_fmt))
781 781
782 782 return u""
783 783
784 784
785 785 class _RepoChecker(object):
786 786
787 787 def __init__(self, backend_alias):
788 788 self._backend_alias = backend_alias
789 789
790 790 def __call__(self, repository):
791 791 if hasattr(repository, 'alias'):
792 792 _type = repository.alias
793 793 elif hasattr(repository, 'repo_type'):
794 794 _type = repository.repo_type
795 795 else:
796 796 _type = repository
797 797 return _type == self._backend_alias
798 798
799 799 is_git = _RepoChecker('git')
800 800 is_hg = _RepoChecker('hg')
801 801 is_svn = _RepoChecker('svn')
802 802
803 803
804 804 def get_repo_type_by_name(repo_name):
805 805 repo = Repository.get_by_repo_name(repo_name)
806 806 return repo.repo_type
807 807
808 808
809 809 def is_svn_without_proxy(repository):
810 810 if is_svn(repository):
811 811 from rhodecode.model.settings import VcsSettingsModel
812 812 conf = VcsSettingsModel().get_ui_settings_as_config_obj()
813 813 return not str2bool(conf.get('vcs_svn_proxy', 'http_requests_enabled'))
814 814 return False
815 815
816 816
817 817 def discover_user(author):
818 818 """
819 819 Tries to discover a RhodeCode User based on the author string. The author
820 820 string is typically `FirstName LastName <email@address.com>`
821 821 """
822 822
823 823 # if author is already an instance use it for extraction
824 824 if isinstance(author, User):
825 825 return author
826 826
827 827 # Valid email in the attribute passed, see if they're in the system
828 828 _email = author_email(author)
829 829 if _email != '':
830 830 user = User.get_by_email(_email, case_insensitive=True, cache=True)
831 831 if user is not None:
832 832 return user
833 833
834 834 # Maybe it's a username, we try to extract it and fetch by username ?
835 835 _author = author_name(author)
836 836 user = User.get_by_username(_author, case_insensitive=True, cache=True)
837 837 if user is not None:
838 838 return user
839 839
840 840 return None
841 841
842 842
843 843 def email_or_none(author):
844 844 # extract email from the commit string
845 845 _email = author_email(author)
846 846
847 847 # If we have an email, use it, otherwise
848 848 # see if it contains a username we can get an email from
849 849 if _email != '':
850 850 return _email
851 851 else:
852 852 user = User.get_by_username(
853 853 author_name(author), case_insensitive=True, cache=True)
854 854
855 855 if user is not None:
856 856 return user.email
857 857
858 858 # No valid email, not a valid user in the system, none!
859 859 return None
860 860
861 861
862 862 def link_to_user(author, length=0, **kwargs):
863 863 user = discover_user(author)
864 864 # user can be None, but if we have it already it means we can re-use it
865 865 # in the person() function, so we save 1 intensive-query
866 866 if user:
867 867 author = user
868 868
869 869 display_person = person(author, 'username_or_name_or_email')
870 870 if length:
871 871 display_person = shorter(display_person, length)
872 872
873 873 if user:
874 874 return link_to(
875 875 escape(display_person),
876 876 route_path('user_profile', username=user.username),
877 877 **kwargs)
878 878 else:
879 879 return escape(display_person)
880 880
881 881
882 882 def person(author, show_attr="username_and_name"):
883 883 user = discover_user(author)
884 884 if user:
885 885 return getattr(user, show_attr)
886 886 else:
887 887 _author = author_name(author)
888 888 _email = email(author)
889 889 return _author or _email
890 890
891 891
892 892 def author_string(email):
893 893 if email:
894 894 user = User.get_by_email(email, case_insensitive=True, cache=True)
895 895 if user:
896 896 if user.first_name or user.last_name:
897 897 return '%s %s &lt;%s&gt;' % (
898 898 user.first_name, user.last_name, email)
899 899 else:
900 900 return email
901 901 else:
902 902 return email
903 903 else:
904 904 return None
905 905
906 906
907 907 def person_by_id(id_, show_attr="username_and_name"):
908 908 # attr to return from fetched user
909 909 person_getter = lambda usr: getattr(usr, show_attr)
910 910
911 911 #maybe it's an ID ?
912 912 if str(id_).isdigit() or isinstance(id_, int):
913 913 id_ = int(id_)
914 914 user = User.get(id_)
915 915 if user is not None:
916 916 return person_getter(user)
917 917 return id_
918 918
919 919
920 920 def gravatar_with_user(author, show_disabled=False):
921 921 from rhodecode.lib.utils import PartialRenderer
922 922 _render = PartialRenderer('base/base.mako')
923 923 return _render('gravatar_with_user', author, show_disabled=show_disabled)
924 924
925 925
926 926 def desc_stylize(value):
927 927 """
928 928 converts tags from value into html equivalent
929 929
930 930 :param value:
931 931 """
932 932 if not value:
933 933 return ''
934 934
935 935 value = re.sub(r'\[see\ \=\>\ *([a-zA-Z0-9\/\=\?\&\ \:\/\.\-]*)\]',
936 936 '<div class="metatag" tag="see">see =&gt; \\1 </div>', value)
937 937 value = re.sub(r'\[license\ \=\>\ *([a-zA-Z0-9\/\=\?\&\ \:\/\.\-]*)\]',
938 938 '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/\\1">\\1</a></div>', value)
939 939 value = re.sub(r'\[(requires|recommends|conflicts|base)\ \=\>\ *([a-zA-Z0-9\-\/]*)\]',
940 940 '<div class="metatag" tag="\\1">\\1 =&gt; <a href="/\\2">\\2</a></div>', value)
941 941 value = re.sub(r'\[(lang|language)\ \=\>\ *([a-zA-Z\-\/\#\+]*)\]',
942 942 '<div class="metatag" tag="lang">\\2</div>', value)
943 943 value = re.sub(r'\[([a-z]+)\]',
944 944 '<div class="metatag" tag="\\1">\\1</div>', value)
945 945
946 946 return value
947 947
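# Illustrative usage of desc_stylize() above (a doctest-style sketch; the tag
# values are hypothetical):
# >>> desc_stylize('[requires => rhodecode-tools] [stable]')
# '<div class="metatag" tag="requires">requires =&gt; <a href="/rhodecode-tools">rhodecode-tools</a></div> <div class="metatag" tag="stable">stable</div>'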
948 948
949 949 def escaped_stylize(value):
950 950 """
951 951 converts tags from value into html equivalent, but escaping its value first
952 952 """
953 953 if not value:
954 954 return ''
955 955
956 956 # Using default webhelper escape method, but has to force it as a
957 957 # plain unicode instead of a markup tag to be used in regex expressions
958 958 value = unicode(escape(safe_unicode(value)))
959 959
960 960 value = re.sub(r'\[see\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]',
961 961 '<div class="metatag" tag="see">see =&gt; \\1 </div>', value)
962 962 value = re.sub(r'\[license\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]',
963 963 '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/\\1">\\1</a></div>', value)
964 964 value = re.sub(r'\[(requires|recommends|conflicts|base)\ \=\&gt;\ *([a-zA-Z0-9\-\/]*)\]',
965 965 '<div class="metatag" tag="\\1">\\1 =&gt; <a href="/\\2">\\2</a></div>', value)
966 966 value = re.sub(r'\[(lang|language)\ \=\&gt;\ *([a-zA-Z\-\/\#\+]*)\]',
967 967 '<div class="metatag" tag="lang">\\2</div>', value)
968 968 value = re.sub(r'\[([a-z]+)\]',
969 969 '<div class="metatag" tag="\\1">\\1</div>', value)
970 970
971 971 return value
972 972
973 973
974 974 def bool2icon(value):
975 975 """
976 976 Returns boolean value of a given value, represented as html element with
977 977 classes that will represent icons
978 978
979 979 :param value: given value to convert to html node
980 980 """
981 981
982 982 if value: # does bool conversion
983 983 return HTML.tag('i', class_="icon-true")
984 984 else: # not true as bool
985 985 return HTML.tag('i', class_="icon-false")
986 986
987 987
988 988 #==============================================================================
989 989 # PERMS
990 990 #==============================================================================
991 991 from rhodecode.lib.auth import HasPermissionAny, HasPermissionAll, \
992 992 HasRepoPermissionAny, HasRepoPermissionAll, HasRepoGroupPermissionAll, \
993 993 HasRepoGroupPermissionAny, HasRepoPermissionAnyApi, get_csrf_token, \
994 994 csrf_token_key
995 995
996 996
997 997 #==============================================================================
998 998 # GRAVATAR URL
999 999 #==============================================================================
1000 1000 class InitialsGravatar(object):
1001 1001 def __init__(self, email_address, first_name, last_name, size=30,
1002 1002 background=None, text_color='#fff'):
1003 1003 self.size = size
1004 1004 self.first_name = first_name
1005 1005 self.last_name = last_name
1006 1006 self.email_address = email_address
1007 1007 self.background = background or self.str2color(email_address)
1008 1008 self.text_color = text_color
1009 1009
1010 1010 def get_color_bank(self):
1011 1011 """
1012 1012 returns a predefined list of colors that gravatars can use.
1013 1013 Those are randomized distinct colors that guarantee readability and
1014 1014 uniqueness.
1015 1015
1016 1016 generated with: http://phrogz.net/css/distinct-colors.html
1017 1017 """
1018 1018 return [
1019 1019 '#bf3030', '#a67f53', '#00ff00', '#5989b3', '#392040', '#d90000',
1020 1020 '#402910', '#204020', '#79baf2', '#a700b3', '#bf6060', '#7f5320',
1021 1021 '#008000', '#003059', '#ee00ff', '#ff0000', '#8c4b00', '#007300',
1022 1022 '#005fb3', '#de73e6', '#ff4040', '#ffaa00', '#3df255', '#203140',
1023 1023 '#47004d', '#591616', '#664400', '#59b365', '#0d2133', '#83008c',
1024 1024 '#592d2d', '#bf9f60', '#73e682', '#1d3f73', '#73006b', '#402020',
1025 1025 '#b2862d', '#397341', '#597db3', '#e600d6', '#a60000', '#736039',
1026 1026 '#00b318', '#79aaf2', '#330d30', '#ff8080', '#403010', '#16591f',
1027 1027 '#002459', '#8c4688', '#e50000', '#ffbf40', '#00732e', '#102340',
1028 1028 '#bf60ac', '#8c4646', '#cc8800', '#00a642', '#1d3473', '#b32d98',
1029 1029 '#660e00', '#ffd580', '#80ffb2', '#7391e6', '#733967', '#d97b6c',
1030 1030 '#8c5e00', '#59b389', '#3967e6', '#590047', '#73281d', '#665200',
1031 1031 '#00e67a', '#2d50b3', '#8c2377', '#734139', '#b2982d', '#16593a',
1032 1032 '#001859', '#ff00aa', '#a65e53', '#ffcc00', '#0d3321', '#2d3959',
1033 1033 '#731d56', '#401610', '#4c3d00', '#468c6c', '#002ca6', '#d936a3',
1034 1034 '#d94c36', '#403920', '#36d9a3', '#0d1733', '#592d4a', '#993626',
1035 1035 '#cca300', '#00734d', '#46598c', '#8c005e', '#7f1100', '#8c7000',
1036 1036 '#00a66f', '#7382e6', '#b32d74', '#d9896c', '#ffe680', '#1d7362',
1037 1037 '#364cd9', '#73003d', '#d93a00', '#998a4d', '#59b3a1', '#5965b3',
1038 1038 '#e5007a', '#73341d', '#665f00', '#00b38f', '#0018b3', '#59163a',
1039 1039 '#b2502d', '#bfb960', '#00ffcc', '#23318c', '#a6537f', '#734939',
1040 1040 '#b2a700', '#104036', '#3d3df2', '#402031', '#e56739', '#736f39',
1041 1041 '#79f2ea', '#000059', '#401029', '#4c1400', '#ffee00', '#005953',
1042 1042 '#101040', '#990052', '#402820', '#403d10', '#00ffee', '#0000d9',
1043 1043 '#ff80c4', '#a66953', '#eeff00', '#00ccbe', '#8080ff', '#e673a1',
1044 1044 '#a62c00', '#474d00', '#1a3331', '#46468c', '#733950', '#662900',
1045 1045 '#858c23', '#238c85', '#0f0073', '#b20047', '#d9986c', '#becc00',
1046 1046 '#396f73', '#281d73', '#ff0066', '#ff6600', '#dee673', '#59adb3',
1047 1047 '#6559b3', '#590024', '#b2622d', '#98b32d', '#36ced9', '#332d59',
1048 1048 '#40001a', '#733f1d', '#526600', '#005359', '#242040', '#bf6079',
1049 1049 '#735039', '#cef23d', '#007780', '#5630bf', '#66001b', '#b24700',
1050 1050 '#acbf60', '#1d6273', '#25008c', '#731d34', '#a67453', '#50592d',
1051 1051 '#00ccff', '#6600ff', '#ff0044', '#4c1f00', '#8a994d', '#79daf2',
1052 1052 '#a173e6', '#d93662', '#402310', '#aaff00', '#2d98b3', '#8c40ff',
1053 1053 '#592d39', '#ff8c40', '#354020', '#103640', '#1a0040', '#331a20',
1054 1054 '#331400', '#334d00', '#1d5673', '#583973', '#7f0022', '#4c3626',
1055 1055 '#88cc00', '#36a3d9', '#3d0073', '#d9364c', '#33241a', '#698c23',
1056 1056 '#5995b3', '#300059', '#e57382', '#7f3300', '#366600', '#00aaff',
1057 1057 '#3a1659', '#733941', '#663600', '#74b32d', '#003c59', '#7f53a6',
1058 1058 '#73000f', '#ff8800', '#baf279', '#79caf2', '#291040', '#a6293a',
1059 1059 '#b2742d', '#587339', '#0077b3', '#632699', '#400009', '#d9a66c',
1060 1060 '#294010', '#2d4a59', '#aa00ff', '#4c131b', '#b25f00', '#5ce600',
1061 1061 '#267399', '#a336d9', '#990014', '#664e33', '#86bf60', '#0088ff',
1062 1062 '#7700b3', '#593a16', '#073300', '#1d4b73', '#ac60bf', '#e59539',
1063 1063 '#4f8c46', '#368dd9', '#5c0073'
1064 1064 ]
1065 1065
1066 1066 def rgb_to_hex_color(self, rgb_tuple):
1067 1067 """
1068 1068 Converts a passed rgb_tuple to a hex color.
1069 1069
1070 1070 :param rgb_tuple: tuple with 3 ints represents rgb color space
1071 1071 """
1072 1072 return '#' + ("".join(map(chr, rgb_tuple)).encode('hex'))
1073 1073
1074 1074 def email_to_int_list(self, email_str):
1075 1075 """
1076 1076 Get every byte of the hex digest value of the email and turn it into an integer.
1077 1077 Each value is always between 0-255
1078 1078 """
1079 1079 digest = md5_safe(email_str.lower())
1080 1080 return [int(digest[i * 2:i * 2 + 2], 16) for i in range(16)]
1081 1081
1082 1082 def pick_color_bank_index(self, email_str, color_bank):
1083 1083 return self.email_to_int_list(email_str)[0] % len(color_bank)
1084 1084
1085 1085 def str2color(self, email_str):
1086 1086 """
1087 1087 Tries to map an email to a color using a stable algorithm
1088 1088
1089 1089 :param email_str:
1090 1090 """
1091 1091 color_bank = self.get_color_bank()
1092 1092 # pick position (modulo its length so we always find it in the
1093 1093 # bank even if it's smaller than 256 values)
1094 1094 pos = self.pick_color_bank_index(email_str, color_bank)
1095 1095 return color_bank[pos]
1096 1096
1097 1097 def normalize_email(self, email_address):
1098 1098 import unicodedata
1099 1099 # default host used to fill in the fake/missing email
1100 1100 default_host = u'localhost'
1101 1101
1102 1102 if not email_address:
1103 1103 email_address = u'%s@%s' % (User.DEFAULT_USER, default_host)
1104 1104
1105 1105 email_address = safe_unicode(email_address)
1106 1106
1107 1107 if u'@' not in email_address:
1108 1108 email_address = u'%s@%s' % (email_address, default_host)
1109 1109
1110 1110 if email_address.endswith(u'@'):
1111 1111 email_address = u'%s%s' % (email_address, default_host)
1112 1112
1113 1113 email_address = unicodedata.normalize('NFKD', email_address)\
1114 1114 .encode('ascii', 'ignore')
1115 1115 return email_address
1116 1116
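# Illustrative behaviour of normalize_email() above (hypothetical addresses):
# a missing host or a trailing '@' is padded with the default host, e.g.
# 'marcin' and 'marcin@' both normalize to 'marcin@localhost'.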
1117 1117 def get_initials(self):
1118 1118 """
1119 1119 Returns 2 letter initials calculated based on the input.
1120 1120 The algorithm picks the first given email address, and takes the first
1121 1121 letter of the part before @, and then the first letter of the server name.
1122 1122 In case the part before @ is in the format `somestring.somestring2`, it
1123 1123 replaces the server letter with the first letter of somestring2.
1124 1124
1125 1125 In case the function was initialized with both first and last name, this
1126 1126 overrides the extraction from email with the first letter of the first and
1127 1127 last name. We add special logic to that functionality: in case the full name
1128 1128 is compound, like Guido Von Rossum, we use the last part of the last name
1129 1129 (Von Rossum), picking `R`.
1130 1130
1131 1131 The function also normalizes non-ascii characters to their ascii
1132 1132 representation, eg Ą => A
1133 1133 """
1134 1134 import unicodedata
1135 1135 # replace non-ascii to ascii
1136 1136 first_name = unicodedata.normalize(
1137 1137 'NFKD', safe_unicode(self.first_name)).encode('ascii', 'ignore')
1138 1138 last_name = unicodedata.normalize(
1139 1139 'NFKD', safe_unicode(self.last_name)).encode('ascii', 'ignore')
1140 1140
1141 1141 # do NFKD encoding, and also make sure email has proper format
1142 1142 email_address = self.normalize_email(self.email_address)
1143 1143
1144 1144 # first push the email initials
1145 1145 prefix, server = email_address.split('@', 1)
1146 1146
1147 1147 # check if prefix is maybe a 'first_name.last_name' syntax
1148 1148 _dot_split = prefix.rsplit('.', 1)
1149 1149 if len(_dot_split) == 2:
1150 1150 initials = [_dot_split[0][0], _dot_split[1][0]]
1151 1151 else:
1152 1152 initials = [prefix[0], server[0]]
1153 1153
1154 1154 # then try to replace either first_name or last_name
1155 1155 fn_letter = (first_name or " ")[0].strip()
1156 1156 ln_letter = (last_name.split(' ', 1)[-1] or " ")[0].strip()
1157 1157
1158 1158 if fn_letter:
1159 1159 initials[0] = fn_letter
1160 1160
1161 1161 if ln_letter:
1162 1162 initials[1] = ln_letter
1163 1163
1164 1164 return ''.join(initials).upper()
1165 1165
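# Illustrative results of get_initials() above (a doctest-style sketch with
# hypothetical addresses and names):
# >>> InitialsGravatar('john.doe@example.com', '', '').get_initials()
# 'JD'
# >>> InitialsGravatar('admin@example.com', 'Guido', 'Von Rossum').get_initials()
# 'GR'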
1166 1166 def get_img_data_by_type(self, font_family, img_type):
1167 1167 default_user = """
1168 1168 <svg xmlns="http://www.w3.org/2000/svg"
1169 1169 version="1.1" x="0px" y="0px" width="{size}" height="{size}"
1170 1170 viewBox="-15 -10 439.165 429.164"
1171 1171
1172 1172 xml:space="preserve"
1173 1173 style="background:{background};" >
1174 1174
1175 1175 <path d="M204.583,216.671c50.664,0,91.74-48.075,
1176 1176 91.74-107.378c0-82.237-41.074-107.377-91.74-107.377
1177 1177 c-50.668,0-91.74,25.14-91.74,107.377C112.844,
1178 1178 168.596,153.916,216.671,
1179 1179 204.583,216.671z" fill="{text_color}"/>
1180 1180 <path d="M407.164,374.717L360.88,
1181 1181 270.454c-2.117-4.771-5.836-8.728-10.465-11.138l-71.83-37.392
1182 1182 c-1.584-0.823-3.502-0.663-4.926,0.415c-20.316,
1183 1183 15.366-44.203,23.488-69.076,23.488c-24.877,
1184 1184 0-48.762-8.122-69.078-23.488
1185 1185 c-1.428-1.078-3.346-1.238-4.93-0.415L58.75,
1186 1186 259.316c-4.631,2.41-8.346,6.365-10.465,11.138L2.001,374.717
1187 1187 c-3.191,7.188-2.537,15.412,1.75,22.005c4.285,
1188 1188 6.592,11.537,10.526,19.4,10.526h362.861c7.863,0,15.117-3.936,
1189 1189 19.402-10.527 C409.699,390.129,
1190 1190 410.355,381.902,407.164,374.717z" fill="{text_color}"/>
1191 1191 </svg>""".format(
1192 1192 size=self.size,
1193 1193 background='#979797', # @grey4
1194 1194 text_color=self.text_color,
1195 1195 font_family=font_family)
1196 1196
1197 1197 return {
1198 1198 "default_user": default_user
1199 1199 }[img_type]
1200 1200
1201 1201 def get_img_data(self, svg_type=None):
1202 1202 """
1203 1203 generates the svg metadata for image
1204 1204 """
1205 1205
1206 1206 font_family = ','.join([
1207 1207 'proximanovaregular',
1208 1208 'Proxima Nova Regular',
1209 1209 'Proxima Nova',
1210 1210 'Arial',
1211 1211 'Lucida Grande',
1212 1212 'sans-serif'
1213 1213 ])
1214 1214 if svg_type:
1215 1215 return self.get_img_data_by_type(font_family, svg_type)
1216 1216
1217 1217 initials = self.get_initials()
1218 1218 img_data = """
1219 1219 <svg xmlns="http://www.w3.org/2000/svg" pointer-events="none"
1220 1220 width="{size}" height="{size}"
1221 1221 style="width: 100%; height: 100%; background-color: {background}"
1222 1222 viewBox="0 0 {size} {size}">
1223 1223 <text text-anchor="middle" y="50%" x="50%" dy="0.35em"
1224 1224 pointer-events="auto" fill="{text_color}"
1225 1225 font-family="{font_family}"
1226 1226 style="font-weight: 400; font-size: {f_size}px;">{text}
1227 1227 </text>
1228 1228 </svg>""".format(
1229 1229 size=self.size,
1230 1230 f_size=self.size/1.85, # scale the text inside the box nicely
1231 1231 background=self.background,
1232 1232 text_color=self.text_color,
1233 1233 text=initials.upper(),
1234 1234 font_family=font_family)
1235 1235
1236 1236 return img_data
1237 1237
1238 1238 def generate_svg(self, svg_type=None):
1239 1239 img_data = self.get_img_data(svg_type)
1240 1240 return "data:image/svg+xml;base64,%s" % img_data.encode('base64')
1241 1241
1242 1242
1243 1243 def initials_gravatar(email_address, first_name, last_name, size=30):
1244 1244 svg_type = None
1245 1245 if email_address == User.DEFAULT_USER_EMAIL:
1246 1246 svg_type = 'default_user'
1247 1247 klass = InitialsGravatar(email_address, first_name, last_name, size)
1248 1248 return klass.generate_svg(svg_type=svg_type)
1249 1249
1250 1250
1251 1251 def gravatar_url(email_address, size=30, request=None):
1252 1252 request = get_current_request()
1253 1253 if request and hasattr(request, 'call_context'):
1254 1254 _use_gravatar = request.call_context.visual.use_gravatar
1255 1255 _gravatar_url = request.call_context.visual.gravatar_url
1256 1256 else:
1257 1257 # doh, we need to re-import those to mock it later
1258 1258 from pylons import tmpl_context as c
1259 1259
1260 1260 _use_gravatar = c.visual.use_gravatar
1261 1261 _gravatar_url = c.visual.gravatar_url
1262 1262
1263 1263 _gravatar_url = _gravatar_url or User.DEFAULT_GRAVATAR_URL
1264 1264
1265 1265 email_address = email_address or User.DEFAULT_USER_EMAIL
1266 1266 if isinstance(email_address, unicode):
1267 1267 # hashlib crashes on unicode items
1268 1268 email_address = safe_str(email_address)
1269 1269
1270 1270 # empty email or default user
1271 1271 if not email_address or email_address == User.DEFAULT_USER_EMAIL:
1272 1272 return initials_gravatar(User.DEFAULT_USER_EMAIL, '', '', size=size)
1273 1273
1274 1274 if _use_gravatar:
1275 1275 # TODO: Disuse pyramid thread locals. Think about another solution to
1276 1276 # get the host and schema here.
1277 1277 request = get_current_request()
1278 1278 tmpl = safe_str(_gravatar_url)
1279 1279 tmpl = tmpl.replace('{email}', email_address)\
1280 1280 .replace('{md5email}', md5_safe(email_address.lower())) \
1281 1281 .replace('{netloc}', request.host)\
1282 1282 .replace('{scheme}', request.scheme)\
1283 1283 .replace('{size}', safe_str(size))
1284 1284 return tmpl
1285 1285 else:
1286 1286 return initials_gravatar(email_address, '', '', size=size)
1287 1287
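# A sketch of the template expansion above, assuming a hypothetical gravatar
# template 'https://example.com/avatar/{md5email}?s={size}': '{md5email}' is
# replaced with the md5 of the lowercased address, '{netloc}'/'{scheme}' with
# the current request host and scheme, and '{size}' with the requested pixel
# size, yielding e.g. 'https://example.com/avatar/<md5-of-email>?s=30'.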
1288 1288
1289 1289 class Page(_Page):
1290 1290 """
1291 1291 Custom pager to match rendering style with paginator
1292 1292 """
1293 1293
1294 1294 def _get_pos(self, cur_page, max_page, items):
1295 1295 edge = (items / 2) + 1
1296 1296 if (cur_page <= edge):
1297 1297 radius = max(items / 2, items - cur_page)
1298 1298 elif (max_page - cur_page) < edge:
1299 1299 radius = (items - 1) - (max_page - cur_page)
1300 1300 else:
1301 1301 radius = items / 2
1302 1302
1303 1303 left = max(1, (cur_page - (radius)))
1304 1304 right = min(max_page, cur_page + (radius))
1305 1305 return left, cur_page, right
1306 1306
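# Worked example of the window computation in _get_pos() above (Python 2
# integer division), matching the '1 .. 5 6 [7] 8 9 .. 12' sample used in
# _range() below: for cur_page=7, max_page=12, items=5 the edge is 5/2 + 1 = 3,
# neither boundary branch applies, so radius = 5/2 = 2 and the result is
# left=max(1, 7-2)=5, right=min(12, 7+2)=9, i.e. (5, 7, 9).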
1307 1307 def _range(self, regexp_match):
1308 1308 """
1309 1309 Return range of linked pages (e.g. '1 2 [3] 4 5 6 7 8').
1310 1310
1311 1311 Arguments:
1312 1312
1313 1313 regexp_match
1314 1314 A "re" (regular expressions) match object containing the
1315 1315 radius of linked pages around the current page in
1316 1316 regexp_match.group(1) as a string
1317 1317
1318 1318 This function is supposed to be called as a callable in
1319 1319 re.sub.
1320 1320
1321 1321 """
1322 1322 radius = int(regexp_match.group(1))
1323 1323
1324 1324 # Compute the first and last page number within the radius
1325 1325 # e.g. '1 .. 5 6 [7] 8 9 .. 12'
1326 1326 # -> leftmost_page = 5
1327 1327 # -> rightmost_page = 9
1328 1328 leftmost_page, _cur, rightmost_page = self._get_pos(self.page,
1329 1329 self.last_page,
1330 1330 (radius * 2) + 1)
1331 1331 nav_items = []
1332 1332
1333 1333 # Create a link to the first page (unless we are on the first page
1334 1334 # or there would be no need to insert '..' spacers)
1335 1335 if self.page != self.first_page and self.first_page < leftmost_page:
1336 1336 nav_items.append(self._pagerlink(self.first_page, self.first_page))
1337 1337
1338 1338 # Insert dots if there are pages between the first page
1339 1339 # and the currently displayed page range
1340 1340 if leftmost_page - self.first_page > 1:
1341 1341 # Wrap in a SPAN tag if dotdot_attr is set
1342 1342 text = '..'
1343 1343 if self.dotdot_attr:
1344 1344 text = HTML.span(c=text, **self.dotdot_attr)
1345 1345 nav_items.append(text)
1346 1346
1347 1347 for thispage in xrange(leftmost_page, rightmost_page + 1):
1348 1348 # Highlight the current page number and do not use a link
1349 1349 if thispage == self.page:
1350 1350 text = '%s' % (thispage,)
1351 1351 # Wrap in a SPAN tag if curpage_attr is set
1352 1352 if self.curpage_attr:
1353 1353 text = HTML.span(c=text, **self.curpage_attr)
1354 1354 nav_items.append(text)
1355 1355 # Otherwise create just a link to that page
1356 1356 else:
1357 1357 text = '%s' % (thispage,)
1358 1358 nav_items.append(self._pagerlink(thispage, text))
1359 1359
1360 1360 # Insert dots if there are pages between the displayed
1361 1361 # page numbers and the end of the page range
1362 1362 if self.last_page - rightmost_page > 1:
1363 1363 text = '..'
1364 1364 # Wrap in a SPAN tag if dotdot_attr is set
1365 1365 if self.dotdot_attr:
1366 1366 text = HTML.span(c=text, **self.dotdot_attr)
1367 1367 nav_items.append(text)
1368 1368
1369 1369 # Create a link to the very last page (unless we are on the last
1370 1370 # page or there would be no need to insert '..' spacers)
1371 1371 if self.page != self.last_page and rightmost_page < self.last_page:
1372 1372 nav_items.append(self._pagerlink(self.last_page, self.last_page))
1373 1373
1374 1374 ## prerender links
1375 1375 #_page_link = url.current()
1376 1376 #nav_items.append(literal('<link rel="prerender" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
1377 1377 #nav_items.append(literal('<link rel="prefetch" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
1378 1378 return self.separator.join(nav_items)
1379 1379
1380 1380 def pager(self, format='~2~', page_param='page', partial_param='partial',
1381 1381 show_if_single_page=False, separator=' ', onclick=None,
1382 1382 symbol_first='<<', symbol_last='>>',
1383 1383 symbol_previous='<', symbol_next='>',
1384 1384 link_attr={'class': 'pager_link', 'rel': 'prerender'},
1385 1385 curpage_attr={'class': 'pager_curpage'},
1386 1386 dotdot_attr={'class': 'pager_dotdot'}, **kwargs):
1387 1387
1388 1388 self.curpage_attr = curpage_attr
1389 1389 self.separator = separator
1390 1390 self.pager_kwargs = kwargs
1391 1391 self.page_param = page_param
1392 1392 self.partial_param = partial_param
1393 1393 self.onclick = onclick
1394 1394 self.link_attr = link_attr
1395 1395 self.dotdot_attr = dotdot_attr
1396 1396
1397 1397 # Don't show navigator if there is no more than one page
1398 1398 if self.page_count == 0 or (self.page_count == 1 and not show_if_single_page):
1399 1399 return ''
1400 1400
1401 1401 from string import Template
1402 1402 # Replace ~...~ in token format by range of pages
1403 1403 result = re.sub(r'~(\d+)~', self._range, format)
1404 1404
1405 1405 # Interpolate '%' variables
1406 1406 result = Template(result).safe_substitute({
1407 1407 'first_page': self.first_page,
1408 1408 'last_page': self.last_page,
1409 1409 'page': self.page,
1410 1410 'page_count': self.page_count,
1411 1411 'items_per_page': self.items_per_page,
1412 1412 'first_item': self.first_item,
1413 1413 'last_item': self.last_item,
1414 1414 'item_count': self.item_count,
1415 1415 'link_first': self.page > self.first_page and \
1416 1416 self._pagerlink(self.first_page, symbol_first) or '',
1417 1417 'link_last': self.page < self.last_page and \
1418 1418 self._pagerlink(self.last_page, symbol_last) or '',
1419 1419 'link_previous': self.previous_page and \
1420 1420 self._pagerlink(self.previous_page, symbol_previous) \
1421 1421 or HTML.span(symbol_previous, class_="pg-previous disabled"),
1422 1422 'link_next': self.next_page and \
1423 1423 self._pagerlink(self.next_page, symbol_next) \
1424 1424 or HTML.span(symbol_next, class_="pg-next disabled")
1425 1425 })
1426 1426
1427 1427 return literal(result)
1428 1428
1429 1429
1430 1430 #==============================================================================
1431 1431 # REPO PAGER, PAGER FOR REPOSITORY
1432 1432 #==============================================================================
1433 1433 class RepoPage(Page):
1434 1434
1435 1435 def __init__(self, collection, page=1, items_per_page=20,
1436 1436 item_count=None, url=None, **kwargs):
1437 1437
1438 1438 """Create a "RepoPage" instance. special pager for paging
1439 1439 repository
1440 1440 """
1441 1441 self._url_generator = url
1442 1442
1443 1443 # Save the kwargs class-wide so they can be used in the pager() method
1444 1444 self.kwargs = kwargs
1445 1445
1446 1446 # Save a reference to the collection
1447 1447 self.original_collection = collection
1448 1448
1449 1449 self.collection = collection
1450 1450
1451 1451 # The self.page is the number of the current page.
1452 1452 # The first page has the number 1!
1453 1453 try:
1454 1454 self.page = int(page) # make it int() if we get it as a string
1455 1455 except (ValueError, TypeError):
1456 1456 self.page = 1
1457 1457
1458 1458 self.items_per_page = items_per_page
1459 1459
1460 1460 # Unless the user tells us how many items the collection has
1461 1461 # we calculate that ourselves.
1462 1462 if item_count is not None:
1463 1463 self.item_count = item_count
1464 1464 else:
1465 1465 self.item_count = len(self.collection)
1466 1466
1467 1467 # Compute the number of the first and last available page
1468 1468 if self.item_count > 0:
1469 1469 self.first_page = 1
1470 1470 self.page_count = int(math.ceil(float(self.item_count) /
1471 1471 self.items_per_page))
1472 1472 self.last_page = self.first_page + self.page_count - 1
1473 1473
1474 1474 # Make sure that the requested page number is in the range of
1475 1475 # valid pages
1476 1476 if self.page > self.last_page:
1477 1477 self.page = self.last_page
1478 1478 elif self.page < self.first_page:
1479 1479 self.page = self.first_page
1480 1480
1481 1481 # Note: the number of items on this page can be less than
1482 1482 # items_per_page if the last page is not full
1483 1483 self.first_item = max(0, (self.item_count) - (self.page *
1484 1484 items_per_page))
1485 1485 self.last_item = ((self.item_count - 1) - items_per_page *
1486 1486 (self.page - 1))
1487 1487
1488 1488 self.items = list(self.collection[self.first_item:self.last_item + 1])
1489 1489
1490 1490 # Links to previous and next page
1491 1491 if self.page > self.first_page:
1492 1492 self.previous_page = self.page - 1
1493 1493 else:
1494 1494 self.previous_page = None
1495 1495
1496 1496 if self.page < self.last_page:
1497 1497 self.next_page = self.page + 1
1498 1498 else:
1499 1499 self.next_page = None
1500 1500
1501 1501 # No items available
1502 1502 else:
1503 1503 self.first_page = None
1504 1504 self.page_count = 0
1505 1505 self.last_page = None
1506 1506 self.first_item = None
1507 1507 self.last_item = None
1508 1508 self.previous_page = None
1509 1509 self.next_page = None
1510 1510 self.items = []
1511 1511
1512 1512 # This is a subclass of the 'list' type. Initialise the list now.
1513 1513 list.__init__(self, reversed(self.items))
1514 1514
1515 1515
1516 1516 def changed_tooltip(nodes):
1517 1517 """
1518 1518 Generates an html string for changed nodes on the commit page.
1519 1519 It limits the output to 30 entries
1520 1520
1521 1521 :param nodes: LazyNodesGenerator
1522 1522 """
1523 1523 if nodes:
1524 1524 pref = ': <br/> '
1525 1525 suf = ''
1526 1526 if len(nodes) > 30:
1527 1527 suf = '<br/>' + _(' and %s more') % (len(nodes) - 30)
1528 1528 return literal(pref + '<br/> '.join([safe_unicode(x.path)
1529 1529 for x in nodes[:30]]) + suf)
1530 1530 else:
1531 1531 return ': ' + _('No Files')
1532 1532
1533 1533
1534 1534 def breadcrumb_repo_link(repo):
1535 1535 """
1536 1536 Makes a breadcrumbs path link to repo
1537 1537
1538 1538 ex::
1539 1539 group >> subgroup >> repo
1540 1540
1541 1541 :param repo: a Repository instance
1542 1542 """
1543 1543
1544 1544 path = [
1545 1545 link_to(group.name, route_path('repo_group_home', repo_group_name=group.group_name))
1546 1546 for group in repo.groups_with_parents
1547 1547 ] + [
1548 1548 link_to(repo.just_name, route_path('repo_summary', repo_name=repo.repo_name))
1549 1549 ]
1550 1550
1551 1551 return literal(' &raquo; '.join(path))
1552 1552
1553 1553
1554 1554 def format_byte_size_binary(file_size):
1555 1555 """
1556 1556 Formats file/folder sizes using binary (base-2) units.
1557 1557 """
1558 1558 formatted_size = format_byte_size(file_size, binary=True)
1559 1559 return formatted_size
1560 1560
1561 1561
1562 1562 def urlify_text(text_, safe=True):
1563 1563 """
1564 1564 Extract URLs from text and make HTML links out of them
1565 1565
1566 1566 :param text_:
1567 1567 """
1568 1568
1569 1569 url_pat = re.compile(r'''(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@#.&+]'''
1570 1570 '''|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)''')
1571 1571
1572 1572 def url_func(match_obj):
1573 1573 url_full = match_obj.groups()[0]
1574 1574 return '<a href="%(url)s">%(url)s</a>' % ({'url': url_full})
1575 1575 _newtext = url_pat.sub(url_func, text_)
1576 1576 if safe:
1577 1577 return literal(_newtext)
1578 1578 return _newtext
1579 1579
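For instance, a plain URL in a commit message is wrapped in an anchor tag; a small illustrative call (input and output invented, shape of the markup taken from the template above):

    # illustrative usage of urlify_text()
    urlify_text('docs at http://example.com/guide')
    # -> literal('docs at <a href="http://example.com/guide">http://example.com/guide</a>')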
1580 1580
1581 1581 def urlify_commits(text_, repository):
1582 1582 """
1583 1583 Extract commit ids from text and make links from them
1584 1584
1585 1585 :param text_:
1586 1586 :param repository: repo name to build the URL with
1587 1587 """
1588 1588 from pylons import url # doh, we need to re-import url to mock it later
1589 1589 URL_PAT = re.compile(r'(^|\s)([0-9a-fA-F]{12,40})($|\s)')
1590 1590
1591 1591 def url_func(match_obj):
1592 1592 commit_id = match_obj.groups()[1]
1593 1593 pref = match_obj.groups()[0]
1594 1594 suf = match_obj.groups()[2]
1595 1595
1596 1596 tmpl = (
1597 1597 '%(pref)s<a class="%(cls)s" href="%(url)s">'
1598 1598 '%(commit_id)s</a>%(suf)s'
1599 1599 )
1600 1600 return tmpl % {
1601 1601 'pref': pref,
1602 1602 'cls': 'revision-link',
1603 1603 'url': url('changeset_home', repo_name=repository,
1604 1604 revision=commit_id, qualified=True),
1605 1605 'commit_id': commit_id,
1606 1606 'suf': suf
1607 1607 }
1608 1608
1609 1609 newtext = URL_PAT.sub(url_func, text_)
1610 1610
1611 1611 return newtext
1612 1612
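As a sketch, a bare 12-40 character hex id surrounded by whitespace becomes a changeset link; the exact href depends on how the `changeset_home` route is configured, and the repo name and hash below are made up:

    # illustrative usage of urlify_commits()
    urlify_commits('merged abcdef123456 into stable', 'group/vcs')
    # -> 'merged <a class="revision-link" href="...changeset/abcdef123456...">abcdef123456</a> into stable'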
1613 1613
1614 1614 def _process_url_func(match_obj, repo_name, uid, entry,
1615 1615 return_raw_data=False, link_format='html'):
1616 1616 pref = ''
1617 1617 if match_obj.group().startswith(' '):
1618 1618 pref = ' '
1619 1619
1620 1620 issue_id = ''.join(match_obj.groups())
1621 1621
1622 1622 if link_format == 'html':
1623 1623 tmpl = (
1624 1624 '%(pref)s<a class="%(cls)s" href="%(url)s">'
1625 1625 '%(issue-prefix)s%(id-repr)s'
1626 1626 '</a>')
1627 1627 elif link_format == 'rst':
1628 1628 tmpl = '`%(issue-prefix)s%(id-repr)s <%(url)s>`_'
1629 1629 elif link_format == 'markdown':
1630 1630 tmpl = '[%(issue-prefix)s%(id-repr)s](%(url)s)'
1631 1631 else:
1632 1632 raise ValueError('Bad link_format:{}'.format(link_format))
1633 1633
1634 1634 (repo_name_cleaned,
1635 1635 parent_group_name) = RepoGroupModel().\
1636 1636 _get_group_name_and_parent(repo_name)
1637 1637
1638 1638 # variables replacement
1639 1639 named_vars = {
1640 1640 'id': issue_id,
1641 1641 'repo': repo_name,
1642 1642 'repo_name': repo_name_cleaned,
1643 1643 'group_name': parent_group_name
1644 1644 }
1645 1645 # named regex variables
1646 1646 named_vars.update(match_obj.groupdict())
1647 1647 _url = string.Template(entry['url']).safe_substitute(**named_vars)
1648 1648
1649 1649 data = {
1650 1650 'pref': pref,
1651 1651 'cls': 'issue-tracker-link',
1652 1652 'url': _url,
1653 1653 'id-repr': issue_id,
1654 1654 'issue-prefix': entry['pref'],
1655 1655 'serv': entry['url'],
1656 1656 }
1657 1657 if return_raw_data:
1658 1658 return {
1659 1659 'id': issue_id,
1660 1660 'url': _url
1661 1661 }
1662 1662 return tmpl % data
1663 1663
1664 1664
1665 1665 def process_patterns(text_string, repo_name, link_format='html'):
1666 1666 allowed_formats = ['html', 'rst', 'markdown']
1667 1667 if link_format not in allowed_formats:
1668 1668 raise ValueError('Link format can be only one of:{} got {}'.format(
1669 1669 allowed_formats, link_format))
1670 1670
1671 1671 repo = None
1672 1672 if repo_name:
1673 1673 # Retrieve the repo so an invalid repo_name does not explode in
1674 1674 # IssueTrackerSettingsModel, while still passing the invalid name further down
1675 1675 repo = Repository.get_by_repo_name(repo_name, cache=True)
1676 1676
1677 1677 settings_model = IssueTrackerSettingsModel(repo=repo)
1678 1678 active_entries = settings_model.get_settings(cache=True)
1679 1679
1680 1680 issues_data = []
1681 1681 newtext = text_string
1682 1682
1683 1683 for uid, entry in active_entries.items():
1684 1684 log.debug('found issue tracker entry with uid %s' % (uid,))
1685 1685
1686 1686 if not (entry['pat'] and entry['url']):
1687 1687 log.debug('skipping due to missing data')
1688 1688 continue
1689 1689
1690 1690 log.debug('issue tracker entry: uid: `%s` PAT:%s URL:%s PREFIX:%s'
1691 1691 % (uid, entry['pat'], entry['url'], entry['pref']))
1692 1692
1693 1693 try:
1694 1694 pattern = re.compile(r'%s' % entry['pat'])
1695 1695 except re.error:
1696 1696 log.exception(
1697 1697 'issue tracker pattern: `%s` failed to compile',
1698 1698 entry['pat'])
1699 1699 continue
1700 1700
1701 1701 data_func = partial(
1702 1702 _process_url_func, repo_name=repo_name, entry=entry, uid=uid,
1703 1703 return_raw_data=True)
1704 1704
1705 1705 for match_obj in pattern.finditer(text_string):
1706 1706 issues_data.append(data_func(match_obj))
1707 1707
1708 1708 url_func = partial(
1709 1709 _process_url_func, repo_name=repo_name, entry=entry, uid=uid,
1710 1710 link_format=link_format)
1711 1711
1712 1712 newtext = pattern.sub(url_func, newtext)
1713 1713 log.debug('processed prefix:uid `%s`' % (uid,))
1714 1714
1715 1715 return newtext, issues_data
1716 1716
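A sketch of what an active issue-tracker entry looks like to this function; the keys mirror the code above, while the pattern, URL and repo name are invented for illustration:

    # hypothetical issue tracker entry as returned by IssueTrackerSettingsModel
    entry = {
        'pat': r'#(?P<issue_id>\d+)',                        # made-up pattern
        'url': 'https://tracker.example.com/${repo}/${id}',  # ${...} filled via string.Template
        'pref': '#',
    }
    # for the text "fixes #42" in repo "vcs", _process_url_func would build
    # https://tracker.example.com/vcs/42 and, with link_format='html', render
    # <a class="issue-tracker-link" href="https://tracker.example.com/vcs/42">#42</a>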
1717 1717
1718 1718 def urlify_commit_message(commit_text, repository=None):
1719 1719 """
1720 1720 Parses given text message and makes proper links.
1721 1721 issues are linked to given issue-server, and rest is a commit link
1722 1722
1723 1723 :param commit_text:
1724 1724 :param repository:
1725 1725 """
1726 1726 from pylons import url # doh, we need to re-import url to mock it later
1727 1727
1728 1728 def escaper(string):
1729 1729 return string.replace('<', '&lt;').replace('>', '&gt;')
1730 1730
1731 1731 newtext = escaper(commit_text)
1732 1732
1733 1733 # extract http/https links and make them real urls
1734 1734 newtext = urlify_text(newtext, safe=False)
1735 1735
1736 1736 # urlify commits - extract commit ids and make link out of them, if we have
1737 1737 # the scope of repository present.
1738 1738 if repository:
1739 1739 newtext = urlify_commits(newtext, repository)
1740 1740
1741 1741 # process issue tracker patterns
1742 1742 newtext, issues = process_patterns(newtext, repository or '')
1743 1743
1744 1744 return literal(newtext)
1745 1745
1746 1746
1747 1747 def render_binary(repo_name, file_obj):
1748 1748 """
1749 1749 Choose how to render a binary file
1750 1750 """
1751 1751 filename = file_obj.name
1752 1752
1753 1753 # images
1754 1754 for ext in ['*.png', '*.jpg', '*.ico', '*.gif']:
1755 1755 if fnmatch.fnmatch(filename, pat=ext):
1756 1756 alt = filename
1757 1757 src = url('files_raw_home', repo_name=repo_name,
1758 1758 revision=file_obj.commit.raw_id, f_path=file_obj.path)
1759 1759 return literal('<img class="rendered-binary" alt="{}" src="{}">'.format(alt, src))
1760 1760
1761 1761
1762 1762 def renderer_from_filename(filename, exclude=None):
1763 1763 """
1764 1764 Choose a renderer based on filename; this works only for text-based files
1765 1765 """
1766 1766
1767 1767 # ipython
1768 1768 for ext in ['*.ipynb']:
1769 1769 if fnmatch.fnmatch(filename, pat=ext):
1770 1770 return 'jupyter'
1771 1771
1772 1772 is_markup = MarkupRenderer.renderer_from_filename(filename, exclude=exclude)
1773 1773 if is_markup:
1774 1774 return is_markup
1775 1775 return None
1776 1776
1777 1777
1778 1778 def render(source, renderer='rst', mentions=False, relative_url=None,
1779 1779 repo_name=None):
1780 1780
1781 1781 def maybe_convert_relative_links(html_source):
1782 1782 if relative_url:
1783 1783 return relative_links(html_source, relative_url)
1784 1784 return html_source
1785 1785
1786 1786 if renderer == 'rst':
1787 1787 if repo_name:
1788 1788 # process patterns on comments if we pass in repo name
1789 1789 source, issues = process_patterns(
1790 1790 source, repo_name, link_format='rst')
1791 1791
1792 1792 return literal(
1793 1793 '<div class="rst-block">%s</div>' %
1794 1794 maybe_convert_relative_links(
1795 1795 MarkupRenderer.rst(source, mentions=mentions)))
1796 1796 elif renderer == 'markdown':
1797 1797 if repo_name:
1798 1798 # process patterns on comments if we pass in repo name
1799 1799 source, issues = process_patterns(
1800 1800 source, repo_name, link_format='markdown')
1801 1801
1802 1802 return literal(
1803 1803 '<div class="markdown-block">%s</div>' %
1804 1804 maybe_convert_relative_links(
1805 1805 MarkupRenderer.markdown(source, flavored=True,
1806 1806 mentions=mentions)))
1807 1807 elif renderer == 'jupyter':
1808 1808 return literal(
1809 1809 '<div class="ipynb">%s</div>' %
1810 1810 maybe_convert_relative_links(
1811 1811 MarkupRenderer.jupyter(source)))
1812 1812
1813 1813 # None means just show the file-source
1814 1814 return None
1815 1815
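Typical usage is a comment rendered in the scope of a repository so the issue-tracker patterns above get linked; a minimal sketch with an invented repo name:

    # illustrative call; returns a literal() wrapped div, or None for unknown renderers
    html = render('fixes #42, see CHANGES.rst', renderer='markdown',
                  mentions=True, repo_name='group/vcs')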
1816 1816
1817 1817 def commit_status(repo, commit_id):
1818 1818 return ChangesetStatusModel().get_status(repo, commit_id)
1819 1819
1820 1820
1821 1821 def commit_status_lbl(commit_status):
1822 1822 return dict(ChangesetStatus.STATUSES).get(commit_status)
1823 1823
1824 1824
1825 1825 def commit_time(repo_name, commit_id):
1826 1826 repo = Repository.get_by_repo_name(repo_name)
1827 1827 commit = repo.get_commit(commit_id=commit_id)
1828 1828 return commit.date
1829 1829
1830 1830
1831 1831 def get_permission_name(key):
1832 1832 return dict(Permission.PERMS).get(key)
1833 1833
1834 1834
1835 1835 def journal_filter_help():
1836 1836 return _(
1837 1837 'Example filter terms:\n' +
1838 1838 ' repository:vcs\n' +
1839 1839 ' username:marcin\n' +
1840 ' username:(NOT marcin)\n' +
1840 1841 ' action:*push*\n' +
1841 1842 ' ip:127.0.0.1\n' +
1842 1843 ' date:20120101\n' +
1843 1844 ' date:[20120101100000 TO 20120102]\n' +
1844 1845 '\n' +
1845 1846 'Generate wildcards using \'*\' character:\n' +
1846 1847 ' "repository:vcs*" - search everything starting with \'vcs\'\n' +
1847 1848 ' "repository:*vcs*" - search for repository containing \'vcs\'\n' +
1848 1849 '\n' +
1849 1850 'Optional AND / OR operators in queries\n' +
1850 1851 ' "repository:vcs OR repository:test"\n' +
1851 1852 ' "username:test AND repository:test*"\n'
1852 1853 )
1853 1854
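With the NOT term now supported, a journal filter can also exclude values; an illustrative query combining the examples above:

    repository:vcs* AND username:(NOT marcin)
    # matches journal entries for repositories starting with 'vcs', made by anyone except 'marcin'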
1854 1855
1855 1856 def search_filter_help(searcher):
1856 1857
1857 1858 terms = ''
1858 1859 return _(
1859 1860 'Example filter terms for `{searcher}` search:\n' +
1860 1861 '{terms}\n' +
1861 1862 'Generate wildcards using \'*\' character:\n' +
1862 1863 ' "repo_name:vcs*" - search everything starting with \'vcs\'\n' +
1863 1864 ' "repo_name:*vcs*" - search for repository containing \'vcs\'\n' +
1864 1865 '\n' +
1865 1866 'Optional AND / OR operators in queries\n' +
1866 1867 ' "repo_name:vcs OR repo_name:test"\n' +
1867 1868 ' "owner:test AND repo_name:test*"\n' +
1868 1869 'More: {search_doc}'
1869 1870 ).format(searcher=searcher.name,
1870 1871 terms=terms, search_doc=searcher.query_lang_doc)
1871 1872
1872 1873
1873 1874 def not_mapped_error(repo_name):
1874 1875 flash(_('%s repository is not mapped to db; perhaps'
1875 1876 ' it was created or renamed from the filesystem.'
1876 1877 ' Please run the application again'
1877 1878 ' in order to rescan repositories') % repo_name, category='error')
1878 1879
1879 1880
1880 1881 def ip_range(ip_addr):
1881 1882 from rhodecode.model.db import UserIpMap
1882 1883 s, e = UserIpMap._get_ip_range(ip_addr)
1883 1884 return '%s - %s' % (s, e)
1884 1885
1885 1886
1886 1887 def form(url, method='post', needs_csrf_token=True, **attrs):
1887 1888 """Wrapper around webhelpers.tags.form to prevent CSRF attacks."""
1888 1889 if method.lower() != 'get' and needs_csrf_token:
1889 1890 raise Exception(
1890 1891 'Forms to POST/PUT/DELETE endpoints should have (in general) a ' +
1891 1892 'CSRF token. If the endpoint does not require such token you can ' +
1892 1893 'explicitly set the parameter needs_csrf_token to false.')
1893 1894
1894 1895 return wh_form(url, method=method, **attrs)
1895 1896
1896 1897
1897 1898 def secure_form(url, method="POST", multipart=False, **attrs):
1898 1899 """Start a form tag that points the action to an url. This
1899 1900 form tag will also include the hidden field containing
1900 1901 the auth token.
1901 1902
1902 1903 The url options should be given either as a string, or as a
1903 1904 ``url()`` function. The method for the form defaults to POST.
1904 1905
1905 1906 Options:
1906 1907
1907 1908 ``multipart``
1908 1909 If set to True, the enctype is set to "multipart/form-data".
1909 1910 ``method``
1910 1911 The method to use when submitting the form, usually either
1911 1912 "GET" or "POST". If "PUT", "DELETE", or another verb is used, a
1912 1913 hidden input with name _method is added to simulate the verb
1913 1914 over POST.
1914 1915
1915 1916 """
1916 1917 from webhelpers.pylonslib.secure_form import insecure_form
1917 1918 form = insecure_form(url, method, multipart, **attrs)
1918 1919 token = csrf_input()
1919 1920 return literal("%s\n%s" % (form, token))
1920 1921
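A minimal sketch of what the helper produces; the route name is invented and the hidden input's name/id come from csrf_token_key:

    # illustrative usage of secure_form()
    form_open = secure_form(route_path('my_account_password'), method='POST')
    # form_open is a literal() containing the opening <form ...> tag followed by
    # the hidden CSRF token input produced by csrf_input()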
1921 1922 def csrf_input():
1922 1923 return literal(
1923 1924 '<input type="hidden" id="{}" name="{}" value="{}">'.format(
1924 1925 csrf_token_key, csrf_token_key, get_csrf_token()))
1925 1926
1926 1927 def dropdownmenu(name, selected, options, enable_filter=False, **attrs):
1927 1928 select_html = select(name, selected, options, **attrs)
1928 1929 select2 = """
1929 1930 <script>
1930 1931 $(document).ready(function() {
1931 1932 $('#%s').select2({
1932 1933 containerCssClass: 'drop-menu',
1933 1934 dropdownCssClass: 'drop-menu-dropdown',
1934 1935 dropdownAutoWidth: true%s
1935 1936 });
1936 1937 });
1937 1938 </script>
1938 1939 """
1939 1940 filter_option = """,
1940 1941 minimumResultsForSearch: -1
1941 1942 """
1942 1943 input_id = attrs.get('id') or name
1943 1944 filter_enabled = "" if enable_filter else filter_option
1944 1945 select_script = literal(select2 % (input_id, filter_enabled))
1945 1946
1946 1947 return literal(select_html+select_script)
1947 1948
1948 1949
1949 1950 def get_visual_attr(tmpl_context_var, attr_name):
1950 1951 """
1951 1952 A safe way to get a variable from visual variable of template context
1952 1953
1953 1954 :param tmpl_context_var: instance of tmpl_context, usually present as `c`
1954 1955 :param attr_name: name of the attribute we fetch from the c.visual
1955 1956 """
1956 1957 visual = getattr(tmpl_context_var, 'visual', None)
1957 1958 if not visual:
1958 1959 return
1959 1960 else:
1960 1961 return getattr(visual, attr_name, None)
1961 1962
1962 1963
1963 1964 def get_last_path_part(file_node):
1964 1965 if not file_node.path:
1965 1966 return u''
1966 1967
1967 1968 path = safe_unicode(file_node.path.split('/')[-1])
1968 1969 return u'../' + path
1969 1970
1970 1971
1971 1972 def route_url(*args, **kwargs):
1972 1973 """
1973 1974 Wrapper around pyramids `route_url` (fully qualified url) function.
1974 1975 It is used to generate URLs from within pylons views or templates.
1975 1976 This will be removed when the pyramid migration is finished.
1976 1977 """
1977 1978 req = get_current_request()
1978 1979 return req.route_url(*args, **kwargs)
1979 1980
1980 1981
1981 1982 def route_path(*args, **kwargs):
1982 1983 """
1983 1984 Wrapper around pyramids `route_path` function. It is used to generate
1984 1985 URLs from within pylons views or templates. This will be removed when
1985 1986 the pyramid migration is finished.
1986 1987 """
1987 1988 req = get_current_request()
1988 1989 return req.route_path(*args, **kwargs)
1989 1990
1990 1991
1991 1992 def route_path_or_none(*args, **kwargs):
1992 1993 try:
1993 1994 return route_path(*args, **kwargs)
1994 1995 except KeyError:
1995 1996 return None
1996 1997
1997 1998
1998 1999 def static_url(*args, **kwds):
1999 2000 """
2000 2001 Wrapper around pyramids `static_url` function. It is used to generate
2001 2002 URLs from within pylons views or templates. This will be removed when
2002 2003 the pyramid migration is finished.
2003 2004 """
2004 2005 req = get_current_request()
2005 2006 return req.static_url(*args, **kwds)
2006 2007
2007 2008
2008 2009 def resource_path(*args, **kwds):
2009 2010 """
2010 2011 Wrapper around pyramids `resource_path` function. It is used to generate
2011 2012 URLs from within pylons views or templates. This will be removed when
2012 2013 the pyramid migration is finished.
2013 2014 """
2014 2015 req = get_current_request()
2015 2016 return req.resource_path(*args, **kwds)
2016 2017
2017 2018
2018 2019 def api_call_example(method, args):
2019 2020 """
2020 2021 Generates an API call example via cURL
2021 2022 """
2022 2023 args_json = json.dumps(OrderedDict([
2023 2024 ('id', 1),
2024 2025 ('auth_token', 'SECRET'),
2025 2026 ('method', method),
2026 2027 ('args', args)
2027 2028 ]))
2028 2029 return literal(
2029 2030 "curl {api_url} -X POST -H 'content-type:text/plain' --data-binary '{data}'"
2030 2031 "<br/><br/>SECRET can be found in <a href=\"{token_url}\">auth-tokens</a> page, "
2031 2032 "and needs to be of `api calls` role."
2032 2033 .format(
2033 2034 api_url=route_url('apiv2'),
2034 2035 token_url=route_url('my_account_auth_tokens'),
2035 2036 data=args_json))
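The generated snippet looks roughly like the following; the host, method and args are placeholders, and the real URL comes from the `apiv2` route:

    # illustrative output of api_call_example('get_repo', {'repoid': 'vcs'})
    # curl https://code.example.com/_admin/api -X POST -H 'content-type:text/plain' \
    #   --data-binary '{"id": 1, "auth_token": "SECRET", "method": "get_repo", "args": {"repoid": "vcs"}}'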
@@ -1,112 +1,123 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2010-2017 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21 import logging
22 22
23 23 from whoosh.qparser.default import QueryParser, query
24 24 from whoosh.qparser.dateparse import DateParserPlugin
25 25 from whoosh.fields import (TEXT, Schema, DATETIME)
26 from sqlalchemy.sql.expression import or_, and_, func
26 from sqlalchemy.sql.expression import or_, and_, not_, func
27 27
28 28 from rhodecode.model.db import UserLog
29 from rhodecode.lib.utils2 import remove_prefix, remove_suffix
29 from rhodecode.lib.utils2 import remove_prefix, remove_suffix, safe_unicode
30 30
31 31 # JOURNAL SCHEMA used only to generate queries in journal. We use whoosh
32 32 # querylang to build sql queries and filter journals
33 33 JOURNAL_SCHEMA = Schema(
34 34 username=TEXT(),
35 35 date=DATETIME(),
36 36 action=TEXT(),
37 37 repository=TEXT(),
38 38 ip=TEXT(),
39 39 )
40 40
41 41 log = logging.getLogger(__name__)
42 42
43 43
44 44 def user_log_filter(user_log, search_term):
45 45 """
46 46 Filters a SQLAlchemy user_log query based on search_term using the Whoosh query language
47 47 http://packages.python.org/Whoosh/querylang.html
48 48
49 49 :param user_log:
50 50 :param search_term:
51 51 """
52 52 log.debug('Initial search term: %r' % search_term)
53 53 qry = None
54 54 if search_term:
55 55 qp = QueryParser('repository', schema=JOURNAL_SCHEMA)
56 56 qp.add_plugin(DateParserPlugin())
57 qry = qp.parse(unicode(search_term))
57 qry = qp.parse(safe_unicode(search_term))
58 58 log.debug('Filtering using parsed query %r' % qry)
59 59
60 60 def wildcard_handler(col, wc_term):
61 61 if wc_term.startswith('*') and not wc_term.endswith('*'):
62 62 # postfix == endswith
63 63 wc_term = remove_prefix(wc_term, prefix='*')
64 64 return func.lower(col).endswith(wc_term)
65 65 elif wc_term.startswith('*') and wc_term.endswith('*'):
66 66 # wildcard == ilike
67 67 wc_term = remove_prefix(wc_term, prefix='*')
68 68 wc_term = remove_suffix(wc_term, suffix='*')
69 69 return func.lower(col).contains(wc_term)
70 70
71 71 def get_filterion(field, val, term):
72 72
73 73 if field == 'repository':
74 74 field = getattr(UserLog, 'repository_name')
75 75 elif field == 'ip':
76 76 field = getattr(UserLog, 'user_ip')
77 77 elif field == 'date':
78 78 field = getattr(UserLog, 'action_date')
79 79 elif field == 'username':
80 80 field = getattr(UserLog, 'username')
81 81 else:
82 82 field = getattr(UserLog, field)
83 83 log.debug('filter field: %s val=>%s' % (field, val))
84 84
85 85 # sql filtering
86 86 if isinstance(term, query.Wildcard):
87 87 return wildcard_handler(field, val)
88 88 elif isinstance(term, query.Prefix):
89 89 return func.lower(field).startswith(func.lower(val))
90 90 elif isinstance(term, query.DateRange):
91 91 return and_(field >= val[0], field <= val[1])
92 elif isinstance(term, query.Not):
93 return not_(field == val)
92 94 return func.lower(field) == func.lower(val)
93 95
94 if isinstance(qry, (query.And, query.Term, query.Prefix, query.Wildcard,
95 query.DateRange)):
96 if isinstance(qry, (query.And, query.Not, query.Term, query.Prefix,
97 query.Wildcard, query.DateRange)):
96 98 if not isinstance(qry, query.And):
97 99 qry = [qry]
100
98 101 for term in qry:
99 field = term.fieldname
100 val = (term.text if not isinstance(term, query.DateRange)
101 else [term.startdate, term.enddate])
102 if isinstance(term, query.Not):
103 not_term = [z for z in term.leaves()][0]
104 field = not_term.fieldname
105 val = not_term.text
106 elif isinstance(term, query.DateRange):
107 field = term.fieldname
108 val = [term.startdate, term.enddate]
109 else:
110 field = term.fieldname
111 val = term.text
112
102 113 user_log = user_log.filter(get_filterion(field, val, term))
103 114 elif isinstance(qry, query.Or):
104 115 filters = []
105 116 for term in qry:
106 117 field = term.fieldname
107 118 val = (term.text if not isinstance(term, query.DateRange)
108 119 else [term.startdate, term.enddate])
109 120 filters.append(get_filterion(field, val, term))
110 121 user_log = user_log.filter(or_(*filters))
111 122
112 123 return user_log
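A minimal sketch of how the new NOT branch ends up in SQL; the base query helper is assumed to exist and the generated SQL is approximate:

    # illustrative: exclude one user's entries from the journal
    user_log = UserLog.query()                     # assumed base query helper
    user_log = user_log_filter(user_log, 'username:(NOT marcin)')
    # the Not term is unwrapped to its single leaf ('username', 'marcin') and
    # filtered as not_(UserLog.username == 'marcin'), i.e. roughly
    # WHERE NOT (user_logs.username = 'marcin')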