helpers: inline url markup in urlify_text...
Mads Kiilerich
r6147:ca830f9d default
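This changeset drops the module-level _urlify_text_replace/_urlify_text helpers (presumably kallithea/lib/helpers.py) and instead defines _URLIFY_RE, which wraps url_re.pattern in a named "url" group, and does the replacement inline in urlify_text; correspondingly, url_re in kallithea/lib/markup_renderer.py loses its outer capturing group. The sketch below is not part of the commit: it is a minimal standalone illustration using only the re module, with made-up sample text, showing that the old and new markup produce the same links. The regexp patterns and replacement callbacks are copied from the hunks that follow.

import re

# Old pattern (removed from markup_renderer.py): whole URL captured as group 1.
old_url_re = re.compile(r'''(\bhttps?://(?:[\da-zA-Z0-9@:.-]+)'''
                        r'''(?:[/a-zA-Z0-9_=@#~&+%.,:;?!*()-]*[/a-zA-Z0-9_=@#~])?)''')

def _old_replace(match_obj):
    # the removed _urlify_text_replace relied on group(1)
    url_full = match_obj.group(1)
    return '<a href="%(url)s">%(url)s</a>' % {'url': url_full}

# New pattern: no capturing group; helpers.py wraps it in a named group instead.
url_re = re.compile(r'''\bhttps?://(?:[\da-zA-Z0-9@:.-]+)'''
                    r'''(?:[/a-zA-Z0-9_=@#~&+%.,:;?!*()-]*[/a-zA-Z0-9_=@#~])?''')

_URLIFY_RE = re.compile(r'''
# URL markup
(?P<url>%s)
''' % (url_re.pattern),
    re.VERBOSE | re.MULTILINE | re.IGNORECASE)

def _new_replace(match_obj):
    # the inlined _replace in urlify_text reads the named group
    url = match_obj.group('url')
    if url is not None:
        return '<a href="%(url)s">%(url)s</a>' % {'url': url}
    return match_obj.group(0)

sample = 'fix reported at http://example.com/issue/42, see docs'
assert (old_url_re.sub(_old_replace, sample)
        == _URLIFY_RE.sub(_new_replace, sample)
        == 'fix reported at <a href="http://example.com/issue/42">'
           'http://example.com/issue/42</a>, see docs')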
@@ -1,1497 +1,1506 @@
1 1 # -*- coding: utf-8 -*-
2 2 # This program is free software: you can redistribute it and/or modify
3 3 # it under the terms of the GNU General Public License as published by
4 4 # the Free Software Foundation, either version 3 of the License, or
5 5 # (at your option) any later version.
6 6 #
7 7 # This program is distributed in the hope that it will be useful,
8 8 # but WITHOUT ANY WARRANTY; without even the implied warranty of
9 9 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 10 # GNU General Public License for more details.
11 11 #
12 12 # You should have received a copy of the GNU General Public License
13 13 # along with this program. If not, see <http://www.gnu.org/licenses/>.
14 14 """
15 15 Helper functions
16 16
17 17 Consists of functions to typically be used within templates, but also
18 18 available to Controllers. This module is available to both as 'h'.
19 19 """
20 20 import hashlib
21 21 import StringIO
22 22 import math
23 23 import logging
24 24 import re
25 25 import urlparse
26 26 import textwrap
27 27
28 28 from beaker.cache import cache_region
29 29 from pygments.formatters.html import HtmlFormatter
30 30 from pygments import highlight as code_highlight
31 31 from pylons import url
32 32 from pylons.i18n.translation import _, ungettext
33 33
34 34 from webhelpers.html import literal, HTML, escape
35 35 from webhelpers.html.tools import *
36 36 from webhelpers.html.builder import make_tag
37 37 from webhelpers.html.tags import auto_discovery_link, checkbox, css_classes, \
38 38 end_form, file, hidden, image, javascript_link, link_to, \
39 39 link_to_if, link_to_unless, ol, required_legend, select, stylesheet_link, \
40 40 submit, text, password, textarea, title, ul, xml_declaration, radio, \
41 41 form as insecure_form
42 42 from webhelpers.html.tools import auto_link, button_to, highlight, \
43 43 js_obfuscate, mail_to, strip_links, strip_tags, tag_re
44 44 from webhelpers.number import format_byte_size, format_bit_size
45 45 from webhelpers.pylonslib import Flash as _Flash
46 46 from webhelpers.pylonslib.secure_form import secure_form, authentication_token
47 47 from webhelpers.text import chop_at, collapse, convert_accented_entities, \
48 48 convert_misc_entities, lchop, plural, rchop, remove_formatting, \
49 49 replace_whitespace, urlify, truncate, wrap_paragraphs
50 50 from webhelpers.date import time_ago_in_words
51 51 from webhelpers.paginate import Page as _Page
52 52 from webhelpers.html.tags import _set_input_attrs, _set_id_attr, \
53 53 convert_boolean_attrs, NotGiven, _make_safe_id_component
54 54
55 55 from kallithea.lib.annotate import annotate_highlight
56 56 from kallithea.lib.utils import repo_name_slug, get_custom_lexer
57 57 from kallithea.lib.utils2 import str2bool, safe_unicode, safe_str, \
58 58 get_changeset_safe, datetime_to_time, time_to_datetime, AttributeDict, \
59 59 safe_int, MENTIONS_REGEX
60 60 from kallithea.lib.markup_renderer import MarkupRenderer, url_re
61 61 from kallithea.lib.vcs.exceptions import ChangesetDoesNotExistError
62 62 from kallithea.lib.vcs.backends.base import BaseChangeset, EmptyChangeset
63 63 from kallithea.config.conf import DATE_FORMAT, DATETIME_FORMAT
64 64 from kallithea.model.changeset_status import ChangesetStatusModel
65 65 from kallithea.model.db import URL_SEP, Permission
66 66
67 67 log = logging.getLogger(__name__)
68 68
69 69
70 70 def canonical_url(*args, **kargs):
71 71 '''Like url(x, qualified=True), but returns url that not only is qualified
72 72 but also canonical, as configured in canonical_url'''
73 73 from kallithea import CONFIG
74 74 try:
75 75 parts = CONFIG.get('canonical_url', '').split('://', 1)
76 76 kargs['host'] = parts[1].split('/', 1)[0]
77 77 kargs['protocol'] = parts[0]
78 78 except IndexError:
79 79 kargs['qualified'] = True
80 80 return url(*args, **kargs)
81 81
82 82 def canonical_hostname():
83 83 '''Return canonical hostname of system'''
84 84 from kallithea import CONFIG
85 85 try:
86 86 parts = CONFIG.get('canonical_url', '').split('://', 1)
87 87 return parts[1].split('/', 1)[0]
88 88 except IndexError:
89 89 parts = url('home', qualified=True).split('://', 1)
90 90 return parts[1].split('/', 1)[0]
91 91
92 92 def html_escape(s):
93 93 """Return string with all html escaped.
94 94 This is also safe for javascript in html but not necessarily correct.
95 95 """
96 96 return (s
97 97 .replace('&', '&amp;')
98 98 .replace(">", "&gt;")
99 99 .replace("<", "&lt;")
100 100 .replace('"', "&quot;")
101 101 .replace("'", "&apos;")
102 102 )
103 103
104 104 def shorter(s, size=20, firstline=False, postfix='...'):
105 105 """Truncate s to size, including the postfix string if truncating.
106 106 If firstline, truncate at newline.
107 107 """
108 108 if firstline:
109 109 s = s.split('\n', 1)[0].rstrip()
110 110 if len(s) > size:
111 111 return s[:size - len(postfix)] + postfix
112 112 return s
113 113
114 114
115 115 def _reset(name, value=None, id=NotGiven, type="reset", **attrs):
116 116 """
117 117 Reset button
118 118 """
119 119 _set_input_attrs(attrs, type, name, value)
120 120 _set_id_attr(attrs, id, name)
121 121 convert_boolean_attrs(attrs, ["disabled"])
122 122 return HTML.input(**attrs)
123 123
124 124 reset = _reset
125 125 safeid = _make_safe_id_component
126 126
127 127
128 128 def FID(raw_id, path):
129 129 """
130 130 Creates a unique ID for filenode based on its hash of path and revision
131 131 it's safe to use in urls
132 132
133 133 :param raw_id:
134 134 :param path:
135 135 """
136 136
137 137 return 'C-%s-%s' % (short_id(raw_id), hashlib.md5(safe_str(path)).hexdigest()[:12])
138 138
139 139
140 140 class _GetError(object):
141 141 """Get error from form_errors, and represent it as span wrapped error
142 142 message
143 143
144 144 :param field_name: field to fetch errors for
145 145 :param form_errors: form errors dict
146 146 """
147 147
148 148 def __call__(self, field_name, form_errors):
149 149 tmpl = """<span class="error_msg">%s</span>"""
150 150 if form_errors and field_name in form_errors:
151 151 return literal(tmpl % form_errors.get(field_name))
152 152
153 153 get_error = _GetError()
154 154
155 155
156 156 class _FilesBreadCrumbs(object):
157 157
158 158 def __call__(self, repo_name, rev, paths):
159 159 if isinstance(paths, str):
160 160 paths = safe_unicode(paths)
161 161 url_l = [link_to(repo_name, url('files_home',
162 162 repo_name=repo_name,
163 163 revision=rev, f_path=''),
164 164 class_='ypjax-link')]
165 165 paths_l = paths.split('/')
166 166 for cnt, p in enumerate(paths_l):
167 167 if p != '':
168 168 url_l.append(link_to(p,
169 169 url('files_home',
170 170 repo_name=repo_name,
171 171 revision=rev,
172 172 f_path='/'.join(paths_l[:cnt + 1])
173 173 ),
174 174 class_='ypjax-link'
175 175 )
176 176 )
177 177
178 178 return literal('/'.join(url_l))
179 179
180 180 files_breadcrumbs = _FilesBreadCrumbs()
181 181
182 182
183 183 class CodeHtmlFormatter(HtmlFormatter):
184 184 """
185 185 My code Html Formatter for source codes
186 186 """
187 187
188 188 def wrap(self, source, outfile):
189 189 return self._wrap_div(self._wrap_pre(self._wrap_code(source)))
190 190
191 191 def _wrap_code(self, source):
192 192 for cnt, it in enumerate(source):
193 193 i, t = it
194 194 t = '<div id="L%s">%s</div>' % (cnt + 1, t)
195 195 yield i, t
196 196
197 197 def _wrap_tablelinenos(self, inner):
198 198 dummyoutfile = StringIO.StringIO()
199 199 lncount = 0
200 200 for t, line in inner:
201 201 if t:
202 202 lncount += 1
203 203 dummyoutfile.write(line)
204 204
205 205 fl = self.linenostart
206 206 mw = len(str(lncount + fl - 1))
207 207 sp = self.linenospecial
208 208 st = self.linenostep
209 209 la = self.lineanchors
210 210 aln = self.anchorlinenos
211 211 nocls = self.noclasses
212 212 if sp:
213 213 lines = []
214 214
215 215 for i in range(fl, fl + lncount):
216 216 if i % st == 0:
217 217 if i % sp == 0:
218 218 if aln:
219 219 lines.append('<a href="#%s%d" class="special">%*d</a>' %
220 220 (la, i, mw, i))
221 221 else:
222 222 lines.append('<span class="special">%*d</span>' % (mw, i))
223 223 else:
224 224 if aln:
225 225 lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
226 226 else:
227 227 lines.append('%*d' % (mw, i))
228 228 else:
229 229 lines.append('')
230 230 ls = '\n'.join(lines)
231 231 else:
232 232 lines = []
233 233 for i in range(fl, fl + lncount):
234 234 if i % st == 0:
235 235 if aln:
236 236 lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
237 237 else:
238 238 lines.append('%*d' % (mw, i))
239 239 else:
240 240 lines.append('')
241 241 ls = '\n'.join(lines)
242 242
243 243 # in case you wonder about the seemingly redundant <div> here: since the
244 244 # content in the other cell also is wrapped in a div, some browsers in
245 245 # some configurations seem to mess up the formatting...
246 246 if nocls:
247 247 yield 0, ('<table class="%stable">' % self.cssclass +
248 248 '<tr><td><div class="linenodiv" '
249 249 'style="background-color: #f0f0f0; padding-right: 10px">'
250 250 '<pre style="line-height: 125%">' +
251 251 ls + '</pre></div></td><td id="hlcode" class="code">')
252 252 else:
253 253 yield 0, ('<table class="%stable">' % self.cssclass +
254 254 '<tr><td class="linenos"><div class="linenodiv"><pre>' +
255 255 ls + '</pre></div></td><td id="hlcode" class="code">')
256 256 yield 0, dummyoutfile.getvalue()
257 257 yield 0, '</td></tr></table>'
258 258
259 259
260 260 _whitespace_re = re.compile(r'(\t)|( )(?=\n|</div>)')
261 261
262 262 def _markup_whitespace(m):
263 263 groups = m.groups()
264 264 if groups[0]:
265 265 return '<u>\t</u>'
266 266 if groups[1]:
267 267 return ' <i></i>'
268 268
269 269 def markup_whitespace(s):
270 270 return _whitespace_re.sub(_markup_whitespace, s)
271 271
272 272 def pygmentize(filenode, **kwargs):
273 273 """
274 274 pygmentize function using pygments
275 275
276 276 :param filenode:
277 277 """
278 278 lexer = get_custom_lexer(filenode.extension) or filenode.lexer
279 279 return literal(markup_whitespace(
280 280 code_highlight(filenode.content, lexer, CodeHtmlFormatter(**kwargs))))
281 281
282 282
283 283 def pygmentize_annotation(repo_name, filenode, **kwargs):
284 284 """
285 285 pygmentize function for annotation
286 286
287 287 :param filenode:
288 288 """
289 289
290 290 color_dict = {}
291 291
292 292 def gen_color(n=10000):
293 293 """generator for getting n of evenly distributed colors using
294 294 hsv color and golden ratio. It always returns the same order of colors
295 295
296 296 :returns: RGB tuple
297 297 """
298 298
299 299 def hsv_to_rgb(h, s, v):
300 300 if s == 0.0:
301 301 return v, v, v
302 302 i = int(h * 6.0) # XXX assume int() truncates!
303 303 f = (h * 6.0) - i
304 304 p = v * (1.0 - s)
305 305 q = v * (1.0 - s * f)
306 306 t = v * (1.0 - s * (1.0 - f))
307 307 i = i % 6
308 308 if i == 0:
309 309 return v, t, p
310 310 if i == 1:
311 311 return q, v, p
312 312 if i == 2:
313 313 return p, v, t
314 314 if i == 3:
315 315 return p, q, v
316 316 if i == 4:
317 317 return t, p, v
318 318 if i == 5:
319 319 return v, p, q
320 320
321 321 golden_ratio = 0.618033988749895
322 322 h = 0.22717784590367374
323 323
324 324 for _unused in xrange(n):
325 325 h += golden_ratio
326 326 h %= 1
327 327 HSV_tuple = [h, 0.95, 0.95]
328 328 RGB_tuple = hsv_to_rgb(*HSV_tuple)
329 329 yield map(lambda x: str(int(x * 256)), RGB_tuple)
330 330
331 331 cgenerator = gen_color()
332 332
333 333 def get_color_string(cs):
334 334 if cs in color_dict:
335 335 col = color_dict[cs]
336 336 else:
337 337 col = color_dict[cs] = cgenerator.next()
338 338 return "color: rgb(%s)! important;" % (', '.join(col))
339 339
340 340 def url_func(repo_name):
341 341
342 342 def _url_func(changeset):
343 343 author = escape(changeset.author)
344 344 date = changeset.date
345 345 message = escape(changeset.message)
346 346 tooltip_html = ("<div style='font-size:0.8em'><b>Author:</b>"
347 347 " %s<br/><b>Date:</b> %s</b><br/><b>Message:"
348 348 "</b> %s<br/></div>") % (author, date, message)
349 349
350 350 lnk_format = show_id(changeset)
351 351 uri = link_to(
352 352 lnk_format,
353 353 url('changeset_home', repo_name=repo_name,
354 354 revision=changeset.raw_id),
355 355 style=get_color_string(changeset.raw_id),
356 356 class_='tooltip safe-html-title',
357 357 title=tooltip_html
358 358 )
359 359
360 360 uri += '\n'
361 361 return uri
362 362 return _url_func
363 363
364 364 return literal(markup_whitespace(annotate_highlight(filenode, url_func(repo_name), **kwargs)))
365 365
366 366
367 367 def is_following_repo(repo_name, user_id):
368 368 from kallithea.model.scm import ScmModel
369 369 return ScmModel().is_following_repo(repo_name, user_id)
370 370
371 371 class _Message(object):
372 372 """A message returned by ``Flash.pop_messages()``.
373 373
374 374 Converting the message to a string returns the message text. Instances
375 375 also have the following attributes:
376 376
377 377 * ``message``: the message text.
378 378 * ``category``: the category specified when the message was created.
379 379 """
380 380
381 381 def __init__(self, category, message):
382 382 self.category = category
383 383 self.message = message
384 384
385 385 def __str__(self):
386 386 return self.message
387 387
388 388 __unicode__ = __str__
389 389
390 390 def __html__(self):
391 391 return escape(safe_unicode(self.message))
392 392
393 393 class Flash(_Flash):
394 394
395 395 def __call__(self, message, category=None, ignore_duplicate=False, logf=None):
396 396 """
397 397 Show a message to the user _and_ log it through the specified function
398 398
399 399 category: notice (default), warning, error, success
400 400 logf: a custom log function - such as log.debug
401 401
402 402 logf defaults to log.info, unless category equals 'success', in which
403 403 case logf defaults to log.debug.
404 404 """
405 405 if logf is None:
406 406 logf = log.info
407 407 if category == 'success':
408 408 logf = log.debug
409 409
410 410 logf('Flash %s: %s', category, message)
411 411
412 412 super(Flash, self).__call__(message, category, ignore_duplicate)
413 413
414 414 def pop_messages(self):
415 415 """Return all accumulated messages and delete them from the session.
416 416
417 417 The return value is a list of ``Message`` objects.
418 418 """
419 419 from pylons import session
420 420 messages = session.pop(self.session_key, [])
421 421 session.save()
422 422 return [_Message(*m) for m in messages]
423 423
424 424 flash = Flash()
425 425
426 426 #==============================================================================
427 427 # SCM FILTERS available via h.
428 428 #==============================================================================
429 429 from kallithea.lib.vcs.utils import author_name, author_email
430 430 from kallithea.lib.utils2 import credentials_filter, age as _age
431 431 from kallithea.model.db import User, ChangesetStatus, PullRequest
432 432
433 433 age = lambda x, y=False: _age(x, y)
434 434 capitalize = lambda x: x.capitalize()
435 435 email = author_email
436 436 short_id = lambda x: x[:12]
437 437 hide_credentials = lambda x: ''.join(credentials_filter(x))
438 438
439 439
440 440 def show_id(cs):
441 441 """
442 442 Configurable function that shows ID
443 443 by default it's r123:fffeeefffeee
444 444
445 445 :param cs: changeset instance
446 446 """
447 447 from kallithea import CONFIG
448 448 def_len = safe_int(CONFIG.get('show_sha_length', 12))
449 449 show_rev = str2bool(CONFIG.get('show_revision_number', False))
450 450
451 451 raw_id = cs.raw_id[:def_len]
452 452 if show_rev:
453 453 return 'r%s:%s' % (cs.revision, raw_id)
454 454 else:
455 455 return raw_id
456 456
457 457
458 458 def fmt_date(date):
459 459 if date:
460 460 return date.strftime("%Y-%m-%d %H:%M:%S").decode('utf8')
461 461
462 462 return ""
463 463
464 464
465 465 def is_git(repository):
466 466 if hasattr(repository, 'alias'):
467 467 _type = repository.alias
468 468 elif hasattr(repository, 'repo_type'):
469 469 _type = repository.repo_type
470 470 else:
471 471 _type = repository
472 472 return _type == 'git'
473 473
474 474
475 475 def is_hg(repository):
476 476 if hasattr(repository, 'alias'):
477 477 _type = repository.alias
478 478 elif hasattr(repository, 'repo_type'):
479 479 _type = repository.repo_type
480 480 else:
481 481 _type = repository
482 482 return _type == 'hg'
483 483
484 484
485 485 @cache_region('long_term', 'user_or_none')
486 486 def user_or_none(author):
487 487 """Try to match email part of VCS committer string with a local user - or return None"""
488 488 email = author_email(author)
489 489 if email:
490 490 user = User.get_by_email(email, cache=True) # cache will only use sql_cache_short
491 491 if user is not None:
492 492 return user
493 493 return None
494 494
495 495 def email_or_none(author):
496 496 """Try to match email part of VCS committer string with a local user.
497 497 Return primary email of user, email part of the specified author name, or None."""
498 498 if not author:
499 499 return None
500 500 user = user_or_none(author)
501 501 if user is not None:
502 502 return user.email # always use main email address - not necessarily the one used to find user
503 503
504 504 # extract email from the commit string
505 505 email = author_email(author)
506 506 if email:
507 507 return email
508 508
509 509 # No valid email, not a valid user in the system, none!
510 510 return None
511 511
512 512 def person(author, show_attr="username"):
513 513 """Find the user identified by 'author', return one of the users attributes,
514 514 default to the username attribute, None if there is no user"""
515 515 # attr to return from fetched user
516 516 person_getter = lambda usr: getattr(usr, show_attr)
517 517
518 518 # if author is already an instance use it for extraction
519 519 if isinstance(author, User):
520 520 return person_getter(author)
521 521
522 522 user = user_or_none(author)
523 523 if user is not None:
524 524 return person_getter(user)
525 525
526 526 # Still nothing? Just pass back the author name if any, else the email
527 527 return author_name(author) or email(author)
528 528
529 529
530 530 def person_by_id(id_, show_attr="username"):
531 531 # attr to return from fetched user
532 532 person_getter = lambda usr: getattr(usr, show_attr)
533 533
534 534 #maybe it's an ID ?
535 535 if str(id_).isdigit() or isinstance(id_, int):
536 536 id_ = int(id_)
537 537 user = User.get(id_)
538 538 if user is not None:
539 539 return person_getter(user)
540 540 return id_
541 541
542 542
543 543 def desc_stylize(value):
544 544 """
545 545 converts tags from value into html equivalent
546 546
547 547 :param value:
548 548 """
549 549 if not value:
550 550 return ''
551 551
552 552 value = re.sub(r'\[see\ \=&gt;\ *([a-zA-Z0-9\/\=\?\&\ \:\/\.\-]*)\]',
553 553 '<div class="metatag" tag="see">see =&gt; \\1 </div>', value)
554 554 value = re.sub(r'\[license\ \=&gt;\ *([a-zA-Z0-9\/\=\?\&\ \:\/\.\-]*)\]',
555 555 '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/\\1">\\1</a></div>', value)
556 556 value = re.sub(r'\[(requires|recommends|conflicts|base)\ \=&gt;\ *([a-zA-Z0-9\-\/]*)\]',
557 557 '<div class="metatag" tag="\\1">\\1 =&gt; <a href="/\\2">\\2</a></div>', value)
558 558 value = re.sub(r'\[(lang|language)\ \=&gt;\ *([a-zA-Z\-\/\#\+]*)\]',
559 559 '<div class="metatag" tag="lang">\\2</div>', value)
560 560 value = re.sub(r'\[([a-z]+)\]',
561 561 '<div class="metatag" tag="\\1">\\1</div>', value)
562 562
563 563 return value
564 564
565 565
566 566 def boolicon(value):
567 567 """Returns boolean value of a value, represented as small html image of true/false
568 568 icons
569 569
570 570 :param value: value
571 571 """
572 572
573 573 if value:
574 574 return HTML.tag('i', class_="icon-ok")
575 575 else:
576 576 return HTML.tag('i', class_="icon-minus-circled")
577 577
578 578
579 579 def action_parser(user_log, feed=False, parse_cs=False):
580 580 """
581 581 This helper will action_map the specified string action into translated
582 582 fancy names with icons and links
583 583
584 584 :param user_log: user log instance
585 585 :param feed: use output for feeds (no html and fancy icons)
586 586 :param parse_cs: parse Changesets into VCS instances
587 587 """
588 588
589 589 action = user_log.action
590 590 action_params = ' '
591 591
592 592 x = action.split(':')
593 593
594 594 if len(x) > 1:
595 595 action, action_params = x
596 596
597 597 def get_cs_links():
598 598 revs_limit = 3 # display this amount always
599 599 revs_top_limit = 50 # show up to this amount of changesets hidden
600 600 revs_ids = action_params.split(',')
601 601 deleted = user_log.repository is None
602 602 if deleted:
603 603 return ','.join(revs_ids)
604 604
605 605 repo_name = user_log.repository.repo_name
606 606
607 607 def lnk(rev, repo_name):
608 608 lazy_cs = False
609 609 title_ = None
610 610 url_ = '#'
611 611 if isinstance(rev, BaseChangeset) or isinstance(rev, AttributeDict):
612 612 if rev.op and rev.ref_name:
613 613 if rev.op == 'delete_branch':
614 614 lbl = _('Deleted branch: %s') % rev.ref_name
615 615 elif rev.op == 'tag':
616 616 lbl = _('Created tag: %s') % rev.ref_name
617 617 else:
618 618 lbl = 'Unknown operation %s' % rev.op
619 619 else:
620 620 lazy_cs = True
621 621 lbl = rev.short_id[:8]
622 622 url_ = url('changeset_home', repo_name=repo_name,
623 623 revision=rev.raw_id)
624 624 else:
625 625 # changeset cannot be found - it might have been stripped or removed
626 626 lbl = rev[:12]
627 627 title_ = _('Changeset not found')
628 628 if parse_cs:
629 629 return link_to(lbl, url_, title=title_, class_='tooltip')
630 630 return link_to(lbl, url_, raw_id=rev.raw_id, repo_name=repo_name,
631 631 class_='lazy-cs' if lazy_cs else '')
632 632
633 633 def _get_op(rev_txt):
634 634 _op = None
635 635 _name = rev_txt
636 636 if len(rev_txt.split('=>')) == 2:
637 637 _op, _name = rev_txt.split('=>')
638 638 return _op, _name
639 639
640 640 revs = []
641 641 if len(filter(lambda v: v != '', revs_ids)) > 0:
642 642 repo = None
643 643 for rev in revs_ids[:revs_top_limit]:
644 644 _op, _name = _get_op(rev)
645 645
646 646 # we want parsed changesets, or new log store format is bad
647 647 if parse_cs:
648 648 try:
649 649 if repo is None:
650 650 repo = user_log.repository.scm_instance
651 651 _rev = repo.get_changeset(rev)
652 652 revs.append(_rev)
653 653 except ChangesetDoesNotExistError:
654 654 log.error('cannot find revision %s in this repo', rev)
655 655 revs.append(rev)
656 656 else:
657 657 _rev = AttributeDict({
658 658 'short_id': rev[:12],
659 659 'raw_id': rev,
660 660 'message': '',
661 661 'op': _op,
662 662 'ref_name': _name
663 663 })
664 664 revs.append(_rev)
665 665 cs_links = [" " + ', '.join(
666 666 [lnk(rev, repo_name) for rev in revs[:revs_limit]]
667 667 )]
668 668 _op1, _name1 = _get_op(revs_ids[0])
669 669 _op2, _name2 = _get_op(revs_ids[-1])
670 670
671 671 _rev = '%s...%s' % (_name1, _name2)
672 672
673 673 compare_view = (
674 674 ' <div class="compare_view tooltip" title="%s">'
675 675 '<a href="%s">%s</a> </div>' % (
676 676 _('Show all combined changesets %s->%s') % (
677 677 revs_ids[0][:12], revs_ids[-1][:12]
678 678 ),
679 679 url('changeset_home', repo_name=repo_name,
680 680 revision=_rev
681 681 ),
682 682 _('Compare view')
683 683 )
684 684 )
685 685
686 686 # if we have exactly one more than normally displayed
687 687 # just display it, takes less space than displaying
688 688 # "and 1 more revisions"
689 689 if len(revs_ids) == revs_limit + 1:
690 690 cs_links.append(", " + lnk(revs[revs_limit], repo_name))
691 691
692 692 # hidden-by-default ones
693 693 if len(revs_ids) > revs_limit + 1:
694 694 uniq_id = revs_ids[0]
695 695 html_tmpl = (
696 696 '<span> %s <a class="show_more" id="_%s" '
697 697 'href="#more">%s</a> %s</span>'
698 698 )
699 699 if not feed:
700 700 cs_links.append(html_tmpl % (
701 701 _('and'),
702 702 uniq_id, _('%s more') % (len(revs_ids) - revs_limit),
703 703 _('revisions')
704 704 )
705 705 )
706 706
707 707 if not feed:
708 708 html_tmpl = '<span id="%s" style="display:none">, %s </span>'
709 709 else:
710 710 html_tmpl = '<span id="%s"> %s </span>'
711 711
712 712 morelinks = ', '.join(
713 713 [lnk(rev, repo_name) for rev in revs[revs_limit:]]
714 714 )
715 715
716 716 if len(revs_ids) > revs_top_limit:
717 717 morelinks += ', ...'
718 718
719 719 cs_links.append(html_tmpl % (uniq_id, morelinks))
720 720 if len(revs) > 1:
721 721 cs_links.append(compare_view)
722 722 return ''.join(cs_links)
723 723
724 724 def get_fork_name():
725 725 repo_name = action_params
726 726 url_ = url('summary_home', repo_name=repo_name)
727 727 return _('Fork name %s') % link_to(action_params, url_)
728 728
729 729 def get_user_name():
730 730 user_name = action_params
731 731 return user_name
732 732
733 733 def get_users_group():
734 734 group_name = action_params
735 735 return group_name
736 736
737 737 def get_pull_request():
738 738 pull_request_id = action_params
739 739 nice_id = PullRequest.make_nice_id(pull_request_id)
740 740
741 741 deleted = user_log.repository is None
742 742 if deleted:
743 743 repo_name = user_log.repository_name
744 744 else:
745 745 repo_name = user_log.repository.repo_name
746 746
747 747 return link_to(_('Pull request %s') % nice_id,
748 748 url('pullrequest_show', repo_name=repo_name,
749 749 pull_request_id=pull_request_id))
750 750
751 751 def get_archive_name():
752 752 archive_name = action_params
753 753 return archive_name
754 754
755 755 # action : translated str, callback(extractor), icon
756 756 action_map = {
757 757 'user_deleted_repo': (_('[deleted] repository'),
758 758 None, 'icon-trashcan'),
759 759 'user_created_repo': (_('[created] repository'),
760 760 None, 'icon-plus'),
761 761 'user_created_fork': (_('[created] repository as fork'),
762 762 None, 'icon-fork'),
763 763 'user_forked_repo': (_('[forked] repository'),
764 764 get_fork_name, 'icon-fork'),
765 765 'user_updated_repo': (_('[updated] repository'),
766 766 None, 'icon-pencil'),
767 767 'user_downloaded_archive': (_('[downloaded] archive from repository'),
768 768 get_archive_name, 'icon-download-cloud'),
769 769 'admin_deleted_repo': (_('[delete] repository'),
770 770 None, 'icon-trashcan'),
771 771 'admin_created_repo': (_('[created] repository'),
772 772 None, 'icon-plus'),
773 773 'admin_forked_repo': (_('[forked] repository'),
774 774 None, 'icon-fork'),
775 775 'admin_updated_repo': (_('[updated] repository'),
776 776 None, 'icon-pencil'),
777 777 'admin_created_user': (_('[created] user'),
778 778 get_user_name, 'icon-user'),
779 779 'admin_updated_user': (_('[updated] user'),
780 780 get_user_name, 'icon-user'),
781 781 'admin_created_users_group': (_('[created] user group'),
782 782 get_users_group, 'icon-pencil'),
783 783 'admin_updated_users_group': (_('[updated] user group'),
784 784 get_users_group, 'icon-pencil'),
785 785 'user_commented_revision': (_('[commented] on revision in repository'),
786 786 get_cs_links, 'icon-comment'),
787 787 'user_commented_pull_request': (_('[commented] on pull request for'),
788 788 get_pull_request, 'icon-comment'),
789 789 'user_closed_pull_request': (_('[closed] pull request for'),
790 790 get_pull_request, 'icon-ok'),
791 791 'push': (_('[pushed] into'),
792 792 get_cs_links, 'icon-move-up'),
793 793 'push_local': (_('[committed via Kallithea] into repository'),
794 794 get_cs_links, 'icon-pencil'),
795 795 'push_remote': (_('[pulled from remote] into repository'),
796 796 get_cs_links, 'icon-move-up'),
797 797 'pull': (_('[pulled] from'),
798 798 None, 'icon-move-down'),
799 799 'started_following_repo': (_('[started following] repository'),
800 800 None, 'icon-heart'),
801 801 'stopped_following_repo': (_('[stopped following] repository'),
802 802 None, 'icon-heart-empty'),
803 803 }
804 804
805 805 action_str = action_map.get(action, action)
806 806 if feed:
807 807 action = action_str[0].replace('[', '').replace(']', '')
808 808 else:
809 809 action = action_str[0] \
810 810 .replace('[', '<span class="journal_highlight">') \
811 811 .replace(']', '</span>')
812 812
813 813 action_params_func = lambda: ""
814 814
815 815 if callable(action_str[1]):
816 816 action_params_func = action_str[1]
817 817
818 818 def action_parser_icon():
819 819 action = user_log.action
820 820 action_params = None
821 821 x = action.split(':')
822 822
823 823 if len(x) > 1:
824 824 action, action_params = x
825 825
826 826 tmpl = """<i class="%s" alt="%s"></i>"""
827 827 ico = action_map.get(action, ['', '', ''])[2]
828 828 return literal(tmpl % (ico, action))
829 829
830 830 # returned callbacks we need to call to get
831 831 return [lambda: literal(action), action_params_func, action_parser_icon]
832 832
833 833
834 834
835 835 #==============================================================================
836 836 # PERMS
837 837 #==============================================================================
838 838 from kallithea.lib.auth import HasPermissionAny, \
839 839 HasRepoPermissionAny, HasRepoGroupPermissionAny
840 840
841 841
842 842 #==============================================================================
843 843 # GRAVATAR URL
844 844 #==============================================================================
845 845 def gravatar_div(email_address, cls='', size=30, **div_attributes):
846 846 """Return an html literal with a div around a gravatar if they are enabled.
847 847 Extra keyword parameters starting with 'div_' will get the prefix removed
848 848 and be used as attributes on the div. The default class is 'gravatar'.
849 849 """
850 850 from pylons import tmpl_context as c
851 851 if not c.visual.use_gravatar:
852 852 return ''
853 853 if 'div_class' not in div_attributes:
854 854 div_attributes['div_class'] = "gravatar"
855 855 attributes = []
856 856 for k, v in sorted(div_attributes.items()):
857 857 assert k.startswith('div_'), k
858 858 attributes.append(' %s="%s"' % (k[4:], escape(v)))
859 859 return literal("""<div%s>%s</div>""" %
860 860 (''.join(attributes),
861 861 gravatar(email_address, cls=cls, size=size)))
862 862
863 863 def gravatar(email_address, cls='', size=30):
864 864 """return html element of the gravatar
865 865
866 866 This method will return an <img> with the resolution double the size (for
867 867 retina screens) of the image. If the url returned from gravatar_url is
868 868 empty then we fall back to using an icon.
869 869
870 870 """
871 871 from pylons import tmpl_context as c
872 872 if not c.visual.use_gravatar:
873 873 return ''
874 874
875 875 src = gravatar_url(email_address, size * 2)
876 876
877 877 if src:
878 878 # here it makes sense to use style="width: ..." (instead of, say, a
879 879 # stylesheet) because we are using this to generate a high-res (retina) size
880 880 html = ('<img alt="" class="{cls}" style="width: {size}px; height: {size}px" src="{src}"/>'
881 881 .format(cls=cls, size=size, src=src))
882 882
883 883 else:
884 884 # if src is empty then there was no gravatar, so we use a font icon
885 885 html = ("""<i class="icon-user {cls}" style="font-size: {size}px;"></i>"""
886 886 .format(cls=cls, size=size, src=src))
887 887
888 888 return literal(html)
889 889
890 890 def gravatar_url(email_address, size=30, default=''):
891 891 # doh, we need to re-import those to mock it later
892 892 from pylons import url
893 893 from pylons import tmpl_context as c
894 894 if not c.visual.use_gravatar:
895 895 return ""
896 896
897 897 _def = 'anonymous@kallithea-scm.org' # default gravatar
898 898 email_address = email_address or _def
899 899
900 900 if email_address == _def:
901 901 return default
902 902
903 903 parsed_url = urlparse.urlparse(url.current(qualified=True))
904 904 url = (c.visual.gravatar_url or User.DEFAULT_GRAVATAR_URL ) \
905 905 .replace('{email}', email_address) \
906 906 .replace('{md5email}', hashlib.md5(safe_str(email_address).lower()).hexdigest()) \
907 907 .replace('{netloc}', parsed_url.netloc) \
908 908 .replace('{scheme}', parsed_url.scheme) \
909 909 .replace('{size}', safe_str(size))
910 910 return url
911 911
912 912 class Page(_Page):
913 913 """
914 914 Custom pager to match rendering style with YUI paginator
915 915 """
916 916
917 917 def __init__(self, *args, **kwargs):
918 918 kwargs.setdefault('url', url.current)
919 919 _Page.__init__(self, *args, **kwargs)
920 920
921 921 def _get_pos(self, cur_page, max_page, items):
922 922 edge = (items / 2) + 1
923 923 if (cur_page <= edge):
924 924 radius = max(items / 2, items - cur_page)
925 925 elif (max_page - cur_page) < edge:
926 926 radius = (items - 1) - (max_page - cur_page)
927 927 else:
928 928 radius = items / 2
929 929
930 930 left = max(1, (cur_page - (radius)))
931 931 right = min(max_page, cur_page + (radius))
932 932 return left, cur_page, right
933 933
934 934 def _range(self, regexp_match):
935 935 """
936 936 Return range of linked pages (e.g. '1 2 [3] 4 5 6 7 8').
937 937
938 938 Arguments:
939 939
940 940 regexp_match
941 941 A "re" (regular expressions) match object containing the
942 942 radius of linked pages around the current page in
943 943 regexp_match.group(1) as a string
944 944
945 945 This function is supposed to be called as a callable in
946 946 re.sub.
947 947
948 948 """
949 949 radius = int(regexp_match.group(1))
950 950
951 951 # Compute the first and last page number within the radius
952 952 # e.g. '1 .. 5 6 [7] 8 9 .. 12'
953 953 # -> leftmost_page = 5
954 954 # -> rightmost_page = 9
955 955 leftmost_page, _cur, rightmost_page = self._get_pos(self.page,
956 956 self.last_page,
957 957 (radius * 2) + 1)
958 958 nav_items = []
959 959
960 960 # Create a link to the first page (unless we are on the first page
961 961 # or there would be no need to insert '..' spacers)
962 962 if self.page != self.first_page and self.first_page < leftmost_page:
963 963 nav_items.append(self._pagerlink(self.first_page, self.first_page))
964 964
965 965 # Insert dots if there are pages between the first page
966 966 # and the currently displayed page range
967 967 if leftmost_page - self.first_page > 1:
968 968 # Wrap in a SPAN tag if nolink_attr is set
969 969 text_ = '..'
970 970 if self.dotdot_attr:
971 971 text_ = HTML.span(c=text_, **self.dotdot_attr)
972 972 nav_items.append(text_)
973 973
974 974 for thispage in xrange(leftmost_page, rightmost_page + 1):
975 975 # Highlight the current page number and do not use a link
976 976 text_ = str(thispage)
977 977 if thispage == self.page:
978 978 # Wrap in a SPAN tag if nolink_attr is set
979 979 if self.curpage_attr:
980 980 text_ = HTML.span(c=text_, **self.curpage_attr)
981 981 nav_items.append(text_)
982 982 # Otherwise create just a link to that page
983 983 else:
984 984 nav_items.append(self._pagerlink(thispage, text_))
985 985
986 986 # Insert dots if there are pages between the displayed
987 987 # page numbers and the end of the page range
988 988 if self.last_page - rightmost_page > 1:
989 989 text_ = '..'
990 990 # Wrap in a SPAN tag if nolink_attr is set
991 991 if self.dotdot_attr:
992 992 text_ = HTML.span(c=text_, **self.dotdot_attr)
993 993 nav_items.append(text_)
994 994
995 995 # Create a link to the very last page (unless we are on the last
996 996 # page or there would be no need to insert '..' spacers)
997 997 if self.page != self.last_page and rightmost_page < self.last_page:
998 998 nav_items.append(self._pagerlink(self.last_page, self.last_page))
999 999
1000 1000 #_page_link = url.current()
1001 1001 #nav_items.append(literal('<link rel="prerender" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
1002 1002 #nav_items.append(literal('<link rel="prefetch" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
1003 1003 return self.separator.join(nav_items)
1004 1004
1005 1005 def pager(self, format='~2~', page_param='page', partial_param='partial',
1006 1006 show_if_single_page=False, separator=' ', onclick=None,
1007 1007 symbol_first='<<', symbol_last='>>',
1008 1008 symbol_previous='<', symbol_next='>',
1009 1009 link_attr=None,
1010 1010 curpage_attr=None,
1011 1011 dotdot_attr=None, **kwargs):
1012 1012 self.curpage_attr = curpage_attr or {'class': 'pager_curpage'}
1013 1013 self.separator = separator
1014 1014 self.pager_kwargs = kwargs
1015 1015 self.page_param = page_param
1016 1016 self.partial_param = partial_param
1017 1017 self.onclick = onclick
1018 1018 self.link_attr = link_attr or {'class': 'pager_link', 'rel': 'prerender'}
1019 1019 self.dotdot_attr = dotdot_attr or {'class': 'pager_dotdot'}
1020 1020
1021 1021 # Don't show navigator if there is no more than one page
1022 1022 if self.page_count == 0 or (self.page_count == 1 and not show_if_single_page):
1023 1023 return ''
1024 1024
1025 1025 from string import Template
1026 1026 # Replace ~...~ in token format by range of pages
1027 1027 result = re.sub(r'~(\d+)~', self._range, format)
1028 1028
1029 1029 # Interpolate '%' variables
1030 1030 result = Template(result).safe_substitute({
1031 1031 'first_page': self.first_page,
1032 1032 'last_page': self.last_page,
1033 1033 'page': self.page,
1034 1034 'page_count': self.page_count,
1035 1035 'items_per_page': self.items_per_page,
1036 1036 'first_item': self.first_item,
1037 1037 'last_item': self.last_item,
1038 1038 'item_count': self.item_count,
1039 1039 'link_first': self.page > self.first_page and \
1040 1040 self._pagerlink(self.first_page, symbol_first) or '',
1041 1041 'link_last': self.page < self.last_page and \
1042 1042 self._pagerlink(self.last_page, symbol_last) or '',
1043 1043 'link_previous': self.previous_page and \
1044 1044 self._pagerlink(self.previous_page, symbol_previous) \
1045 1045 or HTML.span(symbol_previous, class_="yui-pg-previous"),
1046 1046 'link_next': self.next_page and \
1047 1047 self._pagerlink(self.next_page, symbol_next) \
1048 1048 or HTML.span(symbol_next, class_="yui-pg-next")
1049 1049 })
1050 1050
1051 1051 return literal(result)
1052 1052
1053 1053
1054 1054 #==============================================================================
1055 1055 # REPO PAGER, PAGER FOR REPOSITORY
1056 1056 #==============================================================================
1057 1057 class RepoPage(Page):
1058 1058
1059 1059 def __init__(self, collection, page=1, items_per_page=20,
1060 1060 item_count=None, **kwargs):
1061 1061
1062 1062 """Create a "RepoPage" instance. special pager for paging
1063 1063 repository
1064 1064 """
1065 1065 # TODO: call baseclass __init__
1066 1066 self._url_generator = kwargs.pop('url', url.current)
1067 1067
1068 1068 # Save the kwargs class-wide so they can be used in the pager() method
1069 1069 self.kwargs = kwargs
1070 1070
1071 1071 # Save a reference to the collection
1072 1072 self.original_collection = collection
1073 1073
1074 1074 self.collection = collection
1075 1075
1076 1076 # The self.page is the number of the current page.
1077 1077 # The first page has the number 1!
1078 1078 try:
1079 1079 self.page = int(page) # make it int() if we get it as a string
1080 1080 except (ValueError, TypeError):
1081 1081 self.page = 1
1082 1082
1083 1083 self.items_per_page = items_per_page
1084 1084
1085 1085 # Unless the user tells us how many items the collection has
1086 1086 # we calculate that ourselves.
1087 1087 if item_count is not None:
1088 1088 self.item_count = item_count
1089 1089 else:
1090 1090 self.item_count = len(self.collection)
1091 1091
1092 1092 # Compute the number of the first and last available page
1093 1093 if self.item_count > 0:
1094 1094 self.first_page = 1
1095 1095 self.page_count = int(math.ceil(float(self.item_count) /
1096 1096 self.items_per_page))
1097 1097 self.last_page = self.first_page + self.page_count - 1
1098 1098
1099 1099 # Make sure that the requested page number is in the range of
1100 1100 # valid pages
1101 1101 if self.page > self.last_page:
1102 1102 self.page = self.last_page
1103 1103 elif self.page < self.first_page:
1104 1104 self.page = self.first_page
1105 1105
1106 1106 # Note: the number of items on this page can be less than
1107 1107 # items_per_page if the last page is not full
1108 1108 self.first_item = max(0, (self.item_count) - (self.page *
1109 1109 items_per_page))
1110 1110 self.last_item = ((self.item_count - 1) - items_per_page *
1111 1111 (self.page - 1))
1112 1112
1113 1113 self.items = list(self.collection[self.first_item:self.last_item + 1])
1114 1114
1115 1115 # Links to previous and next page
1116 1116 if self.page > self.first_page:
1117 1117 self.previous_page = self.page - 1
1118 1118 else:
1119 1119 self.previous_page = None
1120 1120
1121 1121 if self.page < self.last_page:
1122 1122 self.next_page = self.page + 1
1123 1123 else:
1124 1124 self.next_page = None
1125 1125
1126 1126 # No items available
1127 1127 else:
1128 1128 self.first_page = None
1129 1129 self.page_count = 0
1130 1130 self.last_page = None
1131 1131 self.first_item = None
1132 1132 self.last_item = None
1133 1133 self.previous_page = None
1134 1134 self.next_page = None
1135 1135 self.items = []
1136 1136
1137 1137 # This is a subclass of the 'list' type. Initialise the list now.
1138 1138 list.__init__(self, reversed(self.items))
1139 1139
1140 1140
1141 1141 def changed_tooltip(nodes):
1142 1142 """
1143 1143 Generates a html string for changed nodes in changeset page.
1144 1144 It limits the output to 30 entries
1145 1145
1146 1146 :param nodes: LazyNodesGenerator
1147 1147 """
1148 1148 if nodes:
1149 1149 pref = ': <br/> '
1150 1150 suf = ''
1151 1151 if len(nodes) > 30:
1152 1152 suf = '<br/>' + _(' and %s more') % (len(nodes) - 30)
1153 1153 return literal(pref + '<br/> '.join([safe_unicode(x.path)
1154 1154 for x in nodes[:30]]) + suf)
1155 1155 else:
1156 1156 return ': ' + _('No files')
1157 1157
1158 1158
1159 1159 def repo_link(groups_and_repos):
1160 1160 """
1161 1161 Makes a breadcrumbs link to repo within a group
1162 1162 joins &raquo; on each group to create a fancy link
1163 1163
1164 1164 ex::
1165 1165 group >> subgroup >> repo
1166 1166
1167 1167 :param groups_and_repos:
1168 1168 :param last_url:
1169 1169 """
1170 1170 groups, just_name, repo_name = groups_and_repos
1171 1171 last_url = url('summary_home', repo_name=repo_name)
1172 1172 last_link = link_to(just_name, last_url)
1173 1173
1174 1174 def make_link(group):
1175 1175 return link_to(group.name,
1176 1176 url('repos_group_home', group_name=group.group_name))
1177 1177 return literal(' &raquo; '.join(map(make_link, groups) + ['<span>%s</span>' % last_link]))
1178 1178
1179 1179
1180 1180 def fancy_file_stats(stats):
1181 1181 """
1182 1182 Displays a fancy two colored bar for number of added/deleted
1183 1183 lines of code on file
1184 1184
1185 1185 :param stats: two element list of added/deleted lines of code
1186 1186 """
1187 1187 from kallithea.lib.diffs import NEW_FILENODE, DEL_FILENODE, \
1188 1188 MOD_FILENODE, RENAMED_FILENODE, CHMOD_FILENODE, BIN_FILENODE
1189 1189
1190 1190 def cgen(l_type, a_v, d_v):
1191 1191 mapping = {'tr': 'top-right-rounded-corner-mid',
1192 1192 'tl': 'top-left-rounded-corner-mid',
1193 1193 'br': 'bottom-right-rounded-corner-mid',
1194 1194 'bl': 'bottom-left-rounded-corner-mid'}
1195 1195 map_getter = lambda x: mapping[x]
1196 1196
1197 1197 if l_type == 'a' and d_v:
1198 1198 #case when added and deleted are present
1199 1199 return ' '.join(map(map_getter, ['tl', 'bl']))
1200 1200
1201 1201 if l_type == 'a' and not d_v:
1202 1202 return ' '.join(map(map_getter, ['tr', 'br', 'tl', 'bl']))
1203 1203
1204 1204 if l_type == 'd' and a_v:
1205 1205 return ' '.join(map(map_getter, ['tr', 'br']))
1206 1206
1207 1207 if l_type == 'd' and not a_v:
1208 1208 return ' '.join(map(map_getter, ['tr', 'br', 'tl', 'bl']))
1209 1209
1210 1210 a, d = stats['added'], stats['deleted']
1211 1211 width = 100
1212 1212
1213 1213 if stats['binary']:
1214 1214 #binary mode
1215 1215 lbl = ''
1216 1216 bin_op = 1
1217 1217
1218 1218 if BIN_FILENODE in stats['ops']:
1219 1219 lbl = 'bin+'
1220 1220
1221 1221 if NEW_FILENODE in stats['ops']:
1222 1222 lbl += _('new file')
1223 1223 bin_op = NEW_FILENODE
1224 1224 elif MOD_FILENODE in stats['ops']:
1225 1225 lbl += _('mod')
1226 1226 bin_op = MOD_FILENODE
1227 1227 elif DEL_FILENODE in stats['ops']:
1228 1228 lbl += _('del')
1229 1229 bin_op = DEL_FILENODE
1230 1230 elif RENAMED_FILENODE in stats['ops']:
1231 1231 lbl += _('rename')
1232 1232 bin_op = RENAMED_FILENODE
1233 1233
1234 1234 #chmod can go with other operations
1235 1235 if CHMOD_FILENODE in stats['ops']:
1236 1236 _org_lbl = _('chmod')
1237 1237 lbl += _org_lbl if lbl.endswith('+') else '+%s' % _org_lbl
1238 1238
1239 1239 #import ipdb;ipdb.set_trace()
1240 1240 b_d = '<div class="bin bin%s %s" style="width:100%%">%s</div>' % (bin_op, cgen('a', a_v='', d_v=0), lbl)
1241 1241 b_a = '<div class="bin bin1" style="width:0%"></div>'
1242 1242 return literal('<div style="width:%spx">%s%s</div>' % (width, b_a, b_d))
1243 1243
1244 1244 t = stats['added'] + stats['deleted']
1245 1245 unit = float(width) / (t or 1)
1246 1246
1247 1247 # needs > 9% of width to be visible or 0 to be hidden
1248 1248 a_p = max(9, unit * a) if a > 0 else 0
1249 1249 d_p = max(9, unit * d) if d > 0 else 0
1250 1250 p_sum = a_p + d_p
1251 1251
1252 1252 if p_sum > width:
1253 1253 #adjust the percentage to be == 100% since we adjusted to 9
1254 1254 if a_p > d_p:
1255 1255 a_p = a_p - (p_sum - width)
1256 1256 else:
1257 1257 d_p = d_p - (p_sum - width)
1258 1258
1259 1259 a_v = a if a > 0 else ''
1260 1260 d_v = d if d > 0 else ''
1261 1261
1262 1262 d_a = '<div class="added %s" style="width:%s%%">%s</div>' % (
1263 1263 cgen('a', a_v, d_v), a_p, a_v
1264 1264 )
1265 1265 d_d = '<div class="deleted %s" style="width:%s%%">%s</div>' % (
1266 1266 cgen('d', a_v, d_v), d_p, d_v
1267 1267 )
1268 1268 return literal('<div style="width:%spx">%s%s</div>' % (width, d_a, d_d))
1269 1269
1270 1270
1271 def _urlify_text_replace(match_obj):
1272 url_full = match_obj.group(1)
1273 return '<a href="%(url)s">%(url)s</a>' % {'url': url_full}
1274
1271 _URLIFY_RE = re.compile(r'''
1272 # URL markup
1273 (?P<url>%s)
1274 ''' % (url_re.pattern),
1275 re.VERBOSE | re.MULTILINE | re.IGNORECASE)
1275 1276
1276 def _urlify_text(s):
1277 """
1278 Extract urls from text and make html links out of them
1279 """
1280 return url_re.sub(_urlify_text_replace, s)
1281 1277
1282 1278
1283 1279 def urlify_text(s, repo_name=None, link_=None, truncate=None, stylize=False, truncatef=truncate):
1284 1280 """
1285 1281 Parses given text message and make literal html with markup.
1286 1282 The text will be truncated to the specified length.
1287 1283 Hashes are turned into changeset links to specified repository.
1288 1284 URLs links to what they say.
1289 1285 Issues are linked to given issue-server.
1290 1286 If link_ is provided, all text not already linking somewhere will link there.
1291 1287 """
1288
1289 def _replace(match_obj):
1290 url = match_obj.group('url')
1291 if url is not None:
1292 return '<a href="%(url)s">%(url)s</a>' % {'url': url}
1293 return match_obj.group(0)
1294
1295 def _urlify(s):
1296 """
1297 Extract urls from text and make html links out of them
1298 """
1299 return _URLIFY_RE.sub(_replace, s)
1300
1292 1301 if truncate is None:
1293 1302 s = s.rstrip()
1294 1303 else:
1295 1304 s = truncatef(s, truncate, whole_word=True)
1296 1305 s = html_escape(s)
1297 1306 if repo_name is not None:
1298 1307 s = urlify_changesets(s, repo_name)
1299 1308 if stylize:
1300 1309 s = desc_stylize(s)
1301 s = _urlify_text(s)
1310 s = _urlify(s)
1302 1311 if repo_name is not None:
1303 1312 s = urlify_issues(s, repo_name, link_)
1304 1313 s = MENTIONS_REGEX.sub(_mentions_replace, s)
1305 1314 s = s.replace('\r\n', '<br/>').replace('\n', '<br/>')
1306 1315 return literal(s)
1307 1316
1308 1317
1309 1318 def _urlify_changeset_replace_f(repo_name):
1310 1319 from pylons import url # doh, we need to re-import url to mock it later
1311 1320 def urlify_changeset_replace(match_obj):
1312 1321 rev = match_obj.group(0)
1313 1322 return '<a class="revision-link" href="%(url)s">%(rev)s</a>' % {
1314 1323 'url': url('changeset_home', repo_name=repo_name, revision=rev),
1315 1324 'rev': rev,
1316 1325 }
1317 1326 return urlify_changeset_replace
1318 1327
1319 1328
1320 1329 urilify_changeset_re = r'(?:^|(?<=[\s(),]))([0-9a-fA-F]{12,40})(?=$|\s|[.,:()])'
1321 1330
1322 1331 def urlify_changesets(text_, repo_name):
1323 1332 """
1324 1333 Extract revision ids from changeset and make link from them
1325 1334 """
1326 1335 urlify_changeset_replace = _urlify_changeset_replace_f(repo_name)
1327 1336 return re.sub(urilify_changeset_re, urlify_changeset_replace, text_)
1328 1337
1329 1338
1330 1339 def linkify_others(t, l):
1331 1340 """Add a default link to html with links.
1332 1341 HTML doesn't allow nesting of links, so the outer link must be broken up
1333 1342 in pieces and give space for other links.
1334 1343 """
1335 1344 urls = re.compile(r'(\<a.*?\<\/a\>)',)
1336 1345 links = []
1337 1346 for e in urls.split(t):
1338 1347 if not urls.match(e):
1339 1348 links.append('<a class="message-link" href="%s">%s</a>' % (l, e))
1340 1349 else:
1341 1350 links.append(e)
1342 1351
1343 1352 return ''.join(links)
1344 1353
1345 1354
1346 1355 def _urlify_issues_replace_f(repo_name, ISSUE_SERVER_LNK, ISSUE_PREFIX):
1347 1356 def urlify_issues_replace(match_obj):
1348 1357 pref = ''
1349 1358 if match_obj.group().startswith(' '):
1350 1359 pref = ' '
1351 1360
1352 1361 issue_id = ''.join(match_obj.groups())
1353 1362 issue_url = ISSUE_SERVER_LNK.replace('{id}', issue_id)
1354 1363 issue_url = issue_url.replace('{repo}', repo_name)
1355 1364 issue_url = issue_url.replace('{repo_name}', repo_name.split(URL_SEP)[-1])
1356 1365
1357 1366 return (
1358 1367 '%(pref)s<a class="%(cls)s" href="%(url)s">'
1359 1368 '%(issue-prefix)s%(id-repr)s'
1360 1369 '</a>'
1361 1370 ) % {
1362 1371 'pref': pref,
1363 1372 'cls': 'issue-tracker-link',
1364 1373 'url': issue_url,
1365 1374 'id-repr': issue_id,
1366 1375 'issue-prefix': ISSUE_PREFIX,
1367 1376 'serv': ISSUE_SERVER_LNK,
1368 1377 }
1369 1378 return urlify_issues_replace
1370 1379
1371 1380
1372 1381 def urlify_issues(newtext, repo_name, link_=None):
1373 1382 from kallithea import CONFIG as conf
1374 1383
1375 1384 # allow multiple issue servers to be used
1376 1385 valid_indices = [
1377 1386 x.group(1)
1378 1387 for x in map(lambda x: re.match(r'issue_pat(.*)', x), conf.keys())
1379 1388 if x and 'issue_server_link%s' % x.group(1) in conf
1380 1389 and 'issue_prefix%s' % x.group(1) in conf
1381 1390 ]
1382 1391
1383 1392 if valid_indices:
1384 1393 log.debug('found issue server suffixes `%s` during valuation of: %s',
1385 1394 ','.join(valid_indices), newtext)
1386 1395
1387 1396 for pattern_index in valid_indices:
1388 1397 ISSUE_PATTERN = conf.get('issue_pat%s' % pattern_index)
1389 1398 ISSUE_SERVER_LNK = conf.get('issue_server_link%s' % pattern_index)
1390 1399 ISSUE_PREFIX = conf.get('issue_prefix%s' % pattern_index)
1391 1400
1392 1401 log.debug('pattern suffix `%s` PAT:%s SERVER_LINK:%s PREFIX:%s',
1393 1402 pattern_index, ISSUE_PATTERN, ISSUE_SERVER_LNK,
1394 1403 ISSUE_PREFIX)
1395 1404
1396 1405 URL_PAT = re.compile(ISSUE_PATTERN)
1397 1406
1398 1407 urlify_issues_replace = _urlify_issues_replace_f(repo_name, ISSUE_SERVER_LNK, ISSUE_PREFIX)
1399 1408 newtext = URL_PAT.sub(urlify_issues_replace, newtext)
1400 1409 log.debug('processed prefix:`%s` => %s', pattern_index, newtext)
1401 1410
1402 1411 # if we actually did something above
1403 1412 if link_:
1404 1413 # wrap not links into final link => link_
1405 1414 newtext = linkify_others(newtext, link_)
1406 1415 return newtext
1407 1416
1408 1417
1409 1418 def _mentions_replace(match_obj):
1410 1419 return '<b>@%s</b>' % match_obj.group(1)
1411 1420
1412 1421
1413 1422 def render_w_mentions(source, repo_name=None):
1414 1423 """
1415 1424 Render plain text with revision hashes and issue references urlified
1416 1425 and with @mention highlighting.
1417 1426 """
1418 1427 s = safe_unicode(source)
1419 1428 s = urlify_text(s, repo_name=repo_name)
1420 1429 return literal('<div class="formatted-fixed">%s</div>' % s)
1421 1430
1422 1431
1423 1432 def short_ref(ref_type, ref_name):
1424 1433 if ref_type == 'rev':
1425 1434 return short_id(ref_name)
1426 1435 return ref_name
1427 1436
1428 1437 def link_to_ref(repo_name, ref_type, ref_name, rev=None):
1429 1438 """
1430 1439 Return full markup for a href to changeset_home for a changeset.
1431 1440 If ref_type is branch it will link to changelog.
1432 1441 ref_name is shortened if ref_type is 'rev'.
1433 1442 if rev is specified show it too, explicitly linking to that revision.
1434 1443 """
1435 1444 txt = short_ref(ref_type, ref_name)
1436 1445 if ref_type == 'branch':
1437 1446 u = url('changelog_home', repo_name=repo_name, branch=ref_name)
1438 1447 else:
1439 1448 u = url('changeset_home', repo_name=repo_name, revision=ref_name)
1440 1449 l = link_to(repo_name + '#' + txt, u)
1441 1450 if rev and ref_type != 'rev':
1442 1451 l = literal('%s (%s)' % (l, link_to(short_id(rev), url('changeset_home', repo_name=repo_name, revision=rev))))
1443 1452 return l
1444 1453
1445 1454 def changeset_status(repo, revision):
1446 1455 return ChangesetStatusModel().get_status(repo, revision)
1447 1456
1448 1457
1449 1458 def changeset_status_lbl(changeset_status):
1450 1459 return ChangesetStatus.get_status_lbl(changeset_status)
1451 1460
1452 1461
1453 1462 def get_permission_name(key):
1454 1463 return dict(Permission.PERMS).get(key)
1455 1464
1456 1465
1457 1466 def journal_filter_help():
1458 1467 return _(textwrap.dedent('''
1459 1468 Example filter terms:
1460 1469 repository:vcs
1461 1470 username:developer
1462 1471 action:*push*
1463 1472 ip:127.0.0.1
1464 1473 date:20120101
1465 1474 date:[20120101100000 TO 20120102]
1466 1475
1467 1476 Generate wildcards using '*' character:
1468 1477 "repository:vcs*" - search everything starting with 'vcs'
1469 1478 "repository:*vcs*" - search for repository containing 'vcs'
1470 1479
1471 1480 Optional AND / OR operators in queries
1472 1481 "repository:vcs OR repository:test"
1473 1482 "username:test AND repository:test*"
1474 1483 '''))
1475 1484
1476 1485
1477 1486 def not_mapped_error(repo_name):
1478 1487 flash(_('%s repository is not mapped to db perhaps'
1479 1488 ' it was created or renamed from the filesystem'
1480 1489 ' please run the application again'
1481 1490 ' in order to rescan repositories') % repo_name, category='error')
1482 1491
1483 1492
1484 1493 def ip_range(ip_addr):
1485 1494 from kallithea.model.db import UserIpMap
1486 1495 s, e = UserIpMap._get_ip_range(ip_addr)
1487 1496 return '%s - %s' % (s, e)
1488 1497
1489 1498
1490 1499 def form(url, method="post", **attrs):
1491 1500 """Like webhelpers.html.tags.form but automatically using secure_form with
1492 1501 authentication_token for POST. authentication_token is thus never leaked
1493 1502 in the URL."""
1494 1503 if method.lower() == 'get':
1495 1504 return insecure_form(url, method=method, **attrs)
1496 1505 # webhelpers will turn everything but GET into POST
1497 1506 return secure_form(url, method=method, **attrs)
@@ -1,198 +1,199 @@
1 1 # -*- coding: utf-8 -*-
2 2 # This program is free software: you can redistribute it and/or modify
3 3 # it under the terms of the GNU General Public License as published by
4 4 # the Free Software Foundation, either version 3 of the License, or
5 5 # (at your option) any later version.
6 6 #
7 7 # This program is distributed in the hope that it will be useful,
8 8 # but WITHOUT ANY WARRANTY; without even the implied warranty of
9 9 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 10 # GNU General Public License for more details.
11 11 #
12 12 # You should have received a copy of the GNU General Public License
13 13 # along with this program. If not, see <http://www.gnu.org/licenses/>.
14 14 """
15 15 kallithea.lib.markup_renderer
16 16 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
17 17
18 18 Renderer for markup languages with ability to parse using rst or markdown
19 19
20 20 This file was forked by the Kallithea project in July 2014.
21 21 Original author and date, and relevant copyright and licensing information is below:
22 22 :created_on: Oct 27, 2011
23 23 :author: marcink
24 24 :copyright: (c) 2013 RhodeCode GmbH, and others.
25 25 :license: GPLv3, see LICENSE.md for more details.
26 26 """
27 27
28 28
29 29 import re
30 30 import logging
31 31 import traceback
32 32
33 33 from kallithea.lib.utils2 import safe_unicode, MENTIONS_REGEX
34 34
35 35 log = logging.getLogger(__name__)
36 36
37 37
38 url_re = re.compile(r'''(\bhttps?://(?:[\da-zA-Z0-9@:.-]+)'''
39 r'''(?:[/a-zA-Z0-9_=@#~&+%.,:;?!*()-]*[/a-zA-Z0-9_=@#~])?)''')
38 url_re = re.compile(r'''\bhttps?://(?:[\da-zA-Z0-9@:.-]+)'''
39 r'''(?:[/a-zA-Z0-9_=@#~&+%.,:;?!*()-]*[/a-zA-Z0-9_=@#~])?''')
40
40 41
41 42 class MarkupRenderer(object):
42 43 RESTRUCTUREDTEXT_DISALLOWED_DIRECTIVES = ['include', 'meta', 'raw']
43 44
44 45 MARKDOWN_PAT = re.compile(r'md|mkdn?|mdown|markdown', re.IGNORECASE)
45 46 RST_PAT = re.compile(r're?st', re.IGNORECASE)
46 47 PLAIN_PAT = re.compile(r'readme', re.IGNORECASE)
47 48
48 49 def _detect_renderer(self, source, filename=None):
49 50 """
50 51 runs detection of what renderer should be used for generating html
51 52 from a markup language
52 53
53 54 filename can be also explicitly a renderer name
54 55
55 56 :param source:
56 57 :param filename:
57 58 """
58 59
59 60 if MarkupRenderer.MARKDOWN_PAT.findall(filename):
60 61 detected_renderer = 'markdown'
61 62 elif MarkupRenderer.RST_PAT.findall(filename):
62 63 detected_renderer = 'rst'
63 64 elif MarkupRenderer.PLAIN_PAT.findall(filename):
64 65 detected_renderer = 'rst'
65 66 else:
66 67 detected_renderer = 'plain'
67 68
68 69 return getattr(MarkupRenderer, detected_renderer)
69 70
70 71 @classmethod
71 72 def _flavored_markdown(cls, text):
72 73 """
73 74 Github style flavored markdown
74 75
75 76 :param text:
76 77 """
77 78 from hashlib import md5
78 79
79 80 # Extract pre blocks.
80 81 extractions = {}
81 82 def pre_extraction_callback(matchobj):
82 83 digest = md5(matchobj.group(0)).hexdigest()
83 84 extractions[digest] = matchobj.group(0)
84 85 return "{gfm-extraction-%s}" % digest
85 86 pattern = re.compile(r'<pre>.*?</pre>', re.MULTILINE | re.DOTALL)
86 87 text = re.sub(pattern, pre_extraction_callback, text)
87 88
88 89 # Prevent foo_bar_baz from ending up with an italic word in the middle.
89 90 def italic_callback(matchobj):
90 91 s = matchobj.group(0)
91 92 if list(s).count('_') >= 2:
92 93 return s.replace('_', '\_')
93 94 return s
94 95 text = re.sub(r'^(?! {4}|\t)\w+_\w+_\w[\w_]*', italic_callback, text)
95 96
96 97 # In very clear cases, let newlines become <br /> tags.
97 98 def newline_callback(matchobj):
98 99 if len(matchobj.group(1)) == 1:
99 100 return matchobj.group(0).rstrip() + ' \n'
100 101 else:
101 102 return matchobj.group(0)
102 103 pattern = re.compile(r'^[\w\<][^\n]*(\n+)', re.MULTILINE)
103 104 text = re.sub(pattern, newline_callback, text)
104 105
105 106 # Insert pre block extractions.
106 107 def pre_insert_callback(matchobj):
107 108 return '\n\n' + extractions[matchobj.group(1)]
108 109 text = re.sub(r'{gfm-extraction-([0-9a-f]{32})\}',
109 110 pre_insert_callback, text)
110 111
111 112 return text
112 113
113 114 def render(self, source, filename=None):
114 115 """
115 116 Renders a given filename using detected renderer
116 117 it detects renderers based on file extension or mimetype.
117 118 At last it will just do a simple html replacing new lines with <br/>
118 119
119 120 :param file_name:
120 121 :param source:
121 122 """
122 123
123 124 renderer = self._detect_renderer(source, filename)
124 125 readme_data = renderer(source)
125 126 return readme_data
126 127
127 128 @classmethod
128 129 def plain(cls, source, universal_newline=True):
129 130 source = safe_unicode(source)
130 131 if universal_newline:
131 132 newline = '\n'
132 133 source = newline.join(source.splitlines())
133 134
134 135 def url_func(match_obj):
135 url_full = match_obj.groups()[0]
136 url_full = match_obj.group(0)
136 137 return '<a href="%(url)s">%(url)s</a>' % ({'url': url_full})
137 138 source = url_re.sub(url_func, source)
138 139 return '<br />' + source.replace("\n", '<br />')
139 140
140 141 @classmethod
141 142 def markdown(cls, source, safe=True, flavored=False):
142 143 source = safe_unicode(source)
143 144 try:
144 145 import markdown as __markdown
145 146 if flavored:
146 147 source = cls._flavored_markdown(source)
147 148 return __markdown.markdown(source, ['codehilite', 'extra'])
148 149 except ImportError:
149 150 log.warning('Install markdown to use this function')
150 151 return cls.plain(source)
151 152 except Exception:
152 153 log.error(traceback.format_exc())
153 154 if safe:
154 155 log.debug('Falling back to render in plain mode')
155 156 return cls.plain(source)
156 157 else:
157 158 raise
158 159
159 160 @classmethod
160 161 def rst(cls, source, safe=True):
161 162 source = safe_unicode(source)
162 163 try:
163 164 from docutils.core import publish_parts
164 165 from docutils.parsers.rst import directives
165 166 docutils_settings = dict([(alias, None) for alias in
166 167 cls.RESTRUCTUREDTEXT_DISALLOWED_DIRECTIVES])
167 168
168 169 docutils_settings.update({'input_encoding': 'unicode',
169 170 'report_level': 4})
170 171
171 172 for k, v in docutils_settings.iteritems():
172 173 directives.register_directive(k, v)
173 174
174 175 parts = publish_parts(source=source,
175 176 writer_name="html4css1",
176 177 settings_overrides=docutils_settings)
177 178
178 179 return parts['html_title'] + parts["fragment"]
179 180 except ImportError:
180 181 log.warning('Install docutils to use this function')
181 182 return cls.plain(source)
182 183 except Exception:
183 184 log.error(traceback.format_exc())
184 185 if safe:
185 186 log.debug('Falling back to render in plain mode')
186 187 return cls.plain(source)
187 188 else:
188 189 raise
189 190
190 191 @classmethod
191 192 def rst_with_mentions(cls, source):
192 193 mention_pat = re.compile(MENTIONS_REGEX)
193 194
194 195 def wrapp(match_obj):
195 196 uname = match_obj.groups()[0]
196 197 return '\ **@%(uname)s**\ ' % {'uname': uname}
197 198 mention_hl = mention_pat.sub(wrapp, source).strip()
198 199 return cls.rst(mention_hl)
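Because the outer capturing group is gone from url_re, callers that previously read the URL via groups()[0] or group(1) now take the whole match with group(0), as the updated url_func in MarkupRenderer.plain does above. Below is a minimal standalone sketch of that plain-text path, assuming only the re module; the sample text and URL are invented, and the real method also applies safe_unicode and universal-newline normalisation first.

import re

# url_re as now defined in markup_renderer.py (no capturing group)
url_re = re.compile(r'''\bhttps?://(?:[\da-zA-Z0-9@:.-]+)'''
                    r'''(?:[/a-zA-Z0-9_=@#~&+%.,:;?!*()-]*[/a-zA-Z0-9_=@#~])?''')

def url_func(match_obj):
    url_full = match_obj.group(0)  # whole match; there is no group(1) any more
    return '<a href="%(url)s">%(url)s</a>' % ({'url': url_full})

source = 'README for the project\nhomepage: http://example.com/project'
print('<br />' + url_re.sub(url_func, source).replace('\n', '<br />'))
# <br />README for the project<br />homepage: <a href="http://example.com/project">http://example.com/project</a>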