markup-rendering: added relative image support....
marcink
r1527:4089d52f default
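The change below touches two files: the summary controller, which renders the repository README, and the helpers module, which exposes renderer utilities as `h`. The controller now computes the raw-file URL of the README at the landing commit and hands it to the markup renderer, where the new `relative_links` helper rewrites relative image and link targets in the rendered HTML against that URL, so a README referencing `images/logo.png` resolves to the file inside the repository. The helper itself lives in rhodecode.lib.markup_renderer and is not part of this changeset; the sketch below is only an illustration of the idea, and the regex and the `_rewrite` callback are assumptions rather than the shipped implementation.

import re
import urlparse


def relative_links(html_source, relative_url):
    # Illustrative sketch, not the actual rhodecode.lib.markup_renderer code:
    # resolve relative src/href targets against the raw URL of the README
    # itself, leaving absolute URLs, anchors and data URIs untouched.
    if not relative_url:
        return html_source

    def _rewrite(match):
        attr, quote, target = match.group(1), match.group(2), match.group(3)
        if target.startswith(('http://', 'https://', '//', '/', '#',
                              'data:', 'mailto:')):
            return match.group(0)
        resolved = urlparse.urljoin(relative_url, target)
        return '%s=%s%s%s' % (attr, quote, resolved, quote)

    return re.sub(r'(src|href)=(["\'])([^"\']*)\2', _rewrite, html_source)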
@@ -1,318 +1,326 @@
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2010-2017 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21 """
22 22 Summary controller for RhodeCode Enterprise
23 23 """
24 24
25 25 import logging
26 26 from string import lower
27 27
28 28 from pylons import tmpl_context as c, request
29 29 from pylons.i18n.translation import _
30 30 from beaker.cache import cache_region, region_invalidate
31 31
32 32 from rhodecode.config.conf import (LANGUAGES_EXTENSIONS_MAP)
33 33 from rhodecode.controllers import utils
34 34 from rhodecode.controllers.changelog import _load_changelog_summary
35 35 from rhodecode.lib import caches, helpers as h
36 36 from rhodecode.lib.utils import jsonify
37 37 from rhodecode.lib.utils2 import safe_str
38 38 from rhodecode.lib.auth import (
39 39 LoginRequired, HasRepoPermissionAnyDecorator, NotAnonymous, XHRRequired)
40 40 from rhodecode.lib.base import BaseRepoController, render
41 from rhodecode.lib.markup_renderer import MarkupRenderer
41 from rhodecode.lib.markup_renderer import MarkupRenderer, relative_links
42 42 from rhodecode.lib.ext_json import json
43 43 from rhodecode.lib.vcs.backends.base import EmptyCommit
44 44 from rhodecode.lib.vcs.exceptions import (
45 45 CommitError, EmptyRepositoryError, NodeDoesNotExistError)
46 46 from rhodecode.model.db import Statistics, CacheKey, User
47 47 from rhodecode.model.repo import ReadmeFinder
48 48
49 49
50 50 log = logging.getLogger(__name__)
51 51
52 52
53 53 class SummaryController(BaseRepoController):
54 54
55 55 def __before__(self):
56 56 super(SummaryController, self).__before__()
57 57
58 58 def __get_readme_data(self, db_repo):
59 59 repo_name = db_repo.repo_name
60 60 log.debug('Looking for README file')
61 61 default_renderer = c.visual.default_renderer
62 62
63 63 @cache_region('long_term')
64 64 def _generate_readme(cache_key):
65 65 readme_data = None
66 66 readme_node = None
67 67 readme_filename = None
68 68 commit = self._get_landing_commit_or_none(db_repo)
69 69 if commit:
70 70 log.debug("Searching for a README file.")
71 71 readme_node = ReadmeFinder(default_renderer).search(commit)
72 72 if readme_node:
73 readme_data = self._render_readme_or_none(commit, readme_node)
73 relative_url = h.url('files_raw_home',
74 repo_name=repo_name,
75 revision=commit.raw_id,
76 f_path=readme_node.path)
77 readme_data = self._render_readme_or_none(
78 commit, readme_node, relative_url)
74 79 readme_filename = readme_node.path
75 80 return readme_data, readme_filename
76 81
77 82 invalidator_context = CacheKey.repo_context_cache(
78 83 _generate_readme, repo_name, CacheKey.CACHE_TYPE_README)
79 84
80 85 with invalidator_context as context:
81 86 context.invalidate()
82 87 computed = context.compute()
83 88
84 89 return computed
85 90
86 91 def _get_landing_commit_or_none(self, db_repo):
87 92 log.debug("Getting the landing commit.")
88 93 try:
89 94 commit = db_repo.get_landing_commit()
90 95 if not isinstance(commit, EmptyCommit):
91 96 return commit
92 97 else:
93 98 log.debug("Repository is empty, no README to render.")
94 99 except CommitError:
95 100 log.exception(
96 101 "Problem getting commit when trying to render the README.")
97 102
98 def _render_readme_or_none(self, commit, readme_node):
103 def _render_readme_or_none(self, commit, readme_node, relative_url):
99 104 log.debug(
100 105 'Found README file `%s` rendering...', readme_node.path)
101 106 renderer = MarkupRenderer()
102 107 try:
103 return renderer.render(
108 html_source = renderer.render(
104 109 readme_node.content, filename=readme_node.path)
110 if relative_url:
111 return relative_links(html_source, relative_url)
112 return html_source
105 113 except Exception:
106 114 log.exception(
107 115 "Exception while trying to render the README")
108 116
109 117 @LoginRequired()
110 118 @HasRepoPermissionAnyDecorator(
111 119 'repository.read', 'repository.write', 'repository.admin')
112 120 def index(self, repo_name):
113 121
114 122 # Prepare the clone URL
115 123
116 124 username = ''
117 125 if c.rhodecode_user.username != User.DEFAULT_USER:
118 126 username = safe_str(c.rhodecode_user.username)
119 127
120 128 _def_clone_uri = _def_clone_uri_by_id = c.clone_uri_tmpl
121 129 if '{repo}' in _def_clone_uri:
122 130 _def_clone_uri_by_id = _def_clone_uri.replace(
123 131 '{repo}', '_{repoid}')
124 132 elif '{repoid}' in _def_clone_uri:
125 133 _def_clone_uri_by_id = _def_clone_uri.replace(
126 134 '_{repoid}', '{repo}')
127 135
128 136 c.clone_repo_url = c.rhodecode_db_repo.clone_url(
129 137 user=username, uri_tmpl=_def_clone_uri)
130 138 c.clone_repo_url_id = c.rhodecode_db_repo.clone_url(
131 139 user=username, uri_tmpl=_def_clone_uri_by_id)
132 140
133 141 # If enabled, get statistics data
134 142
135 143 c.show_stats = bool(c.rhodecode_db_repo.enable_statistics)
136 144
137 145 stats = self.sa.query(Statistics)\
138 146 .filter(Statistics.repository == c.rhodecode_db_repo)\
139 147 .scalar()
140 148
141 149 c.stats_percentage = 0
142 150
143 151 if stats and stats.languages:
144 152 c.no_data = False is c.rhodecode_db_repo.enable_statistics
145 153 lang_stats_d = json.loads(stats.languages)
146 154
147 155 # Sort first by decreasing count and second by the file extension,
148 156 # so we have a consistent output.
149 157 lang_stats_items = sorted(lang_stats_d.iteritems(),
150 158 key=lambda k: (-k[1], k[0]))[:10]
151 159 lang_stats = [(x, {"count": y,
152 160 "desc": LANGUAGES_EXTENSIONS_MAP.get(x)})
153 161 for x, y in lang_stats_items]
154 162
155 163 c.trending_languages = json.dumps(lang_stats)
156 164 else:
157 165 c.no_data = True
158 166 c.trending_languages = json.dumps({})
159 167
160 168 c.enable_downloads = c.rhodecode_db_repo.enable_downloads
161 169 c.repository_followers = self.scm_model.get_followers(
162 170 c.rhodecode_db_repo)
163 171 c.repository_forks = self.scm_model.get_forks(c.rhodecode_db_repo)
164 172 c.repository_is_user_following = self.scm_model.is_following_repo(
165 173 c.repo_name, c.rhodecode_user.user_id)
166 174
167 175 if c.repository_requirements_missing:
168 176 return render('summary/missing_requirements.mako')
169 177
170 178 c.readme_data, c.readme_file = \
171 179 self.__get_readme_data(c.rhodecode_db_repo)
172 180
173 181 _load_changelog_summary()
174 182
175 183 if request.is_xhr:
176 184 return render('changelog/changelog_summary_data.mako')
177 185
178 186 return render('summary/summary.mako')
179 187
180 188 @LoginRequired()
181 189 @XHRRequired()
182 190 @HasRepoPermissionAnyDecorator(
183 191 'repository.read', 'repository.write', 'repository.admin')
184 192 @jsonify
185 193 def repo_stats(self, repo_name, commit_id):
186 194 _namespace = caches.get_repo_namespace_key(
187 195 caches.SUMMARY_STATS, repo_name)
188 196 show_stats = bool(c.rhodecode_db_repo.enable_statistics)
189 197 cache_manager = caches.get_cache_manager('repo_cache_long', _namespace)
190 198 _cache_key = caches.compute_key_from_params(
191 199 repo_name, commit_id, show_stats)
192 200
193 201 def compute_stats():
194 202 code_stats = {}
195 203 size = 0
196 204 try:
197 205 scm_instance = c.rhodecode_db_repo.scm_instance()
198 206 commit = scm_instance.get_commit(commit_id)
199 207
200 208 for node in commit.get_filenodes_generator():
201 209 size += node.size
202 210 if not show_stats:
203 211 continue
204 212 ext = lower(node.extension)
205 213 ext_info = LANGUAGES_EXTENSIONS_MAP.get(ext)
206 214 if ext_info:
207 215 if ext in code_stats:
208 216 code_stats[ext]['count'] += 1
209 217 else:
210 218 code_stats[ext] = {"count": 1, "desc": ext_info}
211 219 except EmptyRepositoryError:
212 220 pass
213 221 return {'size': h.format_byte_size_binary(size),
214 222 'code_stats': code_stats}
215 223
216 224 stats = cache_manager.get(_cache_key, createfunc=compute_stats)
217 225 return stats
218 226
219 227 def _switcher_reference_data(self, repo_name, references, is_svn):
220 228 """Prepare reference data for given `references`"""
221 229 items = []
222 230 for name, commit_id in references.items():
223 231 use_commit_id = '/' in name or is_svn
224 232 items.append({
225 233 'name': name,
226 234 'commit_id': commit_id,
227 235 'files_url': h.url(
228 236 'files_home',
229 237 repo_name=repo_name,
230 238 f_path=name if is_svn else '',
231 239 revision=commit_id if use_commit_id else name,
232 240 at=name)
233 241 })
234 242 return items
235 243
236 244 @LoginRequired()
237 245 @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
238 246 'repository.admin')
239 247 @jsonify
240 248 def repo_refs_data(self, repo_name):
241 249 repo = c.rhodecode_repo
242 250 refs_to_create = [
243 251 (_("Branch"), repo.branches, 'branch'),
244 252 (_("Tag"), repo.tags, 'tag'),
245 253 (_("Bookmark"), repo.bookmarks, 'book'),
246 254 ]
247 255 res = self._create_reference_data(repo, repo_name, refs_to_create)
248 256 data = {
249 257 'more': False,
250 258 'results': res
251 259 }
252 260 return data
253 261
254 262 @LoginRequired()
255 263 @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
256 264 'repository.admin')
257 265 @jsonify
258 266 def repo_default_reviewers_data(self, repo_name):
259 267 return {
260 268 'reviewers': [utils.reviewer_as_json(
261 269 user=c.rhodecode_db_repo.user, reasons=None)]
262 270 }
263 271
264 272 @jsonify
265 273 def repo_refs_changelog_data(self, repo_name):
266 274 repo = c.rhodecode_repo
267 275
268 276 refs_to_create = [
269 277 (_("Branches"), repo.branches, 'branch'),
270 278 (_("Closed branches"), repo.branches_closed, 'branch_closed'),
271 279 # TODO: enable when vcs can handle bookmarks filters
272 280 # (_("Bookmarks"), repo.bookmarks, "book"),
273 281 ]
274 282 res = self._create_reference_data(repo, repo_name, refs_to_create)
275 283 data = {
276 284 'more': False,
277 285 'results': res
278 286 }
279 287 return data
280 288
281 289 def _create_reference_data(self, repo, full_repo_name, refs_to_create):
282 290 format_ref_id = utils.get_format_ref_id(repo)
283 291
284 292 result = []
285 293 for title, refs, ref_type in refs_to_create:
286 294 if refs:
287 295 result.append({
288 296 'text': title,
289 297 'children': self._create_reference_items(
290 298 repo, full_repo_name, refs, ref_type, format_ref_id),
291 299 })
292 300 return result
293 301
294 302 def _create_reference_items(self, repo, full_repo_name, refs, ref_type,
295 303 format_ref_id):
296 304 result = []
297 305 is_svn = h.is_svn(repo)
298 306 for ref_name, raw_id in refs.iteritems():
299 307 files_url = self._create_files_url(
300 308 repo, full_repo_name, ref_name, raw_id, is_svn)
301 309 result.append({
302 310 'text': ref_name,
303 311 'id': format_ref_id(ref_name, raw_id),
304 312 'raw_id': raw_id,
305 313 'type': ref_type,
306 314 'files_url': files_url,
307 315 })
308 316 return result
309 317
310 318 def _create_files_url(self, repo, full_repo_name, ref_name, raw_id,
311 319 is_svn):
312 320 use_commit_id = '/' in ref_name or is_svn
313 321 return h.url(
314 322 'files_home',
315 323 repo_name=full_repo_name,
316 324 f_path=ref_name if is_svn else '',
317 325 revision=raw_id if use_commit_id else ref_name,
318 326 at=ref_name)
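In the helpers module that follows, only the import changes: `relative_links` is exposed next to MarkupRenderer so it can be reached through `h` from templates and controllers. Condensed, the README path on the summary page now runs roughly like this (a restatement of the controller code above, with `repo_name`, `commit` and `readme_node` taken from `_generate_readme`):

renderer = MarkupRenderer()
html_source = renderer.render(readme_node.content, filename=readme_node.path)

# raw-file URL of the README itself, used as the base for relative targets
relative_url = h.url('files_raw_home',
                     repo_name=repo_name,
                     revision=commit.raw_id,
                     f_path=readme_node.path)

readme_data = relative_links(html_source, relative_url)

The result is still memoized through the `long_term` cache region and the CacheKey.repo_context_cache invalidation context, so the link rewriting only runs when the README cache is recomputed.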
@@ -1,2019 +1,2029 @@
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2010-2017 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21 """
22 22 Helper functions
23 23
24 24 Consists of functions to typically be used within templates, but also
25 25 available to Controllers. This module is available to both as 'h'.
26 26 """
27 27
28 28 import random
29 29 import hashlib
30 30 import StringIO
31 31 import urllib
32 32 import math
33 33 import logging
34 34 import re
35 35 import urlparse
36 36 import time
37 37 import string
38 38 import hashlib
39 39 import pygments
40 40 import itertools
41 41 import fnmatch
42 42
43 43 from datetime import datetime
44 44 from functools import partial
45 45 from pygments.formatters.html import HtmlFormatter
46 46 from pygments import highlight as code_highlight
47 47 from pygments.lexers import (
48 48 get_lexer_by_name, get_lexer_for_filename, get_lexer_for_mimetype)
49 49 from pylons import url as pylons_url
50 50 from pylons.i18n.translation import _, ungettext
51 51 from pyramid.threadlocal import get_current_request
52 52
53 53 from webhelpers.html import literal, HTML, escape
54 54 from webhelpers.html.tools import *
55 55 from webhelpers.html.builder import make_tag
56 56 from webhelpers.html.tags import auto_discovery_link, checkbox, css_classes, \
57 57 end_form, file, form as wh_form, hidden, image, javascript_link, link_to, \
58 58 link_to_if, link_to_unless, ol, required_legend, select, stylesheet_link, \
59 59 submit, text, password, textarea, title, ul, xml_declaration, radio
60 60 from webhelpers.html.tools import auto_link, button_to, highlight, \
61 61 js_obfuscate, mail_to, strip_links, strip_tags, tag_re
62 62 from webhelpers.pylonslib import Flash as _Flash
63 63 from webhelpers.text import chop_at, collapse, convert_accented_entities, \
64 64 convert_misc_entities, lchop, plural, rchop, remove_formatting, \
65 65 replace_whitespace, urlify, truncate, wrap_paragraphs
66 66 from webhelpers.date import time_ago_in_words
67 67 from webhelpers.paginate import Page as _Page
68 68 from webhelpers.html.tags import _set_input_attrs, _set_id_attr, \
69 69 convert_boolean_attrs, NotGiven, _make_safe_id_component
70 70 from webhelpers2.number import format_byte_size
71 71
72 72 from rhodecode.lib.action_parser import action_parser
73 73 from rhodecode.lib.ext_json import json
74 74 from rhodecode.lib.utils import repo_name_slug, get_custom_lexer
75 75 from rhodecode.lib.utils2 import str2bool, safe_unicode, safe_str, \
76 76 get_commit_safe, datetime_to_time, time_to_datetime, time_to_utcdatetime, \
77 77 AttributeDict, safe_int, md5, md5_safe
78 from rhodecode.lib.markup_renderer import MarkupRenderer
78 from rhodecode.lib.markup_renderer import MarkupRenderer, relative_links
79 79 from rhodecode.lib.vcs.exceptions import CommitDoesNotExistError
80 80 from rhodecode.lib.vcs.backends.base import BaseChangeset, EmptyCommit
81 81 from rhodecode.config.conf import DATE_FORMAT, DATETIME_FORMAT
82 82 from rhodecode.model.changeset_status import ChangesetStatusModel
83 83 from rhodecode.model.db import Permission, User, Repository
84 84 from rhodecode.model.repo_group import RepoGroupModel
85 85 from rhodecode.model.settings import IssueTrackerSettingsModel
86 86
87 87 log = logging.getLogger(__name__)
88 88
89 89
90 90 DEFAULT_USER = User.DEFAULT_USER
91 91 DEFAULT_USER_EMAIL = User.DEFAULT_USER_EMAIL
92 92
93 93
94 94 def url(*args, **kw):
95 95 return pylons_url(*args, **kw)
96 96
97 97
98 98 def pylons_url_current(*args, **kw):
99 99 """
100 100 This function overrides pylons.url.current() which returns the current
101 101 path so that it will also work from a pyramid only context. This
102 102 should be removed once port to pyramid is complete.
103 103 """
104 104 if not args and not kw:
105 105 request = get_current_request()
106 106 return request.path
107 107 return pylons_url.current(*args, **kw)
108 108
109 109 url.current = pylons_url_current
110 110
111 111
112 112 def url_replace(**qargs):
113 113 """ Returns the current request url while replacing query string args """
114 114
115 115 request = get_current_request()
116 116 new_args = request.GET.mixed()
117 117 new_args.update(qargs)
118 118 return url('', **new_args)
119 119
120 120
121 121 def asset(path, ver=None, **kwargs):
122 122 """
123 123 Helper to generate a static asset file path for rhodecode assets
124 124
125 125 eg. h.asset('images/image.png', ver='3923')
126 126
127 127 :param path: path of asset
128 128 :param ver: optional version query param to append as ?ver=
129 129 """
130 130 request = get_current_request()
131 131 query = {}
132 132 query.update(kwargs)
133 133 if ver:
134 134 query = {'ver': ver}
135 135 return request.static_path(
136 136 'rhodecode:public/{}'.format(path), _query=query)
137 137
138 138
139 139 default_html_escape_table = {
140 140 ord('&'): u'&amp;',
141 141 ord('<'): u'&lt;',
142 142 ord('>'): u'&gt;',
143 143 ord('"'): u'&quot;',
144 144 ord("'"): u'&#39;',
145 145 }
146 146
147 147
148 148 def html_escape(text, html_escape_table=default_html_escape_table):
149 149 """Produce entities within text."""
150 150 return text.translate(html_escape_table)
151 151
152 152
153 153 def chop_at_smart(s, sub, inclusive=False, suffix_if_chopped=None):
154 154 """
155 155 Truncate string ``s`` at the first occurrence of ``sub``.
156 156
157 157 If ``inclusive`` is true, truncate just after ``sub`` rather than at it.
158 158 """
159 159 suffix_if_chopped = suffix_if_chopped or ''
160 160 pos = s.find(sub)
161 161 if pos == -1:
162 162 return s
163 163
164 164 if inclusive:
165 165 pos += len(sub)
166 166
167 167 chopped = s[:pos]
168 168 left = s[pos:].strip()
169 169
170 170 if left and suffix_if_chopped:
171 171 chopped += suffix_if_chopped
172 172
173 173 return chopped
174 174
175 175
176 176 def shorter(text, size=20):
177 177 postfix = '...'
178 178 if len(text) > size:
179 179 return text[:size - len(postfix)] + postfix
180 180 return text
181 181
182 182
183 183 def _reset(name, value=None, id=NotGiven, type="reset", **attrs):
184 184 """
185 185 Reset button
186 186 """
187 187 _set_input_attrs(attrs, type, name, value)
188 188 _set_id_attr(attrs, id, name)
189 189 convert_boolean_attrs(attrs, ["disabled"])
190 190 return HTML.input(**attrs)
191 191
192 192 reset = _reset
193 193 safeid = _make_safe_id_component
194 194
195 195
196 196 def branding(name, length=40):
197 197 return truncate(name, length, indicator="")
198 198
199 199
200 200 def FID(raw_id, path):
201 201 """
202 202 Creates a unique ID for filenode based on its hash of path and commit
203 203 it's safe to use in urls
204 204
205 205 :param raw_id:
206 206 :param path:
207 207 """
208 208
209 209 return 'c-%s-%s' % (short_id(raw_id), md5_safe(path)[:12])
210 210
211 211
212 212 class _GetError(object):
213 213 """Get error from form_errors, and represent it as span wrapped error
214 214 message
215 215
216 216 :param field_name: field to fetch errors for
217 217 :param form_errors: form errors dict
218 218 """
219 219
220 220 def __call__(self, field_name, form_errors):
221 221 tmpl = """<span class="error_msg">%s</span>"""
222 222 if form_errors and field_name in form_errors:
223 223 return literal(tmpl % form_errors.get(field_name))
224 224
225 225 get_error = _GetError()
226 226
227 227
228 228 class _ToolTip(object):
229 229
230 230 def __call__(self, tooltip_title, trim_at=50):
231 231 """
232 232 Special function just to wrap our text into nice formatted
233 233 autowrapped text
234 234
235 235 :param tooltip_title:
236 236 """
237 237 tooltip_title = escape(tooltip_title)
238 238 tooltip_title = tooltip_title.replace('<', '&lt;').replace('>', '&gt;')
239 239 return tooltip_title
240 240 tooltip = _ToolTip()
241 241
242 242
243 243 def files_breadcrumbs(repo_name, commit_id, file_path):
244 244 if isinstance(file_path, str):
245 245 file_path = safe_unicode(file_path)
246 246
247 247 # TODO: johbo: Is this always a url like path, or is this operating
248 248 # system dependent?
249 249 path_segments = file_path.split('/')
250 250
251 251 repo_name_html = escape(repo_name)
252 252 if len(path_segments) == 1 and path_segments[0] == '':
253 253 url_segments = [repo_name_html]
254 254 else:
255 255 url_segments = [
256 256 link_to(
257 257 repo_name_html,
258 258 url('files_home',
259 259 repo_name=repo_name,
260 260 revision=commit_id,
261 261 f_path=''),
262 262 class_='pjax-link')]
263 263
264 264 last_cnt = len(path_segments) - 1
265 265 for cnt, segment in enumerate(path_segments):
266 266 if not segment:
267 267 continue
268 268 segment_html = escape(segment)
269 269
270 270 if cnt != last_cnt:
271 271 url_segments.append(
272 272 link_to(
273 273 segment_html,
274 274 url('files_home',
275 275 repo_name=repo_name,
276 276 revision=commit_id,
277 277 f_path='/'.join(path_segments[:cnt + 1])),
278 278 class_='pjax-link'))
279 279 else:
280 280 url_segments.append(segment_html)
281 281
282 282 return literal('/'.join(url_segments))
283 283
284 284
285 285 class CodeHtmlFormatter(HtmlFormatter):
286 286 """
287 287 My code Html Formatter for source codes
288 288 """
289 289
290 290 def wrap(self, source, outfile):
291 291 return self._wrap_div(self._wrap_pre(self._wrap_code(source)))
292 292
293 293 def _wrap_code(self, source):
294 294 for cnt, it in enumerate(source):
295 295 i, t = it
296 296 t = '<div id="L%s">%s</div>' % (cnt + 1, t)
297 297 yield i, t
298 298
299 299 def _wrap_tablelinenos(self, inner):
300 300 dummyoutfile = StringIO.StringIO()
301 301 lncount = 0
302 302 for t, line in inner:
303 303 if t:
304 304 lncount += 1
305 305 dummyoutfile.write(line)
306 306
307 307 fl = self.linenostart
308 308 mw = len(str(lncount + fl - 1))
309 309 sp = self.linenospecial
310 310 st = self.linenostep
311 311 la = self.lineanchors
312 312 aln = self.anchorlinenos
313 313 nocls = self.noclasses
314 314 if sp:
315 315 lines = []
316 316
317 317 for i in range(fl, fl + lncount):
318 318 if i % st == 0:
319 319 if i % sp == 0:
320 320 if aln:
321 321 lines.append('<a href="#%s%d" class="special">%*d</a>' %
322 322 (la, i, mw, i))
323 323 else:
324 324 lines.append('<span class="special">%*d</span>' % (mw, i))
325 325 else:
326 326 if aln:
327 327 lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
328 328 else:
329 329 lines.append('%*d' % (mw, i))
330 330 else:
331 331 lines.append('')
332 332 ls = '\n'.join(lines)
333 333 else:
334 334 lines = []
335 335 for i in range(fl, fl + lncount):
336 336 if i % st == 0:
337 337 if aln:
338 338 lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
339 339 else:
340 340 lines.append('%*d' % (mw, i))
341 341 else:
342 342 lines.append('')
343 343 ls = '\n'.join(lines)
344 344
345 345 # in case you wonder about the seemingly redundant <div> here: since the
346 346 # content in the other cell also is wrapped in a div, some browsers in
347 347 # some configurations seem to mess up the formatting...
348 348 if nocls:
349 349 yield 0, ('<table class="%stable">' % self.cssclass +
350 350 '<tr><td><div class="linenodiv" '
351 351 'style="background-color: #f0f0f0; padding-right: 10px">'
352 352 '<pre style="line-height: 125%">' +
353 353 ls + '</pre></div></td><td id="hlcode" class="code">')
354 354 else:
355 355 yield 0, ('<table class="%stable">' % self.cssclass +
356 356 '<tr><td class="linenos"><div class="linenodiv"><pre>' +
357 357 ls + '</pre></div></td><td id="hlcode" class="code">')
358 358 yield 0, dummyoutfile.getvalue()
359 359 yield 0, '</td></tr></table>'
360 360
361 361
362 362 class SearchContentCodeHtmlFormatter(CodeHtmlFormatter):
363 363 def __init__(self, **kw):
364 364 # only show these line numbers if set
365 365 self.only_lines = kw.pop('only_line_numbers', [])
366 366 self.query_terms = kw.pop('query_terms', [])
367 367 self.max_lines = kw.pop('max_lines', 5)
368 368 self.line_context = kw.pop('line_context', 3)
369 369 self.url = kw.pop('url', None)
370 370
371 371 super(CodeHtmlFormatter, self).__init__(**kw)
372 372
373 373 def _wrap_code(self, source):
374 374 for cnt, it in enumerate(source):
375 375 i, t = it
376 376 t = '<pre>%s</pre>' % t
377 377 yield i, t
378 378
379 379 def _wrap_tablelinenos(self, inner):
380 380 yield 0, '<table class="code-highlight %stable">' % self.cssclass
381 381
382 382 last_shown_line_number = 0
383 383 current_line_number = 1
384 384
385 385 for t, line in inner:
386 386 if not t:
387 387 yield t, line
388 388 continue
389 389
390 390 if current_line_number in self.only_lines:
391 391 if last_shown_line_number + 1 != current_line_number:
392 392 yield 0, '<tr>'
393 393 yield 0, '<td class="line">...</td>'
394 394 yield 0, '<td id="hlcode" class="code"></td>'
395 395 yield 0, '</tr>'
396 396
397 397 yield 0, '<tr>'
398 398 if self.url:
399 399 yield 0, '<td class="line"><a href="%s#L%i">%i</a></td>' % (
400 400 self.url, current_line_number, current_line_number)
401 401 else:
402 402 yield 0, '<td class="line"><a href="">%i</a></td>' % (
403 403 current_line_number)
404 404 yield 0, '<td id="hlcode" class="code">' + line + '</td>'
405 405 yield 0, '</tr>'
406 406
407 407 last_shown_line_number = current_line_number
408 408
409 409 current_line_number += 1
410 410
411 411
412 412 yield 0, '</table>'
413 413
414 414
415 415 def extract_phrases(text_query):
416 416 """
417 417 Extracts phrases from search term string making sure phrases
418 418 contained in double quotes are kept together - and discarding empty values
419 419 or fully whitespace values eg.
420 420
421 421 'some text "a phrase" more' => ['some', 'text', 'a phrase', 'more']
422 422
423 423 """
424 424
425 425 in_phrase = False
426 426 buf = ''
427 427 phrases = []
428 428 for char in text_query:
429 429 if in_phrase:
430 430 if char == '"': # end phrase
431 431 phrases.append(buf)
432 432 buf = ''
433 433 in_phrase = False
434 434 continue
435 435 else:
436 436 buf += char
437 437 continue
438 438 else:
439 439 if char == '"': # start phrase
440 440 in_phrase = True
441 441 phrases.append(buf)
442 442 buf = ''
443 443 continue
444 444 elif char == ' ':
445 445 phrases.append(buf)
446 446 buf = ''
447 447 continue
448 448 else:
449 449 buf += char
450 450
451 451 phrases.append(buf)
452 452 phrases = [phrase.strip() for phrase in phrases if phrase.strip()]
453 453 return phrases
454 454
455 455
456 456 def get_matching_offsets(text, phrases):
457 457 """
458 458 Returns a list of string offsets in `text` that the list of `terms` match
459 459
460 460 >>> get_matching_offsets('some text here', ['some', 'here'])
461 461 [(0, 4), (10, 14)]
462 462
463 463 """
464 464 offsets = []
465 465 for phrase in phrases:
466 466 for match in re.finditer(phrase, text):
467 467 offsets.append((match.start(), match.end()))
468 468
469 469 return offsets
470 470
471 471
472 472 def normalize_text_for_matching(x):
473 473 """
474 474 Replaces all non alnum characters to spaces and lower cases the string,
475 475 useful for comparing two text strings without punctuation
476 476 """
477 477 return re.sub(r'[^\w]', ' ', x.lower())
478 478
479 479
480 480 def get_matching_line_offsets(lines, terms):
481 481 """ Return a set of `lines` indices (starting from 1) matching a
482 482 text search query, along with `context` lines above/below matching lines
483 483
484 484 :param lines: list of strings representing lines
485 485 :param terms: search term string to match in lines eg. 'some text'
486 486 :param context: number of lines above/below a matching line to add to result
487 487 :param max_lines: cut off for lines of interest
488 488 eg.
489 489
490 490 text = '''
491 491 words words words
492 492 words words words
493 493 some text some
494 494 words words words
495 495 words words words
496 496 text here what
497 497 '''
498 498 get_matching_line_offsets(text, 'text', context=1)
499 499 {3: [(5, 9)], 6: [(0, 4)]}
500 500
501 501 """
502 502 matching_lines = {}
503 503 phrases = [normalize_text_for_matching(phrase)
504 504 for phrase in extract_phrases(terms)]
505 505
506 506 for line_index, line in enumerate(lines, start=1):
507 507 match_offsets = get_matching_offsets(
508 508 normalize_text_for_matching(line), phrases)
509 509 if match_offsets:
510 510 matching_lines[line_index] = match_offsets
511 511
512 512 return matching_lines
513 513
514 514
515 515 def hsv_to_rgb(h, s, v):
516 516 """ Convert hsv color values to rgb """
517 517
518 518 if s == 0.0:
519 519 return v, v, v
520 520 i = int(h * 6.0) # XXX assume int() truncates!
521 521 f = (h * 6.0) - i
522 522 p = v * (1.0 - s)
523 523 q = v * (1.0 - s * f)
524 524 t = v * (1.0 - s * (1.0 - f))
525 525 i = i % 6
526 526 if i == 0:
527 527 return v, t, p
528 528 if i == 1:
529 529 return q, v, p
530 530 if i == 2:
531 531 return p, v, t
532 532 if i == 3:
533 533 return p, q, v
534 534 if i == 4:
535 535 return t, p, v
536 536 if i == 5:
537 537 return v, p, q
538 538
539 539
540 540 def unique_color_generator(n=10000, saturation=0.10, lightness=0.95):
541 541 """
542 542 Generator for getting n of evenly distributed colors using
543 543 hsv color and golden ratio. It always returns the same order of colors
544 544
545 545 :param n: number of colors to generate
546 546 :param saturation: saturation of returned colors
547 547 :param lightness: lightness of returned colors
548 548 :returns: RGB tuple
549 549 """
550 550
551 551 golden_ratio = 0.618033988749895
552 552 h = 0.22717784590367374
553 553
554 554 for _ in xrange(n):
555 555 h += golden_ratio
556 556 h %= 1
557 557 HSV_tuple = [h, saturation, lightness]
558 558 RGB_tuple = hsv_to_rgb(*HSV_tuple)
559 559 yield map(lambda x: str(int(x * 256)), RGB_tuple)
560 560
561 561
562 562 def color_hasher(n=10000, saturation=0.10, lightness=0.95):
563 563 """
564 564 Returns a function which when called with an argument returns a unique
565 565 color for that argument, eg.
566 566
567 567 :param n: number of colors to generate
568 568 :param saturation: saturation of returned colors
569 569 :param lightness: lightness of returned colors
570 570 :returns: css RGB string
571 571
572 572 >>> color_hash = color_hasher()
573 573 >>> color_hash('hello')
574 574 'rgb(34, 12, 59)'
575 575 >>> color_hash('hello')
576 576 'rgb(34, 12, 59)'
577 577 >>> color_hash('other')
578 578 'rgb(90, 224, 159)'
579 579 """
580 580
581 581 color_dict = {}
582 582 cgenerator = unique_color_generator(
583 583 saturation=saturation, lightness=lightness)
584 584
585 585 def get_color_string(thing):
586 586 if thing in color_dict:
587 587 col = color_dict[thing]
588 588 else:
589 589 col = color_dict[thing] = cgenerator.next()
590 590 return "rgb(%s)" % (', '.join(col))
591 591
592 592 return get_color_string
593 593
594 594
595 595 def get_lexer_safe(mimetype=None, filepath=None):
596 596 """
597 597 Tries to return a relevant pygments lexer using mimetype/filepath name,
598 598 defaulting to plain text if none could be found
599 599 """
600 600 lexer = None
601 601 try:
602 602 if mimetype:
603 603 lexer = get_lexer_for_mimetype(mimetype)
604 604 if not lexer:
605 605 lexer = get_lexer_for_filename(filepath)
606 606 except pygments.util.ClassNotFound:
607 607 pass
608 608
609 609 if not lexer:
610 610 lexer = get_lexer_by_name('text')
611 611
612 612 return lexer
613 613
614 614
615 615 def get_lexer_for_filenode(filenode):
616 616 lexer = get_custom_lexer(filenode.extension) or filenode.lexer
617 617 return lexer
618 618
619 619
620 620 def pygmentize(filenode, **kwargs):
621 621 """
622 622 pygmentize function using pygments
623 623
624 624 :param filenode:
625 625 """
626 626 lexer = get_lexer_for_filenode(filenode)
627 627 return literal(code_highlight(filenode.content, lexer,
628 628 CodeHtmlFormatter(**kwargs)))
629 629
630 630
631 631 def is_following_repo(repo_name, user_id):
632 632 from rhodecode.model.scm import ScmModel
633 633 return ScmModel().is_following_repo(repo_name, user_id)
634 634
635 635
636 636 class _Message(object):
637 637 """A message returned by ``Flash.pop_messages()``.
638 638
639 639 Converting the message to a string returns the message text. Instances
640 640 also have the following attributes:
641 641
642 642 * ``message``: the message text.
643 643 * ``category``: the category specified when the message was created.
644 644 """
645 645
646 646 def __init__(self, category, message):
647 647 self.category = category
648 648 self.message = message
649 649
650 650 def __str__(self):
651 651 return self.message
652 652
653 653 __unicode__ = __str__
654 654
655 655 def __html__(self):
656 656 return escape(safe_unicode(self.message))
657 657
658 658
659 659 class Flash(_Flash):
660 660
661 661 def pop_messages(self):
662 662 """Return all accumulated messages and delete them from the session.
663 663
664 664 The return value is a list of ``Message`` objects.
665 665 """
666 666 from pylons import session
667 667
668 668 messages = []
669 669
670 670 # Pop the 'old' pylons flash messages. They are tuples of the form
671 671 # (category, message)
672 672 for cat, msg in session.pop(self.session_key, []):
673 673 messages.append(_Message(cat, msg))
674 674
675 675 # Pop the 'new' pyramid flash messages for each category as list
676 676 # of strings.
677 677 for cat in self.categories:
678 678 for msg in session.pop_flash(queue=cat):
679 679 messages.append(_Message(cat, msg))
680 680 # Map messages from the default queue to the 'notice' category.
681 681 for msg in session.pop_flash():
682 682 messages.append(_Message('notice', msg))
683 683
684 684 session.save()
685 685 return messages
686 686
687 687 def json_alerts(self):
688 688 payloads = []
689 689 messages = flash.pop_messages()
690 690 if messages:
691 691 for message in messages:
692 692 subdata = {}
693 693 if hasattr(message.message, 'rsplit'):
694 694 flash_data = message.message.rsplit('|DELIM|', 1)
695 695 org_message = flash_data[0]
696 696 if len(flash_data) > 1:
697 697 subdata = json.loads(flash_data[1])
698 698 else:
699 699 org_message = message.message
700 700 payloads.append({
701 701 'message': {
702 702 'message': u'{}'.format(org_message),
703 703 'level': message.category,
704 704 'force': True,
705 705 'subdata': subdata
706 706 }
707 707 })
708 708 return json.dumps(payloads)
709 709
710 710 flash = Flash()
711 711
712 712 #==============================================================================
713 713 # SCM FILTERS available via h.
714 714 #==============================================================================
715 715 from rhodecode.lib.vcs.utils import author_name, author_email
716 716 from rhodecode.lib.utils2 import credentials_filter, age as _age
717 717 from rhodecode.model.db import User, ChangesetStatus
718 718
719 719 age = _age
720 720 capitalize = lambda x: x.capitalize()
721 721 email = author_email
722 722 short_id = lambda x: x[:12]
723 723 hide_credentials = lambda x: ''.join(credentials_filter(x))
724 724
725 725
726 726 def age_component(datetime_iso, value=None, time_is_local=False):
727 727 title = value or format_date(datetime_iso)
728 728 tzinfo = '+00:00'
729 729
730 730 # detect if we have a timezone info, otherwise, add it
731 731 if isinstance(datetime_iso, datetime) and not datetime_iso.tzinfo:
732 732 if time_is_local:
733 733 tzinfo = time.strftime("+%H:%M",
734 734 time.gmtime(
735 735 (datetime.now() - datetime.utcnow()).seconds + 1
736 736 )
737 737 )
738 738
739 739 return literal(
740 740 '<time class="timeago tooltip" '
741 741 'title="{1}{2}" datetime="{0}{2}">{1}</time>'.format(
742 742 datetime_iso, title, tzinfo))
743 743
744 744
745 745 def _shorten_commit_id(commit_id):
746 746 from rhodecode import CONFIG
747 747 def_len = safe_int(CONFIG.get('rhodecode_show_sha_length', 12))
748 748 return commit_id[:def_len]
749 749
750 750
751 751 def show_id(commit):
752 752 """
753 753 Configurable function that shows ID
754 754 by default it's r123:fffeeefffeee
755 755
756 756 :param commit: commit instance
757 757 """
758 758 from rhodecode import CONFIG
759 759 show_idx = str2bool(CONFIG.get('rhodecode_show_revision_number', True))
760 760
761 761 raw_id = _shorten_commit_id(commit.raw_id)
762 762 if show_idx:
763 763 return 'r%s:%s' % (commit.idx, raw_id)
764 764 else:
765 765 return '%s' % (raw_id, )
766 766
767 767
768 768 def format_date(date):
769 769 """
770 770 use a standardized formatting for dates used in RhodeCode
771 771
772 772 :param date: date/datetime object
773 773 :return: formatted date
774 774 """
775 775
776 776 if date:
777 777 _fmt = "%a, %d %b %Y %H:%M:%S"
778 778 return safe_unicode(date.strftime(_fmt))
779 779
780 780 return u""
781 781
782 782
783 783 class _RepoChecker(object):
784 784
785 785 def __init__(self, backend_alias):
786 786 self._backend_alias = backend_alias
787 787
788 788 def __call__(self, repository):
789 789 if hasattr(repository, 'alias'):
790 790 _type = repository.alias
791 791 elif hasattr(repository, 'repo_type'):
792 792 _type = repository.repo_type
793 793 else:
794 794 _type = repository
795 795 return _type == self._backend_alias
796 796
797 797 is_git = _RepoChecker('git')
798 798 is_hg = _RepoChecker('hg')
799 799 is_svn = _RepoChecker('svn')
800 800
801 801
802 802 def get_repo_type_by_name(repo_name):
803 803 repo = Repository.get_by_repo_name(repo_name)
804 804 return repo.repo_type
805 805
806 806
807 807 def is_svn_without_proxy(repository):
808 808 if is_svn(repository):
809 809 from rhodecode.model.settings import VcsSettingsModel
810 810 conf = VcsSettingsModel().get_ui_settings_as_config_obj()
811 811 return not str2bool(conf.get('vcs_svn_proxy', 'http_requests_enabled'))
812 812 return False
813 813
814 814
815 815 def discover_user(author):
816 816 """
817 817 Tries to discover RhodeCode User based on the author string. Author string
818 818 is typically `FirstName LastName <email@address.com>`
819 819 """
820 820
821 821 # if author is already an instance use it for extraction
822 822 if isinstance(author, User):
823 823 return author
824 824
825 825 # Valid email in the attribute passed, see if they're in the system
826 826 _email = author_email(author)
827 827 if _email != '':
828 828 user = User.get_by_email(_email, case_insensitive=True, cache=True)
829 829 if user is not None:
830 830 return user
831 831
832 832 # Maybe it's a username, we try to extract it and fetch by username ?
833 833 _author = author_name(author)
834 834 user = User.get_by_username(_author, case_insensitive=True, cache=True)
835 835 if user is not None:
836 836 return user
837 837
838 838 return None
839 839
840 840
841 841 def email_or_none(author):
842 842 # extract email from the commit string
843 843 _email = author_email(author)
844 844
845 845 # If we have an email, use it, otherwise
846 846 # see if it contains a username we can get an email from
847 847 if _email != '':
848 848 return _email
849 849 else:
850 850 user = User.get_by_username(
851 851 author_name(author), case_insensitive=True, cache=True)
852 852
853 853 if user is not None:
854 854 return user.email
855 855
856 856 # No valid email, not a valid user in the system, none!
857 857 return None
858 858
859 859
860 860 def link_to_user(author, length=0, **kwargs):
861 861 user = discover_user(author)
862 862 # user can be None, but if we have it already it means we can re-use it
863 863 # in the person() function, so we save 1 intensive-query
864 864 if user:
865 865 author = user
866 866
867 867 display_person = person(author, 'username_or_name_or_email')
868 868 if length:
869 869 display_person = shorter(display_person, length)
870 870
871 871 if user:
872 872 return link_to(
873 873 escape(display_person),
874 874 route_path('user_profile', username=user.username),
875 875 **kwargs)
876 876 else:
877 877 return escape(display_person)
878 878
879 879
880 880 def person(author, show_attr="username_and_name"):
881 881 user = discover_user(author)
882 882 if user:
883 883 return getattr(user, show_attr)
884 884 else:
885 885 _author = author_name(author)
886 886 _email = email(author)
887 887 return _author or _email
888 888
889 889
890 890 def author_string(email):
891 891 if email:
892 892 user = User.get_by_email(email, case_insensitive=True, cache=True)
893 893 if user:
894 894 if user.firstname or user.lastname:
895 895 return '%s %s &lt;%s&gt;' % (user.firstname, user.lastname, email)
896 896 else:
897 897 return email
898 898 else:
899 899 return email
900 900 else:
901 901 return None
902 902
903 903
904 904 def person_by_id(id_, show_attr="username_and_name"):
905 905 # attr to return from fetched user
906 906 person_getter = lambda usr: getattr(usr, show_attr)
907 907
908 908 #maybe it's an ID ?
909 909 if str(id_).isdigit() or isinstance(id_, int):
910 910 id_ = int(id_)
911 911 user = User.get(id_)
912 912 if user is not None:
913 913 return person_getter(user)
914 914 return id_
915 915
916 916
917 917 def gravatar_with_user(author, show_disabled=False):
918 918 from rhodecode.lib.utils import PartialRenderer
919 919 _render = PartialRenderer('base/base.mako')
920 920 return _render('gravatar_with_user', author, show_disabled=show_disabled)
921 921
922 922
923 923 def desc_stylize(value):
924 924 """
925 925 converts tags from value into html equivalent
926 926
927 927 :param value:
928 928 """
929 929 if not value:
930 930 return ''
931 931
932 932 value = re.sub(r'\[see\ \=\>\ *([a-zA-Z0-9\/\=\?\&\ \:\/\.\-]*)\]',
933 933 '<div class="metatag" tag="see">see =&gt; \\1 </div>', value)
934 934 value = re.sub(r'\[license\ \=\>\ *([a-zA-Z0-9\/\=\?\&\ \:\/\.\-]*)\]',
935 935 '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/\\1">\\1</a></div>', value)
936 936 value = re.sub(r'\[(requires|recommends|conflicts|base)\ \=\>\ *([a-zA-Z0-9\-\/]*)\]',
937 937 '<div class="metatag" tag="\\1">\\1 =&gt; <a href="/\\2">\\2</a></div>', value)
938 938 value = re.sub(r'\[(lang|language)\ \=\>\ *([a-zA-Z\-\/\#\+]*)\]',
939 939 '<div class="metatag" tag="lang">\\2</div>', value)
940 940 value = re.sub(r'\[([a-z]+)\]',
941 941 '<div class="metatag" tag="\\1">\\1</div>', value)
942 942
943 943 return value
944 944
945 945
946 946 def escaped_stylize(value):
947 947 """
948 948 converts tags from value into html equivalent, but escaping its value first
949 949 """
950 950 if not value:
951 951 return ''
952 952
953 953 # Using default webhelper escape method, but has to force it as a
954 954 # plain unicode instead of a markup tag to be used in regex expressions
955 955 value = unicode(escape(safe_unicode(value)))
956 956
957 957 value = re.sub(r'\[see\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]',
958 958 '<div class="metatag" tag="see">see =&gt; \\1 </div>', value)
959 959 value = re.sub(r'\[license\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]',
960 960 '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/\\1">\\1</a></div>', value)
961 961 value = re.sub(r'\[(requires|recommends|conflicts|base)\ \=\&gt;\ *([a-zA-Z0-9\-\/]*)\]',
962 962 '<div class="metatag" tag="\\1">\\1 =&gt; <a href="/\\2">\\2</a></div>', value)
963 963 value = re.sub(r'\[(lang|language)\ \=\&gt;\ *([a-zA-Z\-\/\#\+]*)\]',
964 964 '<div class="metatag" tag="lang">\\2</div>', value)
965 965 value = re.sub(r'\[([a-z]+)\]',
966 966 '<div class="metatag" tag="\\1">\\1</div>', value)
967 967
968 968 return value
969 969
970 970
971 971 def bool2icon(value):
972 972 """
973 973 Returns boolean value of a given value, represented as html element with
974 974 classes that will represent icons
975 975
976 976 :param value: given value to convert to html node
977 977 """
978 978
979 979 if value: # does bool conversion
980 980 return HTML.tag('i', class_="icon-true")
981 981 else: # not true as bool
982 982 return HTML.tag('i', class_="icon-false")
983 983
984 984
985 985 #==============================================================================
986 986 # PERMS
987 987 #==============================================================================
988 988 from rhodecode.lib.auth import HasPermissionAny, HasPermissionAll, \
989 989 HasRepoPermissionAny, HasRepoPermissionAll, HasRepoGroupPermissionAll, \
990 990 HasRepoGroupPermissionAny, HasRepoPermissionAnyApi, get_csrf_token, \
991 991 csrf_token_key
992 992
993 993
994 994 #==============================================================================
995 995 # GRAVATAR URL
996 996 #==============================================================================
997 997 class InitialsGravatar(object):
998 998 def __init__(self, email_address, first_name, last_name, size=30,
999 999 background=None, text_color='#fff'):
1000 1000 self.size = size
1001 1001 self.first_name = first_name
1002 1002 self.last_name = last_name
1003 1003 self.email_address = email_address
1004 1004 self.background = background or self.str2color(email_address)
1005 1005 self.text_color = text_color
1006 1006
1007 1007 def get_color_bank(self):
1008 1008 """
1009 1009 returns a predefined list of colors that gravatars can use.
1010 1010 Those are randomized distinct colors that guarantee readability and
1011 1011 uniqueness.
1012 1012
1013 1013 generated with: http://phrogz.net/css/distinct-colors.html
1014 1014 """
1015 1015 return [
1016 1016 '#bf3030', '#a67f53', '#00ff00', '#5989b3', '#392040', '#d90000',
1017 1017 '#402910', '#204020', '#79baf2', '#a700b3', '#bf6060', '#7f5320',
1018 1018 '#008000', '#003059', '#ee00ff', '#ff0000', '#8c4b00', '#007300',
1019 1019 '#005fb3', '#de73e6', '#ff4040', '#ffaa00', '#3df255', '#203140',
1020 1020 '#47004d', '#591616', '#664400', '#59b365', '#0d2133', '#83008c',
1021 1021 '#592d2d', '#bf9f60', '#73e682', '#1d3f73', '#73006b', '#402020',
1022 1022 '#b2862d', '#397341', '#597db3', '#e600d6', '#a60000', '#736039',
1023 1023 '#00b318', '#79aaf2', '#330d30', '#ff8080', '#403010', '#16591f',
1024 1024 '#002459', '#8c4688', '#e50000', '#ffbf40', '#00732e', '#102340',
1025 1025 '#bf60ac', '#8c4646', '#cc8800', '#00a642', '#1d3473', '#b32d98',
1026 1026 '#660e00', '#ffd580', '#80ffb2', '#7391e6', '#733967', '#d97b6c',
1027 1027 '#8c5e00', '#59b389', '#3967e6', '#590047', '#73281d', '#665200',
1028 1028 '#00e67a', '#2d50b3', '#8c2377', '#734139', '#b2982d', '#16593a',
1029 1029 '#001859', '#ff00aa', '#a65e53', '#ffcc00', '#0d3321', '#2d3959',
1030 1030 '#731d56', '#401610', '#4c3d00', '#468c6c', '#002ca6', '#d936a3',
1031 1031 '#d94c36', '#403920', '#36d9a3', '#0d1733', '#592d4a', '#993626',
1032 1032 '#cca300', '#00734d', '#46598c', '#8c005e', '#7f1100', '#8c7000',
1033 1033 '#00a66f', '#7382e6', '#b32d74', '#d9896c', '#ffe680', '#1d7362',
1034 1034 '#364cd9', '#73003d', '#d93a00', '#998a4d', '#59b3a1', '#5965b3',
1035 1035 '#e5007a', '#73341d', '#665f00', '#00b38f', '#0018b3', '#59163a',
1036 1036 '#b2502d', '#bfb960', '#00ffcc', '#23318c', '#a6537f', '#734939',
1037 1037 '#b2a700', '#104036', '#3d3df2', '#402031', '#e56739', '#736f39',
1038 1038 '#79f2ea', '#000059', '#401029', '#4c1400', '#ffee00', '#005953',
1039 1039 '#101040', '#990052', '#402820', '#403d10', '#00ffee', '#0000d9',
1040 1040 '#ff80c4', '#a66953', '#eeff00', '#00ccbe', '#8080ff', '#e673a1',
1041 1041 '#a62c00', '#474d00', '#1a3331', '#46468c', '#733950', '#662900',
1042 1042 '#858c23', '#238c85', '#0f0073', '#b20047', '#d9986c', '#becc00',
1043 1043 '#396f73', '#281d73', '#ff0066', '#ff6600', '#dee673', '#59adb3',
1044 1044 '#6559b3', '#590024', '#b2622d', '#98b32d', '#36ced9', '#332d59',
1045 1045 '#40001a', '#733f1d', '#526600', '#005359', '#242040', '#bf6079',
1046 1046 '#735039', '#cef23d', '#007780', '#5630bf', '#66001b', '#b24700',
1047 1047 '#acbf60', '#1d6273', '#25008c', '#731d34', '#a67453', '#50592d',
1048 1048 '#00ccff', '#6600ff', '#ff0044', '#4c1f00', '#8a994d', '#79daf2',
1049 1049 '#a173e6', '#d93662', '#402310', '#aaff00', '#2d98b3', '#8c40ff',
1050 1050 '#592d39', '#ff8c40', '#354020', '#103640', '#1a0040', '#331a20',
1051 1051 '#331400', '#334d00', '#1d5673', '#583973', '#7f0022', '#4c3626',
1052 1052 '#88cc00', '#36a3d9', '#3d0073', '#d9364c', '#33241a', '#698c23',
1053 1053 '#5995b3', '#300059', '#e57382', '#7f3300', '#366600', '#00aaff',
1054 1054 '#3a1659', '#733941', '#663600', '#74b32d', '#003c59', '#7f53a6',
1055 1055 '#73000f', '#ff8800', '#baf279', '#79caf2', '#291040', '#a6293a',
1056 1056 '#b2742d', '#587339', '#0077b3', '#632699', '#400009', '#d9a66c',
1057 1057 '#294010', '#2d4a59', '#aa00ff', '#4c131b', '#b25f00', '#5ce600',
1058 1058 '#267399', '#a336d9', '#990014', '#664e33', '#86bf60', '#0088ff',
1059 1059 '#7700b3', '#593a16', '#073300', '#1d4b73', '#ac60bf', '#e59539',
1060 1060 '#4f8c46', '#368dd9', '#5c0073'
1061 1061 ]
1062 1062
1063 1063 def rgb_to_hex_color(self, rgb_tuple):
1064 1064 """
1065 1065 Converts an rgb_tuple passed to an hex color.
1066 1066
1067 1067 :param rgb_tuple: tuple with 3 ints represents rgb color space
1068 1068 """
1069 1069 return '#' + ("".join(map(chr, rgb_tuple)).encode('hex'))
1070 1070
1071 1071 def email_to_int_list(self, email_str):
1072 1072 """
1073 1073 Get every byte of the hex digest value of email and turn it to integer.
1074 1074 It's going to be always between 0-255
1075 1075 """
1076 1076 digest = md5_safe(email_str.lower())
1077 1077 return [int(digest[i * 2:i * 2 + 2], 16) for i in range(16)]
1078 1078
1079 1079 def pick_color_bank_index(self, email_str, color_bank):
1080 1080 return self.email_to_int_list(email_str)[0] % len(color_bank)
1081 1081
1082 1082 def str2color(self, email_str):
1083 1083 """
1084 1084 Tries to map in a stable algorithm an email to color
1085 1085
1086 1086 :param email_str:
1087 1087 """
1088 1088 color_bank = self.get_color_bank()
1089 1089 # pick position (modulo its length so we always find it in the
1090 1090 # bank even if it's smaller than 256 values
1091 1091 pos = self.pick_color_bank_index(email_str, color_bank)
1092 1092 return color_bank[pos]
1093 1093
1094 1094 def normalize_email(self, email_address):
1095 1095 import unicodedata
1096 1096 # default host used to fill in the fake/missing email
1097 1097 default_host = u'localhost'
1098 1098
1099 1099 if not email_address:
1100 1100 email_address = u'%s@%s' % (User.DEFAULT_USER, default_host)
1101 1101
1102 1102 email_address = safe_unicode(email_address)
1103 1103
1104 1104 if u'@' not in email_address:
1105 1105 email_address = u'%s@%s' % (email_address, default_host)
1106 1106
1107 1107 if email_address.endswith(u'@'):
1108 1108 email_address = u'%s%s' % (email_address, default_host)
1109 1109
1110 1110 email_address = unicodedata.normalize('NFKD', email_address)\
1111 1111 .encode('ascii', 'ignore')
1112 1112 return email_address
1113 1113
1114 1114 def get_initials(self):
1115 1115 """
1116 1116 Returns 2 letter initials calculated based on the input.
1117 1117 The algorithm picks first given email address, and takes first letter
1118 1118 of part before @, and then the first letter of server name. In case
1119 1119 the part before @ is in a format of `somestring.somestring2` it replaces
1120 1120 the server letter with first letter of somestring2
1121 1121
1122 1122 In case function was initialized with both first and lastname, this
1123 1123 overrides the extraction from email by first letter of the first and
1124 1124 last name. We add special logic to that functionality. In case the full name
1125 1125 is compound, like Guido Von Rossum, we use last part of the last name
1126 1126 (Von Rossum) picking `R`.
1127 1127
1128 1128 Function also normalizes the non-ascii characters to their ascii
1129 1129 representation, eg Ą => A
1130 1130 """
1131 1131 import unicodedata
1132 1132 # replace non-ascii to ascii
1133 1133 first_name = unicodedata.normalize(
1134 1134 'NFKD', safe_unicode(self.first_name)).encode('ascii', 'ignore')
1135 1135 last_name = unicodedata.normalize(
1136 1136 'NFKD', safe_unicode(self.last_name)).encode('ascii', 'ignore')
1137 1137
1138 1138 # do NFKD encoding, and also make sure email has proper format
1139 1139 email_address = self.normalize_email(self.email_address)
1140 1140
1141 1141 # first push the email initials
1142 1142 prefix, server = email_address.split('@', 1)
1143 1143
1144 1144 # check if prefix is maybe a 'firstname.lastname' syntax
1145 1145 _dot_split = prefix.rsplit('.', 1)
1146 1146 if len(_dot_split) == 2:
1147 1147 initials = [_dot_split[0][0], _dot_split[1][0]]
1148 1148 else:
1149 1149 initials = [prefix[0], server[0]]
1150 1150
1151 1151 # then try to replace either firstname or lastname
1152 1152 fn_letter = (first_name or " ")[0].strip()
1153 1153 ln_letter = (last_name.split(' ', 1)[-1] or " ")[0].strip()
1154 1154
1155 1155 if fn_letter:
1156 1156 initials[0] = fn_letter
1157 1157
1158 1158 if ln_letter:
1159 1159 initials[1] = ln_letter
1160 1160
1161 1161 return ''.join(initials).upper()
1162 1162
1163 1163 def get_img_data_by_type(self, font_family, img_type):
1164 1164 default_user = """
1165 1165 <svg xmlns="http://www.w3.org/2000/svg"
1166 1166 version="1.1" x="0px" y="0px" width="{size}" height="{size}"
1167 1167 viewBox="-15 -10 439.165 429.164"
1168 1168
1169 1169 xml:space="preserve"
1170 1170 style="background:{background};" >
1171 1171
1172 1172 <path d="M204.583,216.671c50.664,0,91.74-48.075,
1173 1173 91.74-107.378c0-82.237-41.074-107.377-91.74-107.377
1174 1174 c-50.668,0-91.74,25.14-91.74,107.377C112.844,
1175 1175 168.596,153.916,216.671,
1176 1176 204.583,216.671z" fill="{text_color}"/>
1177 1177 <path d="M407.164,374.717L360.88,
1178 1178 270.454c-2.117-4.771-5.836-8.728-10.465-11.138l-71.83-37.392
1179 1179 c-1.584-0.823-3.502-0.663-4.926,0.415c-20.316,
1180 1180 15.366-44.203,23.488-69.076,23.488c-24.877,
1181 1181 0-48.762-8.122-69.078-23.488
1182 1182 c-1.428-1.078-3.346-1.238-4.93-0.415L58.75,
1183 1183 259.316c-4.631,2.41-8.346,6.365-10.465,11.138L2.001,374.717
1184 1184 c-3.191,7.188-2.537,15.412,1.75,22.005c4.285,
1185 1185 6.592,11.537,10.526,19.4,10.526h362.861c7.863,0,15.117-3.936,
1186 1186 19.402-10.527 C409.699,390.129,
1187 1187 410.355,381.902,407.164,374.717z" fill="{text_color}"/>
1188 1188 </svg>""".format(
1189 1189 size=self.size,
1190 1190 background='#979797', # @grey4
1191 1191 text_color=self.text_color,
1192 1192 font_family=font_family)
1193 1193
1194 1194 return {
1195 1195 "default_user": default_user
1196 1196 }[img_type]
1197 1197
1198 1198 def get_img_data(self, svg_type=None):
1199 1199 """
1200 1200 generates the svg metadata for image
1201 1201 """
1202 1202
1203 1203 font_family = ','.join([
1204 1204 'proximanovaregular',
1205 1205 'Proxima Nova Regular',
1206 1206 'Proxima Nova',
1207 1207 'Arial',
1208 1208 'Lucida Grande',
1209 1209 'sans-serif'
1210 1210 ])
1211 1211 if svg_type:
1212 1212 return self.get_img_data_by_type(font_family, svg_type)
1213 1213
1214 1214 initials = self.get_initials()
1215 1215 img_data = """
1216 1216 <svg xmlns="http://www.w3.org/2000/svg" pointer-events="none"
1217 1217 width="{size}" height="{size}"
1218 1218 style="width: 100%; height: 100%; background-color: {background}"
1219 1219 viewBox="0 0 {size} {size}">
1220 1220 <text text-anchor="middle" y="50%" x="50%" dy="0.35em"
1221 1221 pointer-events="auto" fill="{text_color}"
1222 1222 font-family="{font_family}"
1223 1223 style="font-weight: 400; font-size: {f_size}px;">{text}
1224 1224 </text>
1225 1225 </svg>""".format(
1226 1226 size=self.size,
1227 1227 f_size=self.size/1.85, # scale the text inside the box nicely
1228 1228 background=self.background,
1229 1229 text_color=self.text_color,
1230 1230 text=initials.upper(),
1231 1231 font_family=font_family)
1232 1232
1233 1233 return img_data
1234 1234
1235 1235 def generate_svg(self, svg_type=None):
1236 1236 img_data = self.get_img_data(svg_type)
1237 1237 return "data:image/svg+xml;base64,%s" % img_data.encode('base64')
1238 1238
1239 1239
1240 1240 def initials_gravatar(email_address, first_name, last_name, size=30):
1241 1241 svg_type = None
1242 1242 if email_address == User.DEFAULT_USER_EMAIL:
1243 1243 svg_type = 'default_user'
1244 1244 klass = InitialsGravatar(email_address, first_name, last_name, size)
1245 1245 return klass.generate_svg(svg_type=svg_type)
1246 1246
1247 1247
1248 1248 def gravatar_url(email_address, size=30):
1249 1249 # doh, we need to re-import those to mock it later
1250 1250 from pylons import tmpl_context as c
1251 1251
1252 1252 _use_gravatar = c.visual.use_gravatar
1253 1253 _gravatar_url = c.visual.gravatar_url or User.DEFAULT_GRAVATAR_URL
1254 1254
1255 1255 email_address = email_address or User.DEFAULT_USER_EMAIL
1256 1256 if isinstance(email_address, unicode):
1257 1257 # hashlib crashes on unicode items
1258 1258 email_address = safe_str(email_address)
1259 1259
1260 1260 # empty email or default user
1261 1261 if not email_address or email_address == User.DEFAULT_USER_EMAIL:
1262 1262 return initials_gravatar(User.DEFAULT_USER_EMAIL, '', '', size=size)
1263 1263
1264 1264 if _use_gravatar:
1265 1265 # TODO: Disuse pyramid thread locals. Think about another solution to
1266 1266 # get the host and scheme here.
1267 1267 request = get_current_request()
1268 1268 tmpl = safe_str(_gravatar_url)
1269 1269 tmpl = tmpl.replace('{email}', email_address)\
1270 1270 .replace('{md5email}', md5_safe(email_address.lower())) \
1271 1271 .replace('{netloc}', request.host)\
1272 1272 .replace('{scheme}', request.scheme)\
1273 1273 .replace('{size}', safe_str(size))
1274 1274 return tmpl
1275 1275 else:
1276 1276 return initials_gravatar(email_address, '', '', size=size)
1277 1277
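# A rough sketch of how a custom gravatar template expands (hostname is
# illustrative only). With use_gravatar enabled and c.visual.gravatar_url set
# to 'https://avatars.example.com/{md5email}?s={size}', calling
#
#   gravatar_url('john@example.com', size=48)
#
# yields 'https://avatars.example.com/<md5 of the lowercased email>?s=48';
# {email}, {netloc} and {scheme} are filled from the address and the current
# request in the same way by the .replace() chain above.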
1278 1278
1279 1279 class Page(_Page):
1280 1280 """
1281 1281 Custom pager to match rendering style with paginator
1282 1282 """
1283 1283
1284 1284 def _get_pos(self, cur_page, max_page, items):
1285 1285 edge = (items / 2) + 1
1286 1286 if (cur_page <= edge):
1287 1287 radius = max(items / 2, items - cur_page)
1288 1288 elif (max_page - cur_page) < edge:
1289 1289 radius = (items - 1) - (max_page - cur_page)
1290 1290 else:
1291 1291 radius = items / 2
1292 1292
1293 1293 left = max(1, (cur_page - (radius)))
1294 1294 right = min(max_page, cur_page + (radius))
1295 1295 return left, cur_page, right
1296 1296
1297 1297 def _range(self, regexp_match):
1298 1298 """
1299 1299 Return range of linked pages (e.g. '1 2 [3] 4 5 6 7 8').
1300 1300
1301 1301 Arguments:
1302 1302
1303 1303 regexp_match
1304 1304 A "re" (regular expressions) match object containing the
1305 1305 radius of linked pages around the current page in
1306 1306 regexp_match.group(1) as a string
1307 1307
1308 1308 This function is supposed to be called as a callable in
1309 1309 re.sub.
1310 1310
1311 1311 """
1312 1312 radius = int(regexp_match.group(1))
1313 1313
1314 1314 # Compute the first and last page number within the radius
1315 1315 # e.g. '1 .. 5 6 [7] 8 9 .. 12'
1316 1316 # -> leftmost_page = 5
1317 1317 # -> rightmost_page = 9
1318 1318 leftmost_page, _cur, rightmost_page = self._get_pos(self.page,
1319 1319 self.last_page,
1320 1320 (radius * 2) + 1)
1321 1321 nav_items = []
1322 1322
1323 1323 # Create a link to the first page (unless we are on the first page
1324 1324 # or there would be no need to insert '..' spacers)
1325 1325 if self.page != self.first_page and self.first_page < leftmost_page:
1326 1326 nav_items.append(self._pagerlink(self.first_page, self.first_page))
1327 1327
1328 1328 # Insert dots if there are pages between the first page
1329 1329 # and the currently displayed page range
1330 1330 if leftmost_page - self.first_page > 1:
1331 1331 # Wrap in a SPAN tag if dotdot_attr is set
1332 1332 text = '..'
1333 1333 if self.dotdot_attr:
1334 1334 text = HTML.span(c=text, **self.dotdot_attr)
1335 1335 nav_items.append(text)
1336 1336
1337 1337 for thispage in xrange(leftmost_page, rightmost_page + 1):
1338 1338 # Highlight the current page number and do not use a link
1339 1339 if thispage == self.page:
1340 1340 text = '%s' % (thispage,)
1341 1341 # Wrap in a SPAN tag if curpage_attr is set
1342 1342 if self.curpage_attr:
1343 1343 text = HTML.span(c=text, **self.curpage_attr)
1344 1344 nav_items.append(text)
1345 1345 # Otherwise create just a link to that page
1346 1346 else:
1347 1347 text = '%s' % (thispage,)
1348 1348 nav_items.append(self._pagerlink(thispage, text))
1349 1349
1350 1350 # Insert dots if there are pages between the displayed
1351 1351 # page numbers and the end of the page range
1352 1352 if self.last_page - rightmost_page > 1:
1353 1353 text = '..'
1354 1354 # Wrap in a SPAN tag if dotdot_attr is set
1355 1355 if self.dotdot_attr:
1356 1356 text = HTML.span(c=text, **self.dotdot_attr)
1357 1357 nav_items.append(text)
1358 1358
1359 1359 # Create a link to the very last page (unless we are on the last
1360 1360 # page or there would be no need to insert '..' spacers)
1361 1361 if self.page != self.last_page and rightmost_page < self.last_page:
1362 1362 nav_items.append(self._pagerlink(self.last_page, self.last_page))
1363 1363
1364 1364 ## prerender links
1365 1365 #_page_link = url.current()
1366 1366 #nav_items.append(literal('<link rel="prerender" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
1367 1367 #nav_items.append(literal('<link rel="prefetch" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
1368 1368 return self.separator.join(nav_items)
1369 1369
1370 1370 def pager(self, format='~2~', page_param='page', partial_param='partial',
1371 1371 show_if_single_page=False, separator=' ', onclick=None,
1372 1372 symbol_first='<<', symbol_last='>>',
1373 1373 symbol_previous='<', symbol_next='>',
1374 1374 link_attr={'class': 'pager_link', 'rel': 'prerender'},
1375 1375 curpage_attr={'class': 'pager_curpage'},
1376 1376 dotdot_attr={'class': 'pager_dotdot'}, **kwargs):
1377 1377
1378 1378 self.curpage_attr = curpage_attr
1379 1379 self.separator = separator
1380 1380 self.pager_kwargs = kwargs
1381 1381 self.page_param = page_param
1382 1382 self.partial_param = partial_param
1383 1383 self.onclick = onclick
1384 1384 self.link_attr = link_attr
1385 1385 self.dotdot_attr = dotdot_attr
1386 1386
1387 1387 # Don't show navigator if there is no more than one page
1388 1388 if self.page_count == 0 or (self.page_count == 1 and not show_if_single_page):
1389 1389 return ''
1390 1390
1391 1391 from string import Template
1392 1392 # Replace ~...~ in token format by range of pages
1393 1393 result = re.sub(r'~(\d+)~', self._range, format)
1394 1394
1395 1395 # Interpolate '%' variables
1396 1396 result = Template(result).safe_substitute({
1397 1397 'first_page': self.first_page,
1398 1398 'last_page': self.last_page,
1399 1399 'page': self.page,
1400 1400 'page_count': self.page_count,
1401 1401 'items_per_page': self.items_per_page,
1402 1402 'first_item': self.first_item,
1403 1403 'last_item': self.last_item,
1404 1404 'item_count': self.item_count,
1405 1405 'link_first': self.page > self.first_page and \
1406 1406 self._pagerlink(self.first_page, symbol_first) or '',
1407 1407 'link_last': self.page < self.last_page and \
1408 1408 self._pagerlink(self.last_page, symbol_last) or '',
1409 1409 'link_previous': self.previous_page and \
1410 1410 self._pagerlink(self.previous_page, symbol_previous) \
1411 1411 or HTML.span(symbol_previous, class_="pg-previous disabled"),
1412 1412 'link_next': self.next_page and \
1413 1413 self._pagerlink(self.next_page, symbol_next) \
1414 1414 or HTML.span(symbol_next, class_="pg-next disabled")
1415 1415 })
1416 1416
1417 1417 return literal(result)
1418 1418
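# A rough usage sketch, assuming the usual webhelpers Page constructor
# (collection, page, items_per_page):
#
#   page = Page(range(200), page=7, items_per_page=10)
#   page.pager('$link_previous ~2~ $link_next')
#
# The ~2~ token is expanded by _range() into the window '5 6 [7] 8 9' with
# first/last page links and '..' spacers on both sides, while $link_previous
# and $link_next are substituted by the Template.safe_substitute() call above.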
1419 1419
1420 1420 #==============================================================================
1421 1421 # REPO PAGER, PAGER FOR REPOSITORY
1422 1422 #==============================================================================
1423 1423 class RepoPage(Page):
1424 1424
1425 1425 def __init__(self, collection, page=1, items_per_page=20,
1426 1426 item_count=None, url=None, **kwargs):
1427 1427
1428 1428 """Create a "RepoPage" instance. special pager for paging
1429 1429 repository
1430 1430 """
1431 1431 self._url_generator = url
1432 1432
1433 1433 # Save the kwargs class-wide so they can be used in the pager() method
1434 1434 self.kwargs = kwargs
1435 1435
1436 1436 # Save a reference to the collection
1437 1437 self.original_collection = collection
1438 1438
1439 1439 self.collection = collection
1440 1440
1441 1441 # The self.page is the number of the current page.
1442 1442 # The first page has the number 1!
1443 1443 try:
1444 1444 self.page = int(page) # make it int() if we get it as a string
1445 1445 except (ValueError, TypeError):
1446 1446 self.page = 1
1447 1447
1448 1448 self.items_per_page = items_per_page
1449 1449
1450 1450 # Unless the user tells us how many items the collection has
1451 1451 # we calculate that ourselves.
1452 1452 if item_count is not None:
1453 1453 self.item_count = item_count
1454 1454 else:
1455 1455 self.item_count = len(self.collection)
1456 1456
1457 1457 # Compute the number of the first and last available page
1458 1458 if self.item_count > 0:
1459 1459 self.first_page = 1
1460 1460 self.page_count = int(math.ceil(float(self.item_count) /
1461 1461 self.items_per_page))
1462 1462 self.last_page = self.first_page + self.page_count - 1
1463 1463
1464 1464 # Make sure that the requested page number is in the range of
1465 1465 # valid pages
1466 1466 if self.page > self.last_page:
1467 1467 self.page = self.last_page
1468 1468 elif self.page < self.first_page:
1469 1469 self.page = self.first_page
1470 1470
1471 1471 # Note: the number of items on this page can be less than
1472 1472 # items_per_page if the last page is not full
1473 1473 self.first_item = max(0, (self.item_count) - (self.page *
1474 1474 items_per_page))
1475 1475 self.last_item = ((self.item_count - 1) - items_per_page *
1476 1476 (self.page - 1))
1477 1477
1478 1478 self.items = list(self.collection[self.first_item:self.last_item + 1])
1479 1479
1480 1480 # Links to previous and next page
1481 1481 if self.page > self.first_page:
1482 1482 self.previous_page = self.page - 1
1483 1483 else:
1484 1484 self.previous_page = None
1485 1485
1486 1486 if self.page < self.last_page:
1487 1487 self.next_page = self.page + 1
1488 1488 else:
1489 1489 self.next_page = None
1490 1490
1491 1491 # No items available
1492 1492 else:
1493 1493 self.first_page = None
1494 1494 self.page_count = 0
1495 1495 self.last_page = None
1496 1496 self.first_item = None
1497 1497 self.last_item = None
1498 1498 self.previous_page = None
1499 1499 self.next_page = None
1500 1500 self.items = []
1501 1501
1502 1502 # This is a subclass of the 'list' type. Initialise the list now.
1503 1503 list.__init__(self, reversed(self.items))
1504 1504
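# A worked example of the reversed slicing above, for a collection of
# 95 commits with items_per_page=20:
#
#   page=1 -> first_item = max(0, 95 - 1*20) = 75, last_item = 94
#             items = collection[75:95], reversed  (last 20 entries of the collection)
#   page=5 -> first_item = max(0, 95 - 5*20) = 0,  last_item = 94 - 20*4 = 14
#             items = collection[0:15], reversed   (first 15 entries of the collection)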
1505 1505
1506 1506 def changed_tooltip(nodes):
1507 1507 """
1508 1508 Generates a html string for changed nodes in commit page.
1509 1509 It limits the output to 30 entries
1510 1510
1511 1511 :param nodes: LazyNodesGenerator
1512 1512 """
1513 1513 if nodes:
1514 1514 pref = ': <br/> '
1515 1515 suf = ''
1516 1516 if len(nodes) > 30:
1517 1517 suf = '<br/>' + _(' and %s more') % (len(nodes) - 30)
1518 1518 return literal(pref + '<br/> '.join([safe_unicode(x.path)
1519 1519 for x in nodes[:30]]) + suf)
1520 1520 else:
1521 1521 return ': ' + _('No Files')
1522 1522
1523 1523
1524 1524 def breadcrumb_repo_link(repo):
1525 1525 """
1526 1526 Makes a breadcrumb path link to a repo
1527 1527
1528 1528 ex::
1529 1529 group >> subgroup >> repo
1530 1530
1531 1531 :param repo: a Repository instance
1532 1532 """
1533 1533
1534 1534 path = [
1535 1535 link_to(group.name, url('repo_group_home', group_name=group.group_name))
1536 1536 for group in repo.groups_with_parents
1537 1537 ] + [
1538 1538 link_to(repo.just_name, url('summary_home', repo_name=repo.repo_name))
1539 1539 ]
1540 1540
1541 1541 return literal(' &raquo; '.join(path))
1542 1542
1543 1543
1544 1544 def format_byte_size_binary(file_size):
1545 1545 """
1546 1546 Formats file/folder sizes to standard.
1547 1547 """
1548 1548 formatted_size = format_byte_size(file_size, binary=True)
1549 1549 return formatted_size
1550 1550
1551 1551
1552 1552 def fancy_file_stats(stats):
1553 1553 """
1554 1554 Displays a fancy two-colored bar showing the number of added/deleted
1555 1555 lines of code in a file
1556 1556
1557 1557 :param stats: two element list of added/deleted lines of code
1558 1558 """
1559 1559 from rhodecode.lib.diffs import NEW_FILENODE, DEL_FILENODE, \
1560 1560 MOD_FILENODE, RENAMED_FILENODE, CHMOD_FILENODE, BIN_FILENODE
1561 1561
1562 1562 def cgen(l_type, a_v, d_v):
1563 1563 mapping = {'tr': 'top-right-rounded-corner-mid',
1564 1564 'tl': 'top-left-rounded-corner-mid',
1565 1565 'br': 'bottom-right-rounded-corner-mid',
1566 1566 'bl': 'bottom-left-rounded-corner-mid'}
1567 1567 map_getter = lambda x: mapping[x]
1568 1568
1569 1569 if l_type == 'a' and d_v:
1570 1570 # case when added and deleted are present
1571 1571 return ' '.join(map(map_getter, ['tl', 'bl']))
1572 1572
1573 1573 if l_type == 'a' and not d_v:
1574 1574 return ' '.join(map(map_getter, ['tr', 'br', 'tl', 'bl']))
1575 1575
1576 1576 if l_type == 'd' and a_v:
1577 1577 return ' '.join(map(map_getter, ['tr', 'br']))
1578 1578
1579 1579 if l_type == 'd' and not a_v:
1580 1580 return ' '.join(map(map_getter, ['tr', 'br', 'tl', 'bl']))
1581 1581
1582 1582 a, d = stats['added'], stats['deleted']
1583 1583 width = 100
1584 1584
1585 1585 if stats['binary']: # binary operations like chmod/rename etc
1586 1586 lbl = []
1587 1587 bin_op = 0 # undefined
1588 1588
1589 1589 # prefix with bin for binary files
1590 1590 if BIN_FILENODE in stats['ops']:
1591 1591 lbl += ['bin']
1592 1592
1593 1593 if NEW_FILENODE in stats['ops']:
1594 1594 lbl += [_('new file')]
1595 1595 bin_op = NEW_FILENODE
1596 1596 elif MOD_FILENODE in stats['ops']:
1597 1597 lbl += [_('mod')]
1598 1598 bin_op = MOD_FILENODE
1599 1599 elif DEL_FILENODE in stats['ops']:
1600 1600 lbl += [_('del')]
1601 1601 bin_op = DEL_FILENODE
1602 1602 elif RENAMED_FILENODE in stats['ops']:
1603 1603 lbl += [_('rename')]
1604 1604 bin_op = RENAMED_FILENODE
1605 1605
1606 1606 # chmod can go with other operations, so we add a + to lbl if needed
1607 1607 if CHMOD_FILENODE in stats['ops']:
1608 1608 lbl += [_('chmod')]
1609 1609 if bin_op == 0:
1610 1610 bin_op = CHMOD_FILENODE
1611 1611
1612 1612 lbl = '+'.join(lbl)
1613 1613 b_a = '<div class="bin bin%s %s" style="width:100%%">%s</div>' \
1614 1614 % (bin_op, cgen('a', a_v='', d_v=0), lbl)
1615 1615 b_d = '<div class="bin bin1" style="width:0%%"></div>'
1616 1616 return literal('<div style="width:%spx">%s%s</div>' % (width, b_a, b_d))
1617 1617
1618 1618 t = stats['added'] + stats['deleted']
1619 1619 unit = float(width) / (t or 1)
1620 1620
1621 1621 # needs > 9% of width to be visible or 0 to be hidden
1622 1622 a_p = max(9, unit * a) if a > 0 else 0
1623 1623 d_p = max(9, unit * d) if d > 0 else 0
1624 1624 p_sum = a_p + d_p
1625 1625
1626 1626 if p_sum > width:
1627 1627 # adjust the percentages to sum to 100% since we clamped small values to 9
1628 1628 if a_p > d_p:
1629 1629 a_p = a_p - (p_sum - width)
1630 1630 else:
1631 1631 d_p = d_p - (p_sum - width)
1632 1632
1633 1633 a_v = a if a > 0 else ''
1634 1634 d_v = d if d > 0 else ''
1635 1635
1636 1636 d_a = '<div class="added %s" style="width:%s%%">%s</div>' % (
1637 1637 cgen('a', a_v, d_v), a_p, a_v
1638 1638 )
1639 1639 d_d = '<div class="deleted %s" style="width:%s%%">%s</div>' % (
1640 1640 cgen('d', a_v, d_v), d_p, d_v
1641 1641 )
1642 1642 return literal('<div style="width:%spx">%s%s</div>' % (width, d_a, d_d))
1643 1643
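# A worked example of the width math above, for a file with
# stats = {'added': 1, 'deleted': 99, 'binary': False, 'ops': {}}:
#
#   unit  = 100.0 / 100 = 1.0
#   a_p   = max(9, 1.0 * 1)  = 9    (clamped so tiny changes stay visible)
#   d_p   = max(9, 1.0 * 99) = 99
#   p_sum = 108 > 100, d_p is the larger value, so d_p = 99 - 8 = 91
#
# giving a 9%-wide "added" div and a 91%-wide "deleted" div inside the
# fixed 100px container.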
1644 1644
1645 1645 def urlify_text(text_, safe=True):
1646 1646 """
1647 1647 Extract urls from text and make html links out of them
1648 1648
1649 1649 :param text_:
1650 1650 """
1651 1651
1652 1652 url_pat = re.compile(r'''(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@#.&+]'''
1653 1653 '''|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)''')
1654 1654
1655 1655 def url_func(match_obj):
1656 1656 url_full = match_obj.groups()[0]
1657 1657 return '<a href="%(url)s">%(url)s</a>' % ({'url': url_full})
1658 1658 _newtext = url_pat.sub(url_func, text_)
1659 1659 if safe:
1660 1660 return literal(_newtext)
1661 1661 return _newtext
1662 1662
1663 1663
1664 1664 def urlify_commits(text_, repository):
1665 1665 """
1666 1666 Extract commit ids from text and make links from them
1667 1667
1668 1668 :param text_:
1669 1669 :param repository: repo name to build the URL with
1670 1670 """
1671 1671 from pylons import url # doh, we need to re-import url to mock it later
1672 1672 URL_PAT = re.compile(r'(^|\s)([0-9a-fA-F]{12,40})($|\s)')
1673 1673
1674 1674 def url_func(match_obj):
1675 1675 commit_id = match_obj.groups()[1]
1676 1676 pref = match_obj.groups()[0]
1677 1677 suf = match_obj.groups()[2]
1678 1678
1679 1679 tmpl = (
1680 1680 '%(pref)s<a class="%(cls)s" href="%(url)s">'
1681 1681 '%(commit_id)s</a>%(suf)s'
1682 1682 )
1683 1683 return tmpl % {
1684 1684 'pref': pref,
1685 1685 'cls': 'revision-link',
1686 1686 'url': url('changeset_home', repo_name=repository,
1687 1687 revision=commit_id, qualified=True),
1688 1688 'commit_id': commit_id,
1689 1689 'suf': suf
1690 1690 }
1691 1691
1692 1692 newtext = URL_PAT.sub(url_func, text_)
1693 1693
1694 1694 return newtext
1695 1695
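# A rough sketch of the substitution above (repo name is illustrative only):
#
#   urlify_commits('see 68ed34923b68 for details', 'my-repo')
#
# wraps the 12+ character hex id into
#   'see <a class="revision-link" href="<changeset_home url>">68ed34923b68</a> for details'
# while shorter hex-looking words are left untouched by the {12,40} pattern.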
1696 1696
1697 1697 def _process_url_func(match_obj, repo_name, uid, entry,
1698 1698 return_raw_data=False):
1699 1699 pref = ''
1700 1700 if match_obj.group().startswith(' '):
1701 1701 pref = ' '
1702 1702
1703 1703 issue_id = ''.join(match_obj.groups())
1704 1704 tmpl = (
1705 1705 '%(pref)s<a class="%(cls)s" href="%(url)s">'
1706 1706 '%(issue-prefix)s%(id-repr)s'
1707 1707 '</a>')
1708 1708
1709 1709 (repo_name_cleaned,
1710 1710 parent_group_name) = RepoGroupModel().\
1711 1711 _get_group_name_and_parent(repo_name)
1712 1712
1713 1713 # variables replacement
1714 1714 named_vars = {
1715 1715 'id': issue_id,
1716 1716 'repo': repo_name,
1717 1717 'repo_name': repo_name_cleaned,
1718 1718 'group_name': parent_group_name
1719 1719 }
1720 1720 # named regex variables
1721 1721 named_vars.update(match_obj.groupdict())
1722 1722 _url = string.Template(entry['url']).safe_substitute(**named_vars)
1723 1723
1724 1724 data = {
1725 1725 'pref': pref,
1726 1726 'cls': 'issue-tracker-link',
1727 1727 'url': _url,
1728 1728 'id-repr': issue_id,
1729 1729 'issue-prefix': entry['pref'],
1730 1730 'serv': entry['url'],
1731 1731 }
1732 1732 if return_raw_data:
1733 1733 return {
1734 1734 'id': issue_id,
1735 1735 'url': _url
1736 1736 }
1737 1737 return tmpl % data
1738 1738
1739 1739
1740 1740 def process_patterns(text_string, repo_name, config=None):
1741 1741 repo = None
1742 1742 if repo_name:
1743 1743 # Retrieve the repo to keep an invalid repo_name from exploding on
1744 1744 # IssueTrackerSettingsModel, while still passing the invalid name further down
1745 1745 repo = Repository.get_by_repo_name(repo_name, cache=True)
1746 1746
1747 1747 settings_model = IssueTrackerSettingsModel(repo=repo)
1748 1748 active_entries = settings_model.get_settings(cache=True)
1749 1749
1750 1750 issues_data = []
1751 1751 newtext = text_string
1752 1752 for uid, entry in active_entries.items():
1753 1753 log.debug('found issue tracker entry with uid %s' % (uid,))
1754 1754
1755 1755 if not (entry['pat'] and entry['url']):
1756 1756 log.debug('skipping due to missing data')
1757 1757 continue
1758 1758
1759 1759 log.debug('issue tracker entry: uid: `%s` PAT:%s URL:%s PREFIX:%s'
1760 1760 % (uid, entry['pat'], entry['url'], entry['pref']))
1761 1761
1762 1762 try:
1763 1763 pattern = re.compile(r'%s' % entry['pat'])
1764 1764 except re.error:
1765 1765 log.exception(
1766 1766 'issue tracker pattern: `%s` failed to compile',
1767 1767 entry['pat'])
1768 1768 continue
1769 1769
1770 1770 data_func = partial(
1771 1771 _process_url_func, repo_name=repo_name, entry=entry, uid=uid,
1772 1772 return_raw_data=True)
1773 1773
1774 1774 for match_obj in pattern.finditer(text_string):
1775 1775 issues_data.append(data_func(match_obj))
1776 1776
1777 1777 url_func = partial(
1778 1778 _process_url_func, repo_name=repo_name, entry=entry, uid=uid)
1779 1779
1780 1780 newtext = pattern.sub(url_func, newtext)
1781 1781 log.debug('processed prefix:uid `%s`' % (uid,))
1782 1782
1783 1783 return newtext, issues_data
1784 1784
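# A rough configuration sketch (tracker URL and repo name are illustrative
# only). With an active issue tracker entry roughly like
#
#   {'pat': r'#(\d+)',
#    'url': 'https://issues.example.com/${repo_name}/issue/${id}',
#    'pref': '#'}
#
# process_patterns('fixes #42', 'my-repo') would return the text with '#42'
# rewritten into an <a class="issue-tracker-link"> pointing at
# 'https://issues.example.com/my-repo/issue/42', together with
# [{'id': '42', 'url': 'https://issues.example.com/my-repo/issue/42'}]
# as the extracted issues_data.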
1785 1785
1786 1786 def urlify_commit_message(commit_text, repository=None):
1787 1787 """
1788 1788 Parses given text message and makes proper links.
1789 1789 Issues are linked to the configured issue tracker, the rest become commit links
1790 1790
1791 1791 :param commit_text:
1792 1792 :param repository:
1793 1793 """
1794 1794 from pylons import url # doh, we need to re-import url to mock it later
1795 1795
1796 1796 def escaper(string):
1797 1797 return string.replace('<', '&lt;').replace('>', '&gt;')
1798 1798
1799 1799 newtext = escaper(commit_text)
1800 1800
1801 1801 # extract http/https links and make them real urls
1802 1802 newtext = urlify_text(newtext, safe=False)
1803 1803
1804 1804 # urlify commits - extract commit ids and make links out of them, if we have
1805 1805 # the repository scope available.
1806 1806 if repository:
1807 1807 newtext = urlify_commits(newtext, repository)
1808 1808
1809 1809 # process issue tracker patterns
1810 1810 newtext, issues = process_patterns(newtext, repository or '')
1811 1811
1812 1812 return literal(newtext)
1813 1813
1814 1814
1815 1815 def renderer_from_filename(filename, exclude=None):
1816 1816 """
1817 1817 choose a renderer based on filename
1818 1818 """
1819 1819
1820 1820 # ipython
1821 1821 for ext in ['*.ipynb']:
1822 1822 if fnmatch.fnmatch(filename, pat=ext):
1823 1823 return 'jupyter'
1824 1824
1825 1825 is_markup = MarkupRenderer.renderer_from_filename(filename, exclude=exclude)
1826 1826 if is_markup:
1827 1827 return is_markup
1828 1828 return None
1829 1829
1830 1830
1831 def render(source, renderer='rst', mentions=False):
1831 def render(source, renderer='rst', mentions=False, relative_url=None):
1832
1833 def maybe_convert_relative_links(html_source):
1834 if relative_url:
1835 return relative_links(html_source, relative_url)
1836 return html_source
1837
1832 1838 if renderer == 'rst':
1833 1839 return literal(
1834 1840 '<div class="rst-block">%s</div>' %
1835 MarkupRenderer.rst(source, mentions=mentions))
1841 maybe_convert_relative_links(
1842 MarkupRenderer.rst(source, mentions=mentions)))
1836 1843 elif renderer == 'markdown':
1837 1844 return literal(
1838 1845 '<div class="markdown-block">%s</div>' %
1839 MarkupRenderer.markdown(source, flavored=True, mentions=mentions))
1846 maybe_convert_relative_links(
1847 MarkupRenderer.markdown(source, flavored=True,
1848 mentions=mentions)))
1840 1849 elif renderer == 'jupyter':
1841 1850 return literal(
1842 1851 '<div class="ipynb">%s</div>' %
1843 MarkupRenderer.jupyter(source))
1852 maybe_convert_relative_links(
1853 MarkupRenderer.jupyter(source)))
1844 1854
1845 1855 # None means just show the file-source
1846 1856 return None
1847 1857
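# A rough usage sketch of the new relative_url support (paths are illustrative
# only), mirroring how the file-view template passes the raw-file URL of the
# rendered document:
#
#   render(readme_source, renderer='markdown',
#          relative_url='/my-repo/raw/tip/README.md')
#
# With relative_url set, a rendered '<img src="./logo.png">' is rewritten to
# '<img src="/my-repo/raw/tip/logo.png">'; without it the produced markup is
# returned unchanged.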
1848 1858
1849 1859 def commit_status(repo, commit_id):
1850 1860 return ChangesetStatusModel().get_status(repo, commit_id)
1851 1861
1852 1862
1853 1863 def commit_status_lbl(commit_status):
1854 1864 return dict(ChangesetStatus.STATUSES).get(commit_status)
1855 1865
1856 1866
1857 1867 def commit_time(repo_name, commit_id):
1858 1868 repo = Repository.get_by_repo_name(repo_name)
1859 1869 commit = repo.get_commit(commit_id=commit_id)
1860 1870 return commit.date
1861 1871
1862 1872
1863 1873 def get_permission_name(key):
1864 1874 return dict(Permission.PERMS).get(key)
1865 1875
1866 1876
1867 1877 def journal_filter_help():
1868 1878 return _(
1869 1879 'Example filter terms:\n' +
1870 1880 ' repository:vcs\n' +
1871 1881 ' username:marcin\n' +
1872 1882 ' action:*push*\n' +
1873 1883 ' ip:127.0.0.1\n' +
1874 1884 ' date:20120101\n' +
1875 1885 ' date:[20120101100000 TO 20120102]\n' +
1876 1886 '\n' +
1877 1887 'Generate wildcards using \'*\' character:\n' +
1878 1888 ' "repository:vcs*" - search everything starting with \'vcs\'\n' +
1879 1889 ' "repository:*vcs*" - search for repository containing \'vcs\'\n' +
1880 1890 '\n' +
1881 1891 'Optional AND / OR operators in queries\n' +
1882 1892 ' "repository:vcs OR repository:test"\n' +
1883 1893 ' "username:test AND repository:test*"\n'
1884 1894 )
1885 1895
1886 1896
1887 1897 def not_mapped_error(repo_name):
1888 1898 flash(_('%s repository is not mapped to db perhaps'
1889 1899 ' it was created or renamed from the filesystem'
1890 1900 ' please run the application again'
1891 1901 ' in order to rescan repositories') % repo_name, category='error')
1892 1902
1893 1903
1894 1904 def ip_range(ip_addr):
1895 1905 from rhodecode.model.db import UserIpMap
1896 1906 s, e = UserIpMap._get_ip_range(ip_addr)
1897 1907 return '%s - %s' % (s, e)
1898 1908
1899 1909
1900 1910 def form(url, method='post', needs_csrf_token=True, **attrs):
1901 1911 """Wrapper around webhelpers.tags.form to prevent CSRF attacks."""
1902 1912 if method.lower() != 'get' and needs_csrf_token:
1903 1913 raise Exception(
1904 1914 'Forms to POST/PUT/DELETE endpoints should have (in general) a ' +
1905 1915 'CSRF token. If the endpoint does not require such token you can ' +
1906 1916 'explicitly set the parameter needs_csrf_token to false.')
1907 1917
1908 1918 return wh_form(url, method=method, **attrs)
1909 1919
1910 1920
1911 1921 def secure_form(url, method="POST", multipart=False, **attrs):
1912 1922 """Start a form tag that points the action to an url. This
1913 1923 form tag will also include the hidden field containing
1914 1924 the auth token.
1915 1925
1916 1926 The url options should be given either as a string, or as a
1917 1927 ``url()`` function. The method for the form defaults to POST.
1918 1928
1919 1929 Options:
1920 1930
1921 1931 ``multipart``
1922 1932 If set to True, the enctype is set to "multipart/form-data".
1923 1933 ``method``
1924 1934 The method to use when submitting the form, usually either
1925 1935 "GET" or "POST". If "PUT", "DELETE", or another verb is used, a
1926 1936 hidden input with name _method is added to simulate the verb
1927 1937 over POST.
1928 1938
1929 1939 """
1930 1940 from webhelpers.pylonslib.secure_form import insecure_form
1931 1941 form = insecure_form(url, method, multipart, **attrs)
1932 1942 token = csrf_input()
1933 1943 return literal("%s\n%s" % (form, token))
1934 1944
1935 1945 def csrf_input():
1936 1946 return literal(
1937 1947 '<input type="hidden" id="{}" name="{}" value="{}">'.format(
1938 1948 csrf_token_key, csrf_token_key, get_csrf_token()))
1939 1949
1940 1950 def dropdownmenu(name, selected, options, enable_filter=False, **attrs):
1941 1951 select_html = select(name, selected, options, **attrs)
1942 1952 select2 = """
1943 1953 <script>
1944 1954 $(document).ready(function() {
1945 1955 $('#%s').select2({
1946 1956 containerCssClass: 'drop-menu',
1947 1957 dropdownCssClass: 'drop-menu-dropdown',
1948 1958 dropdownAutoWidth: true%s
1949 1959 });
1950 1960 });
1951 1961 </script>
1952 1962 """
1953 1963 filter_option = """,
1954 1964 minimumResultsForSearch: -1
1955 1965 """
1956 1966 input_id = attrs.get('id') or name
1957 1967 filter_enabled = "" if enable_filter else filter_option
1958 1968 select_script = literal(select2 % (input_id, filter_enabled))
1959 1969
1960 1970 return literal(select_html+select_script)
1961 1971
1962 1972
1963 1973 def get_visual_attr(tmpl_context_var, attr_name):
1964 1974 """
1965 1975 A safe way to get an attribute from the visual variable of the template context
1966 1976
1967 1977 :param tmpl_context_var: instance of tmpl_context, usually present as `c`
1968 1978 :param attr_name: name of the attribute we fetch from the c.visual
1969 1979 """
1970 1980 visual = getattr(tmpl_context_var, 'visual', None)
1971 1981 if not visual:
1972 1982 return
1973 1983 else:
1974 1984 return getattr(visual, attr_name, None)
1975 1985
1976 1986
1977 1987 def get_last_path_part(file_node):
1978 1988 if not file_node.path:
1979 1989 return u''
1980 1990
1981 1991 path = safe_unicode(file_node.path.split('/')[-1])
1982 1992 return u'../' + path
1983 1993
1984 1994
1985 1995 def route_path(*args, **kwds):
1986 1996 """
1987 1997 Wrapper around pyramids `route_path` function. It is used to generate
1988 1998 URLs from within pylons views or templates. This will be removed when
1989 1999 the pyramid migration is finished.
1990 2000 """
1991 2001 req = get_current_request()
1992 2002 return req.route_path(*args, **kwds)
1993 2003
1994 2004
1995 2005 def route_path_or_none(*args, **kwargs):
1996 2006 try:
1997 2007 return route_path(*args, **kwargs)
1998 2008 except KeyError:
1999 2009 return None
2000 2010
2001 2011
2002 2012 def static_url(*args, **kwds):
2003 2013 """
2004 2014 Wrapper around pyramids `static_url` function. It is used to generate
2005 2015 URLs from within pylons views or templates. This will be removed when
2006 2016 the pyramid migration is finished.
2007 2017 """
2008 2018 req = get_current_request()
2009 2019 return req.static_url(*args, **kwds)
2010 2020
2011 2021
2012 2022 def resource_path(*args, **kwds):
2013 2023 """
2014 2024 Wrapper around pyramids `resource_path` function. It is used to generate
2015 2025 URLs from within pylons views or templates. This will be removed when
2016 2026 the pyramid migration is finished.
2017 2027 """
2018 2028 req = get_current_request()
2019 2029 return req.resource_path(*args, **kwds)
@@ -1,375 +1,455 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2011-2017 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21
22 22 """
23 23 Renderer for markup languages with ability to parse using rst or markdown
24 24 """
25 25
26 26 import re
27 27 import os
28 import lxml.html
28 29 import logging
29 import itertools
30 import urlparse
31 import urllib
30 32
31 33 from mako.lookup import TemplateLookup
32 34 from mako.template import Template as MakoTemplate
33 35
34 36 from docutils.core import publish_parts
35 37 from docutils.parsers.rst import directives
36 38 import markdown
37 39
38 from rhodecode.lib.markdown_ext import (
39 UrlizeExtension, GithubFlavoredMarkdownExtension)
40 from rhodecode.lib.utils2 import safe_unicode, md5_safe, MENTIONS_REGEX
40 from rhodecode.lib.markdown_ext import GithubFlavoredMarkdownExtension
41 from rhodecode.lib.utils2 import (
42 safe_str, safe_unicode, md5_safe, MENTIONS_REGEX)
41 43
42 44 log = logging.getLogger(__name__)
43 45
44 46 # default renderer used to generate automated comments
45 47 DEFAULT_COMMENTS_RENDERER = 'rst'
46 48
47 49
50 def relative_links(html_source, server_path):
51 doc = lxml.html.fromstring(html_source)
52 for el in doc.cssselect('img, video'):
53 src = el.attrib['src']
54 if src:
55 el.attrib['src'] = relative_path(src, server_path)
56
57 for el in doc.cssselect('a:not(.gfm)'):
58 src = el.attrib['href']
59 if src:
60 el.attrib['href'] = relative_path(src, server_path)
61
62 return lxml.html.tostring(doc)
63
64
65 def relative_path(path, request_path, is_repo_file=None):
66 """
67 Relative link support: path is a relative path, and request_path is the
68 current server path (not absolute).
69
70 e.g.
71
72 path = '../logo.png'
73 request_path = '/repo/files/path/file.md'
74 produces: '/repo/files/logo.png'
75 """
76 # TODO(marcink): unicode/str support ?
77 # maybe=> safe_unicode(urllib.quote(safe_str(final_path), '/:'))
78
79 def dummy_check(p):
80 return True # assume default is a valid file path
81
82 is_repo_file = is_repo_file or dummy_check
83 if not path:
84 return request_path
85
86 path = safe_unicode(path)
87 request_path = safe_unicode(request_path)
88
89 if path.startswith((u'data:', u'#', u':')):
90 # skip data, anchor, invalid links
91 return path
92
93 is_absolute = bool(urlparse.urlparse(path).netloc)
94 if is_absolute:
95 return path
96
97 if not request_path:
98 return path
99
100 if path.startswith(u'/'):
101 path = path[1:]
102
103 if path.startswith(u'./'):
104 path = path[2:]
105
106 parts = request_path.split('/')
107 # compute how deep we need to traverse the request_path
108 depth = 0
109
110 if is_repo_file(request_path):
111 # if request path is a VALID file, we use a relative path with
112 # one level up
113 depth += 1
114
115 while path.startswith(u'../'):
116 depth += 1
117 path = path[3:]
118
119 if depth > 0:
120 parts = parts[:-depth]
121
122 parts.append(path)
123 final_path = u'/'.join(parts).lstrip(u'/')
124
125 return u'/' + final_path
126
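# A short usage sketch, matching the cases exercised in the accompanying
# tests further below:
#
#   relative_path('../logo.png', '/repo/files/path/file.md')
#   # -> '/repo/files/logo.png'
#   relative_path('https://google.com/image.png', 'files/path/file.md')
#   # -> 'https://google.com/image.png'   (absolute URLs pass through)
#
#   relative_links('<img src="./logo.png">', '/repo/files/path/file.md')
#   # -> '<img src="/repo/files/path/logo.png">'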
127
48 128 class MarkupRenderer(object):
49 129 RESTRUCTUREDTEXT_DISALLOWED_DIRECTIVES = ['include', 'meta', 'raw']
50 130
51 131 MARKDOWN_PAT = re.compile(r'\.(md|mkdn?|mdown|markdown)$', re.IGNORECASE)
52 132 RST_PAT = re.compile(r'\.re?st$', re.IGNORECASE)
53 133 JUPYTER_PAT = re.compile(r'\.(ipynb)$', re.IGNORECASE)
54 134 PLAIN_PAT = re.compile(r'^readme$', re.IGNORECASE)
55 135
56 136 extensions = ['codehilite', 'extra', 'def_list', 'sane_lists']
57 137 markdown_renderer = markdown.Markdown(
58 138 extensions, safe_mode=True, enable_attributes=False)
59 139
60 140 markdown_renderer_flavored = markdown.Markdown(
61 141 extensions + [GithubFlavoredMarkdownExtension()], safe_mode=True,
62 142 enable_attributes=False)
63 143
64 144 # extensions together with weights. Lower weight comes first, which lets us
65 145 # control the order in which extensions are attached to readme names.
66 146 PLAIN_EXTS = [
67 147 # prefer no extension
68 148 ('', 0), # special case that renders READMES names without extension
69 149 ('.text', 2), ('.TEXT', 2),
70 150 ('.txt', 3), ('.TXT', 3)
71 151 ]
72 152
73 153 RST_EXTS = [
74 154 ('.rst', 1), ('.rest', 1),
75 155 ('.RST', 2), ('.REST', 2)
76 156 ]
77 157
78 158 MARKDOWN_EXTS = [
79 159 ('.md', 1), ('.MD', 1),
80 160 ('.mkdn', 2), ('.MKDN', 2),
81 161 ('.mdown', 3), ('.MDOWN', 3),
82 162 ('.markdown', 4), ('.MARKDOWN', 4)
83 163 ]
84 164
85 165 def _detect_renderer(self, source, filename=None):
86 166 """
87 167 runs detection of what renderer should be used for generating html
88 168 from a markup language
89 169
90 170 filename can be also explicitly a renderer name
91 171
92 172 :param source:
93 173 :param filename:
94 174 """
95 175
96 176 if MarkupRenderer.MARKDOWN_PAT.findall(filename):
97 177 detected_renderer = 'markdown'
98 178 elif MarkupRenderer.RST_PAT.findall(filename):
99 179 detected_renderer = 'rst'
100 180 elif MarkupRenderer.JUPYTER_PAT.findall(filename):
101 181 detected_renderer = 'jupyter'
102 182 elif MarkupRenderer.PLAIN_PAT.findall(filename):
103 183 detected_renderer = 'plain'
104 184 else:
105 185 detected_renderer = 'plain'
106 186
107 187 return getattr(MarkupRenderer, detected_renderer)
108 188
109 189 @classmethod
110 190 def renderer_from_filename(cls, filename, exclude):
111 191 """
112 192 Detect renderer markdown/rst from filename and optionally use exclude
113 193 list to remove some options. This is mostly used in helpers.
114 194 Returns None when no renderer can be detected.
115 195 """
116 196 def _filter(elements):
117 197 if isinstance(exclude, (list, tuple)):
118 198 return [x for x in elements if x not in exclude]
119 199 return elements
120 200
121 201 if filename.endswith(
122 202 tuple(_filter([x[0] for x in cls.MARKDOWN_EXTS if x[0]]))):
123 203 return 'markdown'
124 204 if filename.endswith(tuple(_filter([x[0] for x in cls.RST_EXTS if x[0]]))):
125 205 return 'rst'
126 206
127 207 return None
128 208
129 209 def render(self, source, filename=None):
130 210 """
131 211 Renders the given source using a detected renderer;
132 212 renderers are detected based on file extension or mimetype.
133 213 As a last resort it does simple html rendering, replacing new lines with <br/>.
134 214
135 215 :param filename:
136 216 :param source:
137 217 """
138 218
139 219 renderer = self._detect_renderer(source, filename)
140 220 readme_data = renderer(source)
141 221 return readme_data
142 222
143 223 @classmethod
144 224 def _flavored_markdown(cls, text):
145 225 """
146 226 Github style flavored markdown
147 227
148 228 :param text:
149 229 """
150 230
151 231 # Extract pre blocks.
152 232 extractions = {}
153 233
154 234 def pre_extraction_callback(matchobj):
155 235 digest = md5_safe(matchobj.group(0))
156 236 extractions[digest] = matchobj.group(0)
157 237 return "{gfm-extraction-%s}" % digest
158 238 pattern = re.compile(r'<pre>.*?</pre>', re.MULTILINE | re.DOTALL)
159 239 text = re.sub(pattern, pre_extraction_callback, text)
160 240
161 241 # Prevent foo_bar_baz from ending up with an italic word in the middle.
162 242 def italic_callback(matchobj):
163 243 s = matchobj.group(0)
164 244 if list(s).count('_') >= 2:
165 245 return s.replace('_', r'\_')
166 246 return s
167 247 text = re.sub(r'^(?! {4}|\t)\w+_\w+_\w[\w_]*', italic_callback, text)
168 248
169 249 # Insert pre block extractions.
170 250 def pre_insert_callback(matchobj):
171 251 return '\n\n' + extractions[matchobj.group(1)]
172 252 text = re.sub(r'\{gfm-extraction-([0-9a-f]{32})\}',
173 253 pre_insert_callback, text)
174 254
175 255 return text
176 256
177 257 @classmethod
178 258 def urlify_text(cls, text):
179 259 url_pat = re.compile(r'(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]'
180 260 r'|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)')
181 261
182 262 def url_func(match_obj):
183 263 url_full = match_obj.groups()[0]
184 264 return '<a href="%(url)s">%(url)s</a>' % ({'url': url_full})
185 265
186 266 return url_pat.sub(url_func, text)
187 267
188 268 @classmethod
189 269 def plain(cls, source, universal_newline=True):
190 270 source = safe_unicode(source)
191 271 if universal_newline:
192 272 newline = '\n'
193 273 source = newline.join(source.splitlines())
194 274
195 275 source = cls.urlify_text(source)
196 276 return '<br />' + source.replace("\n", '<br />')
197 277
198 278 @classmethod
199 279 def markdown(cls, source, safe=True, flavored=True, mentions=False):
200 280 # It does not allow inserting inline HTML. In the presence of HTML tags, it
201 281 # will replace them instead with [HTML_REMOVED]. This is controlled by
202 282 # the safe_mode=True parameter of the markdown method.
203 283
204 284 if flavored:
205 285 markdown_renderer = cls.markdown_renderer_flavored
206 286 else:
207 287 markdown_renderer = cls.markdown_renderer
208 288
209 289 if mentions:
210 290 mention_pat = re.compile(MENTIONS_REGEX)
211 291
212 292 def wrapp(match_obj):
213 293 uname = match_obj.groups()[0]
214 294 return ' **@%(uname)s** ' % {'uname': uname}
215 295 mention_hl = mention_pat.sub(wrapp, source).strip()
216 296 # mentions are already extracted, render this text with mentions=False
217 297 return cls.markdown(mention_hl, safe=safe, flavored=flavored,
218 298 mentions=False)
219 299
220 300 source = safe_unicode(source)
221 301 try:
222 302 if flavored:
223 303 source = cls._flavored_markdown(source)
224 304 return markdown_renderer.convert(source)
225 305 except Exception:
226 306 log.exception('Error when rendering Markdown')
227 307 if safe:
228 308 log.debug('Fallback to render in plain mode')
229 309 return cls.plain(source)
230 310 else:
231 311 raise
232 312
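# A rough usage sketch of the mentions handling above:
#
#   MarkupRenderer.markdown('please review, @marcin', mentions=True)
#
# first wraps the mention as ' **@marcin** ' and then renders it through the
# flavored markdown pipeline, so the output contains roughly
# '<strong>@marcin</strong>' while the rest of the text is rendered normally.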
233 313 @classmethod
234 314 def rst(cls, source, safe=True, mentions=False):
235 315 if mentions:
236 316 mention_pat = re.compile(MENTIONS_REGEX)
237 317
238 318 def wrapp(match_obj):
239 319 uname = match_obj.groups()[0]
240 320 return ' **@%(uname)s** ' % {'uname': uname}
241 321 mention_hl = mention_pat.sub(wrapp, source).strip()
242 322 # mentions are already extracted, render this text with mentions=False
243 323 return cls.rst(mention_hl, safe=safe, mentions=False)
244 324
245 325 source = safe_unicode(source)
246 326 try:
247 327 docutils_settings = dict(
248 328 [(alias, None) for alias in
249 329 cls.RESTRUCTUREDTEXT_DISALLOWED_DIRECTIVES])
250 330
251 331 docutils_settings.update({'input_encoding': 'unicode',
252 332 'report_level': 4})
253 333
254 334 for k, v in docutils_settings.iteritems():
255 335 directives.register_directive(k, v)
256 336
257 337 parts = publish_parts(source=source,
258 338 writer_name="html4css1",
259 339 settings_overrides=docutils_settings)
260 340
261 341 return parts['html_title'] + parts["fragment"]
262 342 except Exception:
263 343 log.exception('Error when rendering RST')
264 344 if safe:
265 345 log.debug('Falling back to render in plain mode')
266 346 return cls.plain(source)
267 347 else:
268 348 raise
269 349
270 350 @classmethod
271 351 def jupyter(cls, source, safe=True):
272 352 from rhodecode.lib import helpers
273 353
274 354 from traitlets.config import Config
275 355 import nbformat
276 356 from nbconvert import HTMLExporter
277 357 from nbconvert.preprocessors import Preprocessor
278 358
279 359 class CustomHTMLExporter(HTMLExporter):
280 360 def _template_file_default(self):
281 361 return 'basic'
282 362
283 363 class Sandbox(Preprocessor):
284 364
285 365 def preprocess(self, nb, resources):
286 366 sandbox_text = 'SandBoxed(IPython.core.display.Javascript object)'
287 367 for cell in nb['cells']:
288 368 if safe and 'outputs' in cell:
289 369 for cell_output in cell['outputs']:
290 370 if 'data' in cell_output:
291 371 if 'application/javascript' in cell_output['data']:
292 372 cell_output['data']['text/plain'] = sandbox_text
293 373 cell_output['data'].pop('application/javascript', None)
294 374 return nb, resources
295 375
296 376 def _sanitize_resources(resources):
297 377 """
298 378 Skip/sanitize some of the CSS generated and included in jupyter
299 379 so it doesn't mess up the UI so much
300 380 """
301 381
302 382 # TODO(marcink): probably we should replace this with whole custom
303 383 # CSS set that doesn't screw up, but jupyter generated html has some
304 384 # special markers, so it requires Custom HTML exporter template with
305 385 # _default_template_path_default, to achieve that
306 386
307 387 # strip the reset CSS
308 388 resources[0] = resources[0][resources[0].find('/*! Source'):]
309 389 return resources
310 390
311 391 def as_html(notebook):
312 392 conf = Config()
313 393 conf.CustomHTMLExporter.preprocessors = [Sandbox]
314 394 html_exporter = CustomHTMLExporter(config=conf)
315 395
316 396 (body, resources) = html_exporter.from_notebook_node(notebook)
317 397 header = '<!-- ## IPYTHON NOTEBOOK RENDERING ## -->'
318 398 js = MakoTemplate(r'''
319 399 <!-- Load mathjax -->
320 400 <!-- MathJax configuration -->
321 401 <script type="text/x-mathjax-config">
322 402 MathJax.Hub.Config({
323 403 jax: ["input/TeX","output/HTML-CSS", "output/PreviewHTML"],
324 404 extensions: ["tex2jax.js","MathMenu.js","MathZoom.js", "fast-preview.js", "AssistiveMML.js", "[Contrib]/a11y/accessibility-menu.js"],
325 405 TeX: {
326 406 extensions: ["AMSmath.js","AMSsymbols.js","noErrors.js","noUndefined.js"]
327 407 },
328 408 tex2jax: {
329 409 inlineMath: [ ['$','$'], ["\\(","\\)"] ],
330 410 displayMath: [ ['$$','$$'], ["\\[","\\]"] ],
331 411 processEscapes: true,
332 412 processEnvironments: true
333 413 },
334 414 // Center justify equations in code and markdown cells. Elsewhere
335 415 // we use CSS to left justify single line equations in code cells.
336 416 displayAlign: 'center',
337 417 "HTML-CSS": {
338 418 styles: {'.MathJax_Display': {"margin": 0}},
339 419 linebreaks: { automatic: true },
340 420 availableFonts: ["STIX", "TeX"]
341 421 },
342 422 showMathMenu: false
343 423 });
344 424 </script>
345 425 <!-- End of mathjax configuration -->
346 426 <script src="${h.asset('js/src/math_jax/MathJax.js')}"></script>
347 427 ''').render(h=helpers)
348 428
349 429 css = '<style>{}</style>'.format(
350 430 ''.join(_sanitize_resources(resources['inlining']['css'])))
351 431
352 432 body = '\n'.join([header, css, js, body])
353 433 return body, resources
354 434
355 435 notebook = nbformat.reads(source, as_version=4)
356 436 (body, resources) = as_html(notebook)
357 437 return body
358 438
359 439
360 440 class RstTemplateRenderer(object):
361 441
362 442 def __init__(self):
363 443 base = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
364 444 rst_template_dirs = [os.path.join(base, 'templates', 'rst_templates')]
365 445 self.template_store = TemplateLookup(
366 446 directories=rst_template_dirs,
367 447 input_encoding='utf-8',
368 448 imports=['from rhodecode.lib import helpers as h'])
369 449
370 450 def _get_template(self, templatename):
371 451 return self.template_store.get_template(templatename)
372 452
373 453 def render(self, template_name, **kwargs):
374 454 template = self._get_template(template_name)
375 455 return template.render(**kwargs)
@@ -1,78 +1,78 b''
1 1 <%namespace name="sourceblock" file="/codeblocks/source.mako"/>
2 2
3 3 <div id="codeblock" class="codeblock">
4 4 <div class="codeblock-header">
5 5 <div class="stats">
6 6 <span> <strong>${c.file}</strong></span>
7 7 <span> | ${c.file.lines()[0]} ${ungettext('line', 'lines', c.file.lines()[0])}</span>
8 8 <span> | ${h.format_byte_size_binary(c.file.size)}</span>
9 9 <span> | ${c.file.mimetype} </span>
10 10 <span class="item last"> | ${h.get_lexer_for_filenode(c.file).__class__.__name__}</span>
11 11 </div>
12 12 <div class="buttons">
13 13 <a id="file_history_overview" href="#">
14 14 ${_('History')}
15 15 </a>
16 16 <a id="file_history_overview_full" style="display: none" href="${h.url('changelog_file_home',repo_name=c.repo_name, revision=c.commit.raw_id, f_path=c.f_path)}">
17 17 ${_('Show Full History')}
18 18 </a> |
19 19 %if c.annotate:
20 20 ${h.link_to(_('Source'), h.url('files_home', repo_name=c.repo_name,revision=c.commit.raw_id,f_path=c.f_path))}
21 21 %else:
22 22 ${h.link_to(_('Annotation'), h.url('files_annotate_home',repo_name=c.repo_name,revision=c.commit.raw_id,f_path=c.f_path))}
23 23 %endif
24 24 | ${h.link_to(_('Raw'), h.url('files_raw_home',repo_name=c.repo_name,revision=c.commit.raw_id,f_path=c.f_path))}
25 25 | <a href="${h.url('files_rawfile_home',repo_name=c.repo_name,revision=c.commit.raw_id,f_path=c.f_path)}">
26 26 ${_('Download')}
27 27 </a>
28 28
29 29 %if h.HasRepoPermissionAny('repository.write','repository.admin')(c.repo_name):
30 30 |
31 31 %if c.on_branch_head and c.branch_or_raw_id and not c.file.is_binary:
32 32 <a href="${h.url('files_edit_home',repo_name=c.repo_name,revision=c.branch_or_raw_id,f_path=c.f_path, anchor='edit')}">
33 33 ${_('Edit on Branch:%s') % c.branch_or_raw_id}
34 34 </a>
35 35 | <a class="btn-danger btn-link" href="${h.url('files_delete_home',repo_name=c.repo_name,revision=c.branch_or_raw_id,f_path=c.f_path, anchor='edit')}">${_('Delete')}
36 36 </a>
37 37 %elif c.on_branch_head and c.branch_or_raw_id and c.file.is_binary:
38 38 ${h.link_to(_('Edit'), '#', class_="btn btn-link disabled tooltip", title=_('Editing binary files not allowed'))}
39 39 | ${h.link_to(_('Delete'), h.url('files_delete_home',repo_name=c.repo_name,revision=c.branch_or_raw_id,f_path=c.f_path, anchor='edit'),class_="btn-danger btn-link")}
40 40 %else:
41 41 ${h.link_to(_('Edit'), '#', class_="btn btn-link disabled tooltip", title=_('Editing files allowed only when on branch head commit'))}
42 42 | ${h.link_to(_('Delete'), '#', class_="btn btn-danger btn-link disabled tooltip", title=_('Deleting files allowed only when on branch head commit'))}
43 43 %endif
44 44 %endif
45 45 </div>
46 46 </div>
47 47 <div id="file_history_container"></div>
48 48 <div class="code-body">
49 49 %if c.file.is_binary:
50 50 <div>
51 51 ${_('Binary file (%s)') % c.file.mimetype}
52 52 </div>
53 53 %else:
54 54 % if c.file.size < c.cut_off_limit:
55 55 %if c.renderer and not c.annotate:
56 ${h.render(c.file.content, renderer=c.renderer)}
56 ${h.render(c.file.content, renderer=c.renderer, relative_url=h.url('files_raw_home',repo_name=c.repo_name,revision=c.commit.raw_id,f_path=c.f_path))}
57 57 %else:
58 58 <table class="cb codehilite">
59 59 %if c.annotate:
60 60 <% color_hasher = h.color_hasher() %>
61 61 %for annotation, lines in c.annotated_lines:
62 62 ${sourceblock.render_annotation_lines(annotation, lines, color_hasher)}
63 63 %endfor
64 64 %else:
65 65 %for line_num, tokens in enumerate(c.lines, 1):
66 66 ${sourceblock.render_line(line_num, tokens)}
67 67 %endfor
68 68 %endif
69 69 </table>
70 70 </div>
71 71 %endif
72 72 %else:
73 73 ${_('File is too big to display')} ${h.link_to(_('Show as raw'),
74 74 h.url('files_raw_home',repo_name=c.repo_name,revision=c.commit.raw_id,f_path=c.f_path))}
75 75 %endif
76 76 %endif
77 77 </div>
78 78 </div> No newline at end of file
@@ -1,179 +1,255 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2010-2017 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21 import pytest
22 22
23 from rhodecode.lib.markup_renderer import MarkupRenderer, RstTemplateRenderer
23 from rhodecode.lib.markup_renderer import (
24 MarkupRenderer, RstTemplateRenderer, relative_path, relative_links)
24 25
25 26
26 27 @pytest.mark.parametrize(
27 28 "filename, expected_renderer",
28 29 [
29 30 ('readme.md', 'markdown'),
30 31 ('readme.Md', 'markdown'),
31 32 ('readme.MdoWn', 'markdown'),
32 33 ('readme.rst', 'rst'),
33 34 ('readme.Rst', 'rst'),
34 35 ('readme.rest', 'rst'),
35 36 ('readme.rest', 'rst'),
36 37
37 38 ('markdown.xml', 'plain'),
38 39 ('rest.xml', 'plain'),
39 40 ('readme.xml', 'plain'),
40 41
41 42 ('readme', 'plain'),
42 43 ('README', 'plain'),
43 44 ('readme.mdx', 'plain'),
44 45 ('readme.rstx', 'plain'),
45 46 ('readmex', 'plain'),
46 47 ])
47 48 def test_detect_renderer(filename, expected_renderer):
48 49 detected_renderer = MarkupRenderer()._detect_renderer(
49 50 '', filename=filename).__name__
50 51 assert expected_renderer == detected_renderer
51 52
52 53
53 54 def test_markdown_xss_link():
54 55 xss_md = "[link](javascript:alert('XSS: pwned!'))"
55 56 rendered_html = MarkupRenderer.markdown(xss_md)
56 57 assert 'href="javascript:alert(\'XSS: pwned!\')"' not in rendered_html
57 58
58 59
59 60 def test_markdown_xss_inline_html():
60 61 xss_md = '\n'.join([
61 62 '> <a name="n"',
62 63 '> href="javascript:alert(\'XSS: pwned!\')">link</a>'])
63 64 rendered_html = MarkupRenderer.markdown(xss_md)
64 65 assert 'href="javascript:alert(\'XSS: pwned!\')">' not in rendered_html
65 66
66 67
67 68 def test_markdown_inline_html():
68 69 xss_md = '\n'.join(['> <a name="n"',
69 70 '> href="https://rhodecode.com">link</a>'])
70 71 rendered_html = MarkupRenderer.markdown(xss_md)
71 72 assert '[HTML_REMOVED]link[HTML_REMOVED]' in rendered_html
72 73
73 74
74 75 def test_rst_xss_link():
75 76 xss_rst = "`Link<javascript:alert('XSS: pwned!')>`_"
76 77 rendered_html = MarkupRenderer.rst(xss_rst)
77 78 assert "href=javascript:alert('XSS: pwned!')" not in rendered_html
78 79
79 80
80 81 @pytest.mark.xfail(reason='Bug in docutils. Waiting answer from the author')
81 82 def test_rst_xss_inline_html():
82 83 xss_rst = '<a href="javascript:alert(\'XSS: pwned!\')">link</a>'
83 84 rendered_html = MarkupRenderer.rst(xss_rst)
84 85 assert 'href="javascript:alert(' not in rendered_html
85 86
86 87
87 88 def test_rst_xss_raw_directive():
88 89 xss_rst = '\n'.join([
89 90 '.. raw:: html',
90 91 '',
91 92 ' <a href="javascript:alert(\'XSS: pwned!\')">link</a>'])
92 93 rendered_html = MarkupRenderer.rst(xss_rst)
93 94 assert 'href="javascript:alert(' not in rendered_html
94 95
95 96
96 97 def test_render_rst_template_without_files():
97 98 expected = u'''\
98 99 Pull request updated. Auto status change to |under_review|
99 100
100 101 .. role:: added
101 102 .. role:: removed
102 103 .. parsed-literal::
103 104
104 105 Changed commits:
105 106 * :added:`2 added`
106 107 * :removed:`3 removed`
107 108
108 109 No file changes found
109 110
110 111 .. |under_review| replace:: *"NEW STATUS"*'''
111 112
112 113 params = {
113 114 'under_review_label': 'NEW STATUS',
114 115 'added_commits': ['a', 'b'],
115 116 'removed_commits': ['a', 'b', 'c'],
116 117 'changed_files': [],
117 118 'added_files': [],
118 119 'modified_files': [],
119 120 'removed_files': [],
120 121 }
121 122 renderer = RstTemplateRenderer()
122 123 rendered = renderer.render('pull_request_update.mako', **params)
123 124 assert expected == rendered
124 125
125 126
126 127 def test_render_rst_template_with_files():
127 128 expected = u'''\
128 129 Pull request updated. Auto status change to |under_review|
129 130
130 131 .. role:: added
131 132 .. role:: removed
132 133 .. parsed-literal::
133 134
134 135 Changed commits:
135 136 * :added:`1 added`
136 137 * :removed:`3 removed`
137 138
138 139 Changed files:
139 140 * `A /path/a.py <#a_c--68ed34923b68>`_
140 141 * `A /path/b.js <#a_c--64f90608b607>`_
141 142 * `M /path/d.js <#a_c--85842bf30c6e>`_
142 143 * `M /path/ę.py <#a_c--d713adf009cd>`_
143 144 * R /path/ź.py
144 145
145 146 .. |under_review| replace:: *"NEW STATUS"*'''
146 147
147 148 added = ['/path/a.py', '/path/b.js']
148 149 modified = ['/path/d.js', u'/path/ę.py']
149 150 removed = [u'/path/ź.py']
150 151
151 152 params = {
152 153 'under_review_label': 'NEW STATUS',
153 154 'added_commits': ['a'],
154 155 'removed_commits': ['a', 'b', 'c'],
155 156 'changed_files': added + modified + removed,
156 157 'added_files': added,
157 158 'modified_files': modified,
158 159 'removed_files': removed,
159 160 }
160 161 renderer = RstTemplateRenderer()
161 162 rendered = renderer.render('pull_request_update.mako', **params)
162 163
163 164 assert expected == rendered
164 165
165 166
166 167 def test_render_rst_auto_status_template():
167 168 expected = u'''\
168 169 Auto status change to |new_status|
169 170
170 171 .. |new_status| replace:: *"NEW STATUS"*'''
171 172
172 173 params = {
173 174 'new_status_label': 'NEW STATUS',
174 175 'pull_request': None,
175 176 'commit_id': None,
176 177 }
177 178 renderer = RstTemplateRenderer()
178 179 rendered = renderer.render('auto_status_change.mako', **params)
179 180 assert expected == rendered
181
182
183 @pytest.mark.parametrize(
184 "src_path, server_path, is_path, expected",
185 [
186 ('source.png', '/repo/files/path', lambda p: False,
187 '/repo/files/path/source.png'),
188
189 ('source.png', 'mk/git/blob/master/README.md', lambda p: True,
190 '/mk/git/blob/master/source.png'),
191
192 ('./source.png', 'mk/git/blob/master/README.md', lambda p: True,
193 '/mk/git/blob/master/source.png'),
194
195 ('/source.png', 'mk/git/blob/master/README.md', lambda p: True,
196 '/mk/git/blob/master/source.png'),
197
198 ('./source.png', 'repo/files/path/source.md', lambda p: True,
199 '/repo/files/path/source.png'),
200
201 ('./source.png', '/repo/files/path/file.md', lambda p: True,
202 '/repo/files/path/source.png'),
203
204 ('../source.png', '/repo/files/path/file.md', lambda p: True,
205 '/repo/files/source.png'),
206
207 ('./../source.png', '/repo/files/path/file.md', lambda p: True,
208 '/repo/files/source.png'),
209
210 ('./source.png', '/repo/files/path/file.md', lambda p: True,
211 '/repo/files/path/source.png'),
212
213 ('../../../source.png', 'path/file.md', lambda p: True,
214 '/source.png'),
215
216 ('../../../../../source.png', '/path/file.md', None,
217 '/source.png'),
218
219 ('../../../../../source.png', 'files/path/file.md', None,
220 '/source.png'),
221
222 ('../../../../../https://google.com/image.png', 'files/path/file.md', None,
223 '/https://google.com/image.png'),
224
225 ('https://google.com/image.png', 'files/path/file.md', None,
226 'https://google.com/image.png'),
227
228 ('://foo', '/files/path/file.md', None,
229 '://foo'),
230
231 (u'한글.png', '/files/path/file.md', None,
232 u'/files/path/한글.png'),
233
234 ('my custom image.png', '/files/path/file.md', None,
235 '/files/path/my custom image.png'),
236 ])
237 def test_relative_path(src_path, server_path, is_path, expected):
238 path = relative_path(src_path, server_path, is_path)
239 assert path == expected
240
241
242 @pytest.mark.parametrize(
243 "src_html, expected_html",
244 [
245 ('<div></div>', '<div></div>'),
246 ('<img src="/file.png"></img>', '<img src="/path/raw/file.png">'),
247 ('<img src="data:abcd"/>', '<img src="data:abcd">'),
248 ('<a href="/file.png"></a>', '<a href="/path/raw/file.png"></a>'),
249 ('<a href="#anchor"></a>', '<a href="#anchor"></a>'),
250 ('<a href="./README.md"></a>', '<a href="/path/raw/README.md"></a>'),
251 ('<a href="../README.md"></a>', '<a href="/path/README.md"></a>'),
252
253 ])
254 def test_relative_links(src_html, expected_html):
255 assert relative_links(src_html, '/path/raw/file.md') == expected_html