##// END OF EJS Templates
readme/markup: improved order of generating readme files. Fixes #4050...
marcink -
r396:2ba4c171 default
parent child Browse files
Show More
@@ -1,65 +1,35 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2013-2016 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21 """
22 22 Various config settings for RhodeCode
23 23 """
24 24 from rhodecode import EXTENSIONS
25 25
26 26 from rhodecode.lib.utils2 import __get_lem
27 27
28 28
29 29 # language map is also used by whoosh indexer, which for those specified
30 30 # extensions will index it's content
31 31 LANGUAGES_EXTENSIONS_MAP = __get_lem()
32 32
33 # list of readme files to search in file tree and display in summary
34 # attached weights defines the search order lower is first
35 ALL_READMES = [
36 ('readme', 0), ('README', 0), ('Readme', 0),
37 ('doc/readme', 1), ('doc/README', 1), ('doc/Readme', 1),
38 ('Docs/readme', 2), ('Docs/README', 2), ('Docs/Readme', 2),
39 ('DOCS/readme', 2), ('DOCS/README', 2), ('DOCS/Readme', 2),
40 ('docs/readme', 2), ('docs/README', 2), ('docs/Readme', 2),
41 ]
42
43 # extension together with weights to search lower is first
44 RST_EXTS = [
45 ('', 0), ('.rst', 1), ('.rest', 1),
46 ('.RST', 2), ('.REST', 2)
47 ]
48
49 MARKDOWN_EXTS = [
50 ('.md', 1), ('.MD', 1),
51 ('.mkdn', 2), ('.MKDN', 2),
52 ('.mdown', 3), ('.MDOWN', 3),
53 ('.markdown', 4), ('.MARKDOWN', 4)
54 ]
55
56 PLAIN_EXTS = [
57 ('.text', 2), ('.TEXT', 2),
58 ('.txt', 3), ('.TXT', 3)
59 ]
60
61 ALL_EXTS = MARKDOWN_EXTS + RST_EXTS + PLAIN_EXTS
62
63 33 DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"
64 34
65 35 DATE_FORMAT = "%Y-%m-%d"
@@ -1,301 +1,296 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2010-2016 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21 """
22 22 Summary controller for RhodeCode Enterprise
23 23 """
24 24
25 25 import logging
26 26 from string import lower
27 from itertools import product
28 27
29 28 from pylons import tmpl_context as c, request
30 29 from pylons.i18n.translation import _
31 30 from beaker.cache import cache_region, region_invalidate
32 31
33 from rhodecode.config.conf import (
34 ALL_READMES, ALL_EXTS, LANGUAGES_EXTENSIONS_MAP)
32 from rhodecode.config.conf import (LANGUAGES_EXTENSIONS_MAP)
35 33 from rhodecode.controllers import utils
36 34 from rhodecode.controllers.changelog import _load_changelog_summary
37 35 from rhodecode.lib import caches, helpers as h
38 36 from rhodecode.lib.utils import jsonify
39 37 from rhodecode.lib.utils2 import safe_str
40 38 from rhodecode.lib.auth import (
41 39 LoginRequired, HasRepoPermissionAnyDecorator, NotAnonymous, XHRRequired)
42 40 from rhodecode.lib.base import BaseRepoController, render
43 41 from rhodecode.lib.markup_renderer import MarkupRenderer
44 42 from rhodecode.lib.ext_json import json
45 43 from rhodecode.lib.vcs.backends.base import EmptyCommit
46 44 from rhodecode.lib.vcs.exceptions import (
47 45 CommitError, EmptyRepositoryError, NodeDoesNotExistError)
48 46 from rhodecode.model.db import Statistics, CacheKey, User
49 47
50 48 log = logging.getLogger(__name__)
51 49
52 README_FILES = [''.join([x[0][0], x[1][0]])
53 for x in sorted(list(product(ALL_READMES, ALL_EXTS)),
54 key=lambda y:y[0][1] + y[1][1])]
55
56 50
57 51 class SummaryController(BaseRepoController):
58 52
59 53 def __before__(self):
60 54 super(SummaryController, self).__before__()
61 55
62 56 def __get_readme_data(self, db_repo):
63 57 repo_name = db_repo.repo_name
64 58 log.debug('Looking for README file')
59 default_renderer = c.visual.default_renderer
65 60
66 61 @cache_region('long_term')
67 62 def _generate_readme(cache_key):
68 63 readme_data = None
69 64 readme_file = None
70 65 try:
71 66 # gets the landing revision or tip if fails
72 67 commit = db_repo.get_landing_commit()
73 68 if isinstance(commit, EmptyCommit):
74 69 raise EmptyRepositoryError()
75 70 renderer = MarkupRenderer()
76 for f in README_FILES:
71 for f in renderer.pick_readme_order(default_renderer):
77 72 try:
78 73 node = commit.get_node(f)
79 74 except NodeDoesNotExistError:
80 75 continue
81 76
82 77 if not node.is_file():
83 78 continue
84 79
85 80 readme_file = f
86 81 log.debug('Found README file `%s` rendering...',
87 82 readme_file)
88 83 readme_data = renderer.render(node.content,
89 84 filename=f)
90 85 break
91 86 except CommitError:
92 87 log.exception("Problem getting commit")
93 88 pass
94 89 except EmptyRepositoryError:
95 90 pass
96 91 except Exception:
97 92 log.exception("General failure")
98 93
99 94 return readme_data, readme_file
100 95
101 96 invalidator_context = CacheKey.repo_context_cache(
102 97 _generate_readme, repo_name, CacheKey.CACHE_TYPE_README)
103 98
104 99 with invalidator_context as context:
105 100 context.invalidate()
106 101 computed = context.compute()
107 102
108 103 return computed
109 104
110 105
111 106 @LoginRequired()
112 107 @HasRepoPermissionAnyDecorator(
113 108 'repository.read', 'repository.write', 'repository.admin')
114 109 def index(self, repo_name):
115 110 username = ''
116 111 if c.rhodecode_user.username != User.DEFAULT_USER:
117 112 username = safe_str(c.rhodecode_user.username)
118 113
119 114 _def_clone_uri = _def_clone_uri_by_id = c.clone_uri_tmpl
120 115 if '{repo}' in _def_clone_uri:
121 116 _def_clone_uri_by_id = _def_clone_uri.replace(
122 117 '{repo}', '_{repoid}')
123 118 elif '{repoid}' in _def_clone_uri:
124 119 _def_clone_uri_by_id = _def_clone_uri.replace(
125 120 '_{repoid}', '{repo}')
126 121
127 122 c.clone_repo_url = c.rhodecode_db_repo.clone_url(
128 123 user=username, uri_tmpl=_def_clone_uri)
129 124 c.clone_repo_url_id = c.rhodecode_db_repo.clone_url(
130 125 user=username, uri_tmpl=_def_clone_uri_by_id)
131 126
132 127 c.show_stats = bool(c.rhodecode_db_repo.enable_statistics)
133 128
134 129 stats = self.sa.query(Statistics)\
135 130 .filter(Statistics.repository == c.rhodecode_db_repo)\
136 131 .scalar()
137 132
138 133 c.stats_percentage = 0
139 134
140 135 if stats and stats.languages:
141 136 c.no_data = False is c.rhodecode_db_repo.enable_statistics
142 137 lang_stats_d = json.loads(stats.languages)
143 138
144 139 # Sort first by decreasing count and second by the file extension,
145 140 # so we have a consistent output.
146 141 lang_stats_items = sorted(lang_stats_d.iteritems(),
147 142 key=lambda k: (-k[1], k[0]))[:10]
148 143 lang_stats = [(x, {"count": y,
149 144 "desc": LANGUAGES_EXTENSIONS_MAP.get(x)})
150 145 for x, y in lang_stats_items]
151 146
152 147 c.trending_languages = json.dumps(lang_stats)
153 148 else:
154 149 c.no_data = True
155 150 c.trending_languages = json.dumps({})
156 151
157 152 c.enable_downloads = c.rhodecode_db_repo.enable_downloads
158 153 c.repository_followers = self.scm_model.get_followers(
159 154 c.rhodecode_db_repo)
160 155 c.repository_forks = self.scm_model.get_forks(c.rhodecode_db_repo)
161 156 c.repository_is_user_following = self.scm_model.is_following_repo(
162 157 c.repo_name, c.rhodecode_user.user_id)
163 158
164 159 if c.repository_requirements_missing:
165 160 return render('summary/missing_requirements.html')
166 161
167 162 c.readme_data, c.readme_file = \
168 163 self.__get_readme_data(c.rhodecode_db_repo)
169 164
170 165 _load_changelog_summary()
171 166
172 167 if request.is_xhr:
173 168 return render('changelog/changelog_summary_data.html')
174 169
175 170 return render('summary/summary.html')
176 171
177 172 @LoginRequired()
178 173 @XHRRequired()
179 174 @HasRepoPermissionAnyDecorator(
180 175 'repository.read', 'repository.write', 'repository.admin')
181 176 @jsonify
182 177 def repo_stats(self, repo_name, commit_id):
183 178 _namespace = caches.get_repo_namespace_key(
184 179 caches.SUMMARY_STATS, repo_name)
185 180 show_stats = bool(c.rhodecode_db_repo.enable_statistics)
186 181 cache_manager = caches.get_cache_manager('repo_cache_long', _namespace)
187 182 _cache_key = caches.compute_key_from_params(
188 183 repo_name, commit_id, show_stats)
189 184
190 185 def compute_stats():
191 186 code_stats = {}
192 187 size = 0
193 188 try:
194 189 scm_instance = c.rhodecode_db_repo.scm_instance()
195 190 commit = scm_instance.get_commit(commit_id)
196 191
197 192 for node in commit.get_filenodes_generator():
198 193 size += node.size
199 194 if not show_stats:
200 195 continue
201 196 ext = lower(node.extension)
202 197 ext_info = LANGUAGES_EXTENSIONS_MAP.get(ext)
203 198 if ext_info:
204 199 if ext in code_stats:
205 200 code_stats[ext]['count'] += 1
206 201 else:
207 202 code_stats[ext] = {"count": 1, "desc": ext_info}
208 203 except EmptyRepositoryError:
209 204 pass
210 205 return {'size': h.format_byte_size_binary(size),
211 206 'code_stats': code_stats}
212 207
213 208 stats = cache_manager.get(_cache_key, createfunc=compute_stats)
214 209 return stats
215 210
216 211 def _switcher_reference_data(self, repo_name, references, is_svn):
217 212 """Prepare reference data for given `references`"""
218 213 items = []
219 214 for name, commit_id in references.items():
220 215 use_commit_id = '/' in name or is_svn
221 216 items.append({
222 217 'name': name,
223 218 'commit_id': commit_id,
224 219 'files_url': h.url(
225 220 'files_home',
226 221 repo_name=repo_name,
227 222 f_path=name if is_svn else '',
228 223 revision=commit_id if use_commit_id else name,
229 224 at=name)
230 225 })
231 226 return items
232 227
233 228 @LoginRequired()
234 229 @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
235 230 'repository.admin')
236 231 @jsonify
237 232 def repo_refs_data(self, repo_name):
238 233 repo = c.rhodecode_repo
239 234 refs_to_create = [
240 235 (_("Branch"), repo.branches, 'branch'),
241 236 (_("Tag"), repo.tags, 'tag'),
242 237 (_("Bookmark"), repo.bookmarks, 'book'),
243 238 ]
244 239 res = self._create_reference_data(repo, refs_to_create)
245 240 data = {
246 241 'more': False,
247 242 'results': res
248 243 }
249 244 return data
250 245
251 246 @jsonify
252 247 def repo_refs_changelog_data(self, repo_name):
253 248 repo = c.rhodecode_repo
254 249
255 250 refs_to_create = [
256 251 (_("Branches"), repo.branches, 'branch'),
257 252 (_("Closed branches"), repo.branches_closed, 'branch_closed'),
258 253 # TODO: enable when vcs can handle bookmarks filters
259 254 # (_("Bookmarks"), repo.bookmarks, "book"),
260 255 ]
261 256 res = self._create_reference_data(repo, refs_to_create)
262 257 data = {
263 258 'more': False,
264 259 'results': res
265 260 }
266 261 return data
267 262
268 263 def _create_reference_data(self, repo, refs_to_create):
269 264 format_ref_id = utils.get_format_ref_id(repo)
270 265
271 266 result = []
272 267 for title, refs, ref_type in refs_to_create:
273 268 if refs:
274 269 result.append({
275 270 'text': title,
276 271 'children': self._create_reference_items(
277 272 repo, refs, ref_type, format_ref_id),
278 273 })
279 274 return result
280 275
281 276 def _create_reference_items(self, repo, refs, ref_type, format_ref_id):
282 277 result = []
283 278 is_svn = h.is_svn(repo)
284 279 for name, raw_id in refs.iteritems():
285 280 result.append({
286 281 'text': name,
287 282 'id': format_ref_id(name, raw_id),
288 283 'raw_id': raw_id,
289 284 'type': ref_type,
290 285 'files_url': self._create_files_url(repo, name, raw_id, is_svn)
291 286 })
292 287 return result
293 288
294 289 def _create_files_url(self, repo, name, raw_id, is_svn):
295 290 use_commit_id = '/' in name or is_svn
296 291 return h.url(
297 292 'files_home',
298 293 repo_name=repo.name,
299 294 f_path=name if is_svn else '',
300 295 revision=raw_id if use_commit_id else name,
301 296 at=name)
@@ -1,1898 +1,1888 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2010-2016 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21 """
22 22 Helper functions
23 23
24 24 Consists of functions to typically be used within templates, but also
25 25 available to Controllers. This module is available to both as 'h'.
26 26 """
27 27
28 28 import random
29 29 import hashlib
30 30 import StringIO
31 31 import urllib
32 32 import math
33 33 import logging
34 34 import re
35 35 import urlparse
36 36 import time
37 37 import string
38 38 import hashlib
39 39 import pygments
40 40
41 41 from datetime import datetime
42 42 from functools import partial
43 43 from pygments.formatters.html import HtmlFormatter
44 44 from pygments import highlight as code_highlight
45 45 from pygments.lexers import (
46 46 get_lexer_by_name, get_lexer_for_filename, get_lexer_for_mimetype)
47 47 from pylons import url
48 48 from pylons.i18n.translation import _, ungettext
49 49 from pyramid.threadlocal import get_current_request
50 50
51 51 from webhelpers.html import literal, HTML, escape
52 52 from webhelpers.html.tools import *
53 53 from webhelpers.html.builder import make_tag
54 54 from webhelpers.html.tags import auto_discovery_link, checkbox, css_classes, \
55 55 end_form, file, form as wh_form, hidden, image, javascript_link, link_to, \
56 56 link_to_if, link_to_unless, ol, required_legend, select, stylesheet_link, \
57 57 submit, text, password, textarea, title, ul, xml_declaration, radio
58 58 from webhelpers.html.tools import auto_link, button_to, highlight, \
59 59 js_obfuscate, mail_to, strip_links, strip_tags, tag_re
60 60 from webhelpers.pylonslib import Flash as _Flash
61 61 from webhelpers.text import chop_at, collapse, convert_accented_entities, \
62 62 convert_misc_entities, lchop, plural, rchop, remove_formatting, \
63 63 replace_whitespace, urlify, truncate, wrap_paragraphs
64 64 from webhelpers.date import time_ago_in_words
65 65 from webhelpers.paginate import Page as _Page
66 66 from webhelpers.html.tags import _set_input_attrs, _set_id_attr, \
67 67 convert_boolean_attrs, NotGiven, _make_safe_id_component
68 68 from webhelpers2.number import format_byte_size
69 69
70 70 from rhodecode.lib.annotate import annotate_highlight
71 71 from rhodecode.lib.action_parser import action_parser
72 72 from rhodecode.lib.ext_json import json
73 73 from rhodecode.lib.utils import repo_name_slug, get_custom_lexer
74 74 from rhodecode.lib.utils2 import str2bool, safe_unicode, safe_str, \
75 75 get_commit_safe, datetime_to_time, time_to_datetime, time_to_utcdatetime, \
76 76 AttributeDict, safe_int, md5, md5_safe
77 77 from rhodecode.lib.markup_renderer import MarkupRenderer
78 78 from rhodecode.lib.vcs.exceptions import CommitDoesNotExistError
79 79 from rhodecode.lib.vcs.backends.base import BaseChangeset, EmptyCommit
80 80 from rhodecode.config.conf import DATE_FORMAT, DATETIME_FORMAT
81 81 from rhodecode.model.changeset_status import ChangesetStatusModel
82 82 from rhodecode.model.db import Permission, User, Repository
83 83 from rhodecode.model.repo_group import RepoGroupModel
84 84 from rhodecode.model.settings import IssueTrackerSettingsModel
85 85
86 86 log = logging.getLogger(__name__)
87 87
88 88 DEFAULT_USER = User.DEFAULT_USER
89 89 DEFAULT_USER_EMAIL = User.DEFAULT_USER_EMAIL
90 90
91 91
92 92 def html_escape(text, html_escape_table=None):
93 93 """Produce entities within text."""
94 94 if not html_escape_table:
95 95 html_escape_table = {
96 96 "&": "&amp;",
97 97 '"': "&quot;",
98 98 "'": "&apos;",
99 99 ">": "&gt;",
100 100 "<": "&lt;",
101 101 }
102 102 return "".join(html_escape_table.get(c, c) for c in text)
103 103
104 104
105 105 def chop_at_smart(s, sub, inclusive=False, suffix_if_chopped=None):
106 106 """
107 107 Truncate string ``s`` at the first occurrence of ``sub``.
108 108
109 109 If ``inclusive`` is true, truncate just after ``sub`` rather than at it.
110 110 """
111 111 suffix_if_chopped = suffix_if_chopped or ''
112 112 pos = s.find(sub)
113 113 if pos == -1:
114 114 return s
115 115
116 116 if inclusive:
117 117 pos += len(sub)
118 118
119 119 chopped = s[:pos]
120 120 left = s[pos:].strip()
121 121
122 122 if left and suffix_if_chopped:
123 123 chopped += suffix_if_chopped
124 124
125 125 return chopped
126 126
127 127
128 128 def shorter(text, size=20):
129 129 postfix = '...'
130 130 if len(text) > size:
131 131 return text[:size - len(postfix)] + postfix
132 132 return text
133 133
134 134
135 135 def _reset(name, value=None, id=NotGiven, type="reset", **attrs):
136 136 """
137 137 Reset button
138 138 """
139 139 _set_input_attrs(attrs, type, name, value)
140 140 _set_id_attr(attrs, id, name)
141 141 convert_boolean_attrs(attrs, ["disabled"])
142 142 return HTML.input(**attrs)
143 143
144 144 reset = _reset
145 145 safeid = _make_safe_id_component
146 146
147 147
148 148 def branding(name, length=40):
149 149 return truncate(name, length, indicator="")
150 150
151 151
152 152 def FID(raw_id, path):
153 153 """
154 154 Creates a unique ID for filenode based on it's hash of path and commit
155 155 it's safe to use in urls
156 156
157 157 :param raw_id:
158 158 :param path:
159 159 """
160 160
161 161 return 'c-%s-%s' % (short_id(raw_id), md5_safe(path)[:12])
162 162
163 163
164 164 class _GetError(object):
165 165 """Get error from form_errors, and represent it as span wrapped error
166 166 message
167 167
168 168 :param field_name: field to fetch errors for
169 169 :param form_errors: form errors dict
170 170 """
171 171
172 172 def __call__(self, field_name, form_errors):
173 173 tmpl = """<span class="error_msg">%s</span>"""
174 174 if form_errors and field_name in form_errors:
175 175 return literal(tmpl % form_errors.get(field_name))
176 176
177 177 get_error = _GetError()
178 178
179 179
180 180 class _ToolTip(object):
181 181
182 182 def __call__(self, tooltip_title, trim_at=50):
183 183 """
184 184 Special function just to wrap our text into nice formatted
185 185 autowrapped text
186 186
187 187 :param tooltip_title:
188 188 """
189 189 tooltip_title = escape(tooltip_title)
190 190 tooltip_title = tooltip_title.replace('<', '&lt;').replace('>', '&gt;')
191 191 return tooltip_title
192 192 tooltip = _ToolTip()
193 193
194 194
195 195 def files_breadcrumbs(repo_name, commit_id, file_path):
196 196 if isinstance(file_path, str):
197 197 file_path = safe_unicode(file_path)
198 198
199 199 # TODO: johbo: Is this always a url like path, or is this operating
200 200 # system dependent?
201 201 path_segments = file_path.split('/')
202 202
203 203 repo_name_html = escape(repo_name)
204 204 if len(path_segments) == 1 and path_segments[0] == '':
205 205 url_segments = [repo_name_html]
206 206 else:
207 207 url_segments = [
208 208 link_to(
209 209 repo_name_html,
210 210 url('files_home',
211 211 repo_name=repo_name,
212 212 revision=commit_id,
213 213 f_path=''),
214 214 class_='pjax-link')]
215 215
216 216 last_cnt = len(path_segments) - 1
217 217 for cnt, segment in enumerate(path_segments):
218 218 if not segment:
219 219 continue
220 220 segment_html = escape(segment)
221 221
222 222 if cnt != last_cnt:
223 223 url_segments.append(
224 224 link_to(
225 225 segment_html,
226 226 url('files_home',
227 227 repo_name=repo_name,
228 228 revision=commit_id,
229 229 f_path='/'.join(path_segments[:cnt + 1])),
230 230 class_='pjax-link'))
231 231 else:
232 232 url_segments.append(segment_html)
233 233
234 234 return literal('/'.join(url_segments))
235 235
236 236
237 237 class CodeHtmlFormatter(HtmlFormatter):
238 238 """
239 239 My code Html Formatter for source codes
240 240 """
241 241
242 242 def wrap(self, source, outfile):
243 243 return self._wrap_div(self._wrap_pre(self._wrap_code(source)))
244 244
245 245 def _wrap_code(self, source):
246 246 for cnt, it in enumerate(source):
247 247 i, t = it
248 248 t = '<div id="L%s">%s</div>' % (cnt + 1, t)
249 249 yield i, t
250 250
251 251 def _wrap_tablelinenos(self, inner):
252 252 dummyoutfile = StringIO.StringIO()
253 253 lncount = 0
254 254 for t, line in inner:
255 255 if t:
256 256 lncount += 1
257 257 dummyoutfile.write(line)
258 258
259 259 fl = self.linenostart
260 260 mw = len(str(lncount + fl - 1))
261 261 sp = self.linenospecial
262 262 st = self.linenostep
263 263 la = self.lineanchors
264 264 aln = self.anchorlinenos
265 265 nocls = self.noclasses
266 266 if sp:
267 267 lines = []
268 268
269 269 for i in range(fl, fl + lncount):
270 270 if i % st == 0:
271 271 if i % sp == 0:
272 272 if aln:
273 273 lines.append('<a href="#%s%d" class="special">%*d</a>' %
274 274 (la, i, mw, i))
275 275 else:
276 276 lines.append('<span class="special">%*d</span>' % (mw, i))
277 277 else:
278 278 if aln:
279 279 lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
280 280 else:
281 281 lines.append('%*d' % (mw, i))
282 282 else:
283 283 lines.append('')
284 284 ls = '\n'.join(lines)
285 285 else:
286 286 lines = []
287 287 for i in range(fl, fl + lncount):
288 288 if i % st == 0:
289 289 if aln:
290 290 lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
291 291 else:
292 292 lines.append('%*d' % (mw, i))
293 293 else:
294 294 lines.append('')
295 295 ls = '\n'.join(lines)
296 296
297 297 # in case you wonder about the seemingly redundant <div> here: since the
298 298 # content in the other cell also is wrapped in a div, some browsers in
299 299 # some configurations seem to mess up the formatting...
300 300 if nocls:
301 301 yield 0, ('<table class="%stable">' % self.cssclass +
302 302 '<tr><td><div class="linenodiv" '
303 303 'style="background-color: #f0f0f0; padding-right: 10px">'
304 304 '<pre style="line-height: 125%">' +
305 305 ls + '</pre></div></td><td id="hlcode" class="code">')
306 306 else:
307 307 yield 0, ('<table class="%stable">' % self.cssclass +
308 308 '<tr><td class="linenos"><div class="linenodiv"><pre>' +
309 309 ls + '</pre></div></td><td id="hlcode" class="code">')
310 310 yield 0, dummyoutfile.getvalue()
311 311 yield 0, '</td></tr></table>'
312 312
313 313
314 314 class SearchContentCodeHtmlFormatter(CodeHtmlFormatter):
315 315 def __init__(self, **kw):
316 316 # only show these line numbers if set
317 317 self.only_lines = kw.pop('only_line_numbers', [])
318 318 self.query_terms = kw.pop('query_terms', [])
319 319 self.max_lines = kw.pop('max_lines', 5)
320 320 self.line_context = kw.pop('line_context', 3)
321 321 self.url = kw.pop('url', None)
322 322
323 323 super(CodeHtmlFormatter, self).__init__(**kw)
324 324
325 325 def _wrap_code(self, source):
326 326 for cnt, it in enumerate(source):
327 327 i, t = it
328 328 t = '<pre>%s</pre>' % t
329 329 yield i, t
330 330
331 331 def _wrap_tablelinenos(self, inner):
332 332 yield 0, '<table class="code-highlight %stable">' % self.cssclass
333 333
334 334 last_shown_line_number = 0
335 335 current_line_number = 1
336 336
337 337 for t, line in inner:
338 338 if not t:
339 339 yield t, line
340 340 continue
341 341
342 342 if current_line_number in self.only_lines:
343 343 if last_shown_line_number + 1 != current_line_number:
344 344 yield 0, '<tr>'
345 345 yield 0, '<td class="line">...</td>'
346 346 yield 0, '<td id="hlcode" class="code"></td>'
347 347 yield 0, '</tr>'
348 348
349 349 yield 0, '<tr>'
350 350 if self.url:
351 351 yield 0, '<td class="line"><a href="%s#L%i">%i</a></td>' % (
352 352 self.url, current_line_number, current_line_number)
353 353 else:
354 354 yield 0, '<td class="line"><a href="">%i</a></td>' % (
355 355 current_line_number)
356 356 yield 0, '<td id="hlcode" class="code">' + line + '</td>'
357 357 yield 0, '</tr>'
358 358
359 359 last_shown_line_number = current_line_number
360 360
361 361 current_line_number += 1
362 362
363 363
364 364 yield 0, '</table>'
365 365
366 366
367 367 def extract_phrases(text_query):
368 368 """
369 369 Extracts phrases from search term string making sure phrases
370 370 contained in double quotes are kept together - and discarding empty values
371 371 or fully whitespace values eg.
372 372
373 373 'some text "a phrase" more' => ['some', 'text', 'a phrase', 'more']
374 374
375 375 """
376 376
377 377 in_phrase = False
378 378 buf = ''
379 379 phrases = []
380 380 for char in text_query:
381 381 if in_phrase:
382 382 if char == '"': # end phrase
383 383 phrases.append(buf)
384 384 buf = ''
385 385 in_phrase = False
386 386 continue
387 387 else:
388 388 buf += char
389 389 continue
390 390 else:
391 391 if char == '"': # start phrase
392 392 in_phrase = True
393 393 phrases.append(buf)
394 394 buf = ''
395 395 continue
396 396 elif char == ' ':
397 397 phrases.append(buf)
398 398 buf = ''
399 399 continue
400 400 else:
401 401 buf += char
402 402
403 403 phrases.append(buf)
404 404 phrases = [phrase.strip() for phrase in phrases if phrase.strip()]
405 405 return phrases
406 406
407 407
408 408 def get_matching_offsets(text, phrases):
409 409 """
410 410 Returns a list of string offsets in `text` that the list of `terms` match
411 411
412 412 >>> get_matching_offsets('some text here', ['some', 'here'])
413 413 [(0, 4), (10, 14)]
414 414
415 415 """
416 416 offsets = []
417 417 for phrase in phrases:
418 418 for match in re.finditer(phrase, text):
419 419 offsets.append((match.start(), match.end()))
420 420
421 421 return offsets
422 422
423 423
424 424 def normalize_text_for_matching(x):
425 425 """
426 426 Replaces all non alnum characters to spaces and lower cases the string,
427 427 useful for comparing two text strings without punctuation
428 428 """
429 429 return re.sub(r'[^\w]', ' ', x.lower())
430 430
431 431
432 432 def get_matching_line_offsets(lines, terms):
433 433 """ Return a set of `lines` indices (starting from 1) matching a
434 434 text search query, along with `context` lines above/below matching lines
435 435
436 436 :param lines: list of strings representing lines
437 437 :param terms: search term string to match in lines eg. 'some text'
438 438 :param context: number of lines above/below a matching line to add to result
439 439 :param max_lines: cut off for lines of interest
440 440 eg.
441 441
442 442 text = '''
443 443 words words words
444 444 words words words
445 445 some text some
446 446 words words words
447 447 words words words
448 448 text here what
449 449 '''
450 450 get_matching_line_offsets(text, 'text', context=1)
451 451 {3: [(5, 9)], 6: [(0, 4)]]
452 452
453 453 """
454 454 matching_lines = {}
455 455 phrases = [normalize_text_for_matching(phrase)
456 456 for phrase in extract_phrases(terms)]
457 457
458 458 for line_index, line in enumerate(lines, start=1):
459 459 match_offsets = get_matching_offsets(
460 460 normalize_text_for_matching(line), phrases)
461 461 if match_offsets:
462 462 matching_lines[line_index] = match_offsets
463 463
464 464 return matching_lines
465 465
466 466
467 467 def get_lexer_safe(mimetype=None, filepath=None):
468 468 """
469 469 Tries to return a relevant pygments lexer using mimetype/filepath name,
470 470 defaulting to plain text if none could be found
471 471 """
472 472 lexer = None
473 473 try:
474 474 if mimetype:
475 475 lexer = get_lexer_for_mimetype(mimetype)
476 476 if not lexer:
477 477 lexer = get_lexer_for_filename(filepath)
478 478 except pygments.util.ClassNotFound:
479 479 pass
480 480
481 481 if not lexer:
482 482 lexer = get_lexer_by_name('text')
483 483
484 484 return lexer
485 485
486 486
487 487 def pygmentize(filenode, **kwargs):
488 488 """
489 489 pygmentize function using pygments
490 490
491 491 :param filenode:
492 492 """
493 493 lexer = get_custom_lexer(filenode.extension) or filenode.lexer
494 494 return literal(code_highlight(filenode.content, lexer,
495 495 CodeHtmlFormatter(**kwargs)))
496 496
497 497
498 498 def pygmentize_annotation(repo_name, filenode, **kwargs):
499 499 """
500 500 pygmentize function for annotation
501 501
502 502 :param filenode:
503 503 """
504 504
505 505 color_dict = {}
506 506
507 507 def gen_color(n=10000):
508 508 """generator for getting n of evenly distributed colors using
509 509 hsv color and golden ratio. It always return same order of colors
510 510
511 511 :returns: RGB tuple
512 512 """
513 513
514 514 def hsv_to_rgb(h, s, v):
515 515 if s == 0.0:
516 516 return v, v, v
517 517 i = int(h * 6.0) # XXX assume int() truncates!
518 518 f = (h * 6.0) - i
519 519 p = v * (1.0 - s)
520 520 q = v * (1.0 - s * f)
521 521 t = v * (1.0 - s * (1.0 - f))
522 522 i = i % 6
523 523 if i == 0:
524 524 return v, t, p
525 525 if i == 1:
526 526 return q, v, p
527 527 if i == 2:
528 528 return p, v, t
529 529 if i == 3:
530 530 return p, q, v
531 531 if i == 4:
532 532 return t, p, v
533 533 if i == 5:
534 534 return v, p, q
535 535
536 536 golden_ratio = 0.618033988749895
537 537 h = 0.22717784590367374
538 538
539 539 for _ in xrange(n):
540 540 h += golden_ratio
541 541 h %= 1
542 542 HSV_tuple = [h, 0.95, 0.95]
543 543 RGB_tuple = hsv_to_rgb(*HSV_tuple)
544 544 yield map(lambda x: str(int(x * 256)), RGB_tuple)
545 545
546 546 cgenerator = gen_color()
547 547
548 548 def get_color_string(commit_id):
549 549 if commit_id in color_dict:
550 550 col = color_dict[commit_id]
551 551 else:
552 552 col = color_dict[commit_id] = cgenerator.next()
553 553 return "color: rgb(%s)! important;" % (', '.join(col))
554 554
555 555 def url_func(repo_name):
556 556
557 557 def _url_func(commit):
558 558 author = commit.author
559 559 date = commit.date
560 560 message = tooltip(commit.message)
561 561
562 562 tooltip_html = ("<div style='font-size:0.8em'><b>Author:</b>"
563 563 " %s<br/><b>Date:</b> %s</b><br/><b>Message:"
564 564 "</b> %s<br/></div>")
565 565
566 566 tooltip_html = tooltip_html % (author, date, message)
567 567 lnk_format = '%5s:%s' % ('r%s' % commit.idx, commit.short_id)
568 568 uri = link_to(
569 569 lnk_format,
570 570 url('changeset_home', repo_name=repo_name,
571 571 revision=commit.raw_id),
572 572 style=get_color_string(commit.raw_id),
573 573 class_='tooltip',
574 574 title=tooltip_html
575 575 )
576 576
577 577 uri += '\n'
578 578 return uri
579 579 return _url_func
580 580
581 581 return literal(annotate_highlight(filenode, url_func(repo_name), **kwargs))
582 582
583 583
def is_following_repo(repo_name, user_id):
    """Check via the SCM model whether ``user_id`` follows ``repo_name``."""
    from rhodecode.model.scm import ScmModel
    scm_model = ScmModel()
    return scm_model.is_following_repo(repo_name, user_id)
587 587
588 588
class _Message(object):
    """A single flash message as produced by ``Flash.pop_messages()``.

    Casting an instance to a string yields the message text. Each
    instance carries two attributes:

    * ``message``: the text of the message.
    * ``category``: the category the message was filed under.
    """

    def __init__(self, category, message):
        self.category = category
        self.message = message

    def __html__(self):
        # escaped representation for direct embedding in templates
        return escape(safe_unicode(self.message))

    def __str__(self):
        return self.message

    __unicode__ = __str__
610 610
611 611
class Flash(_Flash):

    def pop_messages(self):
        """Return every accumulated message and drop them from the session.

        The return value is a list of ``_Message`` objects, collected in
        this order: legacy pylons tuples, then the pyramid per-category
        queues, then the pyramid default queue (mapped to 'notice').
        """
        from pylons import session

        # Legacy pylons flash entries are stored under our session key as
        # (category, message) tuples.
        messages = [
            _Message(cat, msg)
            for cat, msg in session.pop(self.session_key, [])]

        # Pyramid keeps one queue of plain strings per category.
        for cat in self.categories:
            messages.extend(
                _Message(cat, msg) for msg in session.pop_flash(queue=cat))
        # Anything left in the default queue is treated as a 'notice'.
        messages.extend(
            _Message('notice', msg) for msg in session.pop_flash())

        session.save()
        return messages


flash = Flash()
641 641
#==============================================================================
# SCM FILTERS available via h.
#==============================================================================
from rhodecode.lib.vcs.utils import author_name, author_email
from rhodecode.lib.utils2 import credentials_filter, age as _age
from rhodecode.model.db import User, ChangesetStatus

# plain aliases exposed on the template helper namespace `h.`
age = _age
email = author_email


# PEP 8 (E731): these were lambdas assigned to names; plain functions give
# proper names in tracebacks and allow docstrings, with identical call sites.
def capitalize(x):
    """Return ``x`` with its first character upper-cased."""
    return x.capitalize()


def short_id(x):
    """Shorten a commit/changeset id string to its first 12 characters."""
    return x[:12]


def hide_credentials(x):
    """Filter credentials out of ``x`` via ``credentials_filter`` and
    join the result back into a single string."""
    return ''.join(credentials_filter(x))
654 654
655 655
def age_component(datetime_iso, value=None, time_is_local=False):
    """Render a date as a ``<time class="timeago">`` HTML element.

    :param datetime_iso: datetime instance or ISO date string to display
    :param value: optional pre-formatted title text; defaults to
        ``format_date(datetime_iso)``
    :param time_is_local: if True, annotate a naive datetime with the
        server's local UTC offset instead of '+00:00'
    """
    title = value or format_date(datetime_iso)

    # BUGFIX: `tzinfo` used to be assigned only inside the isinstance()
    # branch below, raising UnboundLocalError for string input or for
    # timezone-aware datetimes at the .format() call. Such values are
    # assumed to already carry their own offset, so no suffix is added.
    tzinfo = ''

    # detect if we have a timezone info, otherwise, add it
    if isinstance(datetime_iso, datetime) and not datetime_iso.tzinfo:
        tzinfo = '+00:00'

        if time_is_local:
            tzinfo = time.strftime(
                "+%H:%M",
                time.gmtime(
                    (datetime.now() - datetime.utcnow()).seconds + 1
                )
            )

    return literal(
        '<time class="timeago tooltip" '
        'title="{1}" datetime="{0}{2}">{1}</time>'.format(
            datetime_iso, title, tzinfo))
674 674
675 675
def _shorten_commit_id(commit_id):
    """Trim ``commit_id`` to the configured sha length (default 12)."""
    from rhodecode import CONFIG
    sha_length = safe_int(CONFIG.get('rhodecode_show_sha_length', 12))
    return commit_id[:sha_length]
680 680
681 681
def show_id(commit):
    """
    Configurable function that shows ID
    by default it's r123:fffeeefffeee

    :param commit: commit instance
    """
    from rhodecode import CONFIG

    short_sha = _shorten_commit_id(commit.raw_id)
    if str2bool(CONFIG.get('rhodecode_show_revision_number', True)):
        return 'r%s:%s' % (commit.idx, short_sha)
    return '%s' % (short_sha, )
697 697
698 698
def format_date(date):
    """
    Render ``date`` with the standardized RhodeCode date format.

    :param date: date/datetime object; falsy values yield an empty string
    :return: formatted unicode date
    """
    if not date:
        return u""
    return safe_unicode(date.strftime("%a, %d %b %Y %H:%M:%S"))
712 712
713 713
class _RepoChecker(object):
    """Callable predicate testing whether a repository (scm instance, db
    object, or plain alias string) matches the configured backend alias."""

    def __init__(self, backend_alias):
        self._backend_alias = backend_alias

    def __call__(self, repository):
        # prefer an explicit `alias` attribute, then the db `repo_type`
        # column; otherwise assume we were handed the alias string itself
        for attr in ('alias', 'repo_type'):
            if hasattr(repository, attr):
                return getattr(repository, attr) == self._backend_alias
        return repository == self._backend_alias


is_git = _RepoChecker('git')
is_hg = _RepoChecker('hg')
is_svn = _RepoChecker('svn')
731 731
732 732
def get_repo_type_by_name(repo_name):
    """Look up the backend type (hg/git/svn) of the named repository."""
    return Repository.get_by_repo_name(repo_name).repo_type
736 736
737 737
def is_svn_without_proxy(repository):
    """True when ``repository`` is subversion and the http proxy for
    subversion requests is disabled in the configuration."""
    from rhodecode import CONFIG
    if not is_svn(repository):
        return False
    return not CONFIG.get('rhodecode_proxy_subversion_http_requests', False)
744 744
745 745
def discover_user(author):
    """Map an author string (typically ``FirstName LastName <email@addr>``)
    or an existing ``User`` instance onto a RhodeCode ``User``.

    Returns ``None`` when no matching user can be found.
    """
    # already a User instance - nothing to discover
    if isinstance(author, User):
        return author

    # try the email part first, matched against registered users
    _email = author_email(author)
    if _email != '':
        user = User.get_by_email(_email, case_insensitive=True, cache=True)
        if user is not None:
            return user

    # fall back to treating the name part as a username
    user = User.get_by_username(
        author_name(author), case_insensitive=True, cache=True)
    if user is not None:
        return user

    return None
770 770
771 771
def email_or_none(author):
    """Resolve ``author`` to an email address.

    Prefers the MAIN email of a matching RhodeCode user, falls back to
    the raw email embedded in the author string, then to a user matched
    by username; ``None`` when nothing usable exists.
    """
    _email = author_email(author)
    if _email != '':
        # known user? prefer their MAIN registered email over the raw one
        user = User.get_by_email(_email, case_insensitive=True, cache=True)
        if user is not None:
            return user.email
        return _email

    # no email in the string - maybe the name part maps to a username
    user = User.get_by_username(
        author_name(author), case_insensitive=True, cache=True)
    if user is not None:
        return user.email

    # no valid email, not a valid user in the system, none!
    return None
791 791
792 792
def link_to_user(author, length=0, **kwargs):
    """Render ``author`` as a profile link when they are a known user,
    otherwise as escaped plain text; ``length`` truncates the label."""
    user = discover_user(author)
    # user can be None; when found we re-use it in person() below,
    # saving one intensive query
    if user:
        author = user

    display_person = person(author, 'username_or_name_or_email')
    if length:
        display_person = shorter(display_person, length)

    if not user:
        return escape(display_person)
    return link_to(
        escape(display_person),
        url('user_profile', username=user.username),
        **kwargs)
811 811
812 812
def person(author, show_attr="username_and_name"):
    """Resolve ``author`` to a display string: the requested attribute of
    a matching user, else the name part of the author string, else its
    email part."""
    user = discover_user(author)
    if user:
        return getattr(user, show_attr)
    _name = author_name(author)
    _mail = email(author)
    return _name or _mail
821 821
822 822
def person_by_id(id_, show_attr="username_and_name"):
    """Resolve a user id to the requested user attribute.

    Non-numeric ids, and numeric ids without a matching user, are
    returned as-is (numeric strings come back coerced to int).
    """
    if str(id_).isdigit() or isinstance(id_, int):
        id_ = int(id_)
        user = User.get(id_)
        if user is not None:
            return getattr(user, show_attr)
    return id_
834 834
835 835
def gravatar_with_user(author, show_disabled=False):
    """Render the `gravatar_with_user` template snippet for ``author``."""
    from rhodecode.lib.utils import PartialRenderer
    renderer = PartialRenderer('base/base.html')
    return renderer('gravatar_with_user', author, show_disabled=show_disabled)
840 840
841 841
def desc_stylize(value):
    """
    converts tags from value into html equivalent

    :param value: raw description text; falsy input yields ``''``
    """
    if not value:
        return ''

    value = re.sub(r'\[see\ \=\>\ *([a-zA-Z0-9\/\=\?\&\ \:\/\.\-]*)\]',
                   '<div class="metatag" tag="see">see =&gt; \\1 </div>',
                   value)
    # BUGFIX: the replacement previously contained 'http:\/\/...' - in an
    # re.sub replacement template '\/' is not an escape, so the backslashes
    # ended up literally in the rendered href. Use a plain URL.
    value = re.sub(r'\[license\ \=\>\ *([a-zA-Z0-9\/\=\?\&\ \:\/\.\-]*)\]',
                   '<div class="metatag" tag="license"><a href="http://www.opensource.org/licenses/\\1">\\1</a></div>',
                   value)
    value = re.sub(r'\[(requires|recommends|conflicts|base)\ \=\>\ *([a-zA-Z0-9\-\/]*)\]',
                   '<div class="metatag" tag="\\1">\\1 =&gt; <a href="/\\2">\\2</a></div>',
                   value)
    value = re.sub(r'\[(lang|language)\ \=\>\ *([a-zA-Z\-\/\#\+]*)\]',
                   '<div class="metatag" tag="lang">\\2</div>', value)
    # bare single-word tags, e.g. [stable]
    value = re.sub(r'\[([a-z]+)\]',
                   '<div class="metatag" tag="\\1">\\1</div>', value)

    return value
863 863
864 864
def escaped_stylize(value):
    """
    converts tags from value into html equivalent, but escaping its value first
    """
    if not value:
        return ''

    # Using default webhelper escape method, but has to force it as a
    # plain unicode instead of a markup tag to be used in regex expressions
    value = unicode(escape(safe_unicode(value)))

    value = re.sub(r'\[see\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]',
                   '<div class="metatag" tag="see">see =&gt; \\1 </div>',
                   value)
    # BUGFIX: the replacement previously contained 'http:\/\/...' - in an
    # re.sub replacement template '\/' is not an escape, so the backslashes
    # ended up literally in the rendered href. Use a plain URL.
    value = re.sub(r'\[license\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]',
                   '<div class="metatag" tag="license"><a href="http://www.opensource.org/licenses/\\1">\\1</a></div>',
                   value)
    value = re.sub(r'\[(requires|recommends|conflicts|base)\ \=\&gt;\ *([a-zA-Z0-9\-\/]*)\]',
                   '<div class="metatag" tag="\\1">\\1 =&gt; <a href="/\\2">\\2</a></div>',
                   value)
    value = re.sub(r'\[(lang|language)\ \=\&gt;\ *([a-zA-Z\-\/\#\+]*)\]',
                   '<div class="metatag" tag="lang">\\2</div>', value)
    # bare single-word tags, e.g. [stable]
    value = re.sub(r'\[([a-z]+)\]',
                   '<div class="metatag" tag="\\1">\\1</div>', value)

    return value
888 888
889 889
def bool2icon(value):
    """
    Render the truthiness of ``value`` as an ``<i>`` icon element.

    :param value: any value; evaluated with standard bool semantics
    :return: ``<i>`` tag carrying the icon-true / icon-false css class
    """
    icon_class = "icon-true" if value else "icon-false"
    return HTML.tag('i', class_=icon_class)
902 902
903 903
904 904 #==============================================================================
905 905 # PERMS
906 906 #==============================================================================
907 907 from rhodecode.lib.auth import HasPermissionAny, HasPermissionAll, \
908 908 HasRepoPermissionAny, HasRepoPermissionAll, HasRepoGroupPermissionAll, \
909 909 HasRepoGroupPermissionAny, HasRepoPermissionAnyApi, get_csrf_token
910 910
911 911
912 912 #==============================================================================
913 913 # GRAVATAR URL
914 914 #==============================================================================
class InitialsGravatar(object):
    """
    Builds an inline SVG avatar for a user: a two-letter initials image on
    a background color derived deterministically from the email address
    (so a given user always gets the same color), or a generic
    "default user" silhouette for the anonymous/default account.
    """

    def __init__(self, email_address, first_name, last_name, size=30,
                 background=None, text_color='#fff'):
        # pixel size of the (square) generated SVG
        self.size = size
        self.first_name = first_name
        self.last_name = last_name
        self.email_address = email_address
        # no explicit background -> derive a stable one from the email
        self.background = background or self.str2color(email_address)
        self.text_color = text_color

    def get_color_bank(self):
        """
        returns a predefined list of colors that gravatars can use.
        Those are randomized distinct colors that guarantee readability and
        uniqueness.

        generated with: http://phrogz.net/css/distinct-colors.html
        """
        return [
            '#bf3030', '#a67f53', '#00ff00', '#5989b3', '#392040', '#d90000',
            '#402910', '#204020', '#79baf2', '#a700b3', '#bf6060', '#7f5320',
            '#008000', '#003059', '#ee00ff', '#ff0000', '#8c4b00', '#007300',
            '#005fb3', '#de73e6', '#ff4040', '#ffaa00', '#3df255', '#203140',
            '#47004d', '#591616', '#664400', '#59b365', '#0d2133', '#83008c',
            '#592d2d', '#bf9f60', '#73e682', '#1d3f73', '#73006b', '#402020',
            '#b2862d', '#397341', '#597db3', '#e600d6', '#a60000', '#736039',
            '#00b318', '#79aaf2', '#330d30', '#ff8080', '#403010', '#16591f',
            '#002459', '#8c4688', '#e50000', '#ffbf40', '#00732e', '#102340',
            '#bf60ac', '#8c4646', '#cc8800', '#00a642', '#1d3473', '#b32d98',
            '#660e00', '#ffd580', '#80ffb2', '#7391e6', '#733967', '#d97b6c',
            '#8c5e00', '#59b389', '#3967e6', '#590047', '#73281d', '#665200',
            '#00e67a', '#2d50b3', '#8c2377', '#734139', '#b2982d', '#16593a',
            '#001859', '#ff00aa', '#a65e53', '#ffcc00', '#0d3321', '#2d3959',
            '#731d56', '#401610', '#4c3d00', '#468c6c', '#002ca6', '#d936a3',
            '#d94c36', '#403920', '#36d9a3', '#0d1733', '#592d4a', '#993626',
            '#cca300', '#00734d', '#46598c', '#8c005e', '#7f1100', '#8c7000',
            '#00a66f', '#7382e6', '#b32d74', '#d9896c', '#ffe680', '#1d7362',
            '#364cd9', '#73003d', '#d93a00', '#998a4d', '#59b3a1', '#5965b3',
            '#e5007a', '#73341d', '#665f00', '#00b38f', '#0018b3', '#59163a',
            '#b2502d', '#bfb960', '#00ffcc', '#23318c', '#a6537f', '#734939',
            '#b2a700', '#104036', '#3d3df2', '#402031', '#e56739', '#736f39',
            '#79f2ea', '#000059', '#401029', '#4c1400', '#ffee00', '#005953',
            '#101040', '#990052', '#402820', '#403d10', '#00ffee', '#0000d9',
            '#ff80c4', '#a66953', '#eeff00', '#00ccbe', '#8080ff', '#e673a1',
            '#a62c00', '#474d00', '#1a3331', '#46468c', '#733950', '#662900',
            '#858c23', '#238c85', '#0f0073', '#b20047', '#d9986c', '#becc00',
            '#396f73', '#281d73', '#ff0066', '#ff6600', '#dee673', '#59adb3',
            '#6559b3', '#590024', '#b2622d', '#98b32d', '#36ced9', '#332d59',
            '#40001a', '#733f1d', '#526600', '#005359', '#242040', '#bf6079',
            '#735039', '#cef23d', '#007780', '#5630bf', '#66001b', '#b24700',
            '#acbf60', '#1d6273', '#25008c', '#731d34', '#a67453', '#50592d',
            '#00ccff', '#6600ff', '#ff0044', '#4c1f00', '#8a994d', '#79daf2',
            '#a173e6', '#d93662', '#402310', '#aaff00', '#2d98b3', '#8c40ff',
            '#592d39', '#ff8c40', '#354020', '#103640', '#1a0040', '#331a20',
            '#331400', '#334d00', '#1d5673', '#583973', '#7f0022', '#4c3626',
            '#88cc00', '#36a3d9', '#3d0073', '#d9364c', '#33241a', '#698c23',
            '#5995b3', '#300059', '#e57382', '#7f3300', '#366600', '#00aaff',
            '#3a1659', '#733941', '#663600', '#74b32d', '#003c59', '#7f53a6',
            '#73000f', '#ff8800', '#baf279', '#79caf2', '#291040', '#a6293a',
            '#b2742d', '#587339', '#0077b3', '#632699', '#400009', '#d9a66c',
            '#294010', '#2d4a59', '#aa00ff', '#4c131b', '#b25f00', '#5ce600',
            '#267399', '#a336d9', '#990014', '#664e33', '#86bf60', '#0088ff',
            '#7700b3', '#593a16', '#073300', '#1d4b73', '#ac60bf', '#e59539',
            '#4f8c46', '#368dd9', '#5c0073'
        ]

    def rgb_to_hex_color(self, rgb_tuple):
        """
        Converts an rgb_tuple passed to an hex color.

        :param rgb_tuple: tuple with 3 ints represents rgb color space
        """
        # NOTE(review): relies on the Python 2 only str 'hex' codec
        return '#' + ("".join(map(chr, rgb_tuple)).encode('hex'))

    def email_to_int_list(self, email_str):
        """
        Get every byte of the hex digest value of email and turn it to integer.
        It's going to be always between 0-255
        """
        digest = md5_safe(email_str.lower())
        return [int(digest[i * 2:i * 2 + 2], 16) for i in range(16)]

    def pick_color_bank_index(self, email_str, color_bank):
        # first digest byte modulo bank size -> a stable index per email
        return self.email_to_int_list(email_str)[0] % len(color_bank)

    def str2color(self, email_str):
        """
        Tries to map in a stable algorithm an email to color

        :param email_str:
        """
        color_bank = self.get_color_bank()
        # pick position (modulo its length so we always find it in the
        # bank even if it's smaller than 256 values)
        pos = self.pick_color_bank_index(email_str, color_bank)
        return color_bank[pos]

    def normalize_email(self, email_address):
        """
        Coerce ``email_address`` into an ascii ``user@host`` string,
        filling in missing parts with defaults.
        """
        import unicodedata
        # default host used to fill in the fake/missing email
        default_host = u'localhost'

        if not email_address:
            email_address = u'%s@%s' % (User.DEFAULT_USER, default_host)

        email_address = safe_unicode(email_address)

        if u'@' not in email_address:
            email_address = u'%s@%s' % (email_address, default_host)

        if email_address.endswith(u'@'):
            email_address = u'%s%s' % (email_address, default_host)

        # strip accents/diacritics so later initials extraction is ascii
        email_address = unicodedata.normalize('NFKD', email_address)\
            .encode('ascii', 'ignore')
        return email_address

    def get_initials(self):
        """
        Returns 2 letter initials calculated based on the input.
        The algorithm picks first given email address, and takes first letter
        of part before @, and then the first letter of server name. In case
        the part before @ is in a format of `somestring.somestring2` it replaces
        the server letter with first letter of somestring2

        In case function was initialized with both first and lastname, this
        overrides the extraction from email by first letter of the first and
        last name. We add special logic to that functionality, In case Full name
        is compound, like Guido Von Rossum, we use last part of the last name
        (Von Rossum) picking `R`.

        Function also normalizes the non-ascii characters to their ascii
        representation, eg A-with-ogonek => A
        """
        import unicodedata
        # replace non-ascii to ascii
        first_name = unicodedata.normalize(
            'NFKD', safe_unicode(self.first_name)).encode('ascii', 'ignore')
        last_name = unicodedata.normalize(
            'NFKD', safe_unicode(self.last_name)).encode('ascii', 'ignore')

        # do NFKD encoding, and also make sure email has proper format
        email_address = self.normalize_email(self.email_address)

        # first push the email initials
        prefix, server = email_address.split('@', 1)

        # check if prefix is maybe a 'firstname.lastname' syntax
        _dot_split = prefix.rsplit('.', 1)
        if len(_dot_split) == 2:
            initials = [_dot_split[0][0], _dot_split[1][0]]
        else:
            initials = [prefix[0], server[0]]

        # then try to replace either firtname or lastname
        fn_letter = (first_name or " ")[0].strip()
        ln_letter = (last_name.split(' ', 1)[-1] or " ")[0].strip()

        if fn_letter:
            initials[0] = fn_letter

        if ln_letter:
            initials[1] = ln_letter

        return ''.join(initials).upper()

    def get_img_data_by_type(self, font_family, img_type):
        # generic person silhouette used for the default/anonymous user
        default_user = """
        <svg xmlns="http://www.w3.org/2000/svg"
        version="1.1" x="0px" y="0px" width="{size}" height="{size}"
        viewBox="-15 -10 439.165 429.164"

        xml:space="preserve"
        style="background:{background};" >

        <path d="M204.583,216.671c50.664,0,91.74-48.075,
        91.74-107.378c0-82.237-41.074-107.377-91.74-107.377
        c-50.668,0-91.74,25.14-91.74,107.377C112.844,
        168.596,153.916,216.671,
        204.583,216.671z" fill="{text_color}"/>
        <path d="M407.164,374.717L360.88,
        270.454c-2.117-4.771-5.836-8.728-10.465-11.138l-71.83-37.392
        c-1.584-0.823-3.502-0.663-4.926,0.415c-20.316,
        15.366-44.203,23.488-69.076,23.488c-24.877,
        0-48.762-8.122-69.078-23.488
        c-1.428-1.078-3.346-1.238-4.93-0.415L58.75,
        259.316c-4.631,2.41-8.346,6.365-10.465,11.138L2.001,374.717
        c-3.191,7.188-2.537,15.412,1.75,22.005c4.285,
        6.592,11.537,10.526,19.4,10.526h362.861c7.863,0,15.117-3.936,
        19.402-10.527 C409.699,390.129,
        410.355,381.902,407.164,374.717z" fill="{text_color}"/>
        </svg>""".format(
            size=self.size,
            background='#979797',  # @grey4
            text_color=self.text_color,
            font_family=font_family)

        return {
            "default_user": default_user
        }[img_type]

    def get_img_data(self, svg_type=None):
        """
        generates the svg metadata for image
        """

        font_family = ','.join([
            'proximanovaregular',
            'Proxima Nova Regular',
            'Proxima Nova',
            'Arial',
            'Lucida Grande',
            'sans-serif'
        ])
        if svg_type:
            return self.get_img_data_by_type(font_family, svg_type)

        initials = self.get_initials()
        img_data = """
        <svg xmlns="http://www.w3.org/2000/svg" pointer-events="none"
             width="{size}" height="{size}"
             style="width: 100%; height: 100%; background-color: {background}"
             viewBox="0 0 {size} {size}">
            <text text-anchor="middle" y="50%" x="50%" dy="0.35em"
                  pointer-events="auto" fill="{text_color}"
                  font-family="{font_family}"
                  style="font-weight: 400; font-size: {f_size}px;">{text}
            </text>
        </svg>""".format(
            size=self.size,
            f_size=self.size/1.85,  # scale the text inside the box nicely
            background=self.background,
            text_color=self.text_color,
            text=initials.upper(),
            font_family=font_family)

        return img_data

    def generate_svg(self, svg_type=None):
        # NOTE(review): str.encode('base64') is Python 2 only
        img_data = self.get_img_data(svg_type)
        return "data:image/svg+xml;base64,%s" % img_data.encode('base64')
1156 1156
1157 1157
def initials_gravatar(email_address, first_name, last_name, size=30):
    """Build a data-uri SVG avatar for the given identity; the default
    user email gets the generic silhouette instead of initials."""
    svg_type = (
        'default_user' if email_address == User.DEFAULT_USER_EMAIL else None)
    generator = InitialsGravatar(email_address, first_name, last_name, size)
    return generator.generate_svg(svg_type=svg_type)
1164 1164
1165 1165
def gravatar_url(email_address, size=30):
    """Return a gravatar URL for ``email_address``, or an inline initials
    SVG when gravatars are disabled or the address is empty/default."""
    # doh, we need to re-import those to mock it later
    from pylons import tmpl_context as c

    use_gravatar = c.visual.use_gravatar
    gravatar_tmpl = c.visual.gravatar_url or User.DEFAULT_GRAVATAR_URL

    email_address = email_address or User.DEFAULT_USER_EMAIL
    if isinstance(email_address, unicode):
        # hashlib crashes on unicode items
        email_address = safe_str(email_address)

    # empty email or default user -> generic initials avatar
    if not email_address or email_address == User.DEFAULT_USER_EMAIL:
        return initials_gravatar(User.DEFAULT_USER_EMAIL, '', '', size=size)

    if not use_gravatar:
        return initials_gravatar(email_address, '', '', size=size)

    # TODO: Disuse pyramid thread locals. Think about another solution to
    # get the host and schema here.
    request = get_current_request()
    tmpl = safe_str(gravatar_tmpl)
    for marker, replacement in (
            ('{email}', email_address),
            ('{md5email}', md5_safe(email_address.lower())),
            ('{netloc}', request.host),
            ('{scheme}', request.scheme),
            ('{size}', safe_str(size))):
        tmpl = tmpl.replace(marker, replacement)
    return tmpl
1195 1195
1196 1196
class Page(_Page):
    """
    Custom pager to match rendering style with paginator
    """

    def _get_pos(self, cur_page, max_page, items):
        # Compute the window [left, right] of page numbers to render so
        # that up to `items` pages are shown, clamped to [1, max_page].
        # NOTE: Python 2 integer division throughout.
        edge = (items / 2) + 1
        if (cur_page <= edge):
            # near the start: extend the window towards the right
            radius = max(items / 2, items - cur_page)
        elif (max_page - cur_page) < edge:
            # near the end: extend the window towards the left
            radius = (items - 1) - (max_page - cur_page)
        else:
            # middle: symmetric window around the current page
            radius = items / 2

        left = max(1, (cur_page - (radius)))
        right = min(max_page, cur_page + (radius))
        return left, cur_page, right

    def _range(self, regexp_match):
        """
        Return range of linked pages (e.g. '1 2 [3] 4 5 6 7 8').

        Arguments:

        regexp_match
            A "re" (regular expressions) match object containing the
            radius of linked pages around the current page in
            regexp_match.group(1) as a string

        This function is supposed to be called as a callable in
        re.sub.

        """
        radius = int(regexp_match.group(1))

        # Compute the first and last page number within the radius
        # e.g. '1 .. 5 6 [7] 8 9 .. 12'
        # -> leftmost_page = 5
        # -> rightmost_page = 9
        leftmost_page, _cur, rightmost_page = self._get_pos(self.page,
                                                            self.last_page,
                                                            (radius * 2) + 1)
        nav_items = []

        # Create a link to the first page (unless we are on the first page
        # or there would be no need to insert '..' spacers)
        if self.page != self.first_page and self.first_page < leftmost_page:
            nav_items.append(self._pagerlink(self.first_page, self.first_page))

        # Insert dots if there are pages between the first page
        # and the currently displayed page range
        if leftmost_page - self.first_page > 1:
            # Wrap in a SPAN tag if nolink_attr is set
            text = '..'
            if self.dotdot_attr:
                text = HTML.span(c=text, **self.dotdot_attr)
            nav_items.append(text)

        for thispage in xrange(leftmost_page, rightmost_page + 1):
            # Hilight the current page number and do not use a link
            if thispage == self.page:
                text = '%s' % (thispage,)
                # Wrap in a SPAN tag if nolink_attr is set
                if self.curpage_attr:
                    text = HTML.span(c=text, **self.curpage_attr)
                nav_items.append(text)
            # Otherwise create just a link to that page
            else:
                text = '%s' % (thispage,)
                nav_items.append(self._pagerlink(thispage, text))

        # Insert dots if there are pages between the displayed
        # page numbers and the end of the page range
        if self.last_page - rightmost_page > 1:
            text = '..'
            # Wrap in a SPAN tag if nolink_attr is set
            if self.dotdot_attr:
                text = HTML.span(c=text, **self.dotdot_attr)
            nav_items.append(text)

        # Create a link to the very last page (unless we are on the last
        # page or there would be no need to insert '..' spacers)
        if self.page != self.last_page and rightmost_page < self.last_page:
            nav_items.append(self._pagerlink(self.last_page, self.last_page))

        ## prerender links
        #_page_link = url.current()
        #nav_items.append(literal('<link rel="prerender" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
        #nav_items.append(literal('<link rel="prefetch" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
        return self.separator.join(nav_items)

    # NOTE(review): mutable default arguments (link_attr/curpage_attr/
    # dotdot_attr dicts) - safe only as long as callers never mutate them.
    def pager(self, format='~2~', page_param='page', partial_param='partial',
              show_if_single_page=False, separator=' ', onclick=None,
              symbol_first='<<', symbol_last='>>',
              symbol_previous='<', symbol_next='>',
              link_attr={'class': 'pager_link', 'rel': 'prerender'},
              curpage_attr={'class': 'pager_curpage'},
              dotdot_attr={'class': 'pager_dotdot'}, **kwargs):
        """Render the pager markup; ``format`` may contain ``~radius~``
        placeholders (expanded by :meth:`_range`) and ``$variable``
        substitutions such as ``$link_previous`` / ``$link_next``."""

        self.curpage_attr = curpage_attr
        self.separator = separator
        self.pager_kwargs = kwargs
        self.page_param = page_param
        self.partial_param = partial_param
        self.onclick = onclick
        self.link_attr = link_attr
        self.dotdot_attr = dotdot_attr

        # Don't show navigator if there is no more than one page
        if self.page_count == 0 or (self.page_count == 1 and not show_if_single_page):
            return ''

        from string import Template
        # Replace ~...~ in token format by range of pages
        result = re.sub(r'~(\d+)~', self._range, format)

        # Interpolate '%' variables
        result = Template(result).safe_substitute({
            'first_page': self.first_page,
            'last_page': self.last_page,
            'page': self.page,
            'page_count': self.page_count,
            'items_per_page': self.items_per_page,
            'first_item': self.first_item,
            'last_item': self.last_item,
            'item_count': self.item_count,
            'link_first': self.page > self.first_page and \
                    self._pagerlink(self.first_page, symbol_first) or '',
            'link_last': self.page < self.last_page and \
                    self._pagerlink(self.last_page, symbol_last) or '',
            'link_previous': self.previous_page and \
                    self._pagerlink(self.previous_page, symbol_previous) \
                    or HTML.span(symbol_previous, class_="pg-previous disabled"),
            'link_next': self.next_page and \
                    self._pagerlink(self.next_page, symbol_next) \
                    or HTML.span(symbol_next, class_="pg-next disabled")
        })

        return literal(result)
1336 1336
1337 1337
1338 1338 #==============================================================================
1339 1339 # REPO PAGER, PAGER FOR REPOSITORY
1340 1340 #==============================================================================
class RepoPage(Page):

    def __init__(self, collection, page=1, items_per_page=20,
                 item_count=None, url=None, **kwargs):

        """Create a "RepoPage" instance. special pager for paging
        repository
        """
        self._url_generator = url

        # Safe the kwargs class-wide so they can be used in the pager() method
        self.kwargs = kwargs

        # Save a reference to the collection
        self.original_collection = collection

        self.collection = collection

        # The self.page is the number of the current page.
        # The first page has the number 1!
        try:
            self.page = int(page)  # make it int() if we get it as a string
        except (ValueError, TypeError):
            self.page = 1

        self.items_per_page = items_per_page

        # Unless the user tells us how many items the collections has
        # we calculate that ourselves.
        if item_count is not None:
            self.item_count = item_count
        else:
            self.item_count = len(self.collection)

        # Compute the number of the first and last available page
        if self.item_count > 0:
            self.first_page = 1
            self.page_count = int(math.ceil(float(self.item_count) /
                                            self.items_per_page))
            self.last_page = self.first_page + self.page_count - 1

            # Make sure that the requested page number is the range of
            # valid pages
            if self.page > self.last_page:
                self.page = self.last_page
            elif self.page < self.first_page:
                self.page = self.first_page

            # Note: the number of items on this page can be less than
            # items_per_page if the last page is not full.
            # The slice indexes are computed from the END of the
            # collection: page 1 shows its newest (last) items.
            self.first_item = max(0, (self.item_count) - (self.page *
                                                          items_per_page))
            self.last_item = ((self.item_count - 1) - items_per_page *
                              (self.page - 1))

            self.items = list(self.collection[self.first_item:self.last_item + 1])

            # Links to previous and next page
            if self.page > self.first_page:
                self.previous_page = self.page - 1
            else:
                self.previous_page = None

            if self.page < self.last_page:
                self.next_page = self.page + 1
            else:
                self.next_page = None

        # No items available
        else:
            self.first_page = None
            self.page_count = 0
            self.last_page = None
            self.first_item = None
            self.last_item = None
            self.previous_page = None
            self.next_page = None
            self.items = []

        # This is a subclass of the 'list' type. Initialise the list now.
        # The page's items are stored newest-first (reversed slice).
        list.__init__(self, reversed(self.items))
1422 1422
1423 1423
def changed_tooltip(nodes):
    """
    Build an html tooltip listing the paths changed in a commit.

    At most 30 entries are rendered; anything beyond that is summarized
    as "and N more".

    :param nodes: LazyNodesGenerator
    """
    if not nodes:
        return ': ' + _('No Files')

    shown_paths = [safe_unicode(node.path) for node in nodes[:30]]
    overflow = ''
    if len(nodes) > 30:
        overflow = '<br/>' + _(' and %s more') % (len(nodes) - 30)
    return literal(': <br/> ' + '<br/> '.join(shown_paths) + overflow)
1440 1440
1441 1441
def breadcrumb_repo_link(repo):
    """
    Makes a breadcrumbs path link to repo

    ex::
        group >> subgroup >> repo

    :param repo: a Repository instance
    """

    segments = []
    for group in repo.groups_with_parents:
        segments.append(
            link_to(group.name,
                    url('repo_group_home', group_name=group.group_name)))
    segments.append(
        link_to(repo.just_name, url('summary_home', repo_name=repo.repo_name)))

    return literal(' &raquo; '.join(segments))
1460 1460
1461 1461
def format_byte_size_binary(file_size):
    """
    Format a file/folder size using binary (1024-based) units.
    """
    return format_byte_size(file_size, binary=True)
1468 1468
1469 1469
def fancy_file_stats(stats):
    """
    Displays a fancy two colored bar for number of added/deleted
    lines of code on file

    :param stats: dict with 'added', 'deleted', 'binary' and 'ops' keys
        describing the per-file diff statistics
    """
    from rhodecode.lib.diffs import NEW_FILENODE, DEL_FILENODE, \
        MOD_FILENODE, RENAMED_FILENODE, CHMOD_FILENODE, BIN_FILENODE

    def cgen(l_type, a_v, d_v):
        # pick the rounded-corner css classes for the added ('a') or
        # deleted ('d') segment; only the outer corners of the two-part
        # bar are rounded, so the classes depend on whether the other
        # segment is present
        mapping = {'tr': 'top-right-rounded-corner-mid',
                   'tl': 'top-left-rounded-corner-mid',
                   'br': 'bottom-right-rounded-corner-mid',
                   'bl': 'bottom-left-rounded-corner-mid'}
        map_getter = lambda x: mapping[x]

        if l_type == 'a' and d_v:
            #case when added and deleted are present
            return ' '.join(map(map_getter, ['tl', 'bl']))

        if l_type == 'a' and not d_v:
            return ' '.join(map(map_getter, ['tr', 'br', 'tl', 'bl']))

        if l_type == 'd' and a_v:
            return ' '.join(map(map_getter, ['tr', 'br']))

        if l_type == 'd' and not a_v:
            return ' '.join(map(map_getter, ['tr', 'br', 'tl', 'bl']))

    a, d = stats['added'], stats['deleted']
    width = 100  # total bar width in px

    if stats['binary']:  # binary operations like chmod/rename etc
        lbl = []
        bin_op = 0  # undefined

        # prefix with bin for binary files
        if BIN_FILENODE in stats['ops']:
            lbl += ['bin']

        # the NEW/MOD/DEL/RENAMED operations are mutually exclusive here;
        # the first one found wins and determines the bin<N> css class
        if NEW_FILENODE in stats['ops']:
            lbl += [_('new file')]
            bin_op = NEW_FILENODE
        elif MOD_FILENODE in stats['ops']:
            lbl += [_('mod')]
            bin_op = MOD_FILENODE
        elif DEL_FILENODE in stats['ops']:
            lbl += [_('del')]
            bin_op = DEL_FILENODE
        elif RENAMED_FILENODE in stats['ops']:
            lbl += [_('rename')]
            bin_op = RENAMED_FILENODE

        # chmod can go with other operations, so we add a + to lbl if needed
        if CHMOD_FILENODE in stats['ops']:
            lbl += [_('chmod')]
            if bin_op == 0:
                bin_op = CHMOD_FILENODE

        lbl = '+'.join(lbl)
        # a single full-width segment carrying the textual label; the
        # second (deleted) segment is rendered with zero width
        b_a = '<div class="bin bin%s %s" style="width:100%%">%s</div>' \
            % (bin_op, cgen('a', a_v='', d_v=0), lbl)
        b_d = '<div class="bin bin1" style="width:0%%"></div>'
        return literal('<div style="width:%spx">%s%s</div>' % (width, b_a, b_d))

    t = stats['added'] + stats['deleted']
    unit = float(width) / (t or 1)

    # needs > 9% of width to be visible or 0 to be hidden
    a_p = max(9, unit * a) if a > 0 else 0
    d_p = max(9, unit * d) if d > 0 else 0
    p_sum = a_p + d_p

    if p_sum > width:
        #adjust the percentage to be == 100% since we adjusted to 9
        if a_p > d_p:
            a_p = a_p - (p_sum - width)
        else:
            d_p = d_p - (p_sum - width)

    # labels are empty strings when the respective count is zero
    a_v = a if a > 0 else ''
    d_v = d if d > 0 else ''

    d_a = '<div class="added %s" style="width:%s%%">%s</div>' % (
        cgen('a', a_v, d_v), a_p, a_v
    )
    d_d = '<div class="deleted %s" style="width:%s%%">%s</div>' % (
        cgen('d', a_v, d_v), d_p, d_v
    )
    return literal('<div style="width:%spx">%s%s</div>' % (width, d_a, d_d))
1561 1561
1562 1562
def urlify_text(text_, safe=True):
    """
    Extract urls from text and make html links out of them.

    :param text_: text to process
    :param safe: if True wrap the result in ``literal`` so the template
        engine does not escape the generated markup again
    """

    # second segment made a raw string too, so the \(\) escapes are not
    # interpreted by the Python string parser
    url_pat = re.compile(r'(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@#.&+]'
                         r'|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)')

    def url_func(match_obj):
        url_full = match_obj.groups()[0]
        return '<a href="%(url)s">%(url)s</a>' % ({'url': url_full})
    _newtext = url_pat.sub(url_func, text_)
    if safe:
        return literal(_newtext)
    return _newtext
1580 1580
1581 1581
def urlify_commits(text_, repository):
    """
    Turn commit ids (12-40 hex chars) found in text into changeset links.

    :param text_: text to process
    :param repository: repo name to build the URL with
    """
    from pylons import url  # doh, we need to re-import url to mock it later
    commit_pat = re.compile(r'(^|\s)([0-9a-fA-F]{12,40})($|\s)')

    def _make_link(match_obj):
        pref, commit_id, suf = match_obj.groups()
        return (
            '%(pref)s<a class="%(cls)s" href="%(url)s">'
            '%(commit_id)s</a>%(suf)s'
        ) % {
            'pref': pref,
            'cls': 'revision-link',
            'url': url('changeset_home', repo_name=repository,
                       revision=commit_id),
            'commit_id': commit_id,
            'suf': suf
        }

    return commit_pat.sub(_make_link, text_)
1613 1613
1614 1614
def _process_url_func(match_obj, repo_name, uid, entry):
    """
    Substitution callback for issue-tracker patterns: expands a single
    regex match into an <a> link built from the tracker ``entry`` url
    template.

    :param match_obj: regex match object for the issue reference
    :param repo_name: full repository name, used for template variables
    :param uid: issue tracker entry uid (unused here; kept so partial()
        callers can bind it)
    :param entry: dict with at least 'url' (string.Template style) and
        'pref' (issue prefix) keys
    """
    pref = ''
    if match_obj.group().startswith(' '):
        # preserve the leading whitespace that the pattern consumed
        pref = ' '

    issue_id = ''.join(match_obj.groups())
    tmpl = (
        '%(pref)s<a class="%(cls)s" href="%(url)s">'
        '%(issue-prefix)s%(id-repr)s'
        '</a>')

    (repo_name_cleaned,
     parent_group_name) = RepoGroupModel().\
        _get_group_name_and_parent(repo_name)

    # variables replacement
    named_vars = {
        'id': issue_id,
        'repo': repo_name,
        'repo_name': repo_name_cleaned,
        'group_name': parent_group_name
    }
    # named regex variables
    named_vars.update(match_obj.groupdict())
    _url = string.Template(entry['url']).safe_substitute(**named_vars)

    return tmpl % {
        'pref': pref,
        'cls': 'issue-tracker-link',
        'url': _url,
        'id-repr': issue_id,
        'issue-prefix': entry['pref'],
        'serv': entry['url'],
    }
1649 1649
1650 1650
def process_patterns(text_string, repo_name, config):
    """
    Run all active issue-tracker patterns over ``text_string`` and
    replace matches with links.

    :param text_string: text to process
    :param repo_name: repository the text belongs to (may be empty or
        invalid; an invalid name is still passed on to the url builder)
    :param config: application config (currently unused here; kept for
        interface compatibility)
    """
    repo = None
    if repo_name:
        # Retrieving repo_name to avoid invalid repo_name to explode on
        # IssueTrackerSettingsModel but still passing invalid name further down
        repo = Repository.get_by_repo_name(repo_name, cache=True)

    settings_model = IssueTrackerSettingsModel(repo=repo)
    active_entries = settings_model.get_settings(cache=True)

    newtext = text_string
    for uid, entry in active_entries.items():
        url_func = partial(
            _process_url_func, repo_name=repo_name, entry=entry, uid=uid)

        # use lazy %-style logger args so messages are only formatted
        # when the debug level is actually enabled
        log.debug('found issue tracker entry with uid %s', uid)

        if not (entry['pat'] and entry['url']):
            log.debug('skipping due to missing data')
            continue

        log.debug('issue tracker entry: uid: `%s` PAT:%s URL:%s PREFIX:%s',
                  uid, entry['pat'], entry['url'], entry['pref'])

        try:
            pattern = re.compile(r'%s' % entry['pat'])
        except re.error:
            log.exception(
                'issue tracker pattern: `%s` failed to compile',
                entry['pat'])
            continue

        newtext = pattern.sub(url_func, newtext)
        log.debug('processed prefix:uid `%s`', uid)

    return newtext
1687 1687
1688 1688
def urlify_commit_message(commit_text, repository=None):
    """
    Parses given text message and makes proper links.
    issues are linked to given issue-server, and rest is a commit link

    :param commit_text: commit message to process
    :param repository: optional repo name; when given, bare commit ids in
        the text are turned into changeset links as well
    """
    from pylons import url  # doh, we need to re-import url to mock it later
    from rhodecode import CONFIG

    def escaper(string):
        # minimal html escaping; everything else in the message is kept
        # verbatim and later wrapped in literal()
        return string.replace('<', '&lt;').replace('>', '&gt;')

    newtext = escaper(commit_text)
    # urlify commits - extract commit ids and make link out of them, if we have
    # the scope of repository present.
    if repository:
        newtext = urlify_commits(newtext, repository)

    # extract http/https links and make them real urls
    newtext = urlify_text(newtext, safe=False)

    # process issue tracker patterns
    newtext = process_patterns(newtext, repository or '', CONFIG)

    return literal(newtext)
1716 1716
1717 1717
def rst(source, mentions=False):
    """Render ``source`` as reStructuredText wrapped in an html div."""
    rendered = MarkupRenderer.rst(source, mentions=mentions)
    return literal('<div class="rst-block">%s</div>' % rendered)
1721 1721
1722 1722
def markdown(source, mentions=False):
    """Render ``source`` as (flavored) markdown wrapped in an html div."""
    rendered = MarkupRenderer.markdown(
        source, flavored=True, mentions=mentions)
    return literal('<div class="markdown-block">%s</div>' % rendered)
1727 1727
def renderer_from_filename(filename, exclude=None):
    """
    Detect the markup renderer name ('markdown', 'rst' or 'plain') for a
    filename by delegating to ``MarkupRenderer.renderer_from_filename``.

    The old inline extension-matching implementation was superseded by
    the MarkupRenderer classmethod; only the delegation remains.

    :param filename: file name to inspect
    :param exclude: optional list of extensions to ignore during detection
    """
    return MarkupRenderer.renderer_from_filename(filename, exclude=exclude)
1740 1730
1741 1731
def render(source, renderer='rst', mentions=False):
    """
    Dispatch rendering of ``source`` to the requested renderer.

    Returns None for renderer names other than 'rst' and 'markdown'
    (matches historic behaviour).
    """
    if renderer == 'markdown':
        return markdown(source, mentions=mentions)
    if renderer == 'rst':
        return rst(source, mentions=mentions)
1747 1737
1748 1738
def commit_status(repo, commit_id):
    """Return the review status of ``commit_id`` in ``repo``."""
    return ChangesetStatusModel().get_status(repo, commit_id)
1751 1741
1752 1742
def commit_status_lbl(commit_status):
    """Translate a commit status code into its human readable label."""
    return dict(ChangesetStatus.STATUSES).get(commit_status)
1755 1745
1756 1746
def commit_time(repo_name, commit_id):
    """Return the date of ``commit_id`` in the named repository."""
    repo = Repository.get_by_repo_name(repo_name)
    commit = repo.get_commit(commit_id=commit_id)
    return commit.date
1761 1751
1762 1752
def get_permission_name(key):
    """Return the human readable name for a permission ``key``."""
    return dict(Permission.PERMS).get(key)
1765 1755
1766 1756
def journal_filter_help():
    """Return the translated help text for the journal filter syntax."""
    # adjacent string literals concatenate implicitly; the resulting
    # value is identical to the previous '+'-chained version
    return _(
        'Example filter terms:\n'
        ' repository:vcs\n'
        ' username:marcin\n'
        ' action:*push*\n'
        ' ip:127.0.0.1\n'
        ' date:20120101\n'
        ' date:[20120101100000 TO 20120102]\n'
        '\n'
        'Generate wildcards using \'*\' character:\n'
        ' "repository:vcs*" - search everything starting with \'vcs\'\n'
        ' "repository:*vcs*" - search for repository containing \'vcs\'\n'
        '\n'
        'Optional AND / OR operators in queries\n'
        ' "repository:vcs OR repository:test"\n'
        ' "username:test AND repository:test*"\n'
    )
1785 1775
1786 1776
def not_mapped_error(repo_name):
    """Flash an error telling the user ``repo_name`` is not in the db."""
    flash(_('%s repository is not mapped to db perhaps'
            ' it was created or renamed from the filesystem'
            ' please run the application again'
            ' in order to rescan repositories') % repo_name, category='error')
1792 1782
1793 1783
def ip_range(ip_addr):
    """Return a human readable "start - end" range for an IP spec."""
    from rhodecode.model.db import UserIpMap
    range_start, range_end = UserIpMap._get_ip_range(ip_addr)
    return '{} - {}'.format(range_start, range_end)
1798 1788
1799 1789
def form(url, method='post', needs_csrf_token=True, **attrs):
    """Wrapper around webhelpers.tags.form to prevent CSRF attacks."""
    is_unsafe_method = method.lower() != 'get'
    if is_unsafe_method and needs_csrf_token:
        raise Exception(
            'Forms to POST/PUT/DELETE endpoints should have (in general) a ' +
            'CSRF token. If the endpoint does not require such token you can ' +
            'explicitly set the parameter needs_csrf_token to false.')

    return wh_form(url, method=method, **attrs)
1809 1799
1810 1800
def secure_form(url, method="POST", multipart=False, **attrs):
    """Start a form tag that points the action to an url. This
    form tag will also include the hidden field containing
    the auth token.

    The url options should be given either as a string, or as a
    ``url()`` function. The method for the form defaults to POST.

    Options:

    ``multipart``
        If set to True, the enctype is set to "multipart/form-data".
    ``method``
        The method to use when submitting the form, usually either
        "GET" or "POST". If "PUT", "DELETE", or another verb is used, a
        hidden input with name _method is added to simulate the verb
        over POST.

    """
    from webhelpers.pylonslib.secure_form import insecure_form
    from rhodecode.lib.auth import get_csrf_token, csrf_token_key
    form_tag = insecure_form(url, method, multipart, **attrs)
    csrf_field = HTML.div(
        hidden(csrf_token_key, get_csrf_token()), style="display: none;")
    return literal("%s\n%s" % (form_tag, csrf_field))
1835 1825
def dropdownmenu(name, selected, options, enable_filter=False, **attrs):
    """
    Render a <select> tag enhanced client-side by a select2 drop-down.

    :param name: form field name (also used as the element id unless an
        explicit `id` is passed in attrs)
    :param selected: currently selected value
    :param options: options passed through to webhelpers ``select``
    :param enable_filter: when True the select2 search box is shown;
        otherwise it is suppressed via minimumResultsForSearch: -1
    """
    select_html = select(name, selected, options, **attrs)
    select2 = """
    <script>
        $(document).ready(function() {
            $('#%s').select2({
                containerCssClass: 'drop-menu',
                dropdownCssClass: 'drop-menu-dropdown',
                dropdownAutoWidth: true%s
            });
        });
    </script>
    """
    filter_option = """,
            minimumResultsForSearch: -1
    """
    input_id = attrs.get('id') or name
    filter_enabled = "" if enable_filter else filter_option
    select_script = literal(select2 % (input_id, filter_enabled))

    return literal(select_html+select_script)
1857 1847
1858 1848
def get_visual_attr(tmpl_context_var, attr_name):
    """
    A safe way to get a variable from visual variable of template context

    :param tmpl_context_var: instance of tmpl_context, usually present as `c`
    :param attr_name: name of the attribute we fetch from the c.visual
    """
    visual = getattr(tmpl_context_var, 'visual', None)
    if visual:
        return getattr(visual, attr_name, None)
    return None
1871 1861
1872 1862
def get_last_path_part(file_node):
    """Return the last path segment of ``file_node`` prefixed with '../'."""
    if not file_node.path:
        return u''

    last_segment = safe_unicode(file_node.path.split('/')[-1])
    return u'../' + last_segment
1879 1869
1880 1870
def route_path(*args, **kwds):
    """
    Wrapper around pyramids `route_path` function. It is used to generate
    URLs from within pylons views or templates. This will be removed when
    pyramid migration is finished.
    """
    req = get_current_request()
    return req.route_path(*args, **kwds)
1889 1879
1890 1880
def resource_path(*args, **kwds):
    """
    Wrapper around pyramids `resource_path` function. It is used to
    generate URLs from within pylons views or templates. This will be
    removed when pyramid migration is finished.
    """
    req = get_current_request()
    return req.resource_path(*args, **kwds)
@@ -1,231 +1,305 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2011-2016 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21
22 22 """
23 23 Renderer for markup languages with ability to parse using rst or markdown
24 24 """
25 25
26
27 26 import re
28 27 import os
29 28 import logging
29 import itertools
30
30 31 from mako.lookup import TemplateLookup
31 32
32 33 from docutils.core import publish_parts
33 34 from docutils.parsers.rst import directives
34 35 import markdown
35 36
36 37 from rhodecode.lib.markdown_ext import (
37 38 UrlizeExtension, GithubFlavoredMarkdownExtension)
38 39 from rhodecode.lib.utils2 import safe_unicode, md5_safe, MENTIONS_REGEX
39 40
40 41 log = logging.getLogger(__name__)
41 42
42 43 # default renderer used to generate automated comments
43 44 DEFAULT_COMMENTS_RENDERER = 'rst'
44 45
45 46
class MarkupRenderer(object):
    """
    Renders markup sources (markdown, reStructuredText or plain text)
    into html. Also holds the configuration of readme file names and
    extensions used by the summary-page readme search.
    """

    RESTRUCTUREDTEXT_DISALLOWED_DIRECTIVES = ['include', 'meta', 'raw']

    # filename patterns used by _detect_renderer
    MARKDOWN_PAT = re.compile(r'\.(md|mkdn?|mdown|markdown)$', re.IGNORECASE)
    RST_PAT = re.compile(r'\.re?st$', re.IGNORECASE)
    PLAIN_PAT = re.compile(r'^readme$', re.IGNORECASE)

    # list of readme files to search in file tree and display in summary
    # attached weights defines the search order lower is first
    ALL_READMES = [
        ('readme', 0), ('README', 0), ('Readme', 0),
        ('doc/readme', 1), ('doc/README', 1), ('doc/Readme', 1),
        ('Docs/readme', 2), ('Docs/README', 2), ('Docs/Readme', 2),
        ('DOCS/readme', 2), ('DOCS/README', 2), ('DOCS/Readme', 2),
        ('docs/readme', 2), ('docs/README', 2), ('docs/Readme', 2),
    ]
    # extension together with weights. Lower is first means we control how
    # extensions are attached to readme names with those.
    PLAIN_EXTS = [
        ('', 0),  # special case that renders READMES names without extension
        ('.text', 2), ('.TEXT', 2),
        ('.txt', 3), ('.TXT', 3)
    ]

    RST_EXTS = [
        ('.rst', 1), ('.rest', 1),
        ('.RST', 2), ('.REST', 2)
    ]

    MARKDOWN_EXTS = [
        ('.md', 1), ('.MD', 1),
        ('.mkdn', 2), ('.MKDN', 2),
        ('.mdown', 3), ('.MDOWN', 3),
        ('.markdown', 4), ('.MARKDOWN', 4)
    ]

    ALL_EXTS = PLAIN_EXTS + MARKDOWN_EXTS + RST_EXTS

    def _detect_renderer(self, source, filename=None):
        """
        runs detection of what renderer should be used for generating html
        from a markup language

        filename can be also explicitly a renderer name

        :param source:
        :param filename:
        """

        if MarkupRenderer.MARKDOWN_PAT.findall(filename):
            detected_renderer = 'markdown'
        elif MarkupRenderer.RST_PAT.findall(filename):
            detected_renderer = 'rst'
        elif MarkupRenderer.PLAIN_PAT.findall(filename):
            # bare `readme` files are rendered as rst
            detected_renderer = 'rst'
        else:
            detected_renderer = 'plain'

        # returns the bound classmethod (e.g. MarkupRenderer.markdown)
        return getattr(MarkupRenderer, detected_renderer)

    @classmethod
    def renderer_from_filename(cls, filename, exclude):
        """
        Detect renderer from filename and optionally use exclude list to
        remove some options. This is mostly used in helpers
        """
        def _filter(elements):
            if isinstance(exclude, (list, tuple)):
                return [x for x in elements if x not in exclude]
            return elements

        if filename.endswith(
                tuple(_filter([x[0] for x in cls.MARKDOWN_EXTS if x[0]]))):
            return 'markdown'
        if filename.endswith(tuple(_filter([x[0] for x in cls.RST_EXTS if x[0]]))):
            return 'rst'

        return 'plain'

    @classmethod
    def generate_readmes(cls, all_readmes, extensions):
        """
        Build every (readme name, extension) combination and return the
        joined file names ordered by summed weight, lower weights first.
        """
        combined = itertools.product(all_readmes, extensions)
        # sort by filename weight(y[0][1]) + extensions weight(y[1][1])
        prioritized_readmes = sorted(combined, key=lambda y: y[0][1] + y[1][1])
        # filename, extension
        return [''.join([x[0][0], x[1][0]]) for x in prioritized_readmes]

    def pick_readme_order(self, default_renderer):
        """
        Return the ordered list of readme file names to probe, putting
        the files matching ``default_renderer`` first.
        """
        # NOTE(review): the local name `markdown` shadows the imported
        # markdown module inside this method and in the 'rst' branch it
        # actually holds *rst* candidates — consider renaming.

        if default_renderer == 'markdown':
            markdown = self.generate_readmes(self.ALL_READMES, self.MARKDOWN_EXTS)
            readme_order = markdown + self.generate_readmes(
                self.ALL_READMES, self.RST_EXTS + self.PLAIN_EXTS)
        elif default_renderer == 'rst':
            markdown = self.generate_readmes(self.ALL_READMES, self.RST_EXTS)
            readme_order = markdown + self.generate_readmes(
                self.ALL_READMES, self.MARKDOWN_EXTS + self.PLAIN_EXTS)
        else:
            readme_order = self.generate_readmes(self.ALL_READMES, self.ALL_EXTS)

        return readme_order

    def render(self, source, filename=None):
        """
        Renders a given filename using detected renderer
        it detects renderers based on file extension or mimetype.
        At last it will just do a simple html replacing new lines with <br/>

        :param filename:
        :param source:
        """

        renderer = self._detect_renderer(source, filename)
        readme_data = renderer(source)
        return readme_data

    @classmethod
    def _flavored_markdown(cls, text):
        """
        Github style flavored markdown

        :param text:
        """

        # Extract pre blocks.
        extractions = {}

        def pre_extraction_callback(matchobj):
            digest = md5_safe(matchobj.group(0))
            extractions[digest] = matchobj.group(0)
            return "{gfm-extraction-%s}" % digest
        pattern = re.compile(r'<pre>.*?</pre>', re.MULTILINE | re.DOTALL)
        text = re.sub(pattern, pre_extraction_callback, text)

        # Prevent foo_bar_baz from ending up with an italic word in the middle.
        def italic_callback(matchobj):
            s = matchobj.group(0)
            if list(s).count('_') >= 2:
                return s.replace('_', r'\_')
            return s
        text = re.sub(r'^(?! {4}|\t)\w+_\w+_\w[\w_]*', italic_callback, text)

        # Insert pre block extractions.
        def pre_insert_callback(matchobj):
            return '\n\n' + extractions[matchobj.group(1)]
        text = re.sub(r'\{gfm-extraction-([0-9a-f]{32})\}',
                      pre_insert_callback, text)

        return text

    @classmethod
    def urlify_text(cls, text):
        """Wrap plain http(s) urls found in ``text`` into <a> tags."""
        url_pat = re.compile(r'(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]'
                             r'|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)')

        def url_func(match_obj):
            url_full = match_obj.groups()[0]
            return '<a href="%(url)s">%(url)s</a>' % ({'url': url_full})

        return url_pat.sub(url_func, text)

    @classmethod
    def plain(cls, source, universal_newline=True):
        """Render ``source`` as plain text: urlify links, <br /> newlines."""
        source = safe_unicode(source)
        if universal_newline:
            newline = '\n'
            source = newline.join(source.splitlines())

        source = cls.urlify_text(source)
        return '<br />' + source.replace("\n", '<br />')

    @classmethod
    def markdown(cls, source, safe=True, flavored=True, mentions=False):
        """
        Render ``source`` as markdown html.

        :param safe: fall back to plain rendering on errors instead of
            raising
        :param flavored: apply the Github-flavored preprocessing
        :param mentions: highlight @mention patterns before rendering
        """
        # It does not allow to insert inline HTML. In presence of HTML tags, it
        # will replace them instead with [HTML_REMOVED]. This is controlled by
        # the safe_mode=True parameter of the markdown method.
        extensions = ['codehilite', 'extra', 'def_list', 'sane_lists']
        if flavored:
            extensions.append(GithubFlavoredMarkdownExtension())

        if mentions:
            mention_pat = re.compile(MENTIONS_REGEX)

            def wrapp(match_obj):
                uname = match_obj.groups()[0]
                return ' **@%(uname)s** ' % {'uname': uname}
            mention_hl = mention_pat.sub(wrapp, source).strip()
            # we extracted mentions render with this using Mentions false
            return cls.markdown(mention_hl, safe=safe, flavored=flavored,
                                mentions=False)

        source = safe_unicode(source)
        try:
            if flavored:
                source = cls._flavored_markdown(source)
            return markdown.markdown(
                source, extensions, safe_mode=True, enable_attributes=False)
        except Exception:
            log.exception('Error when rendering Markdown')
            if safe:
                log.debug('Fallback to render in plain mode')
                return cls.plain(source)
            else:
                raise

    @classmethod
    def rst(cls, source, safe=True, mentions=False):
        """
        Render ``source`` as reStructuredText html.

        :param safe: fall back to plain rendering on errors instead of
            raising
        :param mentions: highlight @mention patterns before rendering
        """
        if mentions:
            mention_pat = re.compile(MENTIONS_REGEX)

            def wrapp(match_obj):
                uname = match_obj.groups()[0]
                return ' **@%(uname)s** ' % {'uname': uname}
            mention_hl = mention_pat.sub(wrapp, source).strip()
            # we extracted mentions render with this using Mentions false
            return cls.rst(mention_hl, safe=safe, mentions=False)

        source = safe_unicode(source)
        try:
            # disallowed directives are disabled by registering None
            docutils_settings = dict(
                [(alias, None) for alias in
                 cls.RESTRUCTUREDTEXT_DISALLOWED_DIRECTIVES])

            docutils_settings.update({'input_encoding': 'unicode',
                                      'report_level': 4})

            # NOTE(review): this loop also registers the settings keys
            # ('input_encoding', 'report_level') as directives — looks
            # unintended, confirm before changing
            for k, v in docutils_settings.iteritems():
                directives.register_directive(k, v)

            parts = publish_parts(source=source,
                                  writer_name="html4css1",
                                  settings_overrides=docutils_settings)

            return parts['html_title'] + parts["fragment"]
        except Exception:
            log.exception('Error when rendering RST')
            if safe:
                log.debug('Fallbacking to render in plain mode')
                return cls.plain(source)
            else:
                raise
215 289
class RstTemplateRenderer(object):
    """Renders the mako templates stored in ``templates/rst_templates``."""

    def __init__(self):
        package_root = os.path.abspath(
            os.path.dirname(os.path.dirname(__file__)))
        template_dirs = [
            os.path.join(package_root, 'templates', 'rst_templates')]
        self.template_store = TemplateLookup(
            directories=template_dirs,
            input_encoding='utf-8',
            imports=['from rhodecode.lib import helpers as h'])

    def _get_template(self, templatename):
        # lookup is delegated entirely to mako's TemplateLookup
        return self.template_store.get_template(templatename)

    def render(self, template_name, **kwargs):
        return self._get_template(template_name).render(**kwargs)
@@ -1,179 +1,213 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2010-2016 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21 import pytest
22 22
23 23 from rhodecode.lib.markup_renderer import MarkupRenderer, RstTemplateRenderer
24 24
25 25
@pytest.mark.parametrize(
    "filename, expected_renderer",
    [
        ('readme.md', 'markdown'),
        ('readme.Md', 'markdown'),
        ('readme.MdoWn', 'markdown'),
        ('readme.rst', 'rst'),
        ('readme.Rst', 'rst'),
        # fix: the original listed ('readme.rest', 'rst') twice, running
        # an identical test case redundantly
        ('readme.rest', 'rst'),
        ('readme', 'rst'),
        ('README', 'rst'),

        # unknown extensions fall back to the plain renderer, even when the
        # basename mentions a markup language
        ('markdown.xml', 'plain'),
        ('rest.xml', 'plain'),
        ('readme.xml', 'plain'),

        ('readme.mdx', 'plain'),
        ('readme.rstx', 'plain'),
        ('readmex', 'plain'),
    ])
def test_detect_renderer(filename, expected_renderer):
    """Renderer detection is driven by the (case-insensitive) file extension."""
    detected_renderer = MarkupRenderer()._detect_renderer(
        '', filename=filename).__name__
    assert expected_renderer == detected_renderer
51 51
52 52
def test_markdown_xss_link():
    """A markdown link with a ``javascript:`` target must not survive rendering."""
    payload = "[link](javascript:alert('XSS: pwned!'))"
    html = MarkupRenderer.markdown(payload)
    assert 'href="javascript:alert(\'XSS: pwned!\')"' not in html
57 57
58 58
def test_markdown_xss_inline_html():
    """Inline HTML carrying a ``javascript:`` href is stripped by markdown."""
    payload = '\n'.join([
        '> <a name="n"',
        '> href="javascript:alert(\'XSS: pwned!\')">link</a>'])
    html = MarkupRenderer.markdown(payload)
    assert 'href="javascript:alert(\'XSS: pwned!\')">' not in html
65 65
66 66
def test_markdown_inline_html():
    """Benign inline HTML is removed but replaced with a visible marker."""
    markdown_source = '\n'.join(['> <a name="n"',
                                 '> href="https://rhodecode.com">link</a>'])
    html = MarkupRenderer.markdown(markdown_source)
    assert '[HTML_REMOVED]link[HTML_REMOVED]' in html
72 72
73 73
def test_rst_xss_link():
    """An RST hyperlink with a ``javascript:`` target must be neutralized."""
    payload = "`Link<javascript:alert('XSS: pwned!')>`_"
    html = MarkupRenderer.rst(payload)
    assert "href=javascript:alert('XSS: pwned!')" not in html
78 78
79 79
@pytest.mark.xfail(reason='Bug in docutils. Waiting answer from the author')
def test_rst_xss_inline_html():
    """Inline HTML with a ``javascript:`` href should be stripped (docutils bug)."""
    payload = '<a href="javascript:alert(\'XSS: pwned!\')">link</a>'
    html = MarkupRenderer.rst(payload)
    assert 'href="javascript:alert(' not in html
85 85
86 86
def test_rst_xss_raw_directive():
    """The ``raw:: html`` directive is disallowed, so its payload never renders."""
    payload = '\n'.join([
        '.. raw:: html',
        '',
        '  <a href="javascript:alert(\'XSS: pwned!\')">link</a>'])
    html = MarkupRenderer.rst(payload)
    assert 'href="javascript:alert(' not in html
94 94
95 95
def test_render_rst_template_without_files():
    """Rendering ``pull_request_update.mako`` with commits but no file
    changes emits the 'No file changes found' fallback line."""
    # NOTE(review): internal whitespace of this literal is byte-significant —
    # the assertion compares the rendered template output exactly; confirm
    # indentation against the mako template if this ever fails.
    expected = u'''\
Auto status change to |under_review|

.. role:: added
.. role:: removed
.. parsed-literal::

  Changed commits:
    * :added:`2 added`
    * :removed:`3 removed`

  No file changes found

.. |under_review| replace:: *"NEW STATUS"*'''

    params = {
        'under_review_label': 'NEW STATUS',
        'added_commits': ['a', 'b'],         # 2 entries -> ':added:`2 added`'
        'removed_commits': ['a', 'b', 'c'],  # 3 entries -> ':removed:`3 removed`'
        'changed_files': [],
        'added_files': [],
        'modified_files': [],
        'removed_files': [],
    }
    renderer = RstTemplateRenderer()
    rendered = renderer.render('pull_request_update.mako', **params)
    assert expected == rendered
124 124
125 125
def test_render_rst_template_with_files():
    """Rendering ``pull_request_update.mako`` with added/modified/removed
    files lists each one (A/M/R) with an anchor link where available."""
    # NOTE(review): the `<#a_c--...>` anchors are hash-derived per path —
    # this literal must match the template output byte-for-byte; removed
    # files ('R') get no anchor link.
    expected = u'''\
Auto status change to |under_review|

.. role:: added
.. role:: removed
.. parsed-literal::

  Changed commits:
    * :added:`1 added`
    * :removed:`3 removed`

  Changed files:
    * `A /path/a.py <#a_c--68ed34923b68>`_
    * `A /path/b.js <#a_c--64f90608b607>`_
    * `M /path/d.js <#a_c--85842bf30c6e>`_
    * `M /path/ę.py <#a_c--d713adf009cd>`_
    * R /path/ź.py

.. |under_review| replace:: *"NEW STATUS"*'''

    # non-ascii paths exercise unicode handling in the template
    added = ['/path/a.py', '/path/b.js']
    modified = ['/path/d.js', u'/path/ę.py']
    removed = [u'/path/ź.py']

    params = {
        'under_review_label': 'NEW STATUS',
        'added_commits': ['a'],
        'removed_commits': ['a', 'b', 'c'],
        'changed_files': added + modified + removed,
        'added_files': added,
        'modified_files': modified,
        'removed_files': removed,
    }
    renderer = RstTemplateRenderer()
    rendered = renderer.render('pull_request_update.mako', **params)

    assert expected == rendered
164 164
165 165
def test_render_rst_auto_status_template():
    """``auto_status_change.mako`` substitutes the new status label."""
    expected = u'''\
Auto status change to |new_status|

.. |new_status| replace:: *"NEW STATUS"*'''

    template_args = {
        'new_status_label': 'NEW STATUS',
        'pull_request': None,
        'commit_id': None,
    }
    rendered = RstTemplateRenderer().render(
        'auto_status_change.mako', **template_args)
    assert rendered == expected
180
181
@pytest.mark.parametrize(
    "readmes, exts, order",
    [
        # no candidates and no extensions -> nothing generated
        ([], [], []),

        # equal-weight names combine with extensions in given order
        ([('readme1', 0), ('text1', 1)], [('.ext', 0), ('.txt', 1)],
         ['readme1.ext', 'readme1.txt', 'text1.ext', 'text1.txt']),

        # extension weight reorders within each readme name
        ([('readme2', 0), ('text2', 1)], [('.ext', 2), ('.txt', 1)],
         ['readme2.txt', 'readme2.ext', 'text2.txt', 'text2.ext']),

        # a single extension applies to every candidate name
        ([('readme3', 0), ('text3', 1)], [('.XXX', 1)],
         ['readme3.XXX', 'text3.XXX']),
    ])
def test_generate_readmes(readmes, exts, order):
    """Candidate names and extensions combine, ordered by their weights."""
    generated = MarkupRenderer.generate_readmes(readmes, exts)
    assert generated == order
198
199
@pytest.mark.parametrize(
    "renderer, expected_order",
    [
        ('plain', ['readme', 'README', 'Readme']),
        ('text', ['readme', 'README', 'Readme']),
        ('markdown', MarkupRenderer.generate_readmes(
            MarkupRenderer.ALL_READMES, MarkupRenderer.MARKDOWN_EXTS)),
        ('rst', MarkupRenderer.generate_readmes(
            MarkupRenderer.ALL_READMES, MarkupRenderer.RST_EXTS)),
    ])
def test_order_of_readme_generation(renderer, expected_order):
    """The readme search order begins with the renderer-specific candidates."""
    markup_renderer = MarkupRenderer()
    actual_order = markup_renderer.pick_readme_order(renderer)
    assert actual_order[:len(expected_order)] == expected_order
General Comments 0
You need to be logged in to leave comments. Login now