markup-rendering: added relative image support....
marcink
r1527:4089d52f default
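This changeset makes relative links and images in a rendered README resolve against the repository files: the summary controller below now builds a raw-file URL for the README at the landing commit and hands it to relative_links, which is imported from rhodecode.lib.markup_renderer but whose implementation is not part of this diff. A minimal sketch of the idea, assuming a simple attribute rewrite (the helper name, the regex and the skip list are illustrative assumptions, not the shipped code):

# Illustrative sketch only -- the real relative_links() lives in
# rhodecode.lib.markup_renderer and is not shown in this changeset.
import posixpath
import re


def relative_links_sketch(html_source, relative_url):
    """Rewrite relative src/href attributes in rendered README HTML so they
    resolve against the raw-file URL of the README at the landing commit."""
    base_dir = posixpath.dirname(relative_url)

    def _rewrite(match):
        attr, quote, link = match.group(1), match.group(2), match.group(3)
        # leave absolute, anchor and scheme-qualified links untouched
        if link.startswith(('http://', 'https://', '/', '#', 'mailto:')):
            return match.group(0)
        # resolve the relative path against the README's raw-file directory
        fixed = posixpath.normpath(posixpath.join(base_dir, link))
        return '%s=%s%s%s' % (attr, quote, fixed, quote)

    return re.sub(r'(src|href)=(["\'])(.*?)\2', _rewrite, html_source)

With a rewrite along these lines, an image referenced as images/logo.png from a README renders from the repository content instead of appearing as a broken link on the summary page.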
@@ -1,318 +1,326 @@
# -*- coding: utf-8 -*-

# Copyright (C) 2010-2017 RhodeCode GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License, version 3
# (only), as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This program is dual-licensed. If you wish to learn more about the
# RhodeCode Enterprise Edition, including its added features, Support services,
# and proprietary license terms, please see https://rhodecode.com/licenses/

"""
Summary controller for RhodeCode Enterprise
"""

import logging
from string import lower

from pylons import tmpl_context as c, request
from pylons.i18n.translation import _
from beaker.cache import cache_region, region_invalidate

from rhodecode.config.conf import (LANGUAGES_EXTENSIONS_MAP)
from rhodecode.controllers import utils
from rhodecode.controllers.changelog import _load_changelog_summary
from rhodecode.lib import caches, helpers as h
from rhodecode.lib.utils import jsonify
from rhodecode.lib.utils2 import safe_str
from rhodecode.lib.auth import (
    LoginRequired, HasRepoPermissionAnyDecorator, NotAnonymous, XHRRequired)
from rhodecode.lib.base import BaseRepoController, render
-from rhodecode.lib.markup_renderer import MarkupRenderer
+from rhodecode.lib.markup_renderer import MarkupRenderer, relative_links
from rhodecode.lib.ext_json import json
from rhodecode.lib.vcs.backends.base import EmptyCommit
from rhodecode.lib.vcs.exceptions import (
    CommitError, EmptyRepositoryError, NodeDoesNotExistError)
from rhodecode.model.db import Statistics, CacheKey, User
from rhodecode.model.repo import ReadmeFinder


log = logging.getLogger(__name__)


class SummaryController(BaseRepoController):

    def __before__(self):
        super(SummaryController, self).__before__()

    def __get_readme_data(self, db_repo):
        repo_name = db_repo.repo_name
        log.debug('Looking for README file')
        default_renderer = c.visual.default_renderer

        @cache_region('long_term')
        def _generate_readme(cache_key):
            readme_data = None
            readme_node = None
            readme_filename = None
            commit = self._get_landing_commit_or_none(db_repo)
            if commit:
                log.debug("Searching for a README file.")
                readme_node = ReadmeFinder(default_renderer).search(commit)
                if readme_node:
-                    readme_data = self._render_readme_or_none(commit, readme_node)
+                    relative_url = h.url('files_raw_home',
+                                         repo_name=repo_name,
+                                         revision=commit.raw_id,
+                                         f_path=readme_node.path)
+                    readme_data = self._render_readme_or_none(
+                        commit, readme_node, relative_url)
                    readme_filename = readme_node.path
            return readme_data, readme_filename

        invalidator_context = CacheKey.repo_context_cache(
            _generate_readme, repo_name, CacheKey.CACHE_TYPE_README)

        with invalidator_context as context:
            context.invalidate()
            computed = context.compute()

        return computed

    def _get_landing_commit_or_none(self, db_repo):
        log.debug("Getting the landing commit.")
        try:
            commit = db_repo.get_landing_commit()
            if not isinstance(commit, EmptyCommit):
                return commit
            else:
                log.debug("Repository is empty, no README to render.")
        except CommitError:
            log.exception(
                "Problem getting commit when trying to render the README.")

-    def _render_readme_or_none(self, commit, readme_node):
+    def _render_readme_or_none(self, commit, readme_node, relative_url):
        log.debug(
            'Found README file `%s` rendering...', readme_node.path)
        renderer = MarkupRenderer()
        try:
-            return renderer.render(
-                readme_node.content, filename=readme_node.path)
+            html_source = renderer.render(
+                readme_node.content, filename=readme_node.path)
+            if relative_url:
+                return relative_links(html_source, relative_url)
+            return html_source
        except Exception:
            log.exception(
                "Exception while trying to render the README")

    @LoginRequired()
    @HasRepoPermissionAnyDecorator(
        'repository.read', 'repository.write', 'repository.admin')
    def index(self, repo_name):

        # Prepare the clone URL

        username = ''
        if c.rhodecode_user.username != User.DEFAULT_USER:
            username = safe_str(c.rhodecode_user.username)

        _def_clone_uri = _def_clone_uri_by_id = c.clone_uri_tmpl
        if '{repo}' in _def_clone_uri:
            _def_clone_uri_by_id = _def_clone_uri.replace(
                '{repo}', '_{repoid}')
        elif '{repoid}' in _def_clone_uri:
            _def_clone_uri_by_id = _def_clone_uri.replace(
                '_{repoid}', '{repo}')

        c.clone_repo_url = c.rhodecode_db_repo.clone_url(
            user=username, uri_tmpl=_def_clone_uri)
        c.clone_repo_url_id = c.rhodecode_db_repo.clone_url(
            user=username, uri_tmpl=_def_clone_uri_by_id)

        # If enabled, get statistics data

        c.show_stats = bool(c.rhodecode_db_repo.enable_statistics)

        stats = self.sa.query(Statistics)\
            .filter(Statistics.repository == c.rhodecode_db_repo)\
            .scalar()

        c.stats_percentage = 0

        if stats and stats.languages:
            c.no_data = False is c.rhodecode_db_repo.enable_statistics
            lang_stats_d = json.loads(stats.languages)

            # Sort first by decreasing count and second by the file extension,
            # so we have a consistent output.
            lang_stats_items = sorted(lang_stats_d.iteritems(),
                                      key=lambda k: (-k[1], k[0]))[:10]
            lang_stats = [(x, {"count": y,
                               "desc": LANGUAGES_EXTENSIONS_MAP.get(x)})
                          for x, y in lang_stats_items]

            c.trending_languages = json.dumps(lang_stats)
        else:
            c.no_data = True
            c.trending_languages = json.dumps({})

        c.enable_downloads = c.rhodecode_db_repo.enable_downloads
        c.repository_followers = self.scm_model.get_followers(
            c.rhodecode_db_repo)
        c.repository_forks = self.scm_model.get_forks(c.rhodecode_db_repo)
        c.repository_is_user_following = self.scm_model.is_following_repo(
            c.repo_name, c.rhodecode_user.user_id)

        if c.repository_requirements_missing:
            return render('summary/missing_requirements.mako')

        c.readme_data, c.readme_file = \
            self.__get_readme_data(c.rhodecode_db_repo)

        _load_changelog_summary()

        if request.is_xhr:
            return render('changelog/changelog_summary_data.mako')

        return render('summary/summary.mako')

    @LoginRequired()
    @XHRRequired()
    @HasRepoPermissionAnyDecorator(
        'repository.read', 'repository.write', 'repository.admin')
    @jsonify
    def repo_stats(self, repo_name, commit_id):
        _namespace = caches.get_repo_namespace_key(
            caches.SUMMARY_STATS, repo_name)
        show_stats = bool(c.rhodecode_db_repo.enable_statistics)
        cache_manager = caches.get_cache_manager('repo_cache_long', _namespace)
        _cache_key = caches.compute_key_from_params(
            repo_name, commit_id, show_stats)

        def compute_stats():
            code_stats = {}
            size = 0
            try:
                scm_instance = c.rhodecode_db_repo.scm_instance()
                commit = scm_instance.get_commit(commit_id)

                for node in commit.get_filenodes_generator():
                    size += node.size
                    if not show_stats:
                        continue
                    ext = lower(node.extension)
                    ext_info = LANGUAGES_EXTENSIONS_MAP.get(ext)
                    if ext_info:
                        if ext in code_stats:
                            code_stats[ext]['count'] += 1
                        else:
                            code_stats[ext] = {"count": 1, "desc": ext_info}
            except EmptyRepositoryError:
                pass
            return {'size': h.format_byte_size_binary(size),
                    'code_stats': code_stats}

        stats = cache_manager.get(_cache_key, createfunc=compute_stats)
        return stats

    def _switcher_reference_data(self, repo_name, references, is_svn):
        """Prepare reference data for given `references`"""
        items = []
        for name, commit_id in references.items():
            use_commit_id = '/' in name or is_svn
            items.append({
                'name': name,
                'commit_id': commit_id,
                'files_url': h.url(
                    'files_home',
                    repo_name=repo_name,
                    f_path=name if is_svn else '',
                    revision=commit_id if use_commit_id else name,
                    at=name)
            })
        return items

    @LoginRequired()
    @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
                                   'repository.admin')
    @jsonify
    def repo_refs_data(self, repo_name):
        repo = c.rhodecode_repo
        refs_to_create = [
            (_("Branch"), repo.branches, 'branch'),
            (_("Tag"), repo.tags, 'tag'),
            (_("Bookmark"), repo.bookmarks, 'book'),
        ]
        res = self._create_reference_data(repo, repo_name, refs_to_create)
        data = {
            'more': False,
            'results': res
        }
        return data

    @LoginRequired()
    @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
                                   'repository.admin')
    @jsonify
    def repo_default_reviewers_data(self, repo_name):
        return {
            'reviewers': [utils.reviewer_as_json(
                user=c.rhodecode_db_repo.user, reasons=None)]
        }

    @jsonify
    def repo_refs_changelog_data(self, repo_name):
        repo = c.rhodecode_repo

        refs_to_create = [
            (_("Branches"), repo.branches, 'branch'),
            (_("Closed branches"), repo.branches_closed, 'branch_closed'),
            # TODO: enable when vcs can handle bookmarks filters
            # (_("Bookmarks"), repo.bookmarks, "book"),
        ]
        res = self._create_reference_data(repo, repo_name, refs_to_create)
        data = {
            'more': False,
            'results': res
        }
        return data

    def _create_reference_data(self, repo, full_repo_name, refs_to_create):
        format_ref_id = utils.get_format_ref_id(repo)

        result = []
        for title, refs, ref_type in refs_to_create:
            if refs:
                result.append({
                    'text': title,
                    'children': self._create_reference_items(
                        repo, full_repo_name, refs, ref_type, format_ref_id),
                })
        return result

    def _create_reference_items(self, repo, full_repo_name, refs, ref_type,
                                format_ref_id):
        result = []
        is_svn = h.is_svn(repo)
        for ref_name, raw_id in refs.iteritems():
            files_url = self._create_files_url(
                repo, full_repo_name, ref_name, raw_id, is_svn)
            result.append({
                'text': ref_name,
                'id': format_ref_id(ref_name, raw_id),
                'raw_id': raw_id,
                'type': ref_type,
                'files_url': files_url,
            })
        return result

    def _create_files_url(self, repo, full_repo_name, ref_name, raw_id,
                          is_svn):
        use_commit_id = '/' in ref_name or is_svn
        return h.url(
            'files_home',
            repo_name=full_repo_name,
            f_path=ref_name if is_svn else '',
            revision=raw_id if use_commit_id else ref_name,
            at=ref_name)
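For example, under the assumed rewrite a README at the repository root that embeds an image with a relative path would end up pointing at the raw file for the landing commit (the paths below are purely illustrative):

# before rendering with relative_url:  <img src="images/logo.png">
# after the assumed rewrite:           <img src="<raw README directory URL>/images/logo.png">

The second hunk below makes the matching import change in rhodecode.lib.helpers, presumably so relative_links is also reachable through the h helper namespace; nothing else changes in the portion of that file shown here.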
@@ -1,2019 +1,2029 @@
# -*- coding: utf-8 -*-

# Copyright (C) 2010-2017 RhodeCode GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License, version 3
# (only), as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This program is dual-licensed. If you wish to learn more about the
# RhodeCode Enterprise Edition, including its added features, Support services,
# and proprietary license terms, please see https://rhodecode.com/licenses/

"""
Helper functions

Consists of functions to typically be used within templates, but also
available to Controllers. This module is available to both as 'h'.
"""

import random
import hashlib
import StringIO
import urllib
import math
import logging
import re
import urlparse
import time
import string
import hashlib
import pygments
import itertools
import fnmatch

from datetime import datetime
from functools import partial
from pygments.formatters.html import HtmlFormatter
from pygments import highlight as code_highlight
from pygments.lexers import (
    get_lexer_by_name, get_lexer_for_filename, get_lexer_for_mimetype)
from pylons import url as pylons_url
from pylons.i18n.translation import _, ungettext
from pyramid.threadlocal import get_current_request

from webhelpers.html import literal, HTML, escape
from webhelpers.html.tools import *
from webhelpers.html.builder import make_tag
from webhelpers.html.tags import auto_discovery_link, checkbox, css_classes, \
    end_form, file, form as wh_form, hidden, image, javascript_link, link_to, \
    link_to_if, link_to_unless, ol, required_legend, select, stylesheet_link, \
    submit, text, password, textarea, title, ul, xml_declaration, radio
from webhelpers.html.tools import auto_link, button_to, highlight, \
    js_obfuscate, mail_to, strip_links, strip_tags, tag_re
from webhelpers.pylonslib import Flash as _Flash
from webhelpers.text import chop_at, collapse, convert_accented_entities, \
    convert_misc_entities, lchop, plural, rchop, remove_formatting, \
    replace_whitespace, urlify, truncate, wrap_paragraphs
from webhelpers.date import time_ago_in_words
from webhelpers.paginate import Page as _Page
from webhelpers.html.tags import _set_input_attrs, _set_id_attr, \
    convert_boolean_attrs, NotGiven, _make_safe_id_component
from webhelpers2.number import format_byte_size

from rhodecode.lib.action_parser import action_parser
from rhodecode.lib.ext_json import json
from rhodecode.lib.utils import repo_name_slug, get_custom_lexer
from rhodecode.lib.utils2 import str2bool, safe_unicode, safe_str, \
    get_commit_safe, datetime_to_time, time_to_datetime, time_to_utcdatetime, \
    AttributeDict, safe_int, md5, md5_safe
-from rhodecode.lib.markup_renderer import MarkupRenderer
+from rhodecode.lib.markup_renderer import MarkupRenderer, relative_links
from rhodecode.lib.vcs.exceptions import CommitDoesNotExistError
from rhodecode.lib.vcs.backends.base import BaseChangeset, EmptyCommit
from rhodecode.config.conf import DATE_FORMAT, DATETIME_FORMAT
from rhodecode.model.changeset_status import ChangesetStatusModel
from rhodecode.model.db import Permission, User, Repository
from rhodecode.model.repo_group import RepoGroupModel
from rhodecode.model.settings import IssueTrackerSettingsModel

log = logging.getLogger(__name__)


DEFAULT_USER = User.DEFAULT_USER
DEFAULT_USER_EMAIL = User.DEFAULT_USER_EMAIL


def url(*args, **kw):
    return pylons_url(*args, **kw)


def pylons_url_current(*args, **kw):
    """
    This function overrides pylons.url.current() which returns the current
    path so that it will also work from a pyramid only context. This
    should be removed once port to pyramid is complete.
    """
    if not args and not kw:
        request = get_current_request()
        return request.path
    return pylons_url.current(*args, **kw)

url.current = pylons_url_current


def url_replace(**qargs):
    """ Returns the current request url while replacing query string args """

    request = get_current_request()
    new_args = request.GET.mixed()
    new_args.update(qargs)
    return url('', **new_args)


def asset(path, ver=None, **kwargs):
    """
    Helper to generate a static asset file path for rhodecode assets

    eg. h.asset('images/image.png', ver='3923')

    :param path: path of asset
    :param ver: optional version query param to append as ?ver=
    """
    request = get_current_request()
    query = {}
    query.update(kwargs)
    if ver:
        query = {'ver': ver}
    return request.static_path(
        'rhodecode:public/{}'.format(path), _query=query)


default_html_escape_table = {
    ord('&'): u'&amp;',
    ord('<'): u'&lt;',
    ord('>'): u'&gt;',
    ord('"'): u'&quot;',
    ord("'"): u'&#39;',
}


def html_escape(text, html_escape_table=default_html_escape_table):
    """Produce entities within text."""
    return text.translate(html_escape_table)


def chop_at_smart(s, sub, inclusive=False, suffix_if_chopped=None):
    """
    Truncate string ``s`` at the first occurrence of ``sub``.

    If ``inclusive`` is true, truncate just after ``sub`` rather than at it.
    """
    suffix_if_chopped = suffix_if_chopped or ''
    pos = s.find(sub)
    if pos == -1:
        return s

    if inclusive:
        pos += len(sub)

    chopped = s[:pos]
    left = s[pos:].strip()

    if left and suffix_if_chopped:
        chopped += suffix_if_chopped

    return chopped


def shorter(text, size=20):
    postfix = '...'
    if len(text) > size:
        return text[:size - len(postfix)] + postfix
    return text


def _reset(name, value=None, id=NotGiven, type="reset", **attrs):
    """
    Reset button
    """
    _set_input_attrs(attrs, type, name, value)
    _set_id_attr(attrs, id, name)
    convert_boolean_attrs(attrs, ["disabled"])
    return HTML.input(**attrs)

reset = _reset
safeid = _make_safe_id_component


def branding(name, length=40):
    return truncate(name, length, indicator="")


def FID(raw_id, path):
    """
    Creates a unique ID for filenode based on it's hash of path and commit
    it's safe to use in urls

    :param raw_id:
    :param path:
    """

    return 'c-%s-%s' % (short_id(raw_id), md5_safe(path)[:12])


class _GetError(object):
    """Get error from form_errors, and represent it as span wrapped error
    message

    :param field_name: field to fetch errors for
    :param form_errors: form errors dict
    """

    def __call__(self, field_name, form_errors):
        tmpl = """<span class="error_msg">%s</span>"""
        if form_errors and field_name in form_errors:
            return literal(tmpl % form_errors.get(field_name))

get_error = _GetError()


class _ToolTip(object):

    def __call__(self, tooltip_title, trim_at=50):
        """
        Special function just to wrap our text into nice formatted
        autowrapped text

        :param tooltip_title:
        """
        tooltip_title = escape(tooltip_title)
        tooltip_title = tooltip_title.replace('<', '&lt;').replace('>', '&gt;')
        return tooltip_title
tooltip = _ToolTip()


def files_breadcrumbs(repo_name, commit_id, file_path):
    if isinstance(file_path, str):
        file_path = safe_unicode(file_path)

    # TODO: johbo: Is this always a url like path, or is this operating
    # system dependent?
    path_segments = file_path.split('/')

    repo_name_html = escape(repo_name)
    if len(path_segments) == 1 and path_segments[0] == '':
        url_segments = [repo_name_html]
    else:
        url_segments = [
            link_to(
                repo_name_html,
                url('files_home',
                    repo_name=repo_name,
                    revision=commit_id,
                    f_path=''),
                class_='pjax-link')]

    last_cnt = len(path_segments) - 1
    for cnt, segment in enumerate(path_segments):
        if not segment:
            continue
        segment_html = escape(segment)

        if cnt != last_cnt:
            url_segments.append(
                link_to(
                    segment_html,
                    url('files_home',
                        repo_name=repo_name,
                        revision=commit_id,
                        f_path='/'.join(path_segments[:cnt + 1])),
                    class_='pjax-link'))
        else:
            url_segments.append(segment_html)

    return literal('/'.join(url_segments))


285 class CodeHtmlFormatter(HtmlFormatter):
285 class CodeHtmlFormatter(HtmlFormatter):
286 """
286 """
287 My code Html Formatter for source codes
287 My code Html Formatter for source codes
288 """
288 """
289
289
290 def wrap(self, source, outfile):
290 def wrap(self, source, outfile):
291 return self._wrap_div(self._wrap_pre(self._wrap_code(source)))
291 return self._wrap_div(self._wrap_pre(self._wrap_code(source)))
292
292
293 def _wrap_code(self, source):
293 def _wrap_code(self, source):
294 for cnt, it in enumerate(source):
294 for cnt, it in enumerate(source):
295 i, t = it
295 i, t = it
296 t = '<div id="L%s">%s</div>' % (cnt + 1, t)
296 t = '<div id="L%s">%s</div>' % (cnt + 1, t)
297 yield i, t
297 yield i, t
298
298
299 def _wrap_tablelinenos(self, inner):
299 def _wrap_tablelinenos(self, inner):
300 dummyoutfile = StringIO.StringIO()
300 dummyoutfile = StringIO.StringIO()
301 lncount = 0
301 lncount = 0
302 for t, line in inner:
302 for t, line in inner:
303 if t:
303 if t:
304 lncount += 1
304 lncount += 1
305 dummyoutfile.write(line)
305 dummyoutfile.write(line)
306
306
307 fl = self.linenostart
307 fl = self.linenostart
308 mw = len(str(lncount + fl - 1))
308 mw = len(str(lncount + fl - 1))
309 sp = self.linenospecial
309 sp = self.linenospecial
310 st = self.linenostep
310 st = self.linenostep
311 la = self.lineanchors
311 la = self.lineanchors
312 aln = self.anchorlinenos
312 aln = self.anchorlinenos
313 nocls = self.noclasses
313 nocls = self.noclasses
314 if sp:
314 if sp:
315 lines = []
315 lines = []
316
316
317 for i in range(fl, fl + lncount):
317 for i in range(fl, fl + lncount):
318 if i % st == 0:
318 if i % st == 0:
319 if i % sp == 0:
319 if i % sp == 0:
320 if aln:
320 if aln:
321 lines.append('<a href="#%s%d" class="special">%*d</a>' %
321 lines.append('<a href="#%s%d" class="special">%*d</a>' %
322 (la, i, mw, i))
322 (la, i, mw, i))
323 else:
323 else:
324 lines.append('<span class="special">%*d</span>' % (mw, i))
324 lines.append('<span class="special">%*d</span>' % (mw, i))
325 else:
325 else:
326 if aln:
326 if aln:
327 lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
327 lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
328 else:
328 else:
329 lines.append('%*d' % (mw, i))
329 lines.append('%*d' % (mw, i))
330 else:
330 else:
331 lines.append('')
331 lines.append('')
332 ls = '\n'.join(lines)
332 ls = '\n'.join(lines)
333 else:
333 else:
334 lines = []
334 lines = []
335 for i in range(fl, fl + lncount):
335 for i in range(fl, fl + lncount):
336 if i % st == 0:
336 if i % st == 0:
337 if aln:
337 if aln:
338 lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
338 lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
339 else:
339 else:
340 lines.append('%*d' % (mw, i))
340 lines.append('%*d' % (mw, i))
341 else:
341 else:
342 lines.append('')
342 lines.append('')
343 ls = '\n'.join(lines)
343 ls = '\n'.join(lines)
344
344
345 # in case you wonder about the seemingly redundant <div> here: since the
345 # in case you wonder about the seemingly redundant <div> here: since the
346 # content in the other cell also is wrapped in a div, some browsers in
346 # content in the other cell also is wrapped in a div, some browsers in
347 # some configurations seem to mess up the formatting...
347 # some configurations seem to mess up the formatting...
348 if nocls:
348 if nocls:
349 yield 0, ('<table class="%stable">' % self.cssclass +
349 yield 0, ('<table class="%stable">' % self.cssclass +
350 '<tr><td><div class="linenodiv" '
350 '<tr><td><div class="linenodiv" '
351 'style="background-color: #f0f0f0; padding-right: 10px">'
351 'style="background-color: #f0f0f0; padding-right: 10px">'
352 '<pre style="line-height: 125%">' +
352 '<pre style="line-height: 125%">' +
353 ls + '</pre></div></td><td id="hlcode" class="code">')
353 ls + '</pre></div></td><td id="hlcode" class="code">')
354 else:
354 else:
355 yield 0, ('<table class="%stable">' % self.cssclass +
355 yield 0, ('<table class="%stable">' % self.cssclass +
356 '<tr><td class="linenos"><div class="linenodiv"><pre>' +
356 '<tr><td class="linenos"><div class="linenodiv"><pre>' +
357 ls + '</pre></div></td><td id="hlcode" class="code">')
357 ls + '</pre></div></td><td id="hlcode" class="code">')
358 yield 0, dummyoutfile.getvalue()
358 yield 0, dummyoutfile.getvalue()
359 yield 0, '</td></tr></table>'
359 yield 0, '</td></tr></table>'
360
360
361
361
362 class SearchContentCodeHtmlFormatter(CodeHtmlFormatter):
362 class SearchContentCodeHtmlFormatter(CodeHtmlFormatter):
363 def __init__(self, **kw):
363 def __init__(self, **kw):
364 # only show these line numbers if set
364 # only show these line numbers if set
365 self.only_lines = kw.pop('only_line_numbers', [])
365 self.only_lines = kw.pop('only_line_numbers', [])
366 self.query_terms = kw.pop('query_terms', [])
366 self.query_terms = kw.pop('query_terms', [])
367 self.max_lines = kw.pop('max_lines', 5)
367 self.max_lines = kw.pop('max_lines', 5)
368 self.line_context = kw.pop('line_context', 3)
368 self.line_context = kw.pop('line_context', 3)
369 self.url = kw.pop('url', None)
369 self.url = kw.pop('url', None)
370
370
371 super(CodeHtmlFormatter, self).__init__(**kw)
371 super(CodeHtmlFormatter, self).__init__(**kw)
372
372
373 def _wrap_code(self, source):
373 def _wrap_code(self, source):
374 for cnt, it in enumerate(source):
374 for cnt, it in enumerate(source):
375 i, t = it
375 i, t = it
376 t = '<pre>%s</pre>' % t
376 t = '<pre>%s</pre>' % t
377 yield i, t
377 yield i, t
378
378
379 def _wrap_tablelinenos(self, inner):
379 def _wrap_tablelinenos(self, inner):
380 yield 0, '<table class="code-highlight %stable">' % self.cssclass
380 yield 0, '<table class="code-highlight %stable">' % self.cssclass
381
381
382 last_shown_line_number = 0
382 last_shown_line_number = 0
383 current_line_number = 1
383 current_line_number = 1
384
384
385 for t, line in inner:
385 for t, line in inner:
386 if not t:
386 if not t:
387 yield t, line
387 yield t, line
388 continue
388 continue
389
389
390 if current_line_number in self.only_lines:
390 if current_line_number in self.only_lines:
391 if last_shown_line_number + 1 != current_line_number:
391 if last_shown_line_number + 1 != current_line_number:
392 yield 0, '<tr>'
392 yield 0, '<tr>'
393 yield 0, '<td class="line">...</td>'
393 yield 0, '<td class="line">...</td>'
394 yield 0, '<td id="hlcode" class="code"></td>'
394 yield 0, '<td id="hlcode" class="code"></td>'
395 yield 0, '</tr>'
395 yield 0, '</tr>'
396
396
397 yield 0, '<tr>'
397 yield 0, '<tr>'
398 if self.url:
398 if self.url:
399 yield 0, '<td class="line"><a href="%s#L%i">%i</a></td>' % (
399 yield 0, '<td class="line"><a href="%s#L%i">%i</a></td>' % (
400 self.url, current_line_number, current_line_number)
400 self.url, current_line_number, current_line_number)
401 else:
401 else:
402 yield 0, '<td class="line"><a href="">%i</a></td>' % (
402 yield 0, '<td class="line"><a href="">%i</a></td>' % (
403 current_line_number)
403 current_line_number)
404 yield 0, '<td id="hlcode" class="code">' + line + '</td>'
404 yield 0, '<td id="hlcode" class="code">' + line + '</td>'
405 yield 0, '</tr>'
405 yield 0, '</tr>'
406
406
407 last_shown_line_number = current_line_number
407 last_shown_line_number = current_line_number
408
408
409 current_line_number += 1
409 current_line_number += 1
410
410
411
411
412 yield 0, '</table>'
412 yield 0, '</table>'
413
413
414
414
415 def extract_phrases(text_query):
415 def extract_phrases(text_query):
416 """
416 """
417 Extracts phrases from search term string making sure phrases
417 Extracts phrases from search term string making sure phrases
418 contained in double quotes are kept together - and discarding empty values
418 contained in double quotes are kept together - and discarding empty values
419 or fully whitespace values eg.
419 or fully whitespace values eg.
420
420
421 'some text "a phrase" more' => ['some', 'text', 'a phrase', 'more']
421 'some text "a phrase" more' => ['some', 'text', 'a phrase', 'more']
422
422
423 """
423 """
424
424
425 in_phrase = False
425 in_phrase = False
426 buf = ''
426 buf = ''
427 phrases = []
427 phrases = []
428 for char in text_query:
428 for char in text_query:
429 if in_phrase:
429 if in_phrase:
430 if char == '"': # end phrase
430 if char == '"': # end phrase
431 phrases.append(buf)
431 phrases.append(buf)
432 buf = ''
432 buf = ''
433 in_phrase = False
433 in_phrase = False
434 continue
434 continue
435 else:
435 else:
436 buf += char
436 buf += char
437 continue
437 continue
438 else:
438 else:
439 if char == '"': # start phrase
439 if char == '"': # start phrase
440 in_phrase = True
440 in_phrase = True
441 phrases.append(buf)
441 phrases.append(buf)
442 buf = ''
442 buf = ''
443 continue
443 continue
444 elif char == ' ':
444 elif char == ' ':
445 phrases.append(buf)
445 phrases.append(buf)
446 buf = ''
446 buf = ''
447 continue
447 continue
448 else:
448 else:
449 buf += char
449 buf += char
450
450
451 phrases.append(buf)
451 phrases.append(buf)
452 phrases = [phrase.strip() for phrase in phrases if phrase.strip()]
452 phrases = [phrase.strip() for phrase in phrases if phrase.strip()]
453 return phrases
453 return phrases
454
454
455
455
456 def get_matching_offsets(text, phrases):
456 def get_matching_offsets(text, phrases):
457 """
457 """
458 Returns a list of string offsets in `text` that the list of `terms` match
458 Returns a list of string offsets in `text` that the list of `terms` match
459
459
460 >>> get_matching_offsets('some text here', ['some', 'here'])
460 >>> get_matching_offsets('some text here', ['some', 'here'])
461 [(0, 4), (10, 14)]
461 [(0, 4), (10, 14)]
462
462
463 """
463 """
464 offsets = []
464 offsets = []
465 for phrase in phrases:
465 for phrase in phrases:
466 for match in re.finditer(phrase, text):
466 for match in re.finditer(phrase, text):
467 offsets.append((match.start(), match.end()))
467 offsets.append((match.start(), match.end()))
468
468
469 return offsets
469 return offsets
470
470
471
471
472 def normalize_text_for_matching(x):
472 def normalize_text_for_matching(x):
473 """
473 """
474 Replaces all non alnum characters to spaces and lower cases the string,
474 Replaces all non alnum characters to spaces and lower cases the string,
475 useful for comparing two text strings without punctuation
475 useful for comparing two text strings without punctuation
476 """
476 """
477 return re.sub(r'[^\w]', ' ', x.lower())
477 return re.sub(r'[^\w]', ' ', x.lower())
478
478
479
479
480 def get_matching_line_offsets(lines, terms):
480 def get_matching_line_offsets(lines, terms):
481 """ Return a set of `lines` indices (starting from 1) matching a
481 """ Return a set of `lines` indices (starting from 1) matching a
482 text search query, along with `context` lines above/below matching lines
482 text search query, along with `context` lines above/below matching lines
483
483
484 :param lines: list of strings representing lines
484 :param lines: list of strings representing lines
485 :param terms: search term string to match in lines eg. 'some text'
485 :param terms: search term string to match in lines eg. 'some text'
486 :param context: number of lines above/below a matching line to add to result
486 :param context: number of lines above/below a matching line to add to result
487 :param max_lines: cut off for lines of interest
487 :param max_lines: cut off for lines of interest
488 eg.
488 eg.
489
489
490 text = '''
490 text = '''
491 words words words
491 words words words
492 words words words
492 words words words
493 some text some
493 some text some
494 words words words
494 words words words
495 words words words
495 words words words
496 text here what
496 text here what
497 '''
497 '''
498 get_matching_line_offsets(text, 'text', context=1)
498 get_matching_line_offsets(text, 'text', context=1)
499 {3: [(5, 9)], 6: [(0, 4)]]
499 {3: [(5, 9)], 6: [(0, 4)]]
500
500
501 """
501 """
502 matching_lines = {}
502 matching_lines = {}
503 phrases = [normalize_text_for_matching(phrase)
503 phrases = [normalize_text_for_matching(phrase)
504 for phrase in extract_phrases(terms)]
504 for phrase in extract_phrases(terms)]
505
505
506 for line_index, line in enumerate(lines, start=1):
506 for line_index, line in enumerate(lines, start=1):
507 match_offsets = get_matching_offsets(
507 match_offsets = get_matching_offsets(
508 normalize_text_for_matching(line), phrases)
508 normalize_text_for_matching(line), phrases)
509 if match_offsets:
509 if match_offsets:
510 matching_lines[line_index] = match_offsets
510 matching_lines[line_index] = match_offsets
511
511
512 return matching_lines
512 return matching_lines
513
513
514
514
515 def hsv_to_rgb(h, s, v):
515 def hsv_to_rgb(h, s, v):
516 """ Convert hsv color values to rgb """
516 """ Convert hsv color values to rgb """
517
517
518 if s == 0.0:
518 if s == 0.0:
519 return v, v, v
519 return v, v, v
520 i = int(h * 6.0) # XXX assume int() truncates!
520 i = int(h * 6.0) # XXX assume int() truncates!
521 f = (h * 6.0) - i
521 f = (h * 6.0) - i
522 p = v * (1.0 - s)
522 p = v * (1.0 - s)
523 q = v * (1.0 - s * f)
523 q = v * (1.0 - s * f)
524 t = v * (1.0 - s * (1.0 - f))
524 t = v * (1.0 - s * (1.0 - f))
525 i = i % 6
525 i = i % 6
526 if i == 0:
526 if i == 0:
527 return v, t, p
527 return v, t, p
528 if i == 1:
528 if i == 1:
529 return q, v, p
529 return q, v, p
530 if i == 2:
530 if i == 2:
531 return p, v, t
531 return p, v, t
532 if i == 3:
532 if i == 3:
533 return p, q, v
533 return p, q, v
534 if i == 4:
534 if i == 4:
535 return t, p, v
535 return t, p, v
536 if i == 5:
536 if i == 5:
537 return v, p, q
537 return v, p, q
538
538
539
539
540 def unique_color_generator(n=10000, saturation=0.10, lightness=0.95):
540 def unique_color_generator(n=10000, saturation=0.10, lightness=0.95):
541 """
541 """
542 Generator for getting n of evenly distributed colors using
542 Generator for getting n of evenly distributed colors using
543 hsv color and golden ratio. It always return same order of colors
543 hsv color and golden ratio. It always return same order of colors
544
544
545 :param n: number of colors to generate
545 :param n: number of colors to generate
546 :param saturation: saturation of returned colors
546 :param saturation: saturation of returned colors
547 :param lightness: lightness of returned colors
547 :param lightness: lightness of returned colors
548 :returns: RGB tuple
548 :returns: RGB tuple
549 """
549 """
550
550
551 golden_ratio = 0.618033988749895
551 golden_ratio = 0.618033988749895
552 h = 0.22717784590367374
552 h = 0.22717784590367374
553
553
554 for _ in xrange(n):
554 for _ in xrange(n):
555 h += golden_ratio
555 h += golden_ratio
556 h %= 1
556 h %= 1
557 HSV_tuple = [h, saturation, lightness]
557 HSV_tuple = [h, saturation, lightness]
558 RGB_tuple = hsv_to_rgb(*HSV_tuple)
558 RGB_tuple = hsv_to_rgb(*HSV_tuple)
559 yield map(lambda x: str(int(x * 256)), RGB_tuple)
559 yield map(lambda x: str(int(x * 256)), RGB_tuple)
560
560
561
561
562 def color_hasher(n=10000, saturation=0.10, lightness=0.95):
562 def color_hasher(n=10000, saturation=0.10, lightness=0.95):
563 """
563 """
564 Returns a function which when called with an argument returns a unique
564 Returns a function which when called with an argument returns a unique
565 color for that argument, eg.
565 color for that argument, eg.
566
566
567 :param n: number of colors to generate
567 :param n: number of colors to generate
568 :param saturation: saturation of returned colors
568 :param saturation: saturation of returned colors
569 :param lightness: lightness of returned colors
569 :param lightness: lightness of returned colors
570 :returns: css RGB string
570 :returns: css RGB string
571
571
572 >>> color_hash = color_hasher()
572 >>> color_hash = color_hasher()
573 >>> color_hash('hello')
573 >>> color_hash('hello')
574 'rgb(34, 12, 59)'
574 'rgb(34, 12, 59)'
575 >>> color_hash('hello')
575 >>> color_hash('hello')
576 'rgb(34, 12, 59)'
576 'rgb(34, 12, 59)'
577 >>> color_hash('other')
577 >>> color_hash('other')
578 'rgb(90, 224, 159)'
578 'rgb(90, 224, 159)'
579 """
579 """
580
580
581 color_dict = {}
581 color_dict = {}
582 cgenerator = unique_color_generator(
582 cgenerator = unique_color_generator(
583 saturation=saturation, lightness=lightness)
583 saturation=saturation, lightness=lightness)
584
584
585 def get_color_string(thing):
585 def get_color_string(thing):
586 if thing in color_dict:
586 if thing in color_dict:
587 col = color_dict[thing]
587 col = color_dict[thing]
588 else:
588 else:
589 col = color_dict[thing] = cgenerator.next()
589 col = color_dict[thing] = cgenerator.next()
590 return "rgb(%s)" % (', '.join(col))
590 return "rgb(%s)" % (', '.join(col))
591
591
592 return get_color_string
592 return get_color_string
593
593
594
594
595 def get_lexer_safe(mimetype=None, filepath=None):
595 def get_lexer_safe(mimetype=None, filepath=None):
596 """
596 """
597 Tries to return a relevant pygments lexer using mimetype/filepath name,
597 Tries to return a relevant pygments lexer using mimetype/filepath name,
598 defaulting to plain text if none could be found
598 defaulting to plain text if none could be found
599 """
599 """
600 lexer = None
600 lexer = None
601 try:
601 try:
602 if mimetype:
602 if mimetype:
603 lexer = get_lexer_for_mimetype(mimetype)
603 lexer = get_lexer_for_mimetype(mimetype)
604 if not lexer:
604 if not lexer:
605 lexer = get_lexer_for_filename(filepath)
605 lexer = get_lexer_for_filename(filepath)
606 except pygments.util.ClassNotFound:
606 except pygments.util.ClassNotFound:
607 pass
607 pass
608
608
609 if not lexer:
609 if not lexer:
610 lexer = get_lexer_by_name('text')
610 lexer = get_lexer_by_name('text')
611
611
612 return lexer
612 return lexer
613
613
614
614
615 def get_lexer_for_filenode(filenode):
615 def get_lexer_for_filenode(filenode):
616 lexer = get_custom_lexer(filenode.extension) or filenode.lexer
616 lexer = get_custom_lexer(filenode.extension) or filenode.lexer
617 return lexer
617 return lexer
618
618
619
619
620 def pygmentize(filenode, **kwargs):
620 def pygmentize(filenode, **kwargs):
621 """
621 """
622 Highlights the content of the given filenode using pygments.
622 Highlights the content of the given filenode using pygments.
623
623
624 :param filenode:
624 :param filenode:
625 """
625 """
626 lexer = get_lexer_for_filenode(filenode)
626 lexer = get_lexer_for_filenode(filenode)
627 return literal(code_highlight(filenode.content, lexer,
627 return literal(code_highlight(filenode.content, lexer,
628 CodeHtmlFormatter(**kwargs)))
628 CodeHtmlFormatter(**kwargs)))
629
629
630
630
631 def is_following_repo(repo_name, user_id):
631 def is_following_repo(repo_name, user_id):
632 from rhodecode.model.scm import ScmModel
632 from rhodecode.model.scm import ScmModel
633 return ScmModel().is_following_repo(repo_name, user_id)
633 return ScmModel().is_following_repo(repo_name, user_id)
634
634
635
635
636 class _Message(object):
636 class _Message(object):
637 """A message returned by ``Flash.pop_messages()``.
637 """A message returned by ``Flash.pop_messages()``.
638
638
639 Converting the message to a string returns the message text. Instances
639 Converting the message to a string returns the message text. Instances
640 also have the following attributes:
640 also have the following attributes:
641
641
642 * ``message``: the message text.
642 * ``message``: the message text.
643 * ``category``: the category specified when the message was created.
643 * ``category``: the category specified when the message was created.
644 """
644 """
645
645
646 def __init__(self, category, message):
646 def __init__(self, category, message):
647 self.category = category
647 self.category = category
648 self.message = message
648 self.message = message
649
649
650 def __str__(self):
650 def __str__(self):
651 return self.message
651 return self.message
652
652
653 __unicode__ = __str__
653 __unicode__ = __str__
654
654
655 def __html__(self):
655 def __html__(self):
656 return escape(safe_unicode(self.message))
656 return escape(safe_unicode(self.message))
657
657
658
658
659 class Flash(_Flash):
659 class Flash(_Flash):
660
660
661 def pop_messages(self):
661 def pop_messages(self):
662 """Return all accumulated messages and delete them from the session.
662 """Return all accumulated messages and delete them from the session.
663
663
664 The return value is a list of ``Message`` objects.
664 The return value is a list of ``Message`` objects.
665 """
665 """
666 from pylons import session
666 from pylons import session
667
667
668 messages = []
668 messages = []
669
669
670 # Pop the 'old' pylons flash messages. They are tuples of the form
670 # Pop the 'old' pylons flash messages. They are tuples of the form
671 # (category, message)
671 # (category, message)
672 for cat, msg in session.pop(self.session_key, []):
672 for cat, msg in session.pop(self.session_key, []):
673 messages.append(_Message(cat, msg))
673 messages.append(_Message(cat, msg))
674
674
675 # Pop the 'new' pyramid flash messages for each category as list
675 # Pop the 'new' pyramid flash messages for each category as list
676 # of strings.
676 # of strings.
677 for cat in self.categories:
677 for cat in self.categories:
678 for msg in session.pop_flash(queue=cat):
678 for msg in session.pop_flash(queue=cat):
679 messages.append(_Message(cat, msg))
679 messages.append(_Message(cat, msg))
680 # Map messages from the default queue to the 'notice' category.
680 # Map messages from the default queue to the 'notice' category.
681 for msg in session.pop_flash():
681 for msg in session.pop_flash():
682 messages.append(_Message('notice', msg))
682 messages.append(_Message('notice', msg))
683
683
684 session.save()
684 session.save()
685 return messages
685 return messages
686
686
687 def json_alerts(self):
687 def json_alerts(self):
688 payloads = []
688 payloads = []
689 messages = flash.pop_messages()
689 messages = flash.pop_messages()
690 if messages:
690 if messages:
691 for message in messages:
691 for message in messages:
692 subdata = {}
692 subdata = {}
693 if hasattr(message.message, 'rsplit'):
693 if hasattr(message.message, 'rsplit'):
694 flash_data = message.message.rsplit('|DELIM|', 1)
694 flash_data = message.message.rsplit('|DELIM|', 1)
695 org_message = flash_data[0]
695 org_message = flash_data[0]
696 if len(flash_data) > 1:
696 if len(flash_data) > 1:
697 subdata = json.loads(flash_data[1])
697 subdata = json.loads(flash_data[1])
698 else:
698 else:
699 org_message = message.message
699 org_message = message.message
700 payloads.append({
700 payloads.append({
701 'message': {
701 'message': {
702 'message': u'{}'.format(org_message),
702 'message': u'{}'.format(org_message),
703 'level': message.category,
703 'level': message.category,
704 'force': True,
704 'force': True,
705 'subdata': subdata
705 'subdata': subdata
706 }
706 }
707 })
707 })
708 return json.dumps(payloads)
708 return json.dumps(payloads)
709
709
710 flash = Flash()
710 flash = Flash()
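# Illustrative note (not part of the original module): json_alerts() splits
# each message on the '|DELIM|' marker used to attach structured data to a
# flash message, e.g. for a hypothetical message string:
#
#   >>> flash_data = 'Settings saved|DELIM|{"key": "val"}'.rsplit('|DELIM|', 1)
#   >>> flash_data[0], json.loads(flash_data[1])
#   ('Settings saved', {u'key': u'val'})
#
# Messages without the marker end up with an empty 'subdata' dict.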
711
711
712 #==============================================================================
712 #==============================================================================
713 # SCM FILTERS available via h.
713 # SCM FILTERS available via h.
714 #==============================================================================
714 #==============================================================================
715 from rhodecode.lib.vcs.utils import author_name, author_email
715 from rhodecode.lib.vcs.utils import author_name, author_email
716 from rhodecode.lib.utils2 import credentials_filter, age as _age
716 from rhodecode.lib.utils2 import credentials_filter, age as _age
717 from rhodecode.model.db import User, ChangesetStatus
717 from rhodecode.model.db import User, ChangesetStatus
718
718
719 age = _age
719 age = _age
720 capitalize = lambda x: x.capitalize()
720 capitalize = lambda x: x.capitalize()
721 email = author_email
721 email = author_email
722 short_id = lambda x: x[:12]
722 short_id = lambda x: x[:12]
723 hide_credentials = lambda x: ''.join(credentials_filter(x))
723 hide_credentials = lambda x: ''.join(credentials_filter(x))
724
724
725
725
726 def age_component(datetime_iso, value=None, time_is_local=False):
726 def age_component(datetime_iso, value=None, time_is_local=False):
727 title = value or format_date(datetime_iso)
727 title = value or format_date(datetime_iso)
728 tzinfo = '+00:00'
728 tzinfo = '+00:00'
729
729
730 # detect if we have timezone info, otherwise add it
730 # detect if we have timezone info, otherwise add it
731 if isinstance(datetime_iso, datetime) and not datetime_iso.tzinfo:
731 if isinstance(datetime_iso, datetime) and not datetime_iso.tzinfo:
732 if time_is_local:
732 if time_is_local:
733 tzinfo = time.strftime("+%H:%M",
733 tzinfo = time.strftime("+%H:%M",
734 time.gmtime(
734 time.gmtime(
735 (datetime.now() - datetime.utcnow()).seconds + 1
735 (datetime.now() - datetime.utcnow()).seconds + 1
736 )
736 )
737 )
737 )
738
738
739 return literal(
739 return literal(
740 '<time class="timeago tooltip" '
740 '<time class="timeago tooltip" '
741 'title="{1}{2}" datetime="{0}{2}">{1}</time>'.format(
741 'title="{1}{2}" datetime="{0}{2}">{1}</time>'.format(
742 datetime_iso, title, tzinfo))
742 datetime_iso, title, tzinfo))
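# Illustrative example (not part of the original module): for a naive local
# datetime the helper appends a computed '+HH:MM' offset, otherwise it
# assumes UTC, producing markup roughly like:
#
#   <time class="timeago tooltip" title="Sun, 01 Jan 2017 00:00:00+00:00"
#         datetime="2017-01-01T00:00:00+00:00">Sun, 01 Jan 2017 00:00:00</time>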
743
743
744
744
745 def _shorten_commit_id(commit_id):
745 def _shorten_commit_id(commit_id):
746 from rhodecode import CONFIG
746 from rhodecode import CONFIG
747 def_len = safe_int(CONFIG.get('rhodecode_show_sha_length', 12))
747 def_len = safe_int(CONFIG.get('rhodecode_show_sha_length', 12))
748 return commit_id[:def_len]
748 return commit_id[:def_len]
749
749
750
750
751 def show_id(commit):
751 def show_id(commit):
752 """
752 """
753 Configurable function that shows the commit ID;
753 Configurable function that shows the commit ID;
754 by default it's r123:fffeeefffeee
754 by default it's r123:fffeeefffeee
755
755
756 :param commit: commit instance
756 :param commit: commit instance
757 """
757 """
758 from rhodecode import CONFIG
758 from rhodecode import CONFIG
759 show_idx = str2bool(CONFIG.get('rhodecode_show_revision_number', True))
759 show_idx = str2bool(CONFIG.get('rhodecode_show_revision_number', True))
760
760
761 raw_id = _shorten_commit_id(commit.raw_id)
761 raw_id = _shorten_commit_id(commit.raw_id)
762 if show_idx:
762 if show_idx:
763 return 'r%s:%s' % (commit.idx, raw_id)
763 return 'r%s:%s' % (commit.idx, raw_id)
764 else:
764 else:
765 return '%s' % (raw_id, )
765 return '%s' % (raw_id, )
766
766
767
767
768 def format_date(date):
768 def format_date(date):
769 """
769 """
770 use a standardized formatting for dates used in RhodeCode
770 use a standardized formatting for dates used in RhodeCode
771
771
772 :param date: date/datetime object
772 :param date: date/datetime object
773 :return: formatted date
773 :return: formatted date
774 """
774 """
775
775
776 if date:
776 if date:
777 _fmt = "%a, %d %b %Y %H:%M:%S"
777 _fmt = "%a, %d %b %Y %H:%M:%S"
778 return safe_unicode(date.strftime(_fmt))
778 return safe_unicode(date.strftime(_fmt))
779
779
780 return u""
780 return u""
781
781
782
782
783 class _RepoChecker(object):
783 class _RepoChecker(object):
784
784
785 def __init__(self, backend_alias):
785 def __init__(self, backend_alias):
786 self._backend_alias = backend_alias
786 self._backend_alias = backend_alias
787
787
788 def __call__(self, repository):
788 def __call__(self, repository):
789 if hasattr(repository, 'alias'):
789 if hasattr(repository, 'alias'):
790 _type = repository.alias
790 _type = repository.alias
791 elif hasattr(repository, 'repo_type'):
791 elif hasattr(repository, 'repo_type'):
792 _type = repository.repo_type
792 _type = repository.repo_type
793 else:
793 else:
794 _type = repository
794 _type = repository
795 return _type == self._backend_alias
795 return _type == self._backend_alias
796
796
797 is_git = _RepoChecker('git')
797 is_git = _RepoChecker('git')
798 is_hg = _RepoChecker('hg')
798 is_hg = _RepoChecker('hg')
799 is_svn = _RepoChecker('svn')
799 is_svn = _RepoChecker('svn')
800
800
801
801
802 def get_repo_type_by_name(repo_name):
802 def get_repo_type_by_name(repo_name):
803 repo = Repository.get_by_repo_name(repo_name)
803 repo = Repository.get_by_repo_name(repo_name)
804 return repo.repo_type
804 return repo.repo_type
805
805
806
806
807 def is_svn_without_proxy(repository):
807 def is_svn_without_proxy(repository):
808 if is_svn(repository):
808 if is_svn(repository):
809 from rhodecode.model.settings import VcsSettingsModel
809 from rhodecode.model.settings import VcsSettingsModel
810 conf = VcsSettingsModel().get_ui_settings_as_config_obj()
810 conf = VcsSettingsModel().get_ui_settings_as_config_obj()
811 return not str2bool(conf.get('vcs_svn_proxy', 'http_requests_enabled'))
811 return not str2bool(conf.get('vcs_svn_proxy', 'http_requests_enabled'))
812 return False
812 return False
813
813
814
814
815 def discover_user(author):
815 def discover_user(author):
816 """
816 """
817 Tries to discover a RhodeCode User based on the author string. The author string
817 Tries to discover a RhodeCode User based on the author string. The author string
818 is typically `FirstName LastName <email@address.com>`
818 is typically `FirstName LastName <email@address.com>`
819 """
819 """
820
820
821 # if author is already an instance use it for extraction
821 # if author is already an instance use it for extraction
822 if isinstance(author, User):
822 if isinstance(author, User):
823 return author
823 return author
824
824
825 # Valid email in the attribute passed, see if they're in the system
825 # Valid email in the attribute passed, see if they're in the system
826 _email = author_email(author)
826 _email = author_email(author)
827 if _email != '':
827 if _email != '':
828 user = User.get_by_email(_email, case_insensitive=True, cache=True)
828 user = User.get_by_email(_email, case_insensitive=True, cache=True)
829 if user is not None:
829 if user is not None:
830 return user
830 return user
831
831
832 # Maybe it's a username? Try to extract it and fetch the user by username.
832 # Maybe it's a username? Try to extract it and fetch the user by username.
833 _author = author_name(author)
833 _author = author_name(author)
834 user = User.get_by_username(_author, case_insensitive=True, cache=True)
834 user = User.get_by_username(_author, case_insensitive=True, cache=True)
835 if user is not None:
835 if user is not None:
836 return user
836 return user
837
837
838 return None
838 return None
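# Illustrative note (not part of the original module): resolution order is
# User instance -> email lookup -> username lookup -> None. A hypothetical
# author string 'Jane Doe <jane@example.com>' resolves to the User with that
# email if one exists, otherwise to a User named 'Jane Doe'
# (case-insensitive), otherwise to None:
#
#   >>> discover_user('Jane Doe <jane@example.com>')  # doctest: +SKIP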
839
839
840
840
841 def email_or_none(author):
841 def email_or_none(author):
842 # extract email from the commit string
842 # extract email from the commit string
843 _email = author_email(author)
843 _email = author_email(author)
844
844
845 # If we have an email, use it, otherwise
845 # If we have an email, use it, otherwise
846 # see if it contains a username we can get an email from
846 # see if it contains a username we can get an email from
847 if _email != '':
847 if _email != '':
848 return _email
848 return _email
849 else:
849 else:
850 user = User.get_by_username(
850 user = User.get_by_username(
851 author_name(author), case_insensitive=True, cache=True)
851 author_name(author), case_insensitive=True, cache=True)
852
852
853 if user is not None:
853 if user is not None:
854 return user.email
854 return user.email
855
855
856 # No valid email, not a valid user in the system, none!
856 # No valid email, not a valid user in the system, none!
857 return None
857 return None
858
858
859
859
860 def link_to_user(author, length=0, **kwargs):
860 def link_to_user(author, length=0, **kwargs):
861 user = discover_user(author)
861 user = discover_user(author)
862 # user can be None, but if we already have it we can re-use it in the
862 # user can be None, but if we already have it we can re-use it in the
863 # person() function and save one expensive query
863 # person() function and save one expensive query
864 if user:
864 if user:
865 author = user
865 author = user
866
866
867 display_person = person(author, 'username_or_name_or_email')
867 display_person = person(author, 'username_or_name_or_email')
868 if length:
868 if length:
869 display_person = shorter(display_person, length)
869 display_person = shorter(display_person, length)
870
870
871 if user:
871 if user:
872 return link_to(
872 return link_to(
873 escape(display_person),
873 escape(display_person),
874 route_path('user_profile', username=user.username),
874 route_path('user_profile', username=user.username),
875 **kwargs)
875 **kwargs)
876 else:
876 else:
877 return escape(display_person)
877 return escape(display_person)
878
878
879
879
880 def person(author, show_attr="username_and_name"):
880 def person(author, show_attr="username_and_name"):
881 user = discover_user(author)
881 user = discover_user(author)
882 if user:
882 if user:
883 return getattr(user, show_attr)
883 return getattr(user, show_attr)
884 else:
884 else:
885 _author = author_name(author)
885 _author = author_name(author)
886 _email = email(author)
886 _email = email(author)
887 return _author or _email
887 return _author or _email
888
888
889
889
890 def author_string(email):
890 def author_string(email):
891 if email:
891 if email:
892 user = User.get_by_email(email, case_insensitive=True, cache=True)
892 user = User.get_by_email(email, case_insensitive=True, cache=True)
893 if user:
893 if user:
894 if user.firstname or user.lastname:
894 if user.firstname or user.lastname:
895 return '%s %s &lt;%s&gt;' % (user.firstname, user.lastname, email)
895 return '%s %s &lt;%s&gt;' % (user.firstname, user.lastname, email)
896 else:
896 else:
897 return email
897 return email
898 else:
898 else:
899 return email
899 return email
900 else:
900 else:
901 return None
901 return None
902
902
903
903
904 def person_by_id(id_, show_attr="username_and_name"):
904 def person_by_id(id_, show_attr="username_and_name"):
905 # attr to return from fetched user
905 # attr to return from fetched user
906 person_getter = lambda usr: getattr(usr, show_attr)
906 person_getter = lambda usr: getattr(usr, show_attr)
907
907
908 # maybe it's an ID?
908 # maybe it's an ID?
909 if str(id_).isdigit() or isinstance(id_, int):
909 if str(id_).isdigit() or isinstance(id_, int):
910 id_ = int(id_)
910 id_ = int(id_)
911 user = User.get(id_)
911 user = User.get(id_)
912 if user is not None:
912 if user is not None:
913 return person_getter(user)
913 return person_getter(user)
914 return id_
914 return id_
915
915
916
916
917 def gravatar_with_user(author, show_disabled=False):
917 def gravatar_with_user(author, show_disabled=False):
918 from rhodecode.lib.utils import PartialRenderer
918 from rhodecode.lib.utils import PartialRenderer
919 _render = PartialRenderer('base/base.mako')
919 _render = PartialRenderer('base/base.mako')
920 return _render('gravatar_with_user', author, show_disabled=show_disabled)
920 return _render('gravatar_with_user', author, show_disabled=show_disabled)
921
921
922
922
923 def desc_stylize(value):
923 def desc_stylize(value):
924 """
924 """
925 converts metatags in the given value into their html equivalent
925 converts metatags in the given value into their html equivalent
926
926
927 :param value:
927 :param value:
928 """
928 """
929 if not value:
929 if not value:
930 return ''
930 return ''
931
931
932 value = re.sub(r'\[see\ \=\>\ *([a-zA-Z0-9\/\=\?\&\ \:\/\.\-]*)\]',
932 value = re.sub(r'\[see\ \=\>\ *([a-zA-Z0-9\/\=\?\&\ \:\/\.\-]*)\]',
933 '<div class="metatag" tag="see">see =&gt; \\1 </div>', value)
933 '<div class="metatag" tag="see">see =&gt; \\1 </div>', value)
934 value = re.sub(r'\[license\ \=\>\ *([a-zA-Z0-9\/\=\?\&\ \:\/\.\-]*)\]',
934 value = re.sub(r'\[license\ \=\>\ *([a-zA-Z0-9\/\=\?\&\ \:\/\.\-]*)\]',
935 '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/\\1">\\1</a></div>', value)
935 '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/\\1">\\1</a></div>', value)
936 value = re.sub(r'\[(requires|recommends|conflicts|base)\ \=\>\ *([a-zA-Z0-9\-\/]*)\]',
936 value = re.sub(r'\[(requires|recommends|conflicts|base)\ \=\>\ *([a-zA-Z0-9\-\/]*)\]',
937 '<div class="metatag" tag="\\1">\\1 =&gt; <a href="/\\2">\\2</a></div>', value)
937 '<div class="metatag" tag="\\1">\\1 =&gt; <a href="/\\2">\\2</a></div>', value)
938 value = re.sub(r'\[(lang|language)\ \=\>\ *([a-zA-Z\-\/\#\+]*)\]',
938 value = re.sub(r'\[(lang|language)\ \=\>\ *([a-zA-Z\-\/\#\+]*)\]',
939 '<div class="metatag" tag="lang">\\2</div>', value)
939 '<div class="metatag" tag="lang">\\2</div>', value)
940 value = re.sub(r'\[([a-z]+)\]',
940 value = re.sub(r'\[([a-z]+)\]',
941 '<div class="metatag" tag="\\1">\\1</div>', value)
941 '<div class="metatag" tag="\\1">\\1</div>', value)
942
942
943 return value
943 return value
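# Illustrative examples (not part of the original module), following the
# regexes above:
#
#   >>> desc_stylize('[stable]')
#   '<div class="metatag" tag="stable">stable</div>'
#   >>> desc_stylize('[lang => python]')
#   '<div class="metatag" tag="lang">python</div>'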
944
944
945
945
946 def escaped_stylize(value):
946 def escaped_stylize(value):
947 """
947 """
948 converts metatags in the given value into their html equivalent, escaping the value first
948 converts metatags in the given value into their html equivalent, escaping the value first
949 """
949 """
950 if not value:
950 if not value:
951 return ''
951 return ''
952
952
953 # Use the default webhelpers escape method, but force the result into
953 # Use the default webhelpers escape method, but force the result into
954 # plain unicode instead of a markup object so it can be used in regex expressions
954 # plain unicode instead of a markup object so it can be used in regex expressions
955 value = unicode(escape(safe_unicode(value)))
955 value = unicode(escape(safe_unicode(value)))
956
956
957 value = re.sub(r'\[see\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]',
957 value = re.sub(r'\[see\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]',
958 '<div class="metatag" tag="see">see =&gt; \\1 </div>', value)
958 '<div class="metatag" tag="see">see =&gt; \\1 </div>', value)
959 value = re.sub(r'\[license\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]',
959 value = re.sub(r'\[license\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]',
960 '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/\\1">\\1</a></div>', value)
960 '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/\\1">\\1</a></div>', value)
961 value = re.sub(r'\[(requires|recommends|conflicts|base)\ \=\&gt;\ *([a-zA-Z0-9\-\/]*)\]',
961 value = re.sub(r'\[(requires|recommends|conflicts|base)\ \=\&gt;\ *([a-zA-Z0-9\-\/]*)\]',
962 '<div class="metatag" tag="\\1">\\1 =&gt; <a href="/\\2">\\2</a></div>', value)
962 '<div class="metatag" tag="\\1">\\1 =&gt; <a href="/\\2">\\2</a></div>', value)
963 value = re.sub(r'\[(lang|language)\ \=\&gt;\ *([a-zA-Z\-\/\#\+]*)\]',
963 value = re.sub(r'\[(lang|language)\ \=\&gt;\ *([a-zA-Z\-\/\#\+]*)\]',
964 '<div class="metatag" tag="lang">\\2</div>', value)
964 '<div class="metatag" tag="lang">\\2</div>', value)
965 value = re.sub(r'\[([a-z]+)\]',
965 value = re.sub(r'\[([a-z]+)\]',
966 '<div class="metatag" tag="\\1">\\1</div>', value)
966 '<div class="metatag" tag="\\1">\\1</div>', value)
967
967
968 return value
968 return value
969
969
970
970
971 def bool2icon(value):
971 def bool2icon(value):
972 """
972 """
973 Returns the boolean value of the given value, represented as an html element
973 Returns the boolean value of the given value, represented as an html element
974 with classes that render true/false icons
974 with classes that render true/false icons
975
975
976 :param value: given value to convert to html node
976 :param value: given value to convert to html node
977 """
977 """
978
978
979 if value: # does bool conversion
979 if value: # does bool conversion
980 return HTML.tag('i', class_="icon-true")
980 return HTML.tag('i', class_="icon-true")
981 else: # not true as bool
981 else: # not true as bool
982 return HTML.tag('i', class_="icon-false")
982 return HTML.tag('i', class_="icon-false")
983
983
984
984
985 #==============================================================================
985 #==============================================================================
986 # PERMS
986 # PERMS
987 #==============================================================================
987 #==============================================================================
988 from rhodecode.lib.auth import HasPermissionAny, HasPermissionAll, \
988 from rhodecode.lib.auth import HasPermissionAny, HasPermissionAll, \
989 HasRepoPermissionAny, HasRepoPermissionAll, HasRepoGroupPermissionAll, \
989 HasRepoPermissionAny, HasRepoPermissionAll, HasRepoGroupPermissionAll, \
990 HasRepoGroupPermissionAny, HasRepoPermissionAnyApi, get_csrf_token, \
990 HasRepoGroupPermissionAny, HasRepoPermissionAnyApi, get_csrf_token, \
991 csrf_token_key
991 csrf_token_key
992
992
993
993
994 #==============================================================================
994 #==============================================================================
995 # GRAVATAR URL
995 # GRAVATAR URL
996 #==============================================================================
996 #==============================================================================
997 class InitialsGravatar(object):
997 class InitialsGravatar(object):
998 def __init__(self, email_address, first_name, last_name, size=30,
998 def __init__(self, email_address, first_name, last_name, size=30,
999 background=None, text_color='#fff'):
999 background=None, text_color='#fff'):
1000 self.size = size
1000 self.size = size
1001 self.first_name = first_name
1001 self.first_name = first_name
1002 self.last_name = last_name
1002 self.last_name = last_name
1003 self.email_address = email_address
1003 self.email_address = email_address
1004 self.background = background or self.str2color(email_address)
1004 self.background = background or self.str2color(email_address)
1005 self.text_color = text_color
1005 self.text_color = text_color
1006
1006
1007 def get_color_bank(self):
1007 def get_color_bank(self):
1008 """
1008 """
1009 returns a predefined list of colors that gravatars can use.
1009 returns a predefined list of colors that gravatars can use.
1010 Those are randomized distinct colors that guarantee readability and
1010 Those are randomized distinct colors that guarantee readability and
1011 uniqueness.
1011 uniqueness.
1012
1012
1013 generated with: http://phrogz.net/css/distinct-colors.html
1013 generated with: http://phrogz.net/css/distinct-colors.html
1014 """
1014 """
1015 return [
1015 return [
1016 '#bf3030', '#a67f53', '#00ff00', '#5989b3', '#392040', '#d90000',
1016 '#bf3030', '#a67f53', '#00ff00', '#5989b3', '#392040', '#d90000',
1017 '#402910', '#204020', '#79baf2', '#a700b3', '#bf6060', '#7f5320',
1017 '#402910', '#204020', '#79baf2', '#a700b3', '#bf6060', '#7f5320',
1018 '#008000', '#003059', '#ee00ff', '#ff0000', '#8c4b00', '#007300',
1018 '#008000', '#003059', '#ee00ff', '#ff0000', '#8c4b00', '#007300',
1019 '#005fb3', '#de73e6', '#ff4040', '#ffaa00', '#3df255', '#203140',
1019 '#005fb3', '#de73e6', '#ff4040', '#ffaa00', '#3df255', '#203140',
1020 '#47004d', '#591616', '#664400', '#59b365', '#0d2133', '#83008c',
1020 '#47004d', '#591616', '#664400', '#59b365', '#0d2133', '#83008c',
1021 '#592d2d', '#bf9f60', '#73e682', '#1d3f73', '#73006b', '#402020',
1021 '#592d2d', '#bf9f60', '#73e682', '#1d3f73', '#73006b', '#402020',
1022 '#b2862d', '#397341', '#597db3', '#e600d6', '#a60000', '#736039',
1022 '#b2862d', '#397341', '#597db3', '#e600d6', '#a60000', '#736039',
1023 '#00b318', '#79aaf2', '#330d30', '#ff8080', '#403010', '#16591f',
1023 '#00b318', '#79aaf2', '#330d30', '#ff8080', '#403010', '#16591f',
1024 '#002459', '#8c4688', '#e50000', '#ffbf40', '#00732e', '#102340',
1024 '#002459', '#8c4688', '#e50000', '#ffbf40', '#00732e', '#102340',
1025 '#bf60ac', '#8c4646', '#cc8800', '#00a642', '#1d3473', '#b32d98',
1025 '#bf60ac', '#8c4646', '#cc8800', '#00a642', '#1d3473', '#b32d98',
1026 '#660e00', '#ffd580', '#80ffb2', '#7391e6', '#733967', '#d97b6c',
1026 '#660e00', '#ffd580', '#80ffb2', '#7391e6', '#733967', '#d97b6c',
1027 '#8c5e00', '#59b389', '#3967e6', '#590047', '#73281d', '#665200',
1027 '#8c5e00', '#59b389', '#3967e6', '#590047', '#73281d', '#665200',
1028 '#00e67a', '#2d50b3', '#8c2377', '#734139', '#b2982d', '#16593a',
1028 '#00e67a', '#2d50b3', '#8c2377', '#734139', '#b2982d', '#16593a',
1029 '#001859', '#ff00aa', '#a65e53', '#ffcc00', '#0d3321', '#2d3959',
1029 '#001859', '#ff00aa', '#a65e53', '#ffcc00', '#0d3321', '#2d3959',
1030 '#731d56', '#401610', '#4c3d00', '#468c6c', '#002ca6', '#d936a3',
1030 '#731d56', '#401610', '#4c3d00', '#468c6c', '#002ca6', '#d936a3',
1031 '#d94c36', '#403920', '#36d9a3', '#0d1733', '#592d4a', '#993626',
1031 '#d94c36', '#403920', '#36d9a3', '#0d1733', '#592d4a', '#993626',
1032 '#cca300', '#00734d', '#46598c', '#8c005e', '#7f1100', '#8c7000',
1032 '#cca300', '#00734d', '#46598c', '#8c005e', '#7f1100', '#8c7000',
1033 '#00a66f', '#7382e6', '#b32d74', '#d9896c', '#ffe680', '#1d7362',
1033 '#00a66f', '#7382e6', '#b32d74', '#d9896c', '#ffe680', '#1d7362',
1034 '#364cd9', '#73003d', '#d93a00', '#998a4d', '#59b3a1', '#5965b3',
1034 '#364cd9', '#73003d', '#d93a00', '#998a4d', '#59b3a1', '#5965b3',
1035 '#e5007a', '#73341d', '#665f00', '#00b38f', '#0018b3', '#59163a',
1035 '#e5007a', '#73341d', '#665f00', '#00b38f', '#0018b3', '#59163a',
1036 '#b2502d', '#bfb960', '#00ffcc', '#23318c', '#a6537f', '#734939',
1036 '#b2502d', '#bfb960', '#00ffcc', '#23318c', '#a6537f', '#734939',
1037 '#b2a700', '#104036', '#3d3df2', '#402031', '#e56739', '#736f39',
1037 '#b2a700', '#104036', '#3d3df2', '#402031', '#e56739', '#736f39',
1038 '#79f2ea', '#000059', '#401029', '#4c1400', '#ffee00', '#005953',
1038 '#79f2ea', '#000059', '#401029', '#4c1400', '#ffee00', '#005953',
1039 '#101040', '#990052', '#402820', '#403d10', '#00ffee', '#0000d9',
1039 '#101040', '#990052', '#402820', '#403d10', '#00ffee', '#0000d9',
1040 '#ff80c4', '#a66953', '#eeff00', '#00ccbe', '#8080ff', '#e673a1',
1040 '#ff80c4', '#a66953', '#eeff00', '#00ccbe', '#8080ff', '#e673a1',
1041 '#a62c00', '#474d00', '#1a3331', '#46468c', '#733950', '#662900',
1041 '#a62c00', '#474d00', '#1a3331', '#46468c', '#733950', '#662900',
1042 '#858c23', '#238c85', '#0f0073', '#b20047', '#d9986c', '#becc00',
1042 '#858c23', '#238c85', '#0f0073', '#b20047', '#d9986c', '#becc00',
1043 '#396f73', '#281d73', '#ff0066', '#ff6600', '#dee673', '#59adb3',
1043 '#396f73', '#281d73', '#ff0066', '#ff6600', '#dee673', '#59adb3',
1044 '#6559b3', '#590024', '#b2622d', '#98b32d', '#36ced9', '#332d59',
1044 '#6559b3', '#590024', '#b2622d', '#98b32d', '#36ced9', '#332d59',
1045 '#40001a', '#733f1d', '#526600', '#005359', '#242040', '#bf6079',
1045 '#40001a', '#733f1d', '#526600', '#005359', '#242040', '#bf6079',
1046 '#735039', '#cef23d', '#007780', '#5630bf', '#66001b', '#b24700',
1046 '#735039', '#cef23d', '#007780', '#5630bf', '#66001b', '#b24700',
1047 '#acbf60', '#1d6273', '#25008c', '#731d34', '#a67453', '#50592d',
1047 '#acbf60', '#1d6273', '#25008c', '#731d34', '#a67453', '#50592d',
1048 '#00ccff', '#6600ff', '#ff0044', '#4c1f00', '#8a994d', '#79daf2',
1048 '#00ccff', '#6600ff', '#ff0044', '#4c1f00', '#8a994d', '#79daf2',
1049 '#a173e6', '#d93662', '#402310', '#aaff00', '#2d98b3', '#8c40ff',
1049 '#a173e6', '#d93662', '#402310', '#aaff00', '#2d98b3', '#8c40ff',
1050 '#592d39', '#ff8c40', '#354020', '#103640', '#1a0040', '#331a20',
1050 '#592d39', '#ff8c40', '#354020', '#103640', '#1a0040', '#331a20',
1051 '#331400', '#334d00', '#1d5673', '#583973', '#7f0022', '#4c3626',
1051 '#331400', '#334d00', '#1d5673', '#583973', '#7f0022', '#4c3626',
1052 '#88cc00', '#36a3d9', '#3d0073', '#d9364c', '#33241a', '#698c23',
1052 '#88cc00', '#36a3d9', '#3d0073', '#d9364c', '#33241a', '#698c23',
1053 '#5995b3', '#300059', '#e57382', '#7f3300', '#366600', '#00aaff',
1053 '#5995b3', '#300059', '#e57382', '#7f3300', '#366600', '#00aaff',
1054 '#3a1659', '#733941', '#663600', '#74b32d', '#003c59', '#7f53a6',
1054 '#3a1659', '#733941', '#663600', '#74b32d', '#003c59', '#7f53a6',
1055 '#73000f', '#ff8800', '#baf279', '#79caf2', '#291040', '#a6293a',
1055 '#73000f', '#ff8800', '#baf279', '#79caf2', '#291040', '#a6293a',
1056 '#b2742d', '#587339', '#0077b3', '#632699', '#400009', '#d9a66c',
1056 '#b2742d', '#587339', '#0077b3', '#632699', '#400009', '#d9a66c',
1057 '#294010', '#2d4a59', '#aa00ff', '#4c131b', '#b25f00', '#5ce600',
1057 '#294010', '#2d4a59', '#aa00ff', '#4c131b', '#b25f00', '#5ce600',
1058 '#267399', '#a336d9', '#990014', '#664e33', '#86bf60', '#0088ff',
1058 '#267399', '#a336d9', '#990014', '#664e33', '#86bf60', '#0088ff',
1059 '#7700b3', '#593a16', '#073300', '#1d4b73', '#ac60bf', '#e59539',
1059 '#7700b3', '#593a16', '#073300', '#1d4b73', '#ac60bf', '#e59539',
1060 '#4f8c46', '#368dd9', '#5c0073'
1060 '#4f8c46', '#368dd9', '#5c0073'
1061 ]
1061 ]
1062
1062
1063 def rgb_to_hex_color(self, rgb_tuple):
1063 def rgb_to_hex_color(self, rgb_tuple):
1064 """
1064 """
1065 Converts the given rgb tuple to a hex color string.
1065 Converts the given rgb tuple to a hex color string.
1066
1066
1067 :param rgb_tuple: tuple of 3 ints representing an rgb color
1067 :param rgb_tuple: tuple of 3 ints representing an rgb color
1068 """
1068 """
1069 return '#' + ("".join(map(chr, rgb_tuple)).encode('hex'))
1069 return '#' + ("".join(map(chr, rgb_tuple)).encode('hex'))
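# Illustrative doctest-style example (not part of the original module),
# valid for the Python 2 str.encode('hex') used above:
#
#   >>> InitialsGravatar('someone@example.com', '', '').rgb_to_hex_color((255, 0, 0))
#   '#ff0000'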
1070
1070
1071 def email_to_int_list(self, email_str):
1071 def email_to_int_list(self, email_str):
1072 """
1072 """
1073 Gets every byte of the email's md5 hex digest and turns it into an integer.
1073 Gets every byte of the email's md5 hex digest and turns it into an integer.
1074 Each value is always between 0 and 255.
1074 Each value is always between 0 and 255.
1075 """
1075 """
1076 digest = md5_safe(email_str.lower())
1076 digest = md5_safe(email_str.lower())
1077 return [int(digest[i * 2:i * 2 + 2], 16) for i in range(16)]
1077 return [int(digest[i * 2:i * 2 + 2], 16) for i in range(16)]
1078
1078
1079 def pick_color_bank_index(self, email_str, color_bank):
1079 def pick_color_bank_index(self, email_str, color_bank):
1080 return self.email_to_int_list(email_str)[0] % len(color_bank)
1080 return self.email_to_int_list(email_str)[0] % len(color_bank)
1081
1081
1082 def str2color(self, email_str):
1082 def str2color(self, email_str):
1083 """
1083 """
1084 Maps an email to a color using a stable algorithm
1084 Maps an email to a color using a stable algorithm
1085
1085
1086 :param email_str:
1086 :param email_str:
1087 """
1087 """
1088 color_bank = self.get_color_bank()
1088 color_bank = self.get_color_bank()
1089 # pick position (modulo its length so we always find it in the
1089 # pick position (modulo its length so we always find it in the
1090 # bank even if it's smaller than 256 values)
1090 # bank even if it's smaller than 256 values)
1091 pos = self.pick_color_bank_index(email_str, color_bank)
1091 pos = self.pick_color_bank_index(email_str, color_bank)
1092 return color_bank[pos]
1092 return color_bank[pos]
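# Illustrative note (not part of the original module): the mapping is stable
# because it depends only on the md5 of the lowercased email - the first
# digest byte indexes the color bank above (modulo its length), so the same
# address always yields the same background color:
#
#   >>> ig = InitialsGravatar('someone@example.com', '', '')
#   >>> ig.str2color('someone@example.com') == ig.str2color('someone@example.com')
#   True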
1093
1093
1094 def normalize_email(self, email_address):
1094 def normalize_email(self, email_address):
1095 import unicodedata
1095 import unicodedata
1096 # default host used to fill in the fake/missing email
1096 # default host used to fill in the fake/missing email
1097 default_host = u'localhost'
1097 default_host = u'localhost'
1098
1098
1099 if not email_address:
1099 if not email_address:
1100 email_address = u'%s@%s' % (User.DEFAULT_USER, default_host)
1100 email_address = u'%s@%s' % (User.DEFAULT_USER, default_host)
1101
1101
1102 email_address = safe_unicode(email_address)
1102 email_address = safe_unicode(email_address)
1103
1103
1104 if u'@' not in email_address:
1104 if u'@' not in email_address:
1105 email_address = u'%s@%s' % (email_address, default_host)
1105 email_address = u'%s@%s' % (email_address, default_host)
1106
1106
1107 if email_address.endswith(u'@'):
1107 if email_address.endswith(u'@'):
1108 email_address = u'%s%s' % (email_address, default_host)
1108 email_address = u'%s%s' % (email_address, default_host)
1109
1109
1110 email_address = unicodedata.normalize('NFKD', email_address)\
1110 email_address = unicodedata.normalize('NFKD', email_address)\
1111 .encode('ascii', 'ignore')
1111 .encode('ascii', 'ignore')
1112 return email_address
1112 return email_address
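# Illustrative doctest-style examples (not part of the original module); an
# empty address falls back to '<User.DEFAULT_USER>@localhost':
#
#   >>> ig = InitialsGravatar('someone@example.com', '', '')
#   >>> ig.normalize_email('marcin')
#   'marcin@localhost'
#   >>> ig.normalize_email('marcin@')
#   'marcin@localhost'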
1113
1113
1114 def get_initials(self):
1114 def get_initials(self):
1115 """
1115 """
1116 Returns 2 letter initials calculated based on the input.
1116 Returns 2 letter initials calculated based on the input.
1117 The algorithm takes the first letter of the part before the @ in the
1117 The algorithm takes the first letter of the part before the @ in the
1118 given email address, and the first letter of the server name. In case
1118 given email address, and the first letter of the server name. In case
1119 the part before the @ is in the format `somestring.somestring2`, it replaces
1119 the part before the @ is in the format `somestring.somestring2`, it replaces
1120 the server letter with the first letter of somestring2.
1120 the server letter with the first letter of somestring2.
1121
1121
1122 In case the function was initialized with both first and last name, the
1122 In case the function was initialized with both first and last name, the
1123 first letters of those override the extraction from the email. We add
1123 first letters of those override the extraction from the email. We add
1124 special logic to that: in case the full name is compound,
1124 special logic to that: in case the full name is compound,
1125 like Guido Von Rossum, we use the last part of the last name
1125 like Guido Von Rossum, we use the last part of the last name
1126 (Von Rossum), picking `R`.
1126 (Von Rossum), picking `R`.
1127
1127
1128 The function also normalizes non-ascii characters to their ascii
1128 The function also normalizes non-ascii characters to their ascii
1129 representation, eg. Ą => A
1129 representation, eg. Ą => A
1130 """
1130 """
1131 import unicodedata
1131 import unicodedata
1132 # replace non-ascii to ascii
1132 # replace non-ascii to ascii
1133 first_name = unicodedata.normalize(
1133 first_name = unicodedata.normalize(
1134 'NFKD', safe_unicode(self.first_name)).encode('ascii', 'ignore')
1134 'NFKD', safe_unicode(self.first_name)).encode('ascii', 'ignore')
1135 last_name = unicodedata.normalize(
1135 last_name = unicodedata.normalize(
1136 'NFKD', safe_unicode(self.last_name)).encode('ascii', 'ignore')
1136 'NFKD', safe_unicode(self.last_name)).encode('ascii', 'ignore')
1137
1137
1138 # do NFKD encoding, and also make sure email has proper format
1138 # do NFKD encoding, and also make sure email has proper format
1139 email_address = self.normalize_email(self.email_address)
1139 email_address = self.normalize_email(self.email_address)
1140
1140
1141 # first push the email initials
1141 # first push the email initials
1142 prefix, server = email_address.split('@', 1)
1142 prefix, server = email_address.split('@', 1)
1143
1143
1144 # check if prefix is maybe a 'firstname.lastname' syntax
1144 # check if prefix is maybe a 'firstname.lastname' syntax
1145 _dot_split = prefix.rsplit('.', 1)
1145 _dot_split = prefix.rsplit('.', 1)
1146 if len(_dot_split) == 2:
1146 if len(_dot_split) == 2:
1147 initials = [_dot_split[0][0], _dot_split[1][0]]
1147 initials = [_dot_split[0][0], _dot_split[1][0]]
1148 else:
1148 else:
1149 initials = [prefix[0], server[0]]
1149 initials = [prefix[0], server[0]]
1150
1150
1151 # then try to replace either firstname or lastname
1151 # then try to replace either firstname or lastname
1152 fn_letter = (first_name or " ")[0].strip()
1152 fn_letter = (first_name or " ")[0].strip()
1153 ln_letter = (last_name.split(' ', 1)[-1] or " ")[0].strip()
1153 ln_letter = (last_name.split(' ', 1)[-1] or " ")[0].strip()
1154
1154
1155 if fn_letter:
1155 if fn_letter:
1156 initials[0] = fn_letter
1156 initials[0] = fn_letter
1157
1157
1158 if ln_letter:
1158 if ln_letter:
1159 initials[1] = ln_letter
1159 initials[1] = ln_letter
1160
1160
1161 return ''.join(initials).upper()
1161 return ''.join(initials).upper()
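# Illustrative doctest-style examples (not part of the original module),
# following the algorithm described in the docstring above:
#
#   >>> InitialsGravatar('john.doe@example.com', '', '').get_initials()
#   'JD'
#   >>> InitialsGravatar('guido@example.com', 'Guido', 'Von Rossum').get_initials()
#   'GR'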
1162
1162
1163 def get_img_data_by_type(self, font_family, img_type):
1163 def get_img_data_by_type(self, font_family, img_type):
1164 default_user = """
1164 default_user = """
1165 <svg xmlns="http://www.w3.org/2000/svg"
1165 <svg xmlns="http://www.w3.org/2000/svg"
1166 version="1.1" x="0px" y="0px" width="{size}" height="{size}"
1166 version="1.1" x="0px" y="0px" width="{size}" height="{size}"
1167 viewBox="-15 -10 439.165 429.164"
1167 viewBox="-15 -10 439.165 429.164"
1168
1168
1169 xml:space="preserve"
1169 xml:space="preserve"
1170 style="background:{background};" >
1170 style="background:{background};" >
1171
1171
1172 <path d="M204.583,216.671c50.664,0,91.74-48.075,
1172 <path d="M204.583,216.671c50.664,0,91.74-48.075,
1173 91.74-107.378c0-82.237-41.074-107.377-91.74-107.377
1173 91.74-107.378c0-82.237-41.074-107.377-91.74-107.377
1174 c-50.668,0-91.74,25.14-91.74,107.377C112.844,
1174 c-50.668,0-91.74,25.14-91.74,107.377C112.844,
1175 168.596,153.916,216.671,
1175 168.596,153.916,216.671,
1176 204.583,216.671z" fill="{text_color}"/>
1176 204.583,216.671z" fill="{text_color}"/>
1177 <path d="M407.164,374.717L360.88,
1177 <path d="M407.164,374.717L360.88,
1178 270.454c-2.117-4.771-5.836-8.728-10.465-11.138l-71.83-37.392
1178 270.454c-2.117-4.771-5.836-8.728-10.465-11.138l-71.83-37.392
1179 c-1.584-0.823-3.502-0.663-4.926,0.415c-20.316,
1179 c-1.584-0.823-3.502-0.663-4.926,0.415c-20.316,
1180 15.366-44.203,23.488-69.076,23.488c-24.877,
1180 15.366-44.203,23.488-69.076,23.488c-24.877,
1181 0-48.762-8.122-69.078-23.488
1181 0-48.762-8.122-69.078-23.488
1182 c-1.428-1.078-3.346-1.238-4.93-0.415L58.75,
1182 c-1.428-1.078-3.346-1.238-4.93-0.415L58.75,
1183 259.316c-4.631,2.41-8.346,6.365-10.465,11.138L2.001,374.717
1183 259.316c-4.631,2.41-8.346,6.365-10.465,11.138L2.001,374.717
1184 c-3.191,7.188-2.537,15.412,1.75,22.005c4.285,
1184 c-3.191,7.188-2.537,15.412,1.75,22.005c4.285,
1185 6.592,11.537,10.526,19.4,10.526h362.861c7.863,0,15.117-3.936,
1185 6.592,11.537,10.526,19.4,10.526h362.861c7.863,0,15.117-3.936,
1186 19.402-10.527 C409.699,390.129,
1186 19.402-10.527 C409.699,390.129,
1187 410.355,381.902,407.164,374.717z" fill="{text_color}"/>
1187 410.355,381.902,407.164,374.717z" fill="{text_color}"/>
1188 </svg>""".format(
1188 </svg>""".format(
1189 size=self.size,
1189 size=self.size,
1190 background='#979797', # @grey4
1190 background='#979797', # @grey4
1191 text_color=self.text_color,
1191 text_color=self.text_color,
1192 font_family=font_family)
1192 font_family=font_family)
1193
1193
1194 return {
1194 return {
1195 "default_user": default_user
1195 "default_user": default_user
1196 }[img_type]
1196 }[img_type]
1197
1197
1198 def get_img_data(self, svg_type=None):
1198 def get_img_data(self, svg_type=None):
1199 """
1199 """
1200 generates the svg markup for the avatar image
1200 generates the svg markup for the avatar image
1201 """
1201 """
1202
1202
1203 font_family = ','.join([
1203 font_family = ','.join([
1204 'proximanovaregular',
1204 'proximanovaregular',
1205 'Proxima Nova Regular',
1205 'Proxima Nova Regular',
1206 'Proxima Nova',
1206 'Proxima Nova',
1207 'Arial',
1207 'Arial',
1208 'Lucida Grande',
1208 'Lucida Grande',
1209 'sans-serif'
1209 'sans-serif'
1210 ])
1210 ])
1211 if svg_type:
1211 if svg_type:
1212 return self.get_img_data_by_type(font_family, svg_type)
1212 return self.get_img_data_by_type(font_family, svg_type)
1213
1213
1214 initials = self.get_initials()
1214 initials = self.get_initials()
1215 img_data = """
1215 img_data = """
1216 <svg xmlns="http://www.w3.org/2000/svg" pointer-events="none"
1216 <svg xmlns="http://www.w3.org/2000/svg" pointer-events="none"
1217 width="{size}" height="{size}"
1217 width="{size}" height="{size}"
1218 style="width: 100%; height: 100%; background-color: {background}"
1218 style="width: 100%; height: 100%; background-color: {background}"
1219 viewBox="0 0 {size} {size}">
1219 viewBox="0 0 {size} {size}">
1220 <text text-anchor="middle" y="50%" x="50%" dy="0.35em"
1220 <text text-anchor="middle" y="50%" x="50%" dy="0.35em"
1221 pointer-events="auto" fill="{text_color}"
1221 pointer-events="auto" fill="{text_color}"
1222 font-family="{font_family}"
1222 font-family="{font_family}"
1223 style="font-weight: 400; font-size: {f_size}px;">{text}
1223 style="font-weight: 400; font-size: {f_size}px;">{text}
1224 </text>
1224 </text>
1225 </svg>""".format(
1225 </svg>""".format(
1226 size=self.size,
1226 size=self.size,
1227 f_size=self.size/1.85, # scale the text inside the box nicely
1227 f_size=self.size/1.85, # scale the text inside the box nicely
1228 background=self.background,
1228 background=self.background,
1229 text_color=self.text_color,
1229 text_color=self.text_color,
1230 text=initials.upper(),
1230 text=initials.upper(),
1231 font_family=font_family)
1231 font_family=font_family)
1232
1232
1233 return img_data
1233 return img_data
1234
1234
1235 def generate_svg(self, svg_type=None):
1235 def generate_svg(self, svg_type=None):
1236 img_data = self.get_img_data(svg_type)
1236 img_data = self.get_img_data(svg_type)
1237 return "data:image/svg+xml;base64,%s" % img_data.encode('base64')
1237 return "data:image/svg+xml;base64,%s" % img_data.encode('base64')
1238
1238
1239
1239
1240 def initials_gravatar(email_address, first_name, last_name, size=30):
1240 def initials_gravatar(email_address, first_name, last_name, size=30):
1241 svg_type = None
1241 svg_type = None
1242 if email_address == User.DEFAULT_USER_EMAIL:
1242 if email_address == User.DEFAULT_USER_EMAIL:
1243 svg_type = 'default_user'
1243 svg_type = 'default_user'
1244 klass = InitialsGravatar(email_address, first_name, last_name, size)
1244 klass = InitialsGravatar(email_address, first_name, last_name, size)
1245 return klass.generate_svg(svg_type=svg_type)
1245 return klass.generate_svg(svg_type=svg_type)
1246
1246
1247
1247
1248 def gravatar_url(email_address, size=30):
1248 def gravatar_url(email_address, size=30):
1249 # doh, we need to re-import those to mock it later
1249 # doh, we need to re-import those to mock it later
1250 from pylons import tmpl_context as c
1250 from pylons import tmpl_context as c
1251
1251
1252 _use_gravatar = c.visual.use_gravatar
1252 _use_gravatar = c.visual.use_gravatar
1253 _gravatar_url = c.visual.gravatar_url or User.DEFAULT_GRAVATAR_URL
1253 _gravatar_url = c.visual.gravatar_url or User.DEFAULT_GRAVATAR_URL
1254
1254
1255 email_address = email_address or User.DEFAULT_USER_EMAIL
1255 email_address = email_address or User.DEFAULT_USER_EMAIL
1256 if isinstance(email_address, unicode):
1256 if isinstance(email_address, unicode):
1257 # hashlib crashes on unicode items
1257 # hashlib crashes on unicode items
1258 email_address = safe_str(email_address)
1258 email_address = safe_str(email_address)
1259
1259
1260 # empty email or default user
1260 # empty email or default user
1261 if not email_address or email_address == User.DEFAULT_USER_EMAIL:
1261 if not email_address or email_address == User.DEFAULT_USER_EMAIL:
1262 return initials_gravatar(User.DEFAULT_USER_EMAIL, '', '', size=size)
1262 return initials_gravatar(User.DEFAULT_USER_EMAIL, '', '', size=size)
1263
1263
1264 if _use_gravatar:
1264 if _use_gravatar:
1265 # TODO: Stop using pyramid thread locals. Think about another solution to
1265 # TODO: Stop using pyramid thread locals. Think about another solution to
1266 # get the host and scheme here.
1266 # get the host and scheme here.
1267 request = get_current_request()
1267 request = get_current_request()
1268 tmpl = safe_str(_gravatar_url)
1268 tmpl = safe_str(_gravatar_url)
1269 tmpl = tmpl.replace('{email}', email_address)\
1269 tmpl = tmpl.replace('{email}', email_address)\
1270 .replace('{md5email}', md5_safe(email_address.lower())) \
1270 .replace('{md5email}', md5_safe(email_address.lower())) \
1271 .replace('{netloc}', request.host)\
1271 .replace('{netloc}', request.host)\
1272 .replace('{scheme}', request.scheme)\
1272 .replace('{scheme}', request.scheme)\
1273 .replace('{size}', safe_str(size))
1273 .replace('{size}', safe_str(size))
1274 return tmpl
1274 return tmpl
1275 else:
1275 else:
1276 return initials_gravatar(email_address, '', '', size=size)
1276 return initials_gravatar(email_address, '', '', size=size)
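# Illustrative note (not part of the original module): with a hypothetical
# gravatar template such as
# 'https://secure.gravatar.com/avatar/{md5email}?s={size}' the code above
# simply substitutes {email}, {md5email}, {netloc}, {scheme} and {size},
# yielding e.g. 'https://secure.gravatar.com/avatar/<md5 of the address>?s=30'.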
1277
1277
1278
1278
1279 class Page(_Page):
1279 class Page(_Page):
1280 """
1280 """
1281 Custom pager to match rendering style with paginator
1281 Custom pager to match rendering style with paginator
1282 """
1282 """
1283
1283
1284 def _get_pos(self, cur_page, max_page, items):
1284 def _get_pos(self, cur_page, max_page, items):
1285 edge = (items / 2) + 1
1285 edge = (items / 2) + 1
1286 if (cur_page <= edge):
1286 if (cur_page <= edge):
1287 radius = max(items / 2, items - cur_page)
1287 radius = max(items / 2, items - cur_page)
1288 elif (max_page - cur_page) < edge:
1288 elif (max_page - cur_page) < edge:
1289 radius = (items - 1) - (max_page - cur_page)
1289 radius = (items - 1) - (max_page - cur_page)
1290 else:
1290 else:
1291 radius = items / 2
1291 radius = items / 2
1292
1292
1293 left = max(1, (cur_page - (radius)))
1293 left = max(1, (cur_page - (radius)))
1294 right = min(max_page, cur_page + (radius))
1294 right = min(max_page, cur_page + (radius))
1295 return left, cur_page, right
1295 return left, cur_page, right
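# Illustrative worked example (not part of the original module): for
# cur_page=7, max_page=12 and items=5 (radius 2), edge = 5 / 2 + 1 = 3,
# neither boundary branch applies, so radius = 5 / 2 = 2 and the window is
# (left, cur, right) == (5, 7, 9), matching the '1 .. 5 6 [7] 8 9 .. 12'
# example in the _range() comments below.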
1296
1296
1297 def _range(self, regexp_match):
1297 def _range(self, regexp_match):
1298 """
1298 """
1299 Return range of linked pages (e.g. '1 2 [3] 4 5 6 7 8').
1299 Return range of linked pages (e.g. '1 2 [3] 4 5 6 7 8').
1300
1300
1301 Arguments:
1301 Arguments:
1302
1302
1303 regexp_match
1303 regexp_match
1304 A "re" (regular expressions) match object containing the
1304 A "re" (regular expressions) match object containing the
1305 radius of linked pages around the current page in
1305 radius of linked pages around the current page in
1306 regexp_match.group(1) as a string
1306 regexp_match.group(1) as a string
1307
1307
1308 This function is supposed to be called as a callable in
1308 This function is supposed to be called as a callable in
1309 re.sub.
1309 re.sub.
1310
1310
1311 """
1311 """
1312 radius = int(regexp_match.group(1))
1312 radius = int(regexp_match.group(1))
1313
1313
1314 # Compute the first and last page number within the radius
1314 # Compute the first and last page number within the radius
1315 # e.g. '1 .. 5 6 [7] 8 9 .. 12'
1315 # e.g. '1 .. 5 6 [7] 8 9 .. 12'
1316 # -> leftmost_page = 5
1316 # -> leftmost_page = 5
1317 # -> rightmost_page = 9
1317 # -> rightmost_page = 9
1318 leftmost_page, _cur, rightmost_page = self._get_pos(self.page,
1318 leftmost_page, _cur, rightmost_page = self._get_pos(self.page,
1319 self.last_page,
1319 self.last_page,
1320 (radius * 2) + 1)
1320 (radius * 2) + 1)
1321 nav_items = []
1321 nav_items = []
1322
1322
1323 # Create a link to the first page (unless we are on the first page
1323 # Create a link to the first page (unless we are on the first page
1324 # or there would be no need to insert '..' spacers)
1324 # or there would be no need to insert '..' spacers)
1325 if self.page != self.first_page and self.first_page < leftmost_page:
1325 if self.page != self.first_page and self.first_page < leftmost_page:
1326 nav_items.append(self._pagerlink(self.first_page, self.first_page))
1326 nav_items.append(self._pagerlink(self.first_page, self.first_page))
1327
1327
1328 # Insert dots if there are pages between the first page
1328 # Insert dots if there are pages between the first page
1329 # and the currently displayed page range
1329 # and the currently displayed page range
1330 if leftmost_page - self.first_page > 1:
1330 if leftmost_page - self.first_page > 1:
1331 # Wrap in a SPAN tag if dotdot_attr is set
1331 # Wrap in a SPAN tag if dotdot_attr is set
1332 text = '..'
1332 text = '..'
1333 if self.dotdot_attr:
1333 if self.dotdot_attr:
1334 text = HTML.span(c=text, **self.dotdot_attr)
1334 text = HTML.span(c=text, **self.dotdot_attr)
1335 nav_items.append(text)
1335 nav_items.append(text)
1336
1336
1337 for thispage in xrange(leftmost_page, rightmost_page + 1):
1337 for thispage in xrange(leftmost_page, rightmost_page + 1):
1338 # Highlight the current page number and do not use a link
1338 # Highlight the current page number and do not use a link
1339 if thispage == self.page:
1339 if thispage == self.page:
1340 text = '%s' % (thispage,)
1340 text = '%s' % (thispage,)
1341 # Wrap in a SPAN tag if curpage_attr is set
1341 # Wrap in a SPAN tag if curpage_attr is set
1342 if self.curpage_attr:
1342 if self.curpage_attr:
1343 text = HTML.span(c=text, **self.curpage_attr)
1343 text = HTML.span(c=text, **self.curpage_attr)
1344 nav_items.append(text)
1344 nav_items.append(text)
1345 # Otherwise create just a link to that page
1345 # Otherwise create just a link to that page
1346 else:
1346 else:
1347 text = '%s' % (thispage,)
1347 text = '%s' % (thispage,)
1348 nav_items.append(self._pagerlink(thispage, text))
1348 nav_items.append(self._pagerlink(thispage, text))
1349
1349
1350 # Insert dots if there are pages between the displayed
1350 # Insert dots if there are pages between the displayed
1351 # page numbers and the end of the page range
1351 # page numbers and the end of the page range
1352 if self.last_page - rightmost_page > 1:
1352 if self.last_page - rightmost_page > 1:
1353 text = '..'
1353 text = '..'
1354 # Wrap in a SPAN tag if dotdot_attr is set
1354 # Wrap in a SPAN tag if dotdot_attr is set
1355 if self.dotdot_attr:
1355 if self.dotdot_attr:
1356 text = HTML.span(c=text, **self.dotdot_attr)
1356 text = HTML.span(c=text, **self.dotdot_attr)
1357 nav_items.append(text)
1357 nav_items.append(text)
1358
1358
1359 # Create a link to the very last page (unless we are on the last
1359 # Create a link to the very last page (unless we are on the last
1360 # page or there would be no need to insert '..' spacers)
1360 # page or there would be no need to insert '..' spacers)
1361 if self.page != self.last_page and rightmost_page < self.last_page:
1361 if self.page != self.last_page and rightmost_page < self.last_page:
1362 nav_items.append(self._pagerlink(self.last_page, self.last_page))
1362 nav_items.append(self._pagerlink(self.last_page, self.last_page))
1363
1363
1364 ## prerender links
1364 ## prerender links
1365 #_page_link = url.current()
1365 #_page_link = url.current()
1366 #nav_items.append(literal('<link rel="prerender" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
1366 #nav_items.append(literal('<link rel="prerender" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
1367 #nav_items.append(literal('<link rel="prefetch" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
1367 #nav_items.append(literal('<link rel="prefetch" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
1368 return self.separator.join(nav_items)
1368 return self.separator.join(nav_items)
1369
1369
1370 def pager(self, format='~2~', page_param='page', partial_param='partial',
1370 def pager(self, format='~2~', page_param='page', partial_param='partial',
1371 show_if_single_page=False, separator=' ', onclick=None,
1371 show_if_single_page=False, separator=' ', onclick=None,
1372 symbol_first='<<', symbol_last='>>',
1372 symbol_first='<<', symbol_last='>>',
1373 symbol_previous='<', symbol_next='>',
1373 symbol_previous='<', symbol_next='>',
1374 link_attr={'class': 'pager_link', 'rel': 'prerender'},
1374 link_attr={'class': 'pager_link', 'rel': 'prerender'},
1375 curpage_attr={'class': 'pager_curpage'},
1375 curpage_attr={'class': 'pager_curpage'},
1376 dotdot_attr={'class': 'pager_dotdot'}, **kwargs):
1376 dotdot_attr={'class': 'pager_dotdot'}, **kwargs):
1377
1377
1378 self.curpage_attr = curpage_attr
1378 self.curpage_attr = curpage_attr
1379 self.separator = separator
1379 self.separator = separator
1380 self.pager_kwargs = kwargs
1380 self.pager_kwargs = kwargs
1381 self.page_param = page_param
1381 self.page_param = page_param
1382 self.partial_param = partial_param
1382 self.partial_param = partial_param
1383 self.onclick = onclick
1383 self.onclick = onclick
1384 self.link_attr = link_attr
1384 self.link_attr = link_attr
1385 self.dotdot_attr = dotdot_attr
1385 self.dotdot_attr = dotdot_attr
1386
1386
1387 # Don't show navigator if there is no more than one page
1387 # Don't show navigator if there is no more than one page
1388 if self.page_count == 0 or (self.page_count == 1 and not show_if_single_page):
1388 if self.page_count == 0 or (self.page_count == 1 and not show_if_single_page):
1389 return ''
1389 return ''
1390
1390
1391 from string import Template
1391 from string import Template
1392 # Replace ~...~ in token format by range of pages
1392 # Replace ~...~ in token format by range of pages
1393 result = re.sub(r'~(\d+)~', self._range, format)
1393 result = re.sub(r'~(\d+)~', self._range, format)
1394
1394
1395 # Interpolate '%' variables
1395 # Interpolate '%' variables
1396 result = Template(result).safe_substitute({
1396 result = Template(result).safe_substitute({
1397 'first_page': self.first_page,
1397 'first_page': self.first_page,
1398 'last_page': self.last_page,
1398 'last_page': self.last_page,
1399 'page': self.page,
1399 'page': self.page,
1400 'page_count': self.page_count,
1400 'page_count': self.page_count,
1401 'items_per_page': self.items_per_page,
1401 'items_per_page': self.items_per_page,
1402 'first_item': self.first_item,
1402 'first_item': self.first_item,
1403 'last_item': self.last_item,
1403 'last_item': self.last_item,
1404 'item_count': self.item_count,
1404 'item_count': self.item_count,
1405 'link_first': self.page > self.first_page and \
1405 'link_first': self.page > self.first_page and \
1406 self._pagerlink(self.first_page, symbol_first) or '',
1406 self._pagerlink(self.first_page, symbol_first) or '',
1407 'link_last': self.page < self.last_page and \
1407 'link_last': self.page < self.last_page and \
1408 self._pagerlink(self.last_page, symbol_last) or '',
1408 self._pagerlink(self.last_page, symbol_last) or '',
1409 'link_previous': self.previous_page and \
1409 'link_previous': self.previous_page and \
1410 self._pagerlink(self.previous_page, symbol_previous) \
1410 self._pagerlink(self.previous_page, symbol_previous) \
1411 or HTML.span(symbol_previous, class_="pg-previous disabled"),
1411 or HTML.span(symbol_previous, class_="pg-previous disabled"),
1412 'link_next': self.next_page and \
1412 'link_next': self.next_page and \
1413 self._pagerlink(self.next_page, symbol_next) \
1413 self._pagerlink(self.next_page, symbol_next) \
1414 or HTML.span(symbol_next, class_="pg-next disabled")
1414 or HTML.span(symbol_next, class_="pg-next disabled")
1415 })
1415 })
1416
1416
1417 return literal(result)
1417 return literal(result)
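# Illustrative usage sketch (not part of the original module): the format
# string mixes literal text, the $-tokens substituted above and the ~N~
# page-range token handled by _range(), e.g. a hypothetical call
#
#   page.pager('$link_previous ~2~ $link_next')
#
# renders a previous-page link, a window of two page numbers on each side
# of the current page, and a next-page link.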
1418
1418
1419
1419
1420 #==============================================================================
1420 #==============================================================================
1421 # REPO PAGER, PAGER FOR REPOSITORY
1421 # REPO PAGER, PAGER FOR REPOSITORY
1422 #==============================================================================
1422 #==============================================================================
1423 class RepoPage(Page):
1423 class RepoPage(Page):
1424
1424
1425 def __init__(self, collection, page=1, items_per_page=20,
1425 def __init__(self, collection, page=1, items_per_page=20,
1426 item_count=None, url=None, **kwargs):
1426 item_count=None, url=None, **kwargs):
1427
1427
1428 """Create a "RepoPage" instance. A special pager for paging a
1428 """Create a "RepoPage" instance. A special pager for paging a
1429 repository's items.
1429 repository's items.
1430 """
1430 """
1431 self._url_generator = url
1431 self._url_generator = url
1432
1432
1433 # Save the kwargs class-wide so they can be used in the pager() method
1433 # Save the kwargs class-wide so they can be used in the pager() method
1434 self.kwargs = kwargs
1434 self.kwargs = kwargs
1435
1435
1436 # Save a reference to the collection
1436 # Save a reference to the collection
1437 self.original_collection = collection
1437 self.original_collection = collection
1438
1438
1439 self.collection = collection
1439 self.collection = collection
1440
1440
1441 # The self.page is the number of the current page.
1441 # The self.page is the number of the current page.
1442 # The first page has the number 1!
1442 # The first page has the number 1!
1443 try:
1443 try:
1444 self.page = int(page) # make it int() if we get it as a string
1444 self.page = int(page) # make it int() if we get it as a string
1445 except (ValueError, TypeError):
1445 except (ValueError, TypeError):
1446 self.page = 1
1446 self.page = 1
1447
1447
1448 self.items_per_page = items_per_page
1448 self.items_per_page = items_per_page
1449
1449
1450 # Unless the user tells us how many items the collection has
1450 # Unless the user tells us how many items the collection has
1451 # we calculate that ourselves.
1451 # we calculate that ourselves.
1452 if item_count is not None:
1452 if item_count is not None:
1453 self.item_count = item_count
1453 self.item_count = item_count
1454 else:
1454 else:
1455 self.item_count = len(self.collection)
1455 self.item_count = len(self.collection)
1456
1456
1457 # Compute the number of the first and last available page
1457 # Compute the number of the first and last available page
1458 if self.item_count > 0:
1458 if self.item_count > 0:
1459 self.first_page = 1
1459 self.first_page = 1
1460 self.page_count = int(math.ceil(float(self.item_count) /
1460 self.page_count = int(math.ceil(float(self.item_count) /
1461 self.items_per_page))
1461 self.items_per_page))
1462 self.last_page = self.first_page + self.page_count - 1
1462 self.last_page = self.first_page + self.page_count - 1
1463
1463
1464 # Make sure that the requested page number is in the range of
1464 # Make sure that the requested page number is in the range of
1465 # valid pages
1465 # valid pages
1466 if self.page > self.last_page:
1466 if self.page > self.last_page:
1467 self.page = self.last_page
1467 self.page = self.last_page
1468 elif self.page < self.first_page:
1468 elif self.page < self.first_page:
1469 self.page = self.first_page
1469 self.page = self.first_page
1470
1470
1471 # Note: the number of items on this page can be less than
1471 # Note: the number of items on this page can be less than
1472 # items_per_page if the last page is not full
1472 # items_per_page if the last page is not full
1473 self.first_item = max(0, (self.item_count) - (self.page *
1473 self.first_item = max(0, (self.item_count) - (self.page *
1474 items_per_page))
1474 items_per_page))
1475 self.last_item = ((self.item_count - 1) - items_per_page *
1475 self.last_item = ((self.item_count - 1) - items_per_page *
1476 (self.page - 1))
1476 (self.page - 1))
1477
1477
1478 self.items = list(self.collection[self.first_item:self.last_item + 1])
1478 self.items = list(self.collection[self.first_item:self.last_item + 1])
1479
1479
1480 # Links to previous and next page
1480 # Links to previous and next page
1481 if self.page > self.first_page:
1481 if self.page > self.first_page:
1482 self.previous_page = self.page - 1
1482 self.previous_page = self.page - 1
1483 else:
1483 else:
1484 self.previous_page = None
1484 self.previous_page = None
1485
1485
1486 if self.page < self.last_page:
1486 if self.page < self.last_page:
1487 self.next_page = self.page + 1
1487 self.next_page = self.page + 1
1488 else:
1488 else:
1489 self.next_page = None
1489 self.next_page = None
1490
1490
1491 # No items available
1491 # No items available
1492 else:
1492 else:
1493 self.first_page = None
1493 self.first_page = None
1494 self.page_count = 0
1494 self.page_count = 0
1495 self.last_page = None
1495 self.last_page = None
1496 self.first_item = None
1496 self.first_item = None
1497 self.last_item = None
1497 self.last_item = None
1498 self.previous_page = None
1498 self.previous_page = None
1499 self.next_page = None
1499 self.next_page = None
1500 self.items = []
1500 self.items = []
1501
1501
1502 # This is a subclass of the 'list' type. Initialise the list now.
1502 # This is a subclass of the 'list' type. Initialise the list now.
1503 list.__init__(self, reversed(self.items))
1503 list.__init__(self, reversed(self.items))
1504
1504
1505
1505
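# Illustrative sketch of the slicing above, with assumed example values
# (item_count=45, items_per_page=20, page=2; `commits` and `page_url` are hypothetical):
#   page_count = ceil(45 / 20.0) = 3, last_page = 3
#   first_item = max(0, 45 - 2 * 20) = 5
#   last_item  = (45 - 1) - 20 * (2 - 1) = 24
#   items      = collection[5:25]  # 20 items, then reversed by list.__init__
# repo_page = RepoPage(commits, page=2, items_per_page=20, url=page_url)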
1506 def changed_tooltip(nodes):
1506 def changed_tooltip(nodes):
1507 """
1507 """
1508 Generates an HTML string for changed nodes in the commit page.
1508 Generates an HTML string for changed nodes in the commit page.
1509 It limits the output to 30 entries.
1509 It limits the output to 30 entries.
1510
1510
1511 :param nodes: LazyNodesGenerator
1511 :param nodes: LazyNodesGenerator
1512 """
1512 """
1513 if nodes:
1513 if nodes:
1514 pref = ': <br/> '
1514 pref = ': <br/> '
1515 suf = ''
1515 suf = ''
1516 if len(nodes) > 30:
1516 if len(nodes) > 30:
1517 suf = '<br/>' + _(' and %s more') % (len(nodes) - 30)
1517 suf = '<br/>' + _(' and %s more') % (len(nodes) - 30)
1518 return literal(pref + '<br/> '.join([safe_unicode(x.path)
1518 return literal(pref + '<br/> '.join([safe_unicode(x.path)
1519 for x in nodes[:30]]) + suf)
1519 for x in nodes[:30]]) + suf)
1520 else:
1520 else:
1521 return ': ' + _('No Files')
1521 return ': ' + _('No Files')
1522
1522
1523
1523
1524 def breadcrumb_repo_link(repo):
1524 def breadcrumb_repo_link(repo):
1525 """
1525 """
1526 Makes a breadcrumb path link to a repo
1526 Makes a breadcrumb path link to a repo
1527
1527
1528 ex::
1528 ex::
1529 group >> subgroup >> repo
1529 group >> subgroup >> repo
1530
1530
1531 :param repo: a Repository instance
1531 :param repo: a Repository instance
1532 """
1532 """
1533
1533
1534 path = [
1534 path = [
1535 link_to(group.name, url('repo_group_home', group_name=group.group_name))
1535 link_to(group.name, url('repo_group_home', group_name=group.group_name))
1536 for group in repo.groups_with_parents
1536 for group in repo.groups_with_parents
1537 ] + [
1537 ] + [
1538 link_to(repo.just_name, url('summary_home', repo_name=repo.repo_name))
1538 link_to(repo.just_name, url('summary_home', repo_name=repo.repo_name))
1539 ]
1539 ]
1540
1540
1541 return literal(' &raquo; '.join(path))
1541 return literal(' &raquo; '.join(path))
1542
1542
1543
1543
1544 def format_byte_size_binary(file_size):
1544 def format_byte_size_binary(file_size):
1545 """
1545 """
1546 Formats file/folder sizes using binary (base-2) units.
1546 Formats file/folder sizes using binary (base-2) units.
1547 """
1547 """
1548 formatted_size = format_byte_size(file_size, binary=True)
1548 formatted_size = format_byte_size(file_size, binary=True)
1549 return formatted_size
1549 return formatted_size
1550
1550
1551
1551
1552 def fancy_file_stats(stats):
1552 def fancy_file_stats(stats):
1553 """
1553 """
1554 Displays a fancy two-colored bar for the number of added/deleted
1554 Displays a fancy two-colored bar for the number of added/deleted
1555 lines of code in a file
1555 lines of code in a file
1556
1556
1557 :param stats: two element list of added/deleted lines of code
1557 :param stats: two element list of added/deleted lines of code
1558 """
1558 """
1559 from rhodecode.lib.diffs import NEW_FILENODE, DEL_FILENODE, \
1559 from rhodecode.lib.diffs import NEW_FILENODE, DEL_FILENODE, \
1560 MOD_FILENODE, RENAMED_FILENODE, CHMOD_FILENODE, BIN_FILENODE
1560 MOD_FILENODE, RENAMED_FILENODE, CHMOD_FILENODE, BIN_FILENODE
1561
1561
1562 def cgen(l_type, a_v, d_v):
1562 def cgen(l_type, a_v, d_v):
1563 mapping = {'tr': 'top-right-rounded-corner-mid',
1563 mapping = {'tr': 'top-right-rounded-corner-mid',
1564 'tl': 'top-left-rounded-corner-mid',
1564 'tl': 'top-left-rounded-corner-mid',
1565 'br': 'bottom-right-rounded-corner-mid',
1565 'br': 'bottom-right-rounded-corner-mid',
1566 'bl': 'bottom-left-rounded-corner-mid'}
1566 'bl': 'bottom-left-rounded-corner-mid'}
1567 map_getter = lambda x: mapping[x]
1567 map_getter = lambda x: mapping[x]
1568
1568
1569 if l_type == 'a' and d_v:
1569 if l_type == 'a' and d_v:
1570 #case when added and deleted are present
1570 #case when added and deleted are present
1571 return ' '.join(map(map_getter, ['tl', 'bl']))
1571 return ' '.join(map(map_getter, ['tl', 'bl']))
1572
1572
1573 if l_type == 'a' and not d_v:
1573 if l_type == 'a' and not d_v:
1574 return ' '.join(map(map_getter, ['tr', 'br', 'tl', 'bl']))
1574 return ' '.join(map(map_getter, ['tr', 'br', 'tl', 'bl']))
1575
1575
1576 if l_type == 'd' and a_v:
1576 if l_type == 'd' and a_v:
1577 return ' '.join(map(map_getter, ['tr', 'br']))
1577 return ' '.join(map(map_getter, ['tr', 'br']))
1578
1578
1579 if l_type == 'd' and not a_v:
1579 if l_type == 'd' and not a_v:
1580 return ' '.join(map(map_getter, ['tr', 'br', 'tl', 'bl']))
1580 return ' '.join(map(map_getter, ['tr', 'br', 'tl', 'bl']))
1581
1581
1582 a, d = stats['added'], stats['deleted']
1582 a, d = stats['added'], stats['deleted']
1583 width = 100
1583 width = 100
1584
1584
1585 if stats['binary']: # binary operations like chmod/rename etc
1585 if stats['binary']: # binary operations like chmod/rename etc
1586 lbl = []
1586 lbl = []
1587 bin_op = 0 # undefined
1587 bin_op = 0 # undefined
1588
1588
1589 # prefix with bin for binary files
1589 # prefix with bin for binary files
1590 if BIN_FILENODE in stats['ops']:
1590 if BIN_FILENODE in stats['ops']:
1591 lbl += ['bin']
1591 lbl += ['bin']
1592
1592
1593 if NEW_FILENODE in stats['ops']:
1593 if NEW_FILENODE in stats['ops']:
1594 lbl += [_('new file')]
1594 lbl += [_('new file')]
1595 bin_op = NEW_FILENODE
1595 bin_op = NEW_FILENODE
1596 elif MOD_FILENODE in stats['ops']:
1596 elif MOD_FILENODE in stats['ops']:
1597 lbl += [_('mod')]
1597 lbl += [_('mod')]
1598 bin_op = MOD_FILENODE
1598 bin_op = MOD_FILENODE
1599 elif DEL_FILENODE in stats['ops']:
1599 elif DEL_FILENODE in stats['ops']:
1600 lbl += [_('del')]
1600 lbl += [_('del')]
1601 bin_op = DEL_FILENODE
1601 bin_op = DEL_FILENODE
1602 elif RENAMED_FILENODE in stats['ops']:
1602 elif RENAMED_FILENODE in stats['ops']:
1603 lbl += [_('rename')]
1603 lbl += [_('rename')]
1604 bin_op = RENAMED_FILENODE
1604 bin_op = RENAMED_FILENODE
1605
1605
1606 # chmod can go with other operations, so we add a + to lbl if needed
1606 # chmod can go with other operations, so we add a + to lbl if needed
1607 if CHMOD_FILENODE in stats['ops']:
1607 if CHMOD_FILENODE in stats['ops']:
1608 lbl += [_('chmod')]
1608 lbl += [_('chmod')]
1609 if bin_op == 0:
1609 if bin_op == 0:
1610 bin_op = CHMOD_FILENODE
1610 bin_op = CHMOD_FILENODE
1611
1611
1612 lbl = '+'.join(lbl)
1612 lbl = '+'.join(lbl)
1613 b_a = '<div class="bin bin%s %s" style="width:100%%">%s</div>' \
1613 b_a = '<div class="bin bin%s %s" style="width:100%%">%s</div>' \
1614 % (bin_op, cgen('a', a_v='', d_v=0), lbl)
1614 % (bin_op, cgen('a', a_v='', d_v=0), lbl)
1615 b_d = '<div class="bin bin1" style="width:0%%"></div>'
1615 b_d = '<div class="bin bin1" style="width:0%%"></div>'
1616 return literal('<div style="width:%spx">%s%s</div>' % (width, b_a, b_d))
1616 return literal('<div style="width:%spx">%s%s</div>' % (width, b_a, b_d))
1617
1617
1618 t = stats['added'] + stats['deleted']
1618 t = stats['added'] + stats['deleted']
1619 unit = float(width) / (t or 1)
1619 unit = float(width) / (t or 1)
1620
1620
1621 # needs > 9% of width to be visible or 0 to be hidden
1621 # needs > 9% of width to be visible or 0 to be hidden
1622 a_p = max(9, unit * a) if a > 0 else 0
1622 a_p = max(9, unit * a) if a > 0 else 0
1623 d_p = max(9, unit * d) if d > 0 else 0
1623 d_p = max(9, unit * d) if d > 0 else 0
1624 p_sum = a_p + d_p
1624 p_sum = a_p + d_p
1625
1625
1626 if p_sum > width:
1626 if p_sum > width:
1627 # adjust the percentages to total 100% since small values were bumped to 9
1627 # adjust the percentages to total 100% since small values were bumped to 9
1628 if a_p > d_p:
1628 if a_p > d_p:
1629 a_p = a_p - (p_sum - width)
1629 a_p = a_p - (p_sum - width)
1630 else:
1630 else:
1631 d_p = d_p - (p_sum - width)
1631 d_p = d_p - (p_sum - width)
1632
1632
1633 a_v = a if a > 0 else ''
1633 a_v = a if a > 0 else ''
1634 d_v = d if d > 0 else ''
1634 d_v = d if d > 0 else ''
1635
1635
1636 d_a = '<div class="added %s" style="width:%s%%">%s</div>' % (
1636 d_a = '<div class="added %s" style="width:%s%%">%s</div>' % (
1637 cgen('a', a_v, d_v), a_p, a_v
1637 cgen('a', a_v, d_v), a_p, a_v
1638 )
1638 )
1639 d_d = '<div class="deleted %s" style="width:%s%%">%s</div>' % (
1639 d_d = '<div class="deleted %s" style="width:%s%%">%s</div>' % (
1640 cgen('d', a_v, d_v), d_p, d_v
1640 cgen('d', a_v, d_v), d_p, d_v
1641 )
1641 )
1642 return literal('<div style="width:%spx">%s%s</div>' % (width, d_a, d_d))
1642 return literal('<div style="width:%spx">%s%s</div>' % (width, d_a, d_d))
1643
1643
1644
1644
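# Worked example of the width math above, assuming
# stats = {'added': 90, 'deleted': 2, 'binary': False, 'ops': {}} (illustrative values only):
#   unit  = 100.0 / 92 ~= 1.09
#   a_p   = max(9, 1.09 * 90) ~= 97.8   # added bar
#   d_p   = max(9, 1.09 * 2)  = 9       # deleted bar bumped to the 9% minimum
#   p_sum = 106.8 > 100, so a_p is reduced by ~6.8 to keep the total at 100%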
1645 def urlify_text(text_, safe=True):
1645 def urlify_text(text_, safe=True):
1646 """
1646 """
1647 Extract urls from text and make HTML links out of them
1647 Extract urls from text and make HTML links out of them
1648
1648
1649 :param text_:
1649 :param text_:
1650 """
1650 """
1651
1651
1652 url_pat = re.compile(r'''(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@#.&+]'''
1652 url_pat = re.compile(r'''(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@#.&+]'''
1653 '''|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)''')
1653 '''|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)''')
1654
1654
1655 def url_func(match_obj):
1655 def url_func(match_obj):
1656 url_full = match_obj.groups()[0]
1656 url_full = match_obj.groups()[0]
1657 return '<a href="%(url)s">%(url)s</a>' % ({'url': url_full})
1657 return '<a href="%(url)s">%(url)s</a>' % ({'url': url_full})
1658 _newtext = url_pat.sub(url_func, text_)
1658 _newtext = url_pat.sub(url_func, text_)
1659 if safe:
1659 if safe:
1660 return literal(_newtext)
1660 return literal(_newtext)
1661 return _newtext
1661 return _newtext
1662
1662
1663
1663
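# Hypothetical usage of urlify_text() above (example URL only):
# urlify_text('docs at http://example.com/guide')
#   -> literal('docs at <a href="http://example.com/guide">http://example.com/guide</a>')
# With safe=False the plain string is returned instead of a literal().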
1664 def urlify_commits(text_, repository):
1664 def urlify_commits(text_, repository):
1665 """
1665 """
1666 Extract commit ids from text and make links from them
1666 Extract commit ids from text and make links from them
1667
1667
1668 :param text_:
1668 :param text_:
1669 :param repository: repo name to build the URL with
1669 :param repository: repo name to build the URL with
1670 """
1670 """
1671 from pylons import url # doh, we need to re-import url to mock it later
1671 from pylons import url # doh, we need to re-import url to mock it later
1672 URL_PAT = re.compile(r'(^|\s)([0-9a-fA-F]{12,40})($|\s)')
1672 URL_PAT = re.compile(r'(^|\s)([0-9a-fA-F]{12,40})($|\s)')
1673
1673
1674 def url_func(match_obj):
1674 def url_func(match_obj):
1675 commit_id = match_obj.groups()[1]
1675 commit_id = match_obj.groups()[1]
1676 pref = match_obj.groups()[0]
1676 pref = match_obj.groups()[0]
1677 suf = match_obj.groups()[2]
1677 suf = match_obj.groups()[2]
1678
1678
1679 tmpl = (
1679 tmpl = (
1680 '%(pref)s<a class="%(cls)s" href="%(url)s">'
1680 '%(pref)s<a class="%(cls)s" href="%(url)s">'
1681 '%(commit_id)s</a>%(suf)s'
1681 '%(commit_id)s</a>%(suf)s'
1682 )
1682 )
1683 return tmpl % {
1683 return tmpl % {
1684 'pref': pref,
1684 'pref': pref,
1685 'cls': 'revision-link',
1685 'cls': 'revision-link',
1686 'url': url('changeset_home', repo_name=repository,
1686 'url': url('changeset_home', repo_name=repository,
1687 revision=commit_id, qualified=True),
1687 revision=commit_id, qualified=True),
1688 'commit_id': commit_id,
1688 'commit_id': commit_id,
1689 'suf': suf
1689 'suf': suf
1690 }
1690 }
1691
1691
1692 newtext = URL_PAT.sub(url_func, text_)
1692 newtext = URL_PAT.sub(url_func, text_)
1693
1693
1694 return newtext
1694 return newtext
1695
1695
1696
1696
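# Hypothetical usage of urlify_commits() above; the pattern links any 12-40
# character hex id surrounded by whitespace (repo name and commit id are examples):
# urlify_commits('fixed in deadbeefcafe', 'my-repo')
#   -> 'fixed in <a class="revision-link" href="...changeset url...">deadbeefcafe</a>'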
1697 def _process_url_func(match_obj, repo_name, uid, entry,
1697 def _process_url_func(match_obj, repo_name, uid, entry,
1698 return_raw_data=False):
1698 return_raw_data=False):
1699 pref = ''
1699 pref = ''
1700 if match_obj.group().startswith(' '):
1700 if match_obj.group().startswith(' '):
1701 pref = ' '
1701 pref = ' '
1702
1702
1703 issue_id = ''.join(match_obj.groups())
1703 issue_id = ''.join(match_obj.groups())
1704 tmpl = (
1704 tmpl = (
1705 '%(pref)s<a class="%(cls)s" href="%(url)s">'
1705 '%(pref)s<a class="%(cls)s" href="%(url)s">'
1706 '%(issue-prefix)s%(id-repr)s'
1706 '%(issue-prefix)s%(id-repr)s'
1707 '</a>')
1707 '</a>')
1708
1708
1709 (repo_name_cleaned,
1709 (repo_name_cleaned,
1710 parent_group_name) = RepoGroupModel().\
1710 parent_group_name) = RepoGroupModel().\
1711 _get_group_name_and_parent(repo_name)
1711 _get_group_name_and_parent(repo_name)
1712
1712
1713 # variables replacement
1713 # variables replacement
1714 named_vars = {
1714 named_vars = {
1715 'id': issue_id,
1715 'id': issue_id,
1716 'repo': repo_name,
1716 'repo': repo_name,
1717 'repo_name': repo_name_cleaned,
1717 'repo_name': repo_name_cleaned,
1718 'group_name': parent_group_name
1718 'group_name': parent_group_name
1719 }
1719 }
1720 # named regex variables
1720 # named regex variables
1721 named_vars.update(match_obj.groupdict())
1721 named_vars.update(match_obj.groupdict())
1722 _url = string.Template(entry['url']).safe_substitute(**named_vars)
1722 _url = string.Template(entry['url']).safe_substitute(**named_vars)
1723
1723
1724 data = {
1724 data = {
1725 'pref': pref,
1725 'pref': pref,
1726 'cls': 'issue-tracker-link',
1726 'cls': 'issue-tracker-link',
1727 'url': _url,
1727 'url': _url,
1728 'id-repr': issue_id,
1728 'id-repr': issue_id,
1729 'issue-prefix': entry['pref'],
1729 'issue-prefix': entry['pref'],
1730 'serv': entry['url'],
1730 'serv': entry['url'],
1731 }
1731 }
1732 if return_raw_data:
1732 if return_raw_data:
1733 return {
1733 return {
1734 'id': issue_id,
1734 'id': issue_id,
1735 'url': _url
1735 'url': _url
1736 }
1736 }
1737 return tmpl % data
1737 return tmpl % data
1738
1738
1739
1739
1740 def process_patterns(text_string, repo_name, config=None):
1740 def process_patterns(text_string, repo_name, config=None):
1741 repo = None
1741 repo = None
1742 if repo_name:
1742 if repo_name:
1743 # Retrieve the repo up front so an invalid repo_name does not explode in
1743 # Retrieve the repo up front so an invalid repo_name does not explode in
1744 # IssueTrackerSettingsModel, while still passing the invalid name further down
1744 # IssueTrackerSettingsModel, while still passing the invalid name further down
1745 repo = Repository.get_by_repo_name(repo_name, cache=True)
1745 repo = Repository.get_by_repo_name(repo_name, cache=True)
1746
1746
1747 settings_model = IssueTrackerSettingsModel(repo=repo)
1747 settings_model = IssueTrackerSettingsModel(repo=repo)
1748 active_entries = settings_model.get_settings(cache=True)
1748 active_entries = settings_model.get_settings(cache=True)
1749
1749
1750 issues_data = []
1750 issues_data = []
1751 newtext = text_string
1751 newtext = text_string
1752 for uid, entry in active_entries.items():
1752 for uid, entry in active_entries.items():
1753 log.debug('found issue tracker entry with uid %s' % (uid,))
1753 log.debug('found issue tracker entry with uid %s' % (uid,))
1754
1754
1755 if not (entry['pat'] and entry['url']):
1755 if not (entry['pat'] and entry['url']):
1756 log.debug('skipping due to missing data')
1756 log.debug('skipping due to missing data')
1757 continue
1757 continue
1758
1758
1759 log.debug('issue tracker entry: uid: `%s` PAT:%s URL:%s PREFIX:%s'
1759 log.debug('issue tracker entry: uid: `%s` PAT:%s URL:%s PREFIX:%s'
1760 % (uid, entry['pat'], entry['url'], entry['pref']))
1760 % (uid, entry['pat'], entry['url'], entry['pref']))
1761
1761
1762 try:
1762 try:
1763 pattern = re.compile(r'%s' % entry['pat'])
1763 pattern = re.compile(r'%s' % entry['pat'])
1764 except re.error:
1764 except re.error:
1765 log.exception(
1765 log.exception(
1766 'issue tracker pattern: `%s` failed to compile',
1766 'issue tracker pattern: `%s` failed to compile',
1767 entry['pat'])
1767 entry['pat'])
1768 continue
1768 continue
1769
1769
1770 data_func = partial(
1770 data_func = partial(
1771 _process_url_func, repo_name=repo_name, entry=entry, uid=uid,
1771 _process_url_func, repo_name=repo_name, entry=entry, uid=uid,
1772 return_raw_data=True)
1772 return_raw_data=True)
1773
1773
1774 for match_obj in pattern.finditer(text_string):
1774 for match_obj in pattern.finditer(text_string):
1775 issues_data.append(data_func(match_obj))
1775 issues_data.append(data_func(match_obj))
1776
1776
1777 url_func = partial(
1777 url_func = partial(
1778 _process_url_func, repo_name=repo_name, entry=entry, uid=uid)
1778 _process_url_func, repo_name=repo_name, entry=entry, uid=uid)
1779
1779
1780 newtext = pattern.sub(url_func, newtext)
1780 newtext = pattern.sub(url_func, newtext)
1781 log.debug('processed prefix:uid `%s`' % (uid,))
1781 log.debug('processed prefix:uid `%s`' % (uid,))
1782
1782
1783 return newtext, issues_data
1783 return newtext, issues_data
1784
1784
1785
1785
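# Illustrative sketch of how process_patterns() applies an issue-tracker entry;
# the entry below is invented for the example, not a shipped default:
#   entry = {'pat': r'#(?P<issue_id>\d+)',
#            'url': 'https://tracker.example.com/${repo}/issue/${issue_id}',
#            'pref': '#'}
# process_patterns('fixes #42', 'my-repo') would then return the text with '#42'
# wrapped in an <a class="issue-tracker-link"> element, plus
# issues_data == [{'id': '42', 'url': 'https://tracker.example.com/my-repo/issue/42'}]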
1786 def urlify_commit_message(commit_text, repository=None):
1786 def urlify_commit_message(commit_text, repository=None):
1787 """
1787 """
1788 Parses the given text message and makes proper links.
1788 Parses the given text message and makes proper links.
1789 Issues are linked to the configured issue tracker, and commit ids become commit links
1789 Issues are linked to the configured issue tracker, and commit ids become commit links
1790
1790
1791 :param commit_text:
1791 :param commit_text:
1792 :param repository:
1792 :param repository:
1793 """
1793 """
1794 from pylons import url # doh, we need to re-import url to mock it later
1794 from pylons import url # doh, we need to re-import url to mock it later
1795
1795
1796 def escaper(string):
1796 def escaper(string):
1797 return string.replace('<', '&lt;').replace('>', '&gt;')
1797 return string.replace('<', '&lt;').replace('>', '&gt;')
1798
1798
1799 newtext = escaper(commit_text)
1799 newtext = escaper(commit_text)
1800
1800
1801 # extract http/https links and make them real urls
1801 # extract http/https links and make them real urls
1802 newtext = urlify_text(newtext, safe=False)
1802 newtext = urlify_text(newtext, safe=False)
1803
1803
1804 # urlify commits - extract commit ids and make link out of them, if we have
1804 # urlify commits - extract commit ids and make link out of them, if we have
1805 # the scope of repository present.
1805 # the scope of repository present.
1806 if repository:
1806 if repository:
1807 newtext = urlify_commits(newtext, repository)
1807 newtext = urlify_commits(newtext, repository)
1808
1808
1809 # process issue tracker patterns
1809 # process issue tracker patterns
1810 newtext, issues = process_patterns(newtext, repository or '')
1810 newtext, issues = process_patterns(newtext, repository or '')
1811
1811
1812 return literal(newtext)
1812 return literal(newtext)
1813
1813
1814
1814
1815 def renderer_from_filename(filename, exclude=None):
1815 def renderer_from_filename(filename, exclude=None):
1816 """
1816 """
1817 Choose a renderer based on the filename.
1817 Choose a renderer based on the filename.
1818 """
1818 """
1819
1819
1820 # ipython
1820 # ipython
1821 for ext in ['*.ipynb']:
1821 for ext in ['*.ipynb']:
1822 if fnmatch.fnmatch(filename, pat=ext):
1822 if fnmatch.fnmatch(filename, pat=ext):
1823 return 'jupyter'
1823 return 'jupyter'
1824
1824
1825 is_markup = MarkupRenderer.renderer_from_filename(filename, exclude=exclude)
1825 is_markup = MarkupRenderer.renderer_from_filename(filename, exclude=exclude)
1826 if is_markup:
1826 if is_markup:
1827 return is_markup
1827 return is_markup
1828 return None
1828 return None
1829
1829
1830
1830
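# Hypothetical calls to renderer_from_filename() above (filenames are examples):
# renderer_from_filename('notebooks/demo.ipynb')        -> 'jupyter'
# renderer_from_filename('README.md')                   -> 'markdown'
# renderer_from_filename('README.md', exclude=['.md'])  -> None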
1831 def render(source, renderer='rst', mentions=False):
1831 def render(source, renderer='rst', mentions=False, relative_url=None):
1832
1833 def maybe_convert_relative_links(html_source):
1834 if relative_url:
1835 return relative_links(html_source, relative_url)
1836 return html_source
1837
1832 if renderer == 'rst':
1838 if renderer == 'rst':
1833 return literal(
1839 return literal(
1834 '<div class="rst-block">%s</div>' %
1840 '<div class="rst-block">%s</div>' %
1835 MarkupRenderer.rst(source, mentions=mentions))
1841 maybe_convert_relative_links(
1842 MarkupRenderer.rst(source, mentions=mentions)))
1836 elif renderer == 'markdown':
1843 elif renderer == 'markdown':
1837 return literal(
1844 return literal(
1838 '<div class="markdown-block">%s</div>' %
1845 '<div class="markdown-block">%s</div>' %
1839 MarkupRenderer.markdown(source, flavored=True, mentions=mentions))
1846 maybe_convert_relative_links(
1847 MarkupRenderer.markdown(source, flavored=True,
1848 mentions=mentions)))
1840 elif renderer == 'jupyter':
1849 elif renderer == 'jupyter':
1841 return literal(
1850 return literal(
1842 '<div class="ipynb">%s</div>' %
1851 '<div class="ipynb">%s</div>' %
1843 MarkupRenderer.jupyter(source))
1852 maybe_convert_relative_links(
1853 MarkupRenderer.jupyter(source)))
1844
1854
1845 # None means just show the file-source
1855 # None means just show the file-source
1846 return None
1856 return None
1847
1857
1848
1858
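# Minimal sketch of the relative_url support added to render() above; the
# renderer choice and the files path are assumed example values:
# html = render(readme_source, renderer='markdown',
#               relative_url='/my-repo/files/tip/docs/README.md')
# Relative <img>/<a> targets in the rendered output are then rewritten by
# relative_links()/relative_path() against that path.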
1849 def commit_status(repo, commit_id):
1859 def commit_status(repo, commit_id):
1850 return ChangesetStatusModel().get_status(repo, commit_id)
1860 return ChangesetStatusModel().get_status(repo, commit_id)
1851
1861
1852
1862
1853 def commit_status_lbl(commit_status):
1863 def commit_status_lbl(commit_status):
1854 return dict(ChangesetStatus.STATUSES).get(commit_status)
1864 return dict(ChangesetStatus.STATUSES).get(commit_status)
1855
1865
1856
1866
1857 def commit_time(repo_name, commit_id):
1867 def commit_time(repo_name, commit_id):
1858 repo = Repository.get_by_repo_name(repo_name)
1868 repo = Repository.get_by_repo_name(repo_name)
1859 commit = repo.get_commit(commit_id=commit_id)
1869 commit = repo.get_commit(commit_id=commit_id)
1860 return commit.date
1870 return commit.date
1861
1871
1862
1872
1863 def get_permission_name(key):
1873 def get_permission_name(key):
1864 return dict(Permission.PERMS).get(key)
1874 return dict(Permission.PERMS).get(key)
1865
1875
1866
1876
1867 def journal_filter_help():
1877 def journal_filter_help():
1868 return _(
1878 return _(
1869 'Example filter terms:\n' +
1879 'Example filter terms:\n' +
1870 ' repository:vcs\n' +
1880 ' repository:vcs\n' +
1871 ' username:marcin\n' +
1881 ' username:marcin\n' +
1872 ' action:*push*\n' +
1882 ' action:*push*\n' +
1873 ' ip:127.0.0.1\n' +
1883 ' ip:127.0.0.1\n' +
1874 ' date:20120101\n' +
1884 ' date:20120101\n' +
1875 ' date:[20120101100000 TO 20120102]\n' +
1885 ' date:[20120101100000 TO 20120102]\n' +
1876 '\n' +
1886 '\n' +
1877 'Generate wildcards using \'*\' character:\n' +
1887 'Generate wildcards using \'*\' character:\n' +
1878 ' "repository:vcs*" - search everything starting with \'vcs\'\n' +
1888 ' "repository:vcs*" - search everything starting with \'vcs\'\n' +
1879 ' "repository:*vcs*" - search for repository containing \'vcs\'\n' +
1889 ' "repository:*vcs*" - search for repository containing \'vcs\'\n' +
1880 '\n' +
1890 '\n' +
1881 'Optional AND / OR operators in queries\n' +
1891 'Optional AND / OR operators in queries\n' +
1882 ' "repository:vcs OR repository:test"\n' +
1892 ' "repository:vcs OR repository:test"\n' +
1883 ' "username:test AND repository:test*"\n'
1893 ' "username:test AND repository:test*"\n'
1884 )
1894 )
1885
1895
1886
1896
1887 def not_mapped_error(repo_name):
1897 def not_mapped_error(repo_name):
1888 flash(_('%s repository is not mapped to the database;'
1898 flash(_('%s repository is not mapped to the database;'
1889 ' perhaps it was created or renamed on the filesystem.'
1899 ' perhaps it was created or renamed on the filesystem.'
1890 ' Please run the application again'
1900 ' Please run the application again'
1891 ' in order to rescan repositories') % repo_name, category='error')
1901 ' in order to rescan repositories') % repo_name, category='error')
1892
1902
1893
1903
1894 def ip_range(ip_addr):
1904 def ip_range(ip_addr):
1895 from rhodecode.model.db import UserIpMap
1905 from rhodecode.model.db import UserIpMap
1896 s, e = UserIpMap._get_ip_range(ip_addr)
1906 s, e = UserIpMap._get_ip_range(ip_addr)
1897 return '%s - %s' % (s, e)
1907 return '%s - %s' % (s, e)
1898
1908
1899
1909
1900 def form(url, method='post', needs_csrf_token=True, **attrs):
1910 def form(url, method='post', needs_csrf_token=True, **attrs):
1901 """Wrapper around webhelpers.tags.form to prevent CSRF attacks."""
1911 """Wrapper around webhelpers.tags.form to prevent CSRF attacks."""
1902 if method.lower() != 'get' and needs_csrf_token:
1912 if method.lower() != 'get' and needs_csrf_token:
1903 raise Exception(
1913 raise Exception(
1904 'Forms to POST/PUT/DELETE endpoints should have (in general) a ' +
1914 'Forms to POST/PUT/DELETE endpoints should have (in general) a ' +
1905 'CSRF token. If the endpoint does not require such token you can ' +
1915 'CSRF token. If the endpoint does not require such token you can ' +
1906 'explicitly set the parameter needs_csrf_token to false.')
1916 'explicitly set the parameter needs_csrf_token to false.')
1907
1917
1908 return wh_form(url, method=method, **attrs)
1918 return wh_form(url, method=method, **attrs)
1909
1919
1910
1920
1911 def secure_form(url, method="POST", multipart=False, **attrs):
1921 def secure_form(url, method="POST", multipart=False, **attrs):
1912 """Start a form tag that points the action to a url. This
1922 """Start a form tag that points the action to a url. This
1913 form tag will also include the hidden field containing
1923 form tag will also include the hidden field containing
1914 the auth token.
1924 the auth token.
1915
1925
1916 The url options should be given either as a string, or as a
1926 The url options should be given either as a string, or as a
1917 ``url()`` function. The method for the form defaults to POST.
1927 ``url()`` function. The method for the form defaults to POST.
1918
1928
1919 Options:
1929 Options:
1920
1930
1921 ``multipart``
1931 ``multipart``
1922 If set to True, the enctype is set to "multipart/form-data".
1932 If set to True, the enctype is set to "multipart/form-data".
1923 ``method``
1933 ``method``
1924 The method to use when submitting the form, usually either
1934 The method to use when submitting the form, usually either
1925 "GET" or "POST". If "PUT", "DELETE", or another verb is used, a
1935 "GET" or "POST". If "PUT", "DELETE", or another verb is used, a
1926 hidden input with name _method is added to simulate the verb
1936 hidden input with name _method is added to simulate the verb
1927 over POST.
1937 over POST.
1928
1938
1929 """
1939 """
1930 from webhelpers.pylonslib.secure_form import insecure_form
1940 from webhelpers.pylonslib.secure_form import insecure_form
1931 form = insecure_form(url, method, multipart, **attrs)
1941 form = insecure_form(url, method, multipart, **attrs)
1932 token = csrf_input()
1942 token = csrf_input()
1933 return literal("%s\n%s" % (form, token))
1943 return literal("%s\n%s" % (form, token))
1934
1944
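# Hypothetical usage of secure_form() above ('/login' is an example action only):
# secure_form('/login', method='POST')
#   -> literal('<form action="/login" method="POST">\n<input type="hidden" ...csrf token...>')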
1935 def csrf_input():
1945 def csrf_input():
1936 return literal(
1946 return literal(
1937 '<input type="hidden" id="{}" name="{}" value="{}">'.format(
1947 '<input type="hidden" id="{}" name="{}" value="{}">'.format(
1938 csrf_token_key, csrf_token_key, get_csrf_token()))
1948 csrf_token_key, csrf_token_key, get_csrf_token()))
1939
1949
1940 def dropdownmenu(name, selected, options, enable_filter=False, **attrs):
1950 def dropdownmenu(name, selected, options, enable_filter=False, **attrs):
1941 select_html = select(name, selected, options, **attrs)
1951 select_html = select(name, selected, options, **attrs)
1942 select2 = """
1952 select2 = """
1943 <script>
1953 <script>
1944 $(document).ready(function() {
1954 $(document).ready(function() {
1945 $('#%s').select2({
1955 $('#%s').select2({
1946 containerCssClass: 'drop-menu',
1956 containerCssClass: 'drop-menu',
1947 dropdownCssClass: 'drop-menu-dropdown',
1957 dropdownCssClass: 'drop-menu-dropdown',
1948 dropdownAutoWidth: true%s
1958 dropdownAutoWidth: true%s
1949 });
1959 });
1950 });
1960 });
1951 </script>
1961 </script>
1952 """
1962 """
1953 filter_option = """,
1963 filter_option = """,
1954 minimumResultsForSearch: -1
1964 minimumResultsForSearch: -1
1955 """
1965 """
1956 input_id = attrs.get('id') or name
1966 input_id = attrs.get('id') or name
1957 filter_enabled = "" if enable_filter else filter_option
1967 filter_enabled = "" if enable_filter else filter_option
1958 select_script = literal(select2 % (input_id, filter_enabled))
1968 select_script = literal(select2 % (input_id, filter_enabled))
1959
1969
1960 return literal(select_html+select_script)
1970 return literal(select_html+select_script)
1961
1971
1962
1972
1963 def get_visual_attr(tmpl_context_var, attr_name):
1973 def get_visual_attr(tmpl_context_var, attr_name):
1964 """
1974 """
1965 A safe way to get a variable from visual variable of template context
1975 A safe way to get a variable from visual variable of template context
1966
1976
1967 :param tmpl_context_var: instance of tmpl_context, usually present as `c`
1977 :param tmpl_context_var: instance of tmpl_context, usually present as `c`
1968 :param attr_name: name of the attribute we fetch from the c.visual
1978 :param attr_name: name of the attribute we fetch from the c.visual
1969 """
1979 """
1970 visual = getattr(tmpl_context_var, 'visual', None)
1980 visual = getattr(tmpl_context_var, 'visual', None)
1971 if not visual:
1981 if not visual:
1972 return
1982 return
1973 else:
1983 else:
1974 return getattr(visual, attr_name, None)
1984 return getattr(visual, attr_name, None)
1975
1985
1976
1986
1977 def get_last_path_part(file_node):
1987 def get_last_path_part(file_node):
1978 if not file_node.path:
1988 if not file_node.path:
1979 return u''
1989 return u''
1980
1990
1981 path = safe_unicode(file_node.path.split('/')[-1])
1991 path = safe_unicode(file_node.path.split('/')[-1])
1982 return u'../' + path
1992 return u'../' + path
1983
1993
1984
1994
1985 def route_path(*args, **kwds):
1995 def route_path(*args, **kwds):
1986 """
1996 """
1987 Wrapper around Pyramid's `route_path` function. It is used to generate
1997 Wrapper around Pyramid's `route_path` function. It is used to generate
1988 URLs from within pylons views or templates. This will be removed when
1998 URLs from within pylons views or templates. This will be removed when
1989 the pyramid migration is finished.
1999 the pyramid migration is finished.
1990 """
2000 """
1991 req = get_current_request()
2001 req = get_current_request()
1992 return req.route_path(*args, **kwds)
2002 return req.route_path(*args, **kwds)
1993
2003
1994
2004
1995 def route_path_or_none(*args, **kwargs):
2005 def route_path_or_none(*args, **kwargs):
1996 try:
2006 try:
1997 return route_path(*args, **kwargs)
2007 return route_path(*args, **kwargs)
1998 except KeyError:
2008 except KeyError:
1999 return None
2009 return None
2000
2010
2001
2011
2002 def static_url(*args, **kwds):
2012 def static_url(*args, **kwds):
2003 """
2013 """
2004 Wrapper around Pyramid's `static_url` function. It is used to generate
2014 Wrapper around Pyramid's `static_url` function. It is used to generate
2005 URLs from within pylons views or templates. This will be removed when
2015 URLs from within pylons views or templates. This will be removed when
2006 the pyramid migration is finished.
2016 the pyramid migration is finished.
2007 """
2017 """
2008 req = get_current_request()
2018 req = get_current_request()
2009 return req.static_url(*args, **kwds)
2019 return req.static_url(*args, **kwds)
2010
2020
2011
2021
2012 def resource_path(*args, **kwds):
2022 def resource_path(*args, **kwds):
2013 """
2023 """
2014 Wrapper around Pyramid's `resource_path` function. It is used to generate
2024 Wrapper around Pyramid's `resource_path` function. It is used to generate
2015 URLs from within pylons views or templates. This will be removed when
2025 URLs from within pylons views or templates. This will be removed when
2016 the pyramid migration is finished.
2026 the pyramid migration is finished.
2017 """
2027 """
2018 req = get_current_request()
2028 req = get_current_request()
2019 return req.resource_path(*args, **kwds)
2029 return req.resource_path(*args, **kwds)
@@ -1,375 +1,455 b''
1 # -*- coding: utf-8 -*-
1 # -*- coding: utf-8 -*-
2
2
3 # Copyright (C) 2011-2017 RhodeCode GmbH
3 # Copyright (C) 2011-2017 RhodeCode GmbH
4 #
4 #
5 # This program is free software: you can redistribute it and/or modify
5 # This program is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU Affero General Public License, version 3
6 # it under the terms of the GNU Affero General Public License, version 3
7 # (only), as published by the Free Software Foundation.
7 # (only), as published by the Free Software Foundation.
8 #
8 #
9 # This program is distributed in the hope that it will be useful,
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU General Public License for more details.
12 # GNU General Public License for more details.
13 #
13 #
14 # You should have received a copy of the GNU Affero General Public License
14 # You should have received a copy of the GNU Affero General Public License
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 #
16 #
17 # This program is dual-licensed. If you wish to learn more about the
17 # This program is dual-licensed. If you wish to learn more about the
18 # RhodeCode Enterprise Edition, including its added features, Support services,
18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 # and proprietary license terms, please see https://rhodecode.com/licenses/
19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20
20
21
21
22 """
22 """
23 Renderer for markup languages with ability to parse using rst or markdown
23 Renderer for markup languages with ability to parse using rst or markdown
24 """
24 """
25
25
26 import re
26 import re
27 import os
27 import os
28 import lxml
28 import logging
29 import logging
29 import itertools
30 import urlparse
31 import urllib
30
32
31 from mako.lookup import TemplateLookup
33 from mako.lookup import TemplateLookup
32 from mako.template import Template as MakoTemplate
34 from mako.template import Template as MakoTemplate
33
35
34 from docutils.core import publish_parts
36 from docutils.core import publish_parts
35 from docutils.parsers.rst import directives
37 from docutils.parsers.rst import directives
36 import markdown
38 import markdown
37
39
38 from rhodecode.lib.markdown_ext import (
40 from rhodecode.lib.markdown_ext import GithubFlavoredMarkdownExtension
39 UrlizeExtension, GithubFlavoredMarkdownExtension)
41 from rhodecode.lib.utils2 import (
40 from rhodecode.lib.utils2 import safe_unicode, md5_safe, MENTIONS_REGEX
42 safe_str, safe_unicode, md5_safe, MENTIONS_REGEX)
41
43
42 log = logging.getLogger(__name__)
44 log = logging.getLogger(__name__)
43
45
44 # default renderer used to generate automated comments
46 # default renderer used to generate automated comments
45 DEFAULT_COMMENTS_RENDERER = 'rst'
47 DEFAULT_COMMENTS_RENDERER = 'rst'
46
48
47
49
50 def relative_links(html_source, server_path):
51 doc = lxml.html.fromstring(html_source)
52 for el in doc.cssselect('img, video'):
53 src = el.attrib['src']
54 if src:
55 el.attrib['src'] = relative_path(src, server_path)
56
57 for el in doc.cssselect('a:not(.gfm)'):
58 src = el.attrib['href']
59 if src:
60 el.attrib['href'] = relative_path(src, server_path)
61
62 return lxml.html.tostring(doc)
63
64
65 def relative_path(path, request_path, is_repo_file=None):
66 """
67 relative link support: `path` is a relative path and `request_path` is the
68 current server path (not absolute)
69
70 e.g.
71
72 path = '../logo.png'
73 request_path= '/repo/files/path/file.md'
74 produces: '/repo/files/logo.png'
75 """
76 # TODO(marcink): unicode/str support ?
77 # maybe=> safe_unicode(urllib.quote(safe_str(final_path), '/:'))
78
79 def dummy_check(p):
80 return True # assume default is a valid file path
81
82 is_repo_file = is_repo_file or dummy_check
83 if not path:
84 return request_path
85
86 path = safe_unicode(path)
87 request_path = safe_unicode(request_path)
88
89 if path.startswith((u'data:', u'#', u':')):
90 # skip data, anchor, invalid links
91 return path
92
93 is_absolute = bool(urlparse.urlparse(path).netloc)
94 if is_absolute:
95 return path
96
97 if not request_path:
98 return path
99
100 if path.startswith(u'/'):
101 path = path[1:]
102
103 if path.startswith(u'./'):
104 path = path[2:]
105
106 parts = request_path.split('/')
107 # compute how deep we need to traverse the request_path
108 depth = 0
109
110 if is_repo_file(request_path):
111 # if request path is a VALID file, we use a relative path with
112 # one level up
113 depth += 1
114
115 while path.startswith(u'../'):
116 depth += 1
117 path = path[3:]
118
119 if depth > 0:
120 parts = parts[:-depth]
121
122 parts.append(path)
123 final_path = u'/'.join(parts).lstrip(u'/')
124
125 return u'/' + final_path
126
127
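# Quick sanity-check examples for relative_path() above (illustrative paths only,
# mirroring the docstring):
# relative_path('../logo.png', '/repo/files/path/file.md') == '/repo/files/logo.png'
# relative_path('img/logo.png', '/repo/files/path/file.md') == '/repo/files/path/img/logo.png'
# relative_path('https://cdn.example.com/x.png', '/repo/files/file.md') is returned unchanged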
48 class MarkupRenderer(object):
128 class MarkupRenderer(object):
49 RESTRUCTUREDTEXT_DISALLOWED_DIRECTIVES = ['include', 'meta', 'raw']
129 RESTRUCTUREDTEXT_DISALLOWED_DIRECTIVES = ['include', 'meta', 'raw']
50
130
51 MARKDOWN_PAT = re.compile(r'\.(md|mkdn?|mdown|markdown)$', re.IGNORECASE)
131 MARKDOWN_PAT = re.compile(r'\.(md|mkdn?|mdown|markdown)$', re.IGNORECASE)
52 RST_PAT = re.compile(r'\.re?st$', re.IGNORECASE)
132 RST_PAT = re.compile(r'\.re?st$', re.IGNORECASE)
53 JUPYTER_PAT = re.compile(r'\.(ipynb)$', re.IGNORECASE)
133 JUPYTER_PAT = re.compile(r'\.(ipynb)$', re.IGNORECASE)
54 PLAIN_PAT = re.compile(r'^readme$', re.IGNORECASE)
134 PLAIN_PAT = re.compile(r'^readme$', re.IGNORECASE)
55
135
56 extensions = ['codehilite', 'extra', 'def_list', 'sane_lists']
136 extensions = ['codehilite', 'extra', 'def_list', 'sane_lists']
57 markdown_renderer = markdown.Markdown(
137 markdown_renderer = markdown.Markdown(
58 extensions, safe_mode=True, enable_attributes=False)
138 extensions, safe_mode=True, enable_attributes=False)
59
139
60 markdown_renderer_flavored = markdown.Markdown(
140 markdown_renderer_flavored = markdown.Markdown(
61 extensions + [GithubFlavoredMarkdownExtension()], safe_mode=True,
141 extensions + [GithubFlavoredMarkdownExtension()], safe_mode=True,
62 enable_attributes=False)
142 enable_attributes=False)
63
143
64 # extensions together with weights. Lower comes first, which lets us control
144 # extensions together with weights. Lower comes first, which lets us control
65 # how extensions are matched to readme names.
145 # how extensions are matched to readme names.
66 PLAIN_EXTS = [
146 PLAIN_EXTS = [
67 # prefer no extension
147 # prefer no extension
68 ('', 0), # special case that renders READMES names without extension
148 ('', 0), # special case that renders READMES names without extension
69 ('.text', 2), ('.TEXT', 2),
149 ('.text', 2), ('.TEXT', 2),
70 ('.txt', 3), ('.TXT', 3)
150 ('.txt', 3), ('.TXT', 3)
71 ]
151 ]
72
152
73 RST_EXTS = [
153 RST_EXTS = [
74 ('.rst', 1), ('.rest', 1),
154 ('.rst', 1), ('.rest', 1),
75 ('.RST', 2), ('.REST', 2)
155 ('.RST', 2), ('.REST', 2)
76 ]
156 ]
77
157
78 MARKDOWN_EXTS = [
158 MARKDOWN_EXTS = [
79 ('.md', 1), ('.MD', 1),
159 ('.md', 1), ('.MD', 1),
80 ('.mkdn', 2), ('.MKDN', 2),
160 ('.mkdn', 2), ('.MKDN', 2),
81 ('.mdown', 3), ('.MDOWN', 3),
161 ('.mdown', 3), ('.MDOWN', 3),
82 ('.markdown', 4), ('.MARKDOWN', 4)
162 ('.markdown', 4), ('.MARKDOWN', 4)
83 ]
163 ]
84
164
85 def _detect_renderer(self, source, filename=None):
165 def _detect_renderer(self, source, filename=None):
86 """
166 """
87 runs detection of what renderer should be used for generating html
167 runs detection of what renderer should be used for generating html
88 from a markup language
168 from a markup language
89
169
90 filename can also explicitly be a renderer name
170 filename can also explicitly be a renderer name
91
171
92 :param source:
172 :param source:
93 :param filename:
173 :param filename:
94 """
174 """
95
175
96 if MarkupRenderer.MARKDOWN_PAT.findall(filename):
176 if MarkupRenderer.MARKDOWN_PAT.findall(filename):
97 detected_renderer = 'markdown'
177 detected_renderer = 'markdown'
98 elif MarkupRenderer.RST_PAT.findall(filename):
178 elif MarkupRenderer.RST_PAT.findall(filename):
99 detected_renderer = 'rst'
179 detected_renderer = 'rst'
100 elif MarkupRenderer.JUPYTER_PAT.findall(filename):
180 elif MarkupRenderer.JUPYTER_PAT.findall(filename):
101 detected_renderer = 'jupyter'
181 detected_renderer = 'jupyter'
102 elif MarkupRenderer.PLAIN_PAT.findall(filename):
182 elif MarkupRenderer.PLAIN_PAT.findall(filename):
103 detected_renderer = 'plain'
183 detected_renderer = 'plain'
104 else:
184 else:
105 detected_renderer = 'plain'
185 detected_renderer = 'plain'
106
186
107 return getattr(MarkupRenderer, detected_renderer)
187 return getattr(MarkupRenderer, detected_renderer)
108
188
109 @classmethod
189 @classmethod
110 def renderer_from_filename(cls, filename, exclude):
190 def renderer_from_filename(cls, filename, exclude):
111 """
191 """
112 Detect renderer markdown/rst from filename and optionally use exclude
192 Detect renderer markdown/rst from filename and optionally use exclude
113 list to remove some options. This is mostly used in helpers.
193 list to remove some options. This is mostly used in helpers.
114 Returns None when no renderer can be detected.
194 Returns None when no renderer can be detected.
115 """
195 """
116 def _filter(elements):
196 def _filter(elements):
117 if isinstance(exclude, (list, tuple)):
197 if isinstance(exclude, (list, tuple)):
118 return [x for x in elements if x not in exclude]
198 return [x for x in elements if x not in exclude]
119 return elements
199 return elements
120
200
121 if filename.endswith(
201 if filename.endswith(
122 tuple(_filter([x[0] for x in cls.MARKDOWN_EXTS if x[0]]))):
202 tuple(_filter([x[0] for x in cls.MARKDOWN_EXTS if x[0]]))):
123 return 'markdown'
203 return 'markdown'
124 if filename.endswith(tuple(_filter([x[0] for x in cls.RST_EXTS if x[0]]))):
204 if filename.endswith(tuple(_filter([x[0] for x in cls.RST_EXTS if x[0]]))):
125 return 'rst'
205 return 'rst'
126
206
127 return None
207 return None
128
208
129 def render(self, source, filename=None):
209 def render(self, source, filename=None):
130 """
210 """
131 Renders the given source using the detected renderer;
211 Renders the given source using the detected renderer;
132 renderers are detected based on file extension or mimetype.
212 renderers are detected based on file extension or mimetype.
133 As a last resort it will just produce simple HTML, replacing new lines with <br/>
213 As a last resort it will just produce simple HTML, replacing new lines with <br/>
134
214
135 :param filename:
215 :param filename:
136 :param source:
216 :param source:
137 """
217 """
138
218
139 renderer = self._detect_renderer(source, filename)
219 renderer = self._detect_renderer(source, filename)
140 readme_data = renderer(source)
220 readme_data = renderer(source)
141 return readme_data
221 return readme_data
142
222
143 @classmethod
223 @classmethod
144 def _flavored_markdown(cls, text):
224 def _flavored_markdown(cls, text):
145 """
225 """
146 Github style flavored markdown
226 Github style flavored markdown
147
227
148 :param text:
228 :param text:
149 """
229 """
150
230
151 # Extract pre blocks.
231 # Extract pre blocks.
152 extractions = {}
232 extractions = {}
153
233
154 def pre_extraction_callback(matchobj):
234 def pre_extraction_callback(matchobj):
155 digest = md5_safe(matchobj.group(0))
235 digest = md5_safe(matchobj.group(0))
156 extractions[digest] = matchobj.group(0)
236 extractions[digest] = matchobj.group(0)
157 return "{gfm-extraction-%s}" % digest
237 return "{gfm-extraction-%s}" % digest
158 pattern = re.compile(r'<pre>.*?</pre>', re.MULTILINE | re.DOTALL)
238 pattern = re.compile(r'<pre>.*?</pre>', re.MULTILINE | re.DOTALL)
159 text = re.sub(pattern, pre_extraction_callback, text)
239 text = re.sub(pattern, pre_extraction_callback, text)
160
240
161 # Prevent foo_bar_baz from ending up with an italic word in the middle.
241 # Prevent foo_bar_baz from ending up with an italic word in the middle.
162 def italic_callback(matchobj):
242 def italic_callback(matchobj):
163 s = matchobj.group(0)
243 s = matchobj.group(0)
164 if list(s).count('_') >= 2:
244 if list(s).count('_') >= 2:
165 return s.replace('_', r'\_')
245 return s.replace('_', r'\_')
166 return s
246 return s
167 text = re.sub(r'^(?! {4}|\t)\w+_\w+_\w[\w_]*', italic_callback, text)
247 text = re.sub(r'^(?! {4}|\t)\w+_\w+_\w[\w_]*', italic_callback, text)
168
248
169 # Insert pre block extractions.
249 # Insert pre block extractions.
170 def pre_insert_callback(matchobj):
250 def pre_insert_callback(matchobj):
171 return '\n\n' + extractions[matchobj.group(1)]
251 return '\n\n' + extractions[matchobj.group(1)]
172 text = re.sub(r'\{gfm-extraction-([0-9a-f]{32})\}',
252 text = re.sub(r'\{gfm-extraction-([0-9a-f]{32})\}',
173 pre_insert_callback, text)
253 pre_insert_callback, text)
174
254
175 return text
255 return text
176
256
177 @classmethod
257 @classmethod
178 def urlify_text(cls, text):
258 def urlify_text(cls, text):
179 url_pat = re.compile(r'(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]'
259 url_pat = re.compile(r'(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]'
180 r'|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)')
260 r'|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)')
181
261
182 def url_func(match_obj):
262 def url_func(match_obj):
183 url_full = match_obj.groups()[0]
263 url_full = match_obj.groups()[0]
184 return '<a href="%(url)s">%(url)s</a>' % ({'url': url_full})
264 return '<a href="%(url)s">%(url)s</a>' % ({'url': url_full})
185
265
186 return url_pat.sub(url_func, text)
266 return url_pat.sub(url_func, text)
187
267
188 @classmethod
268 @classmethod
189 def plain(cls, source, universal_newline=True):
269 def plain(cls, source, universal_newline=True):
190 source = safe_unicode(source)
270 source = safe_unicode(source)
191 if universal_newline:
271 if universal_newline:
192 newline = '\n'
272 newline = '\n'
193 source = newline.join(source.splitlines())
273 source = newline.join(source.splitlines())
194
274
195 source = cls.urlify_text(source)
275 source = cls.urlify_text(source)
196 return '<br />' + source.replace("\n", '<br />')
276 return '<br />' + source.replace("\n", '<br />')
197
277
198 @classmethod
278 @classmethod
199 def markdown(cls, source, safe=True, flavored=True, mentions=False):
279 def markdown(cls, source, safe=True, flavored=True, mentions=False):
200 # It does not allow inserting inline HTML. In the presence of HTML tags, it
280 # It does not allow inserting inline HTML. In the presence of HTML tags, it
201 # will replace them instead with [HTML_REMOVED]. This is controlled by
281 # will replace them instead with [HTML_REMOVED]. This is controlled by
202 # the safe_mode=True parameter of the markdown method.
282 # the safe_mode=True parameter of the markdown method.
203
283
204 if flavored:
284 if flavored:
205 markdown_renderer = cls.markdown_renderer_flavored
285 markdown_renderer = cls.markdown_renderer_flavored
206 else:
286 else:
207 markdown_renderer = cls.markdown_renderer
287 markdown_renderer = cls.markdown_renderer
208
288
209 if mentions:
289 if mentions:
210 mention_pat = re.compile(MENTIONS_REGEX)
290 mention_pat = re.compile(MENTIONS_REGEX)
211
291
212 def wrapp(match_obj):
292 def wrapp(match_obj):
213 uname = match_obj.groups()[0]
293 uname = match_obj.groups()[0]
214 return ' **@%(uname)s** ' % {'uname': uname}
294 return ' **@%(uname)s** ' % {'uname': uname}
215 mention_hl = mention_pat.sub(wrapp, source).strip()
295 mention_hl = mention_pat.sub(wrapp, source).strip()
216 # mentions were already wrapped above, so render again with mentions=False
296 # mentions were already wrapped above, so render again with mentions=False
217 return cls.markdown(mention_hl, safe=safe, flavored=flavored,
297 return cls.markdown(mention_hl, safe=safe, flavored=flavored,
218 mentions=False)
298 mentions=False)
219
299
220 source = safe_unicode(source)
300 source = safe_unicode(source)
221 try:
301 try:
222 if flavored:
302 if flavored:
223 source = cls._flavored_markdown(source)
303 source = cls._flavored_markdown(source)
224 return markdown_renderer.convert(source)
304 return markdown_renderer.convert(source)
225 except Exception:
305 except Exception:
226 log.exception('Error when rendering Markdown')
306 log.exception('Error when rendering Markdown')
227 if safe:
307 if safe:
228 log.debug('Fallback to render in plain mode')
308 log.debug('Fallback to render in plain mode')
229 return cls.plain(source)
309 return cls.plain(source)
230 else:
310 else:
231 raise
311 raise
232
312
233 @classmethod
313 @classmethod
234 def rst(cls, source, safe=True, mentions=False):
314 def rst(cls, source, safe=True, mentions=False):
235 if mentions:
315 if mentions:
236 mention_pat = re.compile(MENTIONS_REGEX)
316 mention_pat = re.compile(MENTIONS_REGEX)
237
317
238 def wrapp(match_obj):
318 def wrapp(match_obj):
239 uname = match_obj.groups()[0]
319 uname = match_obj.groups()[0]
240 return ' **@%(uname)s** ' % {'uname': uname}
320 return ' **@%(uname)s** ' % {'uname': uname}
241 mention_hl = mention_pat.sub(wrapp, source).strip()
321 mention_hl = mention_pat.sub(wrapp, source).strip()
242 # mentions were already wrapped above, so render again with mentions=False
322 # mentions were already wrapped above, so render again with mentions=False
243 return cls.rst(mention_hl, safe=safe, mentions=False)
323 return cls.rst(mention_hl, safe=safe, mentions=False)
244
324
245 source = safe_unicode(source)
325 source = safe_unicode(source)
246 try:
326 try:
247 docutils_settings = dict(
327 docutils_settings = dict(
248 [(alias, None) for alias in
328 [(alias, None) for alias in
249 cls.RESTRUCTUREDTEXT_DISALLOWED_DIRECTIVES])
329 cls.RESTRUCTUREDTEXT_DISALLOWED_DIRECTIVES])
250
330
251 docutils_settings.update({'input_encoding': 'unicode',
331 docutils_settings.update({'input_encoding': 'unicode',
252 'report_level': 4})
332 'report_level': 4})
253
333
254 for k, v in docutils_settings.iteritems():
334 for k, v in docutils_settings.iteritems():
255 directives.register_directive(k, v)
335 directives.register_directive(k, v)
256
336
257 parts = publish_parts(source=source,
337 parts = publish_parts(source=source,
258 writer_name="html4css1",
338 writer_name="html4css1",
259 settings_overrides=docutils_settings)
339 settings_overrides=docutils_settings)
260
340
261 return parts['html_title'] + parts["fragment"]
341 return parts['html_title'] + parts["fragment"]
262 except Exception:
342 except Exception:
263 log.exception('Error when rendering RST')
343 log.exception('Error when rendering RST')
264 if safe:
344 if safe:
265 log.debug('Falling back to render in plain mode')
345 log.debug('Falling back to render in plain mode')
266 return cls.plain(source)
346 return cls.plain(source)
267 else:
347 else:
268 raise
348 raise
269
349
    @classmethod
    def jupyter(cls, source, safe=True):
        from rhodecode.lib import helpers

        from traitlets.config import Config
        import nbformat
        from nbconvert import HTMLExporter
        from nbconvert.preprocessors import Preprocessor

        class CustomHTMLExporter(HTMLExporter):
            def _template_file_default(self):
                return 'basic'

        class Sandbox(Preprocessor):

            def preprocess(self, nb, resources):
                sandbox_text = 'SandBoxed(IPython.core.display.Javascript object)'
                for cell in nb['cells']:
                    if safe and 'outputs' in cell:
                        for cell_output in cell['outputs']:
                            if 'data' in cell_output:
                                if 'application/javascript' in cell_output['data']:
                                    cell_output['data']['text/plain'] = sandbox_text
                                    cell_output['data'].pop('application/javascript', None)
                return nb, resources

        def _sanitize_resources(resources):
            """
            Skip/sanitize some of the CSS generated and included by Jupyter
            so it doesn't mess up the UI so much.
            """

            # TODO(marcink): we should probably replace this with a whole custom
            # CSS set that doesn't break the UI, but the Jupyter-generated HTML has
            # some special markers, so achieving that requires a custom HTML exporter
            # template with _default_template_path_default

            # strip the reset CSS
            resources[0] = resources[0][resources[0].find('/*! Source'):]
            return resources

        def as_html(notebook):
            conf = Config()
            conf.CustomHTMLExporter.preprocessors = [Sandbox]
            html_exporter = CustomHTMLExporter(config=conf)

            (body, resources) = html_exporter.from_notebook_node(notebook)
            header = '<!-- ## IPYTHON NOTEBOOK RENDERING ## -->'
            js = MakoTemplate(r'''
            <!-- Load mathjax -->
            <!-- MathJax configuration -->
            <script type="text/x-mathjax-config">
            MathJax.Hub.Config({
                jax: ["input/TeX","output/HTML-CSS", "output/PreviewHTML"],
                extensions: ["tex2jax.js","MathMenu.js","MathZoom.js", "fast-preview.js", "AssistiveMML.js", "[Contrib]/a11y/accessibility-menu.js"],
                TeX: {
                    extensions: ["AMSmath.js","AMSsymbols.js","noErrors.js","noUndefined.js"]
                },
                tex2jax: {
                    inlineMath: [ ['$','$'], ["\\(","\\)"] ],
                    displayMath: [ ['$$','$$'], ["\\[","\\]"] ],
                    processEscapes: true,
                    processEnvironments: true
                },
                // Center justify equations in code and markdown cells. Elsewhere
                // we use CSS to left justify single line equations in code cells.
                displayAlign: 'center',
                "HTML-CSS": {
                    styles: {'.MathJax_Display': {"margin": 0}},
                    linebreaks: { automatic: true },
                    availableFonts: ["STIX", "TeX"]
                },
                showMathMenu: false
            });
            </script>
            <!-- End of mathjax configuration -->
            <script src="${h.asset('js/src/math_jax/MathJax.js')}"></script>
            ''').render(h=helpers)

            css = '<style>{}</style>'.format(
                ''.join(_sanitize_resources(resources['inlining']['css'])))

            body = '\n'.join([header, css, js, body])
            return body, resources

        notebook = nbformat.reads(source, as_version=4)
        (body, resources) = as_html(notebook)
        return body


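Minimal usage sketch of the notebook renderer above (the file name is hypothetical): with safe=True the Sandbox preprocessor replaces any application/javascript cell output with a plain-text "SandBoxed(...)" marker before the HTML export.

    with open('example.ipynb') as f:
        html = MarkupRenderer.jupyter(f.read(), safe=True)
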
class RstTemplateRenderer(object):

    def __init__(self):
        base = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
        rst_template_dirs = [os.path.join(base, 'templates', 'rst_templates')]
        self.template_store = TemplateLookup(
            directories=rst_template_dirs,
            input_encoding='utf-8',
            imports=['from rhodecode.lib import helpers as h'])

    def _get_template(self, templatename):
        return self.template_store.get_template(templatename)

    def render(self, template_name, **kwargs):
        template = self._get_template(template_name)
        return template.render(**kwargs)
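
Usage sketch, mirroring the tests further down: templates are looked up under rhodecode/templates/rst_templates and rendered with keyword parameters.

    renderer = RstTemplateRenderer()
    rst = renderer.render(
        'auto_status_change.mako',
        new_status_label='NEW STATUS', pull_request=None, commit_id=None)
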
@@ -1,78 +1,78 b''
<%namespace name="sourceblock" file="/codeblocks/source.mako"/>

<div id="codeblock" class="codeblock">
  <div class="codeblock-header">
    <div class="stats">
      <span> <strong>${c.file}</strong></span>
      <span> | ${c.file.lines()[0]} ${ungettext('line', 'lines', c.file.lines()[0])}</span>
      <span> | ${h.format_byte_size_binary(c.file.size)}</span>
      <span> | ${c.file.mimetype} </span>
      <span class="item last"> | ${h.get_lexer_for_filenode(c.file).__class__.__name__}</span>
    </div>
    <div class="buttons">
      <a id="file_history_overview" href="#">
        ${_('History')}
      </a>
      <a id="file_history_overview_full" style="display: none" href="${h.url('changelog_file_home',repo_name=c.repo_name, revision=c.commit.raw_id, f_path=c.f_path)}">
        ${_('Show Full History')}
      </a> |
      %if c.annotate:
        ${h.link_to(_('Source'), h.url('files_home', repo_name=c.repo_name,revision=c.commit.raw_id,f_path=c.f_path))}
      %else:
        ${h.link_to(_('Annotation'), h.url('files_annotate_home',repo_name=c.repo_name,revision=c.commit.raw_id,f_path=c.f_path))}
      %endif
      | ${h.link_to(_('Raw'), h.url('files_raw_home',repo_name=c.repo_name,revision=c.commit.raw_id,f_path=c.f_path))}
      | <a href="${h.url('files_rawfile_home',repo_name=c.repo_name,revision=c.commit.raw_id,f_path=c.f_path)}">
        ${_('Download')}
      </a>

      %if h.HasRepoPermissionAny('repository.write','repository.admin')(c.repo_name):
        |
        %if c.on_branch_head and c.branch_or_raw_id and not c.file.is_binary:
          <a href="${h.url('files_edit_home',repo_name=c.repo_name,revision=c.branch_or_raw_id,f_path=c.f_path, anchor='edit')}">
            ${_('Edit on Branch:%s') % c.branch_or_raw_id}
          </a>
          | <a class="btn-danger btn-link" href="${h.url('files_delete_home',repo_name=c.repo_name,revision=c.branch_or_raw_id,f_path=c.f_path, anchor='edit')}">${_('Delete')}
          </a>
        %elif c.on_branch_head and c.branch_or_raw_id and c.file.is_binary:
          ${h.link_to(_('Edit'), '#', class_="btn btn-link disabled tooltip", title=_('Editing binary files not allowed'))}
          | ${h.link_to(_('Delete'), h.url('files_delete_home',repo_name=c.repo_name,revision=c.branch_or_raw_id,f_path=c.f_path, anchor='edit'),class_="btn-danger btn-link")}
        %else:
          ${h.link_to(_('Edit'), '#', class_="btn btn-link disabled tooltip", title=_('Editing files allowed only when on branch head commit'))}
          | ${h.link_to(_('Delete'), '#', class_="btn btn-danger btn-link disabled tooltip", title=_('Deleting files allowed only when on branch head commit'))}
        %endif
      %endif
    </div>
  </div>
  <div id="file_history_container"></div>
  <div class="code-body">
    %if c.file.is_binary:
      <div>
        ${_('Binary file (%s)') % c.file.mimetype}
      </div>
    %else:
      % if c.file.size < c.cut_off_limit:
        %if c.renderer and not c.annotate:
-         ${h.render(c.file.content, renderer=c.renderer)}
+         ${h.render(c.file.content, renderer=c.renderer, relative_url=h.url('files_raw_home',repo_name=c.repo_name,revision=c.commit.raw_id,f_path=c.f_path))}
        %else:
          <table class="cb codehilite">
            %if c.annotate:
              <% color_hasher = h.color_hasher() %>
              %for annotation, lines in c.annotated_lines:
                ${sourceblock.render_annotation_lines(annotation, lines, color_hasher)}
              %endfor
            %else:
              %for line_num, tokens in enumerate(c.lines, 1):
                ${sourceblock.render_line(line_num, tokens)}
              %endfor
            %endif
          </table>
        </div>
        %endif
      %else:
        ${_('File is too big to display')} ${h.link_to(_('Show as raw'),
          h.url('files_raw_home',repo_name=c.repo_name,revision=c.commit.raw_id,f_path=c.f_path))}
      %endif
    %endif
  </div>
-</div>
\ No newline at end of file
+</div>
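
The template now hands the raw-file URL to h.render() as relative_url, presumably so that relative image and link references inside a rendered file resolve against the repository path rather than the application root. A hedged illustration of the resolution helper the new tests import; the inputs and expected outputs are copied from the parametrized cases further down, not re-verified here:

    from rhodecode.lib.markup_renderer import relative_path

    relative_path('./source.png', '/repo/files/path/file.md', lambda p: True)
    # -> '/repo/files/path/source.png'
    relative_path('https://google.com/image.png', 'files/path/file.md', None)
    # -> 'https://google.com/image.png'  (absolute URLs are left untouched)
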
@@ -1,179 +1,255 b''
# -*- coding: utf-8 -*-

# Copyright (C) 2010-2017 RhodeCode GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License, version 3
# (only), as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This program is dual-licensed. If you wish to learn more about the
# RhodeCode Enterprise Edition, including its added features, Support services,
# and proprietary license terms, please see https://rhodecode.com/licenses/

import pytest

-from rhodecode.lib.markup_renderer import MarkupRenderer, RstTemplateRenderer
+from rhodecode.lib.markup_renderer import (
+    MarkupRenderer, RstTemplateRenderer, relative_path, relative_links)


@pytest.mark.parametrize(
    "filename, expected_renderer",
    [
        ('readme.md', 'markdown'),
        ('readme.Md', 'markdown'),
        ('readme.MdoWn', 'markdown'),
        ('readme.rst', 'rst'),
        ('readme.Rst', 'rst'),
        ('readme.rest', 'rst'),
        ('readme.rest', 'rst'),

        ('markdown.xml', 'plain'),
        ('rest.xml', 'plain'),
        ('readme.xml', 'plain'),

        ('readme', 'plain'),
        ('README', 'plain'),
        ('readme.mdx', 'plain'),
        ('readme.rstx', 'plain'),
        ('readmex', 'plain'),
    ])
def test_detect_renderer(filename, expected_renderer):
    detected_renderer = MarkupRenderer()._detect_renderer(
        '', filename=filename).__name__
    assert expected_renderer == detected_renderer


def test_markdown_xss_link():
    xss_md = "[link](javascript:alert('XSS: pwned!'))"
    rendered_html = MarkupRenderer.markdown(xss_md)
    assert 'href="javascript:alert(\'XSS: pwned!\')"' not in rendered_html


def test_markdown_xss_inline_html():
    xss_md = '\n'.join([
        '> <a name="n"',
        '> href="javascript:alert(\'XSS: pwned!\')">link</a>'])
    rendered_html = MarkupRenderer.markdown(xss_md)
    assert 'href="javascript:alert(\'XSS: pwned!\')">' not in rendered_html


def test_markdown_inline_html():
    xss_md = '\n'.join(['> <a name="n"',
                        '> href="https://rhodecode.com">link</a>'])
    rendered_html = MarkupRenderer.markdown(xss_md)
    assert '[HTML_REMOVED]link[HTML_REMOVED]' in rendered_html


def test_rst_xss_link():
    xss_rst = "`Link<javascript:alert('XSS: pwned!')>`_"
    rendered_html = MarkupRenderer.rst(xss_rst)
    assert "href=javascript:alert('XSS: pwned!')" not in rendered_html


@pytest.mark.xfail(reason='Bug in docutils. Waiting answer from the author')
def test_rst_xss_inline_html():
    xss_rst = '<a href="javascript:alert(\'XSS: pwned!\')">link</a>'
    rendered_html = MarkupRenderer.rst(xss_rst)
    assert 'href="javascript:alert(' not in rendered_html


def test_rst_xss_raw_directive():
    xss_rst = '\n'.join([
        '.. raw:: html',
        '',
        ' <a href="javascript:alert(\'XSS: pwned!\')">link</a>'])
    rendered_html = MarkupRenderer.rst(xss_rst)
    assert 'href="javascript:alert(' not in rendered_html


def test_render_rst_template_without_files():
    expected = u'''\
Pull request updated. Auto status change to |under_review|

.. role:: added
.. role:: removed
.. parsed-literal::

  Changed commits:
    * :added:`2 added`
    * :removed:`3 removed`

  No file changes found

.. |under_review| replace:: *"NEW STATUS"*'''

    params = {
        'under_review_label': 'NEW STATUS',
        'added_commits': ['a', 'b'],
        'removed_commits': ['a', 'b', 'c'],
        'changed_files': [],
        'added_files': [],
        'modified_files': [],
        'removed_files': [],
    }
    renderer = RstTemplateRenderer()
    rendered = renderer.render('pull_request_update.mako', **params)
    assert expected == rendered


def test_render_rst_template_with_files():
    expected = u'''\
Pull request updated. Auto status change to |under_review|

.. role:: added
.. role:: removed
.. parsed-literal::

  Changed commits:
    * :added:`1 added`
    * :removed:`3 removed`

  Changed files:
    * `A /path/a.py <#a_c--68ed34923b68>`_
    * `A /path/b.js <#a_c--64f90608b607>`_
    * `M /path/d.js <#a_c--85842bf30c6e>`_
    * `M /path/ę.py <#a_c--d713adf009cd>`_
    * R /path/ź.py

.. |under_review| replace:: *"NEW STATUS"*'''

    added = ['/path/a.py', '/path/b.js']
    modified = ['/path/d.js', u'/path/ę.py']
    removed = [u'/path/ź.py']

    params = {
        'under_review_label': 'NEW STATUS',
        'added_commits': ['a'],
        'removed_commits': ['a', 'b', 'c'],
        'changed_files': added + modified + removed,
        'added_files': added,
        'modified_files': modified,
        'removed_files': removed,
    }
    renderer = RstTemplateRenderer()
    rendered = renderer.render('pull_request_update.mako', **params)

    assert expected == rendered


def test_render_rst_auto_status_template():
    expected = u'''\
Auto status change to |new_status|

.. |new_status| replace:: *"NEW STATUS"*'''

    params = {
        'new_status_label': 'NEW STATUS',
        'pull_request': None,
        'commit_id': None,
    }
    renderer = RstTemplateRenderer()
    rendered = renderer.render('auto_status_change.mako', **params)
    assert expected == rendered
+
+
+@pytest.mark.parametrize(
+    "src_path, server_path, is_path, expected",
+    [
+        ('source.png', '/repo/files/path', lambda p: False,
+         '/repo/files/path/source.png'),
+
+        ('source.png', 'mk/git/blob/master/README.md', lambda p: True,
+         '/mk/git/blob/master/source.png'),
+
+        ('./source.png', 'mk/git/blob/master/README.md', lambda p: True,
+         '/mk/git/blob/master/source.png'),
+
+        ('/source.png', 'mk/git/blob/master/README.md', lambda p: True,
+         '/mk/git/blob/master/source.png'),
+
+        ('./source.png', 'repo/files/path/source.md', lambda p: True,
+         '/repo/files/path/source.png'),
+
+        ('./source.png', '/repo/files/path/file.md', lambda p: True,
+         '/repo/files/path/source.png'),
+
+        ('../source.png', '/repo/files/path/file.md', lambda p: True,
+         '/repo/files/source.png'),
+
+        ('./../source.png', '/repo/files/path/file.md', lambda p: True,
+         '/repo/files/source.png'),
+
+        ('./source.png', '/repo/files/path/file.md', lambda p: True,
+         '/repo/files/path/source.png'),
+
+        ('../../../source.png', 'path/file.md', lambda p: True,
+         '/source.png'),
+
+        ('../../../../../source.png', '/path/file.md', None,
+         '/source.png'),
+
+        ('../../../../../source.png', 'files/path/file.md', None,
+         '/source.png'),
+
+        ('../../../../../https://google.com/image.png', 'files/path/file.md', None,
+         '/https://google.com/image.png'),
+
+        ('https://google.com/image.png', 'files/path/file.md', None,
+         'https://google.com/image.png'),
+
+        ('://foo', '/files/path/file.md', None,
+         '://foo'),
+
+        (u'한글.png', '/files/path/file.md', None,
+         u'/files/path/한글.png'),
+
+        ('my custom image.png', '/files/path/file.md', None,
+         '/files/path/my custom image.png'),
+    ])
+def test_relative_path(src_path, server_path, is_path, expected):
+    path = relative_path(src_path, server_path, is_path)
+    assert path == expected
+
+
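Two properties worth noting from the cases above: '..' traversal is clamped at the root, and inputs that already look like URLs are passed through. A hedged illustration reusing two rows from the table (the outputs are the expected values listed there, not re-verified):

    relative_path('../../../../../source.png', '/path/file.md', None)
    # -> '/source.png'   (cannot climb above the root)
    relative_path('://foo', '/files/path/file.md', None)
    # -> '://foo'        (scheme-like inputs are returned as-is)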
+
+@pytest.mark.parametrize(
+    "src_html, expected_html",
+    [
+        ('<div></div>', '<div></div>'),
+        ('<img src="/file.png"></img>', '<img src="/path/raw/file.png">'),
+        ('<img src="data:abcd"/>', '<img src="data:abcd">'),
+        ('<a href="/file.png"></a>', '<a href="/path/raw/file.png"></a>'),
+        ('<a href="#anchor"></a>', '<a href="#anchor"></a>'),
+        ('<a href="./README.md"></a>', '<a href="/path/raw/README.md"></a>'),
+        ('<a href="../README.md"></a>', '<a href="/path/README.md"></a>'),
+
+    ])
+def test_relative_links(src_html, expected_html):