readme/markup: improved order of generating readme files. Fixes #4050...
marcink
r396:2ba4c171 default
@@ -1,65 +1,35 @@
1 # -*- coding: utf-8 -*-
1 # -*- coding: utf-8 -*-
2
2
3 # Copyright (C) 2013-2016 RhodeCode GmbH
3 # Copyright (C) 2013-2016 RhodeCode GmbH
4 #
4 #
5 # This program is free software: you can redistribute it and/or modify
5 # This program is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU Affero General Public License, version 3
6 # it under the terms of the GNU Affero General Public License, version 3
7 # (only), as published by the Free Software Foundation.
7 # (only), as published by the Free Software Foundation.
8 #
8 #
9 # This program is distributed in the hope that it will be useful,
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU General Public License for more details.
12 # GNU General Public License for more details.
13 #
13 #
14 # You should have received a copy of the GNU Affero General Public License
14 # You should have received a copy of the GNU Affero General Public License
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 #
16 #
17 # This program is dual-licensed. If you wish to learn more about the
17 # This program is dual-licensed. If you wish to learn more about the
18 # RhodeCode Enterprise Edition, including its added features, Support services,
18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 # and proprietary license terms, please see https://rhodecode.com/licenses/
19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20
20
21 """
21 """
22 Various config settings for RhodeCode
22 Various config settings for RhodeCode
23 """
23 """
24 from rhodecode import EXTENSIONS
24 from rhodecode import EXTENSIONS
25
25
26 from rhodecode.lib.utils2 import __get_lem
26 from rhodecode.lib.utils2 import __get_lem
27
27
28
28
29 # language map is also used by whoosh indexer, which for those specified
29 # language map is also used by whoosh indexer, which for those specified
30 # extensions will index it's content
30 # extensions will index it's content
31 LANGUAGES_EXTENSIONS_MAP = __get_lem()
31 LANGUAGES_EXTENSIONS_MAP = __get_lem()
32
32
33 # list of readme files to search in file tree and display in summary
34 # attached weights defines the search order lower is first
35 ALL_READMES = [
36 ('readme', 0), ('README', 0), ('Readme', 0),
37 ('doc/readme', 1), ('doc/README', 1), ('doc/Readme', 1),
38 ('Docs/readme', 2), ('Docs/README', 2), ('Docs/Readme', 2),
39 ('DOCS/readme', 2), ('DOCS/README', 2), ('DOCS/Readme', 2),
40 ('docs/readme', 2), ('docs/README', 2), ('docs/Readme', 2),
41 ]
42
43 # extension together with weights to search lower is first
44 RST_EXTS = [
45 ('', 0), ('.rst', 1), ('.rest', 1),
46 ('.RST', 2), ('.REST', 2)
47 ]
48
49 MARKDOWN_EXTS = [
50 ('.md', 1), ('.MD', 1),
51 ('.mkdn', 2), ('.MKDN', 2),
52 ('.mdown', 3), ('.MDOWN', 3),
53 ('.markdown', 4), ('.MARKDOWN', 4)
54 ]
55
56 PLAIN_EXTS = [
57 ('.text', 2), ('.TEXT', 2),
58 ('.txt', 3), ('.TXT', 3)
59 ]
60
61 ALL_EXTS = MARKDOWN_EXTS + RST_EXTS + PLAIN_EXTS
62
63 DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"
33 DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"
64
34
65 DATE_FORMAT = "%Y-%m-%d"
35 DATE_FORMAT = "%Y-%m-%d"
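The constants removed above (ALL_READMES, RST_EXTS, MARKDOWN_EXTS, PLAIN_EXTS and the combined ALL_EXTS) previously drove the README lookup in the summary controller: every candidate name was paired with every candidate extension and the results were sorted by the sum of their weights, lower first. A minimal sketch of that removed ordering logic, with the weight lists trimmed for brevity (it mirrors the README_FILES list comprehension deleted from the controller in the next hunk):

# Sketch of the ordering the removed constants used to produce; the weight
# lists here are shortened examples taken from the deleted definitions above.
from itertools import product

ALL_READMES = [('readme', 0), ('README', 0), ('doc/readme', 1)]
ALL_EXTS = [('', 0), ('.md', 1), ('.rst', 1)]

# Pair every name with every extension and sort by the summed weights, so a
# plain 'readme' in the repository root is tried before 'doc/readme.rst'.
README_FILES = [name + ext
                for (name, name_w), (ext, ext_w) in sorted(
                    product(ALL_READMES, ALL_EXTS),
                    key=lambda pair: pair[0][1] + pair[1][1])]

print(README_FILES)  # ['readme', 'README', 'readme.md', 'readme.rst', ...]

Because the order was a fixed cartesian product, it could not take the instance's default renderer into account, which is what this changeset addresses.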
@@ -1,301 +1,296 @@
1 # -*- coding: utf-8 -*-
1 # -*- coding: utf-8 -*-
2
2
3 # Copyright (C) 2010-2016 RhodeCode GmbH
3 # Copyright (C) 2010-2016 RhodeCode GmbH
4 #
4 #
5 # This program is free software: you can redistribute it and/or modify
5 # This program is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU Affero General Public License, version 3
6 # it under the terms of the GNU Affero General Public License, version 3
7 # (only), as published by the Free Software Foundation.
7 # (only), as published by the Free Software Foundation.
8 #
8 #
9 # This program is distributed in the hope that it will be useful,
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU General Public License for more details.
12 # GNU General Public License for more details.
13 #
13 #
14 # You should have received a copy of the GNU Affero General Public License
14 # You should have received a copy of the GNU Affero General Public License
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 #
16 #
17 # This program is dual-licensed. If you wish to learn more about the
17 # This program is dual-licensed. If you wish to learn more about the
18 # RhodeCode Enterprise Edition, including its added features, Support services,
18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 # and proprietary license terms, please see https://rhodecode.com/licenses/
19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20
20
21 """
21 """
22 Summary controller for RhodeCode Enterprise
22 Summary controller for RhodeCode Enterprise
23 """
23 """
24
24
25 import logging
25 import logging
26 from string import lower
26 from string import lower
27 from itertools import product
28
27
29 from pylons import tmpl_context as c, request
28 from pylons import tmpl_context as c, request
30 from pylons.i18n.translation import _
29 from pylons.i18n.translation import _
31 from beaker.cache import cache_region, region_invalidate
30 from beaker.cache import cache_region, region_invalidate
32
31
33 from rhodecode.config.conf import (
32 from rhodecode.config.conf import (LANGUAGES_EXTENSIONS_MAP)
34 ALL_READMES, ALL_EXTS, LANGUAGES_EXTENSIONS_MAP)
35 from rhodecode.controllers import utils
33 from rhodecode.controllers import utils
36 from rhodecode.controllers.changelog import _load_changelog_summary
34 from rhodecode.controllers.changelog import _load_changelog_summary
37 from rhodecode.lib import caches, helpers as h
35 from rhodecode.lib import caches, helpers as h
38 from rhodecode.lib.utils import jsonify
36 from rhodecode.lib.utils import jsonify
39 from rhodecode.lib.utils2 import safe_str
37 from rhodecode.lib.utils2 import safe_str
40 from rhodecode.lib.auth import (
38 from rhodecode.lib.auth import (
41 LoginRequired, HasRepoPermissionAnyDecorator, NotAnonymous, XHRRequired)
39 LoginRequired, HasRepoPermissionAnyDecorator, NotAnonymous, XHRRequired)
42 from rhodecode.lib.base import BaseRepoController, render
40 from rhodecode.lib.base import BaseRepoController, render
43 from rhodecode.lib.markup_renderer import MarkupRenderer
41 from rhodecode.lib.markup_renderer import MarkupRenderer
44 from rhodecode.lib.ext_json import json
42 from rhodecode.lib.ext_json import json
45 from rhodecode.lib.vcs.backends.base import EmptyCommit
43 from rhodecode.lib.vcs.backends.base import EmptyCommit
46 from rhodecode.lib.vcs.exceptions import (
44 from rhodecode.lib.vcs.exceptions import (
47 CommitError, EmptyRepositoryError, NodeDoesNotExistError)
45 CommitError, EmptyRepositoryError, NodeDoesNotExistError)
48 from rhodecode.model.db import Statistics, CacheKey, User
46 from rhodecode.model.db import Statistics, CacheKey, User
49
47
50 log = logging.getLogger(__name__)
48 log = logging.getLogger(__name__)
51
49
52 README_FILES = [''.join([x[0][0], x[1][0]])
53 for x in sorted(list(product(ALL_READMES, ALL_EXTS)),
54 key=lambda y:y[0][1] + y[1][1])]
55
56
50
57 class SummaryController(BaseRepoController):
51 class SummaryController(BaseRepoController):
58
52
59 def __before__(self):
53 def __before__(self):
60 super(SummaryController, self).__before__()
54 super(SummaryController, self).__before__()
61
55
62 def __get_readme_data(self, db_repo):
56 def __get_readme_data(self, db_repo):
63 repo_name = db_repo.repo_name
57 repo_name = db_repo.repo_name
64 log.debug('Looking for README file')
58 log.debug('Looking for README file')
59 default_renderer = c.visual.default_renderer
65
60
66 @cache_region('long_term')
61 @cache_region('long_term')
67 def _generate_readme(cache_key):
62 def _generate_readme(cache_key):
68 readme_data = None
63 readme_data = None
69 readme_file = None
64 readme_file = None
70 try:
65 try:
71 # gets the landing revision or tip if fails
66 # gets the landing revision or tip if fails
72 commit = db_repo.get_landing_commit()
67 commit = db_repo.get_landing_commit()
73 if isinstance(commit, EmptyCommit):
68 if isinstance(commit, EmptyCommit):
74 raise EmptyRepositoryError()
69 raise EmptyRepositoryError()
75 renderer = MarkupRenderer()
70 renderer = MarkupRenderer()
76 for f in README_FILES:
71 for f in renderer.pick_readme_order(default_renderer):
77 try:
72 try:
78 node = commit.get_node(f)
73 node = commit.get_node(f)
79 except NodeDoesNotExistError:
74 except NodeDoesNotExistError:
80 continue
75 continue
81
76
82 if not node.is_file():
77 if not node.is_file():
83 continue
78 continue
84
79
85 readme_file = f
80 readme_file = f
86 log.debug('Found README file `%s` rendering...',
81 log.debug('Found README file `%s` rendering...',
87 readme_file)
82 readme_file)
88 readme_data = renderer.render(node.content,
83 readme_data = renderer.render(node.content,
89 filename=f)
84 filename=f)
90 break
85 break
91 except CommitError:
86 except CommitError:
92 log.exception("Problem getting commit")
87 log.exception("Problem getting commit")
93 pass
88 pass
94 except EmptyRepositoryError:
89 except EmptyRepositoryError:
95 pass
90 pass
96 except Exception:
91 except Exception:
97 log.exception("General failure")
92 log.exception("General failure")
98
93
99 return readme_data, readme_file
94 return readme_data, readme_file
100
95
101 invalidator_context = CacheKey.repo_context_cache(
96 invalidator_context = CacheKey.repo_context_cache(
102 _generate_readme, repo_name, CacheKey.CACHE_TYPE_README)
97 _generate_readme, repo_name, CacheKey.CACHE_TYPE_README)
103
98
104 with invalidator_context as context:
99 with invalidator_context as context:
105 context.invalidate()
100 context.invalidate()
106 computed = context.compute()
101 computed = context.compute()
107
102
108 return computed
103 return computed
109
104
110
105
111 @LoginRequired()
106 @LoginRequired()
112 @HasRepoPermissionAnyDecorator(
107 @HasRepoPermissionAnyDecorator(
113 'repository.read', 'repository.write', 'repository.admin')
108 'repository.read', 'repository.write', 'repository.admin')
114 def index(self, repo_name):
109 def index(self, repo_name):
115 username = ''
110 username = ''
116 if c.rhodecode_user.username != User.DEFAULT_USER:
111 if c.rhodecode_user.username != User.DEFAULT_USER:
117 username = safe_str(c.rhodecode_user.username)
112 username = safe_str(c.rhodecode_user.username)
118
113
119 _def_clone_uri = _def_clone_uri_by_id = c.clone_uri_tmpl
114 _def_clone_uri = _def_clone_uri_by_id = c.clone_uri_tmpl
120 if '{repo}' in _def_clone_uri:
115 if '{repo}' in _def_clone_uri:
121 _def_clone_uri_by_id = _def_clone_uri.replace(
116 _def_clone_uri_by_id = _def_clone_uri.replace(
122 '{repo}', '_{repoid}')
117 '{repo}', '_{repoid}')
123 elif '{repoid}' in _def_clone_uri:
118 elif '{repoid}' in _def_clone_uri:
124 _def_clone_uri_by_id = _def_clone_uri.replace(
119 _def_clone_uri_by_id = _def_clone_uri.replace(
125 '_{repoid}', '{repo}')
120 '_{repoid}', '{repo}')
126
121
127 c.clone_repo_url = c.rhodecode_db_repo.clone_url(
122 c.clone_repo_url = c.rhodecode_db_repo.clone_url(
128 user=username, uri_tmpl=_def_clone_uri)
123 user=username, uri_tmpl=_def_clone_uri)
129 c.clone_repo_url_id = c.rhodecode_db_repo.clone_url(
124 c.clone_repo_url_id = c.rhodecode_db_repo.clone_url(
130 user=username, uri_tmpl=_def_clone_uri_by_id)
125 user=username, uri_tmpl=_def_clone_uri_by_id)
131
126
132 c.show_stats = bool(c.rhodecode_db_repo.enable_statistics)
127 c.show_stats = bool(c.rhodecode_db_repo.enable_statistics)
133
128
134 stats = self.sa.query(Statistics)\
129 stats = self.sa.query(Statistics)\
135 .filter(Statistics.repository == c.rhodecode_db_repo)\
130 .filter(Statistics.repository == c.rhodecode_db_repo)\
136 .scalar()
131 .scalar()
137
132
138 c.stats_percentage = 0
133 c.stats_percentage = 0
139
134
140 if stats and stats.languages:
135 if stats and stats.languages:
141 c.no_data = False is c.rhodecode_db_repo.enable_statistics
136 c.no_data = False is c.rhodecode_db_repo.enable_statistics
142 lang_stats_d = json.loads(stats.languages)
137 lang_stats_d = json.loads(stats.languages)
143
138
144 # Sort first by decreasing count and second by the file extension,
139 # Sort first by decreasing count and second by the file extension,
145 # so we have a consistent output.
140 # so we have a consistent output.
146 lang_stats_items = sorted(lang_stats_d.iteritems(),
141 lang_stats_items = sorted(lang_stats_d.iteritems(),
147 key=lambda k: (-k[1], k[0]))[:10]
142 key=lambda k: (-k[1], k[0]))[:10]
148 lang_stats = [(x, {"count": y,
143 lang_stats = [(x, {"count": y,
149 "desc": LANGUAGES_EXTENSIONS_MAP.get(x)})
144 "desc": LANGUAGES_EXTENSIONS_MAP.get(x)})
150 for x, y in lang_stats_items]
145 for x, y in lang_stats_items]
151
146
152 c.trending_languages = json.dumps(lang_stats)
147 c.trending_languages = json.dumps(lang_stats)
153 else:
148 else:
154 c.no_data = True
149 c.no_data = True
155 c.trending_languages = json.dumps({})
150 c.trending_languages = json.dumps({})
156
151
157 c.enable_downloads = c.rhodecode_db_repo.enable_downloads
152 c.enable_downloads = c.rhodecode_db_repo.enable_downloads
158 c.repository_followers = self.scm_model.get_followers(
153 c.repository_followers = self.scm_model.get_followers(
159 c.rhodecode_db_repo)
154 c.rhodecode_db_repo)
160 c.repository_forks = self.scm_model.get_forks(c.rhodecode_db_repo)
155 c.repository_forks = self.scm_model.get_forks(c.rhodecode_db_repo)
161 c.repository_is_user_following = self.scm_model.is_following_repo(
156 c.repository_is_user_following = self.scm_model.is_following_repo(
162 c.repo_name, c.rhodecode_user.user_id)
157 c.repo_name, c.rhodecode_user.user_id)
163
158
164 if c.repository_requirements_missing:
159 if c.repository_requirements_missing:
165 return render('summary/missing_requirements.html')
160 return render('summary/missing_requirements.html')
166
161
167 c.readme_data, c.readme_file = \
162 c.readme_data, c.readme_file = \
168 self.__get_readme_data(c.rhodecode_db_repo)
163 self.__get_readme_data(c.rhodecode_db_repo)
169
164
170 _load_changelog_summary()
165 _load_changelog_summary()
171
166
172 if request.is_xhr:
167 if request.is_xhr:
173 return render('changelog/changelog_summary_data.html')
168 return render('changelog/changelog_summary_data.html')
174
169
175 return render('summary/summary.html')
170 return render('summary/summary.html')
176
171
177 @LoginRequired()
172 @LoginRequired()
178 @XHRRequired()
173 @XHRRequired()
179 @HasRepoPermissionAnyDecorator(
174 @HasRepoPermissionAnyDecorator(
180 'repository.read', 'repository.write', 'repository.admin')
175 'repository.read', 'repository.write', 'repository.admin')
181 @jsonify
176 @jsonify
182 def repo_stats(self, repo_name, commit_id):
177 def repo_stats(self, repo_name, commit_id):
183 _namespace = caches.get_repo_namespace_key(
178 _namespace = caches.get_repo_namespace_key(
184 caches.SUMMARY_STATS, repo_name)
179 caches.SUMMARY_STATS, repo_name)
185 show_stats = bool(c.rhodecode_db_repo.enable_statistics)
180 show_stats = bool(c.rhodecode_db_repo.enable_statistics)
186 cache_manager = caches.get_cache_manager('repo_cache_long', _namespace)
181 cache_manager = caches.get_cache_manager('repo_cache_long', _namespace)
187 _cache_key = caches.compute_key_from_params(
182 _cache_key = caches.compute_key_from_params(
188 repo_name, commit_id, show_stats)
183 repo_name, commit_id, show_stats)
189
184
190 def compute_stats():
185 def compute_stats():
191 code_stats = {}
186 code_stats = {}
192 size = 0
187 size = 0
193 try:
188 try:
194 scm_instance = c.rhodecode_db_repo.scm_instance()
189 scm_instance = c.rhodecode_db_repo.scm_instance()
195 commit = scm_instance.get_commit(commit_id)
190 commit = scm_instance.get_commit(commit_id)
196
191
197 for node in commit.get_filenodes_generator():
192 for node in commit.get_filenodes_generator():
198 size += node.size
193 size += node.size
199 if not show_stats:
194 if not show_stats:
200 continue
195 continue
201 ext = lower(node.extension)
196 ext = lower(node.extension)
202 ext_info = LANGUAGES_EXTENSIONS_MAP.get(ext)
197 ext_info = LANGUAGES_EXTENSIONS_MAP.get(ext)
203 if ext_info:
198 if ext_info:
204 if ext in code_stats:
199 if ext in code_stats:
205 code_stats[ext]['count'] += 1
200 code_stats[ext]['count'] += 1
206 else:
201 else:
207 code_stats[ext] = {"count": 1, "desc": ext_info}
202 code_stats[ext] = {"count": 1, "desc": ext_info}
208 except EmptyRepositoryError:
203 except EmptyRepositoryError:
209 pass
204 pass
210 return {'size': h.format_byte_size_binary(size),
205 return {'size': h.format_byte_size_binary(size),
211 'code_stats': code_stats}
206 'code_stats': code_stats}
212
207
213 stats = cache_manager.get(_cache_key, createfunc=compute_stats)
208 stats = cache_manager.get(_cache_key, createfunc=compute_stats)
214 return stats
209 return stats
215
210
216 def _switcher_reference_data(self, repo_name, references, is_svn):
211 def _switcher_reference_data(self, repo_name, references, is_svn):
217 """Prepare reference data for given `references`"""
212 """Prepare reference data for given `references`"""
218 items = []
213 items = []
219 for name, commit_id in references.items():
214 for name, commit_id in references.items():
220 use_commit_id = '/' in name or is_svn
215 use_commit_id = '/' in name or is_svn
221 items.append({
216 items.append({
222 'name': name,
217 'name': name,
223 'commit_id': commit_id,
218 'commit_id': commit_id,
224 'files_url': h.url(
219 'files_url': h.url(
225 'files_home',
220 'files_home',
226 repo_name=repo_name,
221 repo_name=repo_name,
227 f_path=name if is_svn else '',
222 f_path=name if is_svn else '',
228 revision=commit_id if use_commit_id else name,
223 revision=commit_id if use_commit_id else name,
229 at=name)
224 at=name)
230 })
225 })
231 return items
226 return items
232
227
233 @LoginRequired()
228 @LoginRequired()
234 @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
229 @HasRepoPermissionAnyDecorator('repository.read', 'repository.write',
235 'repository.admin')
230 'repository.admin')
236 @jsonify
231 @jsonify
237 def repo_refs_data(self, repo_name):
232 def repo_refs_data(self, repo_name):
238 repo = c.rhodecode_repo
233 repo = c.rhodecode_repo
239 refs_to_create = [
234 refs_to_create = [
240 (_("Branch"), repo.branches, 'branch'),
235 (_("Branch"), repo.branches, 'branch'),
241 (_("Tag"), repo.tags, 'tag'),
236 (_("Tag"), repo.tags, 'tag'),
242 (_("Bookmark"), repo.bookmarks, 'book'),
237 (_("Bookmark"), repo.bookmarks, 'book'),
243 ]
238 ]
244 res = self._create_reference_data(repo, refs_to_create)
239 res = self._create_reference_data(repo, refs_to_create)
245 data = {
240 data = {
246 'more': False,
241 'more': False,
247 'results': res
242 'results': res
248 }
243 }
249 return data
244 return data
250
245
251 @jsonify
246 @jsonify
252 def repo_refs_changelog_data(self, repo_name):
247 def repo_refs_changelog_data(self, repo_name):
253 repo = c.rhodecode_repo
248 repo = c.rhodecode_repo
254
249
255 refs_to_create = [
250 refs_to_create = [
256 (_("Branches"), repo.branches, 'branch'),
251 (_("Branches"), repo.branches, 'branch'),
257 (_("Closed branches"), repo.branches_closed, 'branch_closed'),
252 (_("Closed branches"), repo.branches_closed, 'branch_closed'),
258 # TODO: enable when vcs can handle bookmarks filters
253 # TODO: enable when vcs can handle bookmarks filters
259 # (_("Bookmarks"), repo.bookmarks, "book"),
254 # (_("Bookmarks"), repo.bookmarks, "book"),
260 ]
255 ]
261 res = self._create_reference_data(repo, refs_to_create)
256 res = self._create_reference_data(repo, refs_to_create)
262 data = {
257 data = {
263 'more': False,
258 'more': False,
264 'results': res
259 'results': res
265 }
260 }
266 return data
261 return data
267
262
268 def _create_reference_data(self, repo, refs_to_create):
263 def _create_reference_data(self, repo, refs_to_create):
269 format_ref_id = utils.get_format_ref_id(repo)
264 format_ref_id = utils.get_format_ref_id(repo)
270
265
271 result = []
266 result = []
272 for title, refs, ref_type in refs_to_create:
267 for title, refs, ref_type in refs_to_create:
273 if refs:
268 if refs:
274 result.append({
269 result.append({
275 'text': title,
270 'text': title,
276 'children': self._create_reference_items(
271 'children': self._create_reference_items(
277 repo, refs, ref_type, format_ref_id),
272 repo, refs, ref_type, format_ref_id),
278 })
273 })
279 return result
274 return result
280
275
281 def _create_reference_items(self, repo, refs, ref_type, format_ref_id):
276 def _create_reference_items(self, repo, refs, ref_type, format_ref_id):
282 result = []
277 result = []
283 is_svn = h.is_svn(repo)
278 is_svn = h.is_svn(repo)
284 for name, raw_id in refs.iteritems():
279 for name, raw_id in refs.iteritems():
285 result.append({
280 result.append({
286 'text': name,
281 'text': name,
287 'id': format_ref_id(name, raw_id),
282 'id': format_ref_id(name, raw_id),
288 'raw_id': raw_id,
283 'raw_id': raw_id,
289 'type': ref_type,
284 'type': ref_type,
290 'files_url': self._create_files_url(repo, name, raw_id, is_svn)
285 'files_url': self._create_files_url(repo, name, raw_id, is_svn)
291 })
286 })
292 return result
287 return result
293
288
294 def _create_files_url(self, repo, name, raw_id, is_svn):
289 def _create_files_url(self, repo, name, raw_id, is_svn):
295 use_commit_id = '/' in name or is_svn
290 use_commit_id = '/' in name or is_svn
296 return h.url(
291 return h.url(
297 'files_home',
292 'files_home',
298 repo_name=repo.name,
293 repo_name=repo.name,
299 f_path=name if is_svn else '',
294 f_path=name if is_svn else '',
300 revision=raw_id if use_commit_id else name,
295 revision=raw_id if use_commit_id else name,
301 at=name)
296 at=name)
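In the new version the controller asks the renderer for the search order instead: renderer.pick_readme_order(default_renderer), where default_renderer comes from c.visual.default_renderer. The body of pick_readme_order lives in rhodecode.lib.markup_renderer and is not part of this changeset, so the following is only a hedged sketch of the idea the call site suggests: try the extensions matching the instance's default renderer first, then fall back to the other markups. All names and the exact ordering below are assumptions for illustration.

# Hypothetical sketch only -- MarkupRenderer.pick_readme_order is defined in
# rhodecode.lib.markup_renderer, whose implementation is not shown in this
# diff; the ordering and extension lists below are illustrative assumptions.
MARKDOWN_EXTS = ['.md', '.mkdn', '.mdown', '.markdown']
RST_EXTS = ['.rst', '.rest', '']
PLAIN_EXTS = ['.txt', '.text']

def pick_readme_order(default_renderer):
    """Return README candidates, trying the default renderer's markup first."""
    if default_renderer == 'markdown':
        exts = MARKDOWN_EXTS + RST_EXTS + PLAIN_EXTS
    else:  # assume anything else prefers reStructuredText
        exts = RST_EXTS + MARKDOWN_EXTS + PLAIN_EXTS
    return [name + ext
            for ext in exts
            for name in ('README', 'readme', 'Readme')]

Whatever the real ordering is, the loop in _generate_readme above only relies on getting back an iterable of candidate paths to probe with commit.get_node(), rendering the first one that exists and is a file.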
@@ -1,1898 +1,1888 @@
1 # -*- coding: utf-8 -*-
1 # -*- coding: utf-8 -*-
2
2
3 # Copyright (C) 2010-2016 RhodeCode GmbH
3 # Copyright (C) 2010-2016 RhodeCode GmbH
4 #
4 #
5 # This program is free software: you can redistribute it and/or modify
5 # This program is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU Affero General Public License, version 3
6 # it under the terms of the GNU Affero General Public License, version 3
7 # (only), as published by the Free Software Foundation.
7 # (only), as published by the Free Software Foundation.
8 #
8 #
9 # This program is distributed in the hope that it will be useful,
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU General Public License for more details.
12 # GNU General Public License for more details.
13 #
13 #
14 # You should have received a copy of the GNU Affero General Public License
14 # You should have received a copy of the GNU Affero General Public License
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 #
16 #
17 # This program is dual-licensed. If you wish to learn more about the
17 # This program is dual-licensed. If you wish to learn more about the
18 # RhodeCode Enterprise Edition, including its added features, Support services,
18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 # and proprietary license terms, please see https://rhodecode.com/licenses/
19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20
20
21 """
21 """
22 Helper functions
22 Helper functions
23
23
24 Consists of functions to typically be used within templates, but also
24 Consists of functions to typically be used within templates, but also
25 available to Controllers. This module is available to both as 'h'.
25 available to Controllers. This module is available to both as 'h'.
26 """
26 """
27
27
28 import random
28 import random
29 import hashlib
29 import hashlib
30 import StringIO
30 import StringIO
31 import urllib
31 import urllib
32 import math
32 import math
33 import logging
33 import logging
34 import re
34 import re
35 import urlparse
35 import urlparse
36 import time
36 import time
37 import string
37 import string
38 import hashlib
38 import hashlib
39 import pygments
39 import pygments
40
40
41 from datetime import datetime
41 from datetime import datetime
42 from functools import partial
42 from functools import partial
43 from pygments.formatters.html import HtmlFormatter
43 from pygments.formatters.html import HtmlFormatter
44 from pygments import highlight as code_highlight
44 from pygments import highlight as code_highlight
45 from pygments.lexers import (
45 from pygments.lexers import (
46 get_lexer_by_name, get_lexer_for_filename, get_lexer_for_mimetype)
46 get_lexer_by_name, get_lexer_for_filename, get_lexer_for_mimetype)
47 from pylons import url
47 from pylons import url
48 from pylons.i18n.translation import _, ungettext
48 from pylons.i18n.translation import _, ungettext
49 from pyramid.threadlocal import get_current_request
49 from pyramid.threadlocal import get_current_request
50
50
51 from webhelpers.html import literal, HTML, escape
51 from webhelpers.html import literal, HTML, escape
52 from webhelpers.html.tools import *
52 from webhelpers.html.tools import *
53 from webhelpers.html.builder import make_tag
53 from webhelpers.html.builder import make_tag
54 from webhelpers.html.tags import auto_discovery_link, checkbox, css_classes, \
54 from webhelpers.html.tags import auto_discovery_link, checkbox, css_classes, \
55 end_form, file, form as wh_form, hidden, image, javascript_link, link_to, \
55 end_form, file, form as wh_form, hidden, image, javascript_link, link_to, \
56 link_to_if, link_to_unless, ol, required_legend, select, stylesheet_link, \
56 link_to_if, link_to_unless, ol, required_legend, select, stylesheet_link, \
57 submit, text, password, textarea, title, ul, xml_declaration, radio
57 submit, text, password, textarea, title, ul, xml_declaration, radio
58 from webhelpers.html.tools import auto_link, button_to, highlight, \
58 from webhelpers.html.tools import auto_link, button_to, highlight, \
59 js_obfuscate, mail_to, strip_links, strip_tags, tag_re
59 js_obfuscate, mail_to, strip_links, strip_tags, tag_re
60 from webhelpers.pylonslib import Flash as _Flash
60 from webhelpers.pylonslib import Flash as _Flash
61 from webhelpers.text import chop_at, collapse, convert_accented_entities, \
61 from webhelpers.text import chop_at, collapse, convert_accented_entities, \
62 convert_misc_entities, lchop, plural, rchop, remove_formatting, \
62 convert_misc_entities, lchop, plural, rchop, remove_formatting, \
63 replace_whitespace, urlify, truncate, wrap_paragraphs
63 replace_whitespace, urlify, truncate, wrap_paragraphs
64 from webhelpers.date import time_ago_in_words
64 from webhelpers.date import time_ago_in_words
65 from webhelpers.paginate import Page as _Page
65 from webhelpers.paginate import Page as _Page
66 from webhelpers.html.tags import _set_input_attrs, _set_id_attr, \
66 from webhelpers.html.tags import _set_input_attrs, _set_id_attr, \
67 convert_boolean_attrs, NotGiven, _make_safe_id_component
67 convert_boolean_attrs, NotGiven, _make_safe_id_component
68 from webhelpers2.number import format_byte_size
68 from webhelpers2.number import format_byte_size
69
69
70 from rhodecode.lib.annotate import annotate_highlight
70 from rhodecode.lib.annotate import annotate_highlight
71 from rhodecode.lib.action_parser import action_parser
71 from rhodecode.lib.action_parser import action_parser
72 from rhodecode.lib.ext_json import json
72 from rhodecode.lib.ext_json import json
73 from rhodecode.lib.utils import repo_name_slug, get_custom_lexer
73 from rhodecode.lib.utils import repo_name_slug, get_custom_lexer
74 from rhodecode.lib.utils2 import str2bool, safe_unicode, safe_str, \
74 from rhodecode.lib.utils2 import str2bool, safe_unicode, safe_str, \
75 get_commit_safe, datetime_to_time, time_to_datetime, time_to_utcdatetime, \
75 get_commit_safe, datetime_to_time, time_to_datetime, time_to_utcdatetime, \
76 AttributeDict, safe_int, md5, md5_safe
76 AttributeDict, safe_int, md5, md5_safe
77 from rhodecode.lib.markup_renderer import MarkupRenderer
77 from rhodecode.lib.markup_renderer import MarkupRenderer
78 from rhodecode.lib.vcs.exceptions import CommitDoesNotExistError
78 from rhodecode.lib.vcs.exceptions import CommitDoesNotExistError
79 from rhodecode.lib.vcs.backends.base import BaseChangeset, EmptyCommit
79 from rhodecode.lib.vcs.backends.base import BaseChangeset, EmptyCommit
80 from rhodecode.config.conf import DATE_FORMAT, DATETIME_FORMAT
80 from rhodecode.config.conf import DATE_FORMAT, DATETIME_FORMAT
81 from rhodecode.model.changeset_status import ChangesetStatusModel
81 from rhodecode.model.changeset_status import ChangesetStatusModel
82 from rhodecode.model.db import Permission, User, Repository
82 from rhodecode.model.db import Permission, User, Repository
83 from rhodecode.model.repo_group import RepoGroupModel
83 from rhodecode.model.repo_group import RepoGroupModel
84 from rhodecode.model.settings import IssueTrackerSettingsModel
84 from rhodecode.model.settings import IssueTrackerSettingsModel
85
85
86 log = logging.getLogger(__name__)
86 log = logging.getLogger(__name__)
87
87
88 DEFAULT_USER = User.DEFAULT_USER
88 DEFAULT_USER = User.DEFAULT_USER
89 DEFAULT_USER_EMAIL = User.DEFAULT_USER_EMAIL
89 DEFAULT_USER_EMAIL = User.DEFAULT_USER_EMAIL
90
90
91
91
92 def html_escape(text, html_escape_table=None):
92 def html_escape(text, html_escape_table=None):
93 """Produce entities within text."""
93 """Produce entities within text."""
94 if not html_escape_table:
94 if not html_escape_table:
95 html_escape_table = {
95 html_escape_table = {
96 "&": "&amp;",
96 "&": "&amp;",
97 '"': "&quot;",
97 '"': "&quot;",
98 "'": "&apos;",
98 "'": "&apos;",
99 ">": "&gt;",
99 ">": "&gt;",
100 "<": "&lt;",
100 "<": "&lt;",
101 }
101 }
102 return "".join(html_escape_table.get(c, c) for c in text)
102 return "".join(html_escape_table.get(c, c) for c in text)
103
103
104
104
105 def chop_at_smart(s, sub, inclusive=False, suffix_if_chopped=None):
105 def chop_at_smart(s, sub, inclusive=False, suffix_if_chopped=None):
106 """
106 """
107 Truncate string ``s`` at the first occurrence of ``sub``.
107 Truncate string ``s`` at the first occurrence of ``sub``.
108
108
109 If ``inclusive`` is true, truncate just after ``sub`` rather than at it.
109 If ``inclusive`` is true, truncate just after ``sub`` rather than at it.
110 """
110 """
111 suffix_if_chopped = suffix_if_chopped or ''
111 suffix_if_chopped = suffix_if_chopped or ''
112 pos = s.find(sub)
112 pos = s.find(sub)
113 if pos == -1:
113 if pos == -1:
114 return s
114 return s
115
115
116 if inclusive:
116 if inclusive:
117 pos += len(sub)
117 pos += len(sub)
118
118
119 chopped = s[:pos]
119 chopped = s[:pos]
120 left = s[pos:].strip()
120 left = s[pos:].strip()
121
121
122 if left and suffix_if_chopped:
122 if left and suffix_if_chopped:
123 chopped += suffix_if_chopped
123 chopped += suffix_if_chopped
124
124
125 return chopped
125 return chopped
126
126
127
127
128 def shorter(text, size=20):
128 def shorter(text, size=20):
129 postfix = '...'
129 postfix = '...'
130 if len(text) > size:
130 if len(text) > size:
131 return text[:size - len(postfix)] + postfix
131 return text[:size - len(postfix)] + postfix
132 return text
132 return text
133
133
134
134
135 def _reset(name, value=None, id=NotGiven, type="reset", **attrs):
135 def _reset(name, value=None, id=NotGiven, type="reset", **attrs):
136 """
136 """
137 Reset button
137 Reset button
138 """
138 """
139 _set_input_attrs(attrs, type, name, value)
139 _set_input_attrs(attrs, type, name, value)
140 _set_id_attr(attrs, id, name)
140 _set_id_attr(attrs, id, name)
141 convert_boolean_attrs(attrs, ["disabled"])
141 convert_boolean_attrs(attrs, ["disabled"])
142 return HTML.input(**attrs)
142 return HTML.input(**attrs)
143
143
144 reset = _reset
144 reset = _reset
145 safeid = _make_safe_id_component
145 safeid = _make_safe_id_component
146
146
147
147
148 def branding(name, length=40):
148 def branding(name, length=40):
149 return truncate(name, length, indicator="")
149 return truncate(name, length, indicator="")
150
150
151
151
152 def FID(raw_id, path):
152 def FID(raw_id, path):
153 """
153 """
154 Creates a unique ID for filenode based on it's hash of path and commit
154 Creates a unique ID for filenode based on it's hash of path and commit
155 it's safe to use in urls
155 it's safe to use in urls
156
156
157 :param raw_id:
157 :param raw_id:
158 :param path:
158 :param path:
159 """
159 """
160
160
161 return 'c-%s-%s' % (short_id(raw_id), md5_safe(path)[:12])
161 return 'c-%s-%s' % (short_id(raw_id), md5_safe(path)[:12])
162
162
163
163
164 class _GetError(object):
164 class _GetError(object):
165 """Get error from form_errors, and represent it as span wrapped error
165 """Get error from form_errors, and represent it as span wrapped error
166 message
166 message
167
167
168 :param field_name: field to fetch errors for
168 :param field_name: field to fetch errors for
169 :param form_errors: form errors dict
169 :param form_errors: form errors dict
170 """
170 """
171
171
172 def __call__(self, field_name, form_errors):
172 def __call__(self, field_name, form_errors):
173 tmpl = """<span class="error_msg">%s</span>"""
173 tmpl = """<span class="error_msg">%s</span>"""
174 if form_errors and field_name in form_errors:
174 if form_errors and field_name in form_errors:
175 return literal(tmpl % form_errors.get(field_name))
175 return literal(tmpl % form_errors.get(field_name))
176
176
177 get_error = _GetError()
177 get_error = _GetError()
178
178
179
179
180 class _ToolTip(object):
180 class _ToolTip(object):
181
181
182 def __call__(self, tooltip_title, trim_at=50):
182 def __call__(self, tooltip_title, trim_at=50):
183 """
183 """
184 Special function just to wrap our text into nice formatted
184 Special function just to wrap our text into nice formatted
185 autowrapped text
185 autowrapped text
186
186
187 :param tooltip_title:
187 :param tooltip_title:
188 """
188 """
189 tooltip_title = escape(tooltip_title)
189 tooltip_title = escape(tooltip_title)
190 tooltip_title = tooltip_title.replace('<', '&lt;').replace('>', '&gt;')
190 tooltip_title = tooltip_title.replace('<', '&lt;').replace('>', '&gt;')
191 return tooltip_title
191 return tooltip_title
192 tooltip = _ToolTip()
192 tooltip = _ToolTip()
193
193
194
194
195 def files_breadcrumbs(repo_name, commit_id, file_path):
195 def files_breadcrumbs(repo_name, commit_id, file_path):
196 if isinstance(file_path, str):
196 if isinstance(file_path, str):
197 file_path = safe_unicode(file_path)
197 file_path = safe_unicode(file_path)
198
198
199 # TODO: johbo: Is this always a url like path, or is this operating
199 # TODO: johbo: Is this always a url like path, or is this operating
200 # system dependent?
200 # system dependent?
201 path_segments = file_path.split('/')
201 path_segments = file_path.split('/')
202
202
203 repo_name_html = escape(repo_name)
203 repo_name_html = escape(repo_name)
204 if len(path_segments) == 1 and path_segments[0] == '':
204 if len(path_segments) == 1 and path_segments[0] == '':
205 url_segments = [repo_name_html]
205 url_segments = [repo_name_html]
206 else:
206 else:
207 url_segments = [
207 url_segments = [
208 link_to(
208 link_to(
209 repo_name_html,
209 repo_name_html,
210 url('files_home',
210 url('files_home',
211 repo_name=repo_name,
211 repo_name=repo_name,
212 revision=commit_id,
212 revision=commit_id,
213 f_path=''),
213 f_path=''),
214 class_='pjax-link')]
214 class_='pjax-link')]
215
215
216 last_cnt = len(path_segments) - 1
216 last_cnt = len(path_segments) - 1
217 for cnt, segment in enumerate(path_segments):
217 for cnt, segment in enumerate(path_segments):
218 if not segment:
218 if not segment:
219 continue
219 continue
220 segment_html = escape(segment)
220 segment_html = escape(segment)
221
221
222 if cnt != last_cnt:
222 if cnt != last_cnt:
223 url_segments.append(
223 url_segments.append(
224 link_to(
224 link_to(
225 segment_html,
225 segment_html,
226 url('files_home',
226 url('files_home',
227 repo_name=repo_name,
227 repo_name=repo_name,
228 revision=commit_id,
228 revision=commit_id,
229 f_path='/'.join(path_segments[:cnt + 1])),
229 f_path='/'.join(path_segments[:cnt + 1])),
230 class_='pjax-link'))
230 class_='pjax-link'))
231 else:
231 else:
232 url_segments.append(segment_html)
232 url_segments.append(segment_html)
233
233
234 return literal('/'.join(url_segments))
234 return literal('/'.join(url_segments))
235
235
236
236
237 class CodeHtmlFormatter(HtmlFormatter):
237 class CodeHtmlFormatter(HtmlFormatter):
238 """
238 """
239 My code Html Formatter for source codes
239 My code Html Formatter for source codes
240 """
240 """
241
241
242 def wrap(self, source, outfile):
242 def wrap(self, source, outfile):
243 return self._wrap_div(self._wrap_pre(self._wrap_code(source)))
243 return self._wrap_div(self._wrap_pre(self._wrap_code(source)))
244
244
245 def _wrap_code(self, source):
245 def _wrap_code(self, source):
246 for cnt, it in enumerate(source):
246 for cnt, it in enumerate(source):
247 i, t = it
247 i, t = it
248 t = '<div id="L%s">%s</div>' % (cnt + 1, t)
248 t = '<div id="L%s">%s</div>' % (cnt + 1, t)
249 yield i, t
249 yield i, t
250
250
251 def _wrap_tablelinenos(self, inner):
251 def _wrap_tablelinenos(self, inner):
252 dummyoutfile = StringIO.StringIO()
252 dummyoutfile = StringIO.StringIO()
253 lncount = 0
253 lncount = 0
254 for t, line in inner:
254 for t, line in inner:
255 if t:
255 if t:
256 lncount += 1
256 lncount += 1
257 dummyoutfile.write(line)
257 dummyoutfile.write(line)
258
258
259 fl = self.linenostart
259 fl = self.linenostart
260 mw = len(str(lncount + fl - 1))
260 mw = len(str(lncount + fl - 1))
261 sp = self.linenospecial
261 sp = self.linenospecial
262 st = self.linenostep
262 st = self.linenostep
263 la = self.lineanchors
263 la = self.lineanchors
264 aln = self.anchorlinenos
264 aln = self.anchorlinenos
265 nocls = self.noclasses
265 nocls = self.noclasses
266 if sp:
266 if sp:
267 lines = []
267 lines = []
268
268
269 for i in range(fl, fl + lncount):
269 for i in range(fl, fl + lncount):
270 if i % st == 0:
270 if i % st == 0:
271 if i % sp == 0:
271 if i % sp == 0:
272 if aln:
272 if aln:
273 lines.append('<a href="#%s%d" class="special">%*d</a>' %
273 lines.append('<a href="#%s%d" class="special">%*d</a>' %
274 (la, i, mw, i))
274 (la, i, mw, i))
275 else:
275 else:
276 lines.append('<span class="special">%*d</span>' % (mw, i))
276 lines.append('<span class="special">%*d</span>' % (mw, i))
277 else:
277 else:
278 if aln:
278 if aln:
279 lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
279 lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
280 else:
280 else:
281 lines.append('%*d' % (mw, i))
281 lines.append('%*d' % (mw, i))
282 else:
282 else:
283 lines.append('')
283 lines.append('')
284 ls = '\n'.join(lines)
284 ls = '\n'.join(lines)
285 else:
285 else:
286 lines = []
286 lines = []
287 for i in range(fl, fl + lncount):
287 for i in range(fl, fl + lncount):
288 if i % st == 0:
288 if i % st == 0:
289 if aln:
289 if aln:
290 lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
290 lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
291 else:
291 else:
292 lines.append('%*d' % (mw, i))
292 lines.append('%*d' % (mw, i))
293 else:
293 else:
294 lines.append('')
294 lines.append('')
295 ls = '\n'.join(lines)
295 ls = '\n'.join(lines)
296
296
297 # in case you wonder about the seemingly redundant <div> here: since the
297 # in case you wonder about the seemingly redundant <div> here: since the
298 # content in the other cell also is wrapped in a div, some browsers in
298 # content in the other cell also is wrapped in a div, some browsers in
299 # some configurations seem to mess up the formatting...
299 # some configurations seem to mess up the formatting...
300 if nocls:
300 if nocls:
301 yield 0, ('<table class="%stable">' % self.cssclass +
301 yield 0, ('<table class="%stable">' % self.cssclass +
302 '<tr><td><div class="linenodiv" '
302 '<tr><td><div class="linenodiv" '
303 'style="background-color: #f0f0f0; padding-right: 10px">'
303 'style="background-color: #f0f0f0; padding-right: 10px">'
304 '<pre style="line-height: 125%">' +
304 '<pre style="line-height: 125%">' +
305 ls + '</pre></div></td><td id="hlcode" class="code">')
305 ls + '</pre></div></td><td id="hlcode" class="code">')
306 else:
306 else:
307 yield 0, ('<table class="%stable">' % self.cssclass +
307 yield 0, ('<table class="%stable">' % self.cssclass +
308 '<tr><td class="linenos"><div class="linenodiv"><pre>' +
308 '<tr><td class="linenos"><div class="linenodiv"><pre>' +
309 ls + '</pre></div></td><td id="hlcode" class="code">')
309 ls + '</pre></div></td><td id="hlcode" class="code">')
310 yield 0, dummyoutfile.getvalue()
310 yield 0, dummyoutfile.getvalue()
311 yield 0, '</td></tr></table>'
311 yield 0, '</td></tr></table>'
312
312
313
313
314 class SearchContentCodeHtmlFormatter(CodeHtmlFormatter):
314 class SearchContentCodeHtmlFormatter(CodeHtmlFormatter):
315 def __init__(self, **kw):
315 def __init__(self, **kw):
316 # only show these line numbers if set
316 # only show these line numbers if set
317 self.only_lines = kw.pop('only_line_numbers', [])
317 self.only_lines = kw.pop('only_line_numbers', [])
318 self.query_terms = kw.pop('query_terms', [])
318 self.query_terms = kw.pop('query_terms', [])
319 self.max_lines = kw.pop('max_lines', 5)
319 self.max_lines = kw.pop('max_lines', 5)
320 self.line_context = kw.pop('line_context', 3)
320 self.line_context = kw.pop('line_context', 3)
321 self.url = kw.pop('url', None)
321 self.url = kw.pop('url', None)
322
322
323 super(CodeHtmlFormatter, self).__init__(**kw)
323 super(CodeHtmlFormatter, self).__init__(**kw)
324
324
325 def _wrap_code(self, source):
325 def _wrap_code(self, source):
326 for cnt, it in enumerate(source):
326 for cnt, it in enumerate(source):
327 i, t = it
327 i, t = it
328 t = '<pre>%s</pre>' % t
328 t = '<pre>%s</pre>' % t
329 yield i, t
329 yield i, t
330
330
331 def _wrap_tablelinenos(self, inner):
331 def _wrap_tablelinenos(self, inner):
332 yield 0, '<table class="code-highlight %stable">' % self.cssclass
332 yield 0, '<table class="code-highlight %stable">' % self.cssclass
333
333
334 last_shown_line_number = 0
334 last_shown_line_number = 0
335 current_line_number = 1
335 current_line_number = 1
336
336
337 for t, line in inner:
337 for t, line in inner:
338 if not t:
338 if not t:
339 yield t, line
339 yield t, line
340 continue
340 continue
341
341
342 if current_line_number in self.only_lines:
342 if current_line_number in self.only_lines:
343 if last_shown_line_number + 1 != current_line_number:
343 if last_shown_line_number + 1 != current_line_number:
344 yield 0, '<tr>'
344 yield 0, '<tr>'
345 yield 0, '<td class="line">...</td>'
345 yield 0, '<td class="line">...</td>'
346 yield 0, '<td id="hlcode" class="code"></td>'
346 yield 0, '<td id="hlcode" class="code"></td>'
347 yield 0, '</tr>'
347 yield 0, '</tr>'
348
348
349 yield 0, '<tr>'
349 yield 0, '<tr>'
350 if self.url:
350 if self.url:
351 yield 0, '<td class="line"><a href="%s#L%i">%i</a></td>' % (
351 yield 0, '<td class="line"><a href="%s#L%i">%i</a></td>' % (
352 self.url, current_line_number, current_line_number)
352 self.url, current_line_number, current_line_number)
353 else:
353 else:
354 yield 0, '<td class="line"><a href="">%i</a></td>' % (
354 yield 0, '<td class="line"><a href="">%i</a></td>' % (
355 current_line_number)
355 current_line_number)
356 yield 0, '<td id="hlcode" class="code">' + line + '</td>'
356 yield 0, '<td id="hlcode" class="code">' + line + '</td>'
357 yield 0, '</tr>'
357 yield 0, '</tr>'
358
358
359 last_shown_line_number = current_line_number
359 last_shown_line_number = current_line_number
360
360
361 current_line_number += 1
361 current_line_number += 1
362
362
363
363
364 yield 0, '</table>'
364 yield 0, '</table>'
365
365
366
366
367 def extract_phrases(text_query):
367 def extract_phrases(text_query):
368 """
368 """
369 Extracts phrases from search term string making sure phrases
369 Extracts phrases from search term string making sure phrases
370 contained in double quotes are kept together - and discarding empty values
370 contained in double quotes are kept together - and discarding empty values
371 or fully whitespace values eg.
371 or fully whitespace values eg.
372
372
373 'some text "a phrase" more' => ['some', 'text', 'a phrase', 'more']
373 'some text "a phrase" more' => ['some', 'text', 'a phrase', 'more']
374
374
375 """
375 """
376
376
377 in_phrase = False
377 in_phrase = False
378 buf = ''
378 buf = ''
379 phrases = []
379 phrases = []
380 for char in text_query:
380 for char in text_query:
381 if in_phrase:
381 if in_phrase:
382 if char == '"': # end phrase
382 if char == '"': # end phrase
383 phrases.append(buf)
383 phrases.append(buf)
384 buf = ''
384 buf = ''
385 in_phrase = False
385 in_phrase = False
386 continue
386 continue
387 else:
387 else:
388 buf += char
388 buf += char
389 continue
389 continue
390 else:
390 else:
391 if char == '"': # start phrase
391 if char == '"': # start phrase
392 in_phrase = True
392 in_phrase = True
393 phrases.append(buf)
393 phrases.append(buf)
394 buf = ''
394 buf = ''
395 continue
395 continue
396 elif char == ' ':
396 elif char == ' ':
397 phrases.append(buf)
397 phrases.append(buf)
398 buf = ''
398 buf = ''
399 continue
399 continue
400 else:
400 else:
401 buf += char
401 buf += char
402
402
403 phrases.append(buf)
403 phrases.append(buf)
404 phrases = [phrase.strip() for phrase in phrases if phrase.strip()]
404 phrases = [phrase.strip() for phrase in phrases if phrase.strip()]
405 return phrases
405 return phrases
406
406
407
407
408 def get_matching_offsets(text, phrases):
408 def get_matching_offsets(text, phrases):
409 """
409 """
410 Returns a list of string offsets in `text` that the list of `terms` match
410 Returns a list of string offsets in `text` that the list of `terms` match
411
411
412 >>> get_matching_offsets('some text here', ['some', 'here'])
412 >>> get_matching_offsets('some text here', ['some', 'here'])
413 [(0, 4), (10, 14)]
413 [(0, 4), (10, 14)]
414
414
415 """
415 """
416 offsets = []
416 offsets = []
417 for phrase in phrases:
417 for phrase in phrases:
418 for match in re.finditer(phrase, text):
418 for match in re.finditer(phrase, text):
419 offsets.append((match.start(), match.end()))
419 offsets.append((match.start(), match.end()))
420
420
421 return offsets
421 return offsets
422
422
423
423
424 def normalize_text_for_matching(x):
424 def normalize_text_for_matching(x):
425 """
425 """
426 Replaces all non alnum characters to spaces and lower cases the string,
426 Replaces all non alnum characters to spaces and lower cases the string,
427 useful for comparing two text strings without punctuation
427 useful for comparing two text strings without punctuation
428 """
428 """
429 return re.sub(r'[^\w]', ' ', x.lower())
429 return re.sub(r'[^\w]', ' ', x.lower())
430
430
431
431
432 def get_matching_line_offsets(lines, terms):
432 def get_matching_line_offsets(lines, terms):
433 """ Return a set of `lines` indices (starting from 1) matching a
433 """ Return a set of `lines` indices (starting from 1) matching a
434 text search query, along with `context` lines above/below matching lines
434 text search query, along with `context` lines above/below matching lines
435
435
436 :param lines: list of strings representing lines
436 :param lines: list of strings representing lines
437 :param terms: search term string to match in lines eg. 'some text'
437 :param terms: search term string to match in lines eg. 'some text'
438 :param context: number of lines above/below a matching line to add to result
438 :param context: number of lines above/below a matching line to add to result
439 :param max_lines: cut off for lines of interest
439 :param max_lines: cut off for lines of interest
440 eg.
440 eg.
441
441
442 text = '''
442 text = '''
443 words words words
443 words words words
444 words words words
444 words words words
445 some text some
445 some text some
446 words words words
446 words words words
447 words words words
447 words words words
448 text here what
448 text here what
449 '''
449 '''
450 get_matching_line_offsets(text, 'text', context=1)
450 get_matching_line_offsets(text, 'text', context=1)
451 {3: [(5, 9)], 6: [(0, 4)]]
451 {3: [(5, 9)], 6: [(0, 4)]]
452
452
453 """
453 """
454 matching_lines = {}
454 matching_lines = {}
455 phrases = [normalize_text_for_matching(phrase)
455 phrases = [normalize_text_for_matching(phrase)
456 for phrase in extract_phrases(terms)]
456 for phrase in extract_phrases(terms)]
457
457
458 for line_index, line in enumerate(lines, start=1):
458 for line_index, line in enumerate(lines, start=1):
459 match_offsets = get_matching_offsets(
459 match_offsets = get_matching_offsets(
460 normalize_text_for_matching(line), phrases)
460 normalize_text_for_matching(line), phrases)
461 if match_offsets:
461 if match_offsets:
462 matching_lines[line_index] = match_offsets
462 matching_lines[line_index] = match_offsets
463
463
464 return matching_lines
464 return matching_lines
465
465
466
466
467 def get_lexer_safe(mimetype=None, filepath=None):
467 def get_lexer_safe(mimetype=None, filepath=None):
468 """
468 """
469 Tries to return a relevant pygments lexer using mimetype/filepath name,
469 Tries to return a relevant pygments lexer using mimetype/filepath name,
470 defaulting to plain text if none could be found
470 defaulting to plain text if none could be found
471 """
471 """
472 lexer = None
472 lexer = None
473 try:
473 try:
474 if mimetype:
474 if mimetype:
475 lexer = get_lexer_for_mimetype(mimetype)
475 lexer = get_lexer_for_mimetype(mimetype)
476 if not lexer:
476 if not lexer:
477 lexer = get_lexer_for_filename(filepath)
477 lexer = get_lexer_for_filename(filepath)
478 except pygments.util.ClassNotFound:
478 except pygments.util.ClassNotFound:
479 pass
479 pass
480
480
481 if not lexer:
481 if not lexer:
482 lexer = get_lexer_by_name('text')
482 lexer = get_lexer_by_name('text')
483
483
484 return lexer
484 return lexer
485
485
486
486
487 def pygmentize(filenode, **kwargs):
487 def pygmentize(filenode, **kwargs):
488 """
488 """
489 pygmentize function using pygments
489 pygmentize function using pygments
490
490
491 :param filenode:
491 :param filenode:
492 """
492 """
493 lexer = get_custom_lexer(filenode.extension) or filenode.lexer
493 lexer = get_custom_lexer(filenode.extension) or filenode.lexer
494 return literal(code_highlight(filenode.content, lexer,
494 return literal(code_highlight(filenode.content, lexer,
495 CodeHtmlFormatter(**kwargs)))
495 CodeHtmlFormatter(**kwargs)))
496
496
497
497
498 def pygmentize_annotation(repo_name, filenode, **kwargs):
498 def pygmentize_annotation(repo_name, filenode, **kwargs):
499 """
499 """
500 pygmentize function for annotation
500 pygmentize function for annotation
501
501
502 :param filenode:
502 :param filenode:
503 """
503 """
504
504
505 color_dict = {}
505 color_dict = {}
506
506
507 def gen_color(n=10000):
507 def gen_color(n=10000):
508 """generator for getting n of evenly distributed colors using
508 """generator for getting n of evenly distributed colors using
509 hsv color and golden ratio. It always return same order of colors
509 hsv color and golden ratio. It always return same order of colors
510
510
511 :returns: RGB tuple
511 :returns: RGB tuple
512 """
512 """
513
513
514 def hsv_to_rgb(h, s, v):
514 def hsv_to_rgb(h, s, v):
515 if s == 0.0:
515 if s == 0.0:
516 return v, v, v
516 return v, v, v
517 i = int(h * 6.0) # XXX assume int() truncates!
517 i = int(h * 6.0) # XXX assume int() truncates!
518 f = (h * 6.0) - i
518 f = (h * 6.0) - i
519 p = v * (1.0 - s)
519 p = v * (1.0 - s)
520 q = v * (1.0 - s * f)
520 q = v * (1.0 - s * f)
521 t = v * (1.0 - s * (1.0 - f))
521 t = v * (1.0 - s * (1.0 - f))
522 i = i % 6
522 i = i % 6
523 if i == 0:
523 if i == 0:
524 return v, t, p
524 return v, t, p
525 if i == 1:
525 if i == 1:
526 return q, v, p
526 return q, v, p
527 if i == 2:
527 if i == 2:
528 return p, v, t
528 return p, v, t
529 if i == 3:
529 if i == 3:
530 return p, q, v
530 return p, q, v
531 if i == 4:
531 if i == 4:
532 return t, p, v
532 return t, p, v
533 if i == 5:
533 if i == 5:
534 return v, p, q
534 return v, p, q
535
535
536 golden_ratio = 0.618033988749895
536 golden_ratio = 0.618033988749895
537 h = 0.22717784590367374
537 h = 0.22717784590367374
538
538
539 for _ in xrange(n):
539 for _ in xrange(n):
540 h += golden_ratio
540 h += golden_ratio
541 h %= 1
541 h %= 1
542 HSV_tuple = [h, 0.95, 0.95]
542 HSV_tuple = [h, 0.95, 0.95]
543 RGB_tuple = hsv_to_rgb(*HSV_tuple)
543 RGB_tuple = hsv_to_rgb(*HSV_tuple)
544 yield map(lambda x: str(int(x * 256)), RGB_tuple)
544 yield map(lambda x: str(int(x * 256)), RGB_tuple)
545
545
546 cgenerator = gen_color()
546 cgenerator = gen_color()
547
547
548 def get_color_string(commit_id):
548 def get_color_string(commit_id):
549 if commit_id in color_dict:
549 if commit_id in color_dict:
550 col = color_dict[commit_id]
550 col = color_dict[commit_id]
551 else:
551 else:
552 col = color_dict[commit_id] = cgenerator.next()
552 col = color_dict[commit_id] = cgenerator.next()
553 return "color: rgb(%s)! important;" % (', '.join(col))
553 return "color: rgb(%s)! important;" % (', '.join(col))
554
554
555 def url_func(repo_name):
555 def url_func(repo_name):
556
556
557 def _url_func(commit):
557 def _url_func(commit):
558 author = commit.author
558 author = commit.author
559 date = commit.date
559 date = commit.date
560 message = tooltip(commit.message)
560 message = tooltip(commit.message)
561
561
562 tooltip_html = ("<div style='font-size:0.8em'><b>Author:</b>"
562 tooltip_html = ("<div style='font-size:0.8em'><b>Author:</b>"
563 " %s<br/><b>Date:</b> %s</b><br/><b>Message:"
563 " %s<br/><b>Date:</b> %s</b><br/><b>Message:"
564 "</b> %s<br/></div>")
564 "</b> %s<br/></div>")
565
565
566 tooltip_html = tooltip_html % (author, date, message)
566 tooltip_html = tooltip_html % (author, date, message)
567 lnk_format = '%5s:%s' % ('r%s' % commit.idx, commit.short_id)
567 lnk_format = '%5s:%s' % ('r%s' % commit.idx, commit.short_id)
568 uri = link_to(
568 uri = link_to(
569 lnk_format,
569 lnk_format,
570 url('changeset_home', repo_name=repo_name,
570 url('changeset_home', repo_name=repo_name,
571 revision=commit.raw_id),
571 revision=commit.raw_id),
572 style=get_color_string(commit.raw_id),
572 style=get_color_string(commit.raw_id),
573 class_='tooltip',
573 class_='tooltip',
574 title=tooltip_html
574 title=tooltip_html
575 )
575 )
576
576
577 uri += '\n'
577 uri += '\n'
578 return uri
578 return uri
579 return _url_func
579 return _url_func
580
580
581 return literal(annotate_highlight(filenode, url_func(repo_name), **kwargs))
581 return literal(annotate_highlight(filenode, url_func(repo_name), **kwargs))
582
582
583
583
def is_following_repo(repo_name, user_id):
    from rhodecode.model.scm import ScmModel
    return ScmModel().is_following_repo(repo_name, user_id)


class _Message(object):
    """A message returned by ``Flash.pop_messages()``.

    Converting the message to a string returns the message text. Instances
    also have the following attributes:

    * ``message``: the message text.
    * ``category``: the category specified when the message was created.
    """

    def __init__(self, category, message):
        self.category = category
        self.message = message

    def __str__(self):
        return self.message

    __unicode__ = __str__

    def __html__(self):
        return escape(safe_unicode(self.message))


class Flash(_Flash):

    def pop_messages(self):
        """Return all accumulated messages and delete them from the session.

        The return value is a list of ``Message`` objects.
        """
        from pylons import session

        messages = []

        # Pop the 'old' pylons flash messages. They are tuples of the form
        # (category, message)
        for cat, msg in session.pop(self.session_key, []):
            messages.append(_Message(cat, msg))

        # Pop the 'new' pyramid flash messages for each category as list
        # of strings.
        for cat in self.categories:
            for msg in session.pop_flash(queue=cat):
                messages.append(_Message(cat, msg))
        # Map messages from the default queue to the 'notice' category.
        for msg in session.pop_flash():
            messages.append(_Message('notice', msg))

        session.save()
        return messages

flash = Flash()

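# Illustrative sketch (not part of the original module): typical use of the
# ``flash`` helper above. Controllers push messages, templates pop them, and
# ``pop_messages()`` drains both the legacy pylons queue and the pyramid
# per-category queues. It needs an active pylons session, so this stays a
# commented sketch:
#
#   flash(u'Repository created', category='success')
#   for message in flash.pop_messages():
#       log.debug('%s: %s', message.category, message.message)
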
#==============================================================================
# SCM FILTERS available via h.
#==============================================================================
from rhodecode.lib.vcs.utils import author_name, author_email
from rhodecode.lib.utils2 import credentials_filter, age as _age
from rhodecode.model.db import User, ChangesetStatus

age = _age
capitalize = lambda x: x.capitalize()
email = author_email
short_id = lambda x: x[:12]
hide_credentials = lambda x: ''.join(credentials_filter(x))


def age_component(datetime_iso, value=None, time_is_local=False):
    title = value or format_date(datetime_iso)

    # detect if we have timezone info, otherwise assume UTC; without this
    # default the format() call below would fail for tz-aware datetimes
    tzinfo = '+00:00'

    if isinstance(datetime_iso, datetime) and not datetime_iso.tzinfo:
        if time_is_local:
            tzinfo = time.strftime("+%H:%M",
                time.gmtime(
                    (datetime.now() - datetime.utcnow()).seconds + 1
                )
            )

    return literal(
        '<time class="timeago tooltip" '
        'title="{1}" datetime="{0}{2}">{1}</time>'.format(
            datetime_iso, title, tzinfo))


def _shorten_commit_id(commit_id):
    from rhodecode import CONFIG
    def_len = safe_int(CONFIG.get('rhodecode_show_sha_length', 12))
    return commit_id[:def_len]


def show_id(commit):
    """
    Configurable function that shows the commit ID,
    by default it's r123:fffeeefffeee

    :param commit: commit instance
    """
    from rhodecode import CONFIG
    show_idx = str2bool(CONFIG.get('rhodecode_show_revision_number', True))

    raw_id = _shorten_commit_id(commit.raw_id)
    if show_idx:
        return 'r%s:%s' % (commit.idx, raw_id)
    else:
        return '%s' % (raw_id, )


def format_date(date):
    """
    use a standardized formatting for dates used in RhodeCode

    :param date: date/datetime object
    :return: formatted date
    """

    if date:
        _fmt = "%a, %d %b %Y %H:%M:%S"
        return safe_unicode(date.strftime(_fmt))

    return u""


class _RepoChecker(object):

    def __init__(self, backend_alias):
        self._backend_alias = backend_alias

    def __call__(self, repository):
        if hasattr(repository, 'alias'):
            _type = repository.alias
        elif hasattr(repository, 'repo_type'):
            _type = repository.repo_type
        else:
            _type = repository
        return _type == self._backend_alias

is_git = _RepoChecker('git')
is_hg = _RepoChecker('hg')
is_svn = _RepoChecker('svn')


def get_repo_type_by_name(repo_name):
    repo = Repository.get_by_repo_name(repo_name)
    return repo.repo_type


def is_svn_without_proxy(repository):
    from rhodecode import CONFIG
    if is_svn(repository):
        if not CONFIG.get('rhodecode_proxy_subversion_http_requests', False):
            return True
    return False


def discover_user(author):
    """
    Tries to discover a RhodeCode User based on the author string. The author
    string is typically `FirstName LastName <email@address.com>`
    """

    # if author is already an instance use it for extraction
    if isinstance(author, User):
        return author

    # Valid email in the attribute passed, see if they're in the system
    _email = author_email(author)
    if _email != '':
        user = User.get_by_email(_email, case_insensitive=True, cache=True)
        if user is not None:
            return user

    # Maybe it's a username, we try to extract it and fetch by username ?
    _author = author_name(author)
    user = User.get_by_username(_author, case_insensitive=True, cache=True)
    if user is not None:
        return user

    return None


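# Illustrative sketch (not part of the original module): how an author string
# is split before the database lookups in ``discover_user`` / ``email_or_none``.
# ``author_email`` and ``author_name`` are the vcs helpers imported above.
#
#   author = u'Jane Doe <jane@example.com>'
#   author_email(author)  # -> u'jane@example.com' (looked up first, by email)
#   author_name(author)   # -> u'Jane Doe'         (fallback, by username)
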
def email_or_none(author):
    # extract email from the commit string
    _email = author_email(author)
    if _email != '':
        # check it against RhodeCode database, and use the MAIN email for this
        # user
        user = User.get_by_email(_email, case_insensitive=True, cache=True)
        if user is not None:
            return user.email
        return _email

    # See if it contains a username we can get an email from
    user = User.get_by_username(author_name(author), case_insensitive=True,
                                cache=True)
    if user is not None:
        return user.email

    # No valid email, not a valid user in the system, none!
    return None


def link_to_user(author, length=0, **kwargs):
    user = discover_user(author)
    # user can be None, but if we have it already it means we can re-use it
    # in the person() function, so we save 1 intensive-query
    if user:
        author = user

    display_person = person(author, 'username_or_name_or_email')
    if length:
        display_person = shorter(display_person, length)

    if user:
        return link_to(
            escape(display_person),
            url('user_profile', username=user.username),
            **kwargs)
    else:
        return escape(display_person)


def person(author, show_attr="username_and_name"):
    user = discover_user(author)
    if user:
        return getattr(user, show_attr)
    else:
        _author = author_name(author)
        _email = email(author)
        return _author or _email


def person_by_id(id_, show_attr="username_and_name"):
    # attr to return from fetched user
    person_getter = lambda usr: getattr(usr, show_attr)

    # maybe it's an ID ?
    if str(id_).isdigit() or isinstance(id_, int):
        id_ = int(id_)
        user = User.get(id_)
        if user is not None:
            return person_getter(user)
    return id_


def gravatar_with_user(author, show_disabled=False):
    from rhodecode.lib.utils import PartialRenderer
    _render = PartialRenderer('base/base.html')
    return _render('gravatar_with_user', author, show_disabled=show_disabled)


def desc_stylize(value):
    """
    converts tags from value into html equivalent

    :param value:
    """
    if not value:
        return ''

    value = re.sub(r'\[see\ \=\>\ *([a-zA-Z0-9\/\=\?\&\ \:\/\.\-]*)\]',
                   '<div class="metatag" tag="see">see =&gt; \\1 </div>', value)
    value = re.sub(r'\[license\ \=\>\ *([a-zA-Z0-9\/\=\?\&\ \:\/\.\-]*)\]',
                   '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/\\1">\\1</a></div>', value)
    value = re.sub(r'\[(requires|recommends|conflicts|base)\ \=\>\ *([a-zA-Z0-9\-\/]*)\]',
                   '<div class="metatag" tag="\\1">\\1 =&gt; <a href="/\\2">\\2</a></div>', value)
    value = re.sub(r'\[(lang|language)\ \=\>\ *([a-zA-Z\-\/\#\+]*)\]',
                   '<div class="metatag" tag="lang">\\2</div>', value)
    value = re.sub(r'\[([a-z]+)\]',
                   '<div class="metatag" tag="\\1">\\1</div>', value)

    return value


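# Illustrative sketch (not part of the original module): the kind of rewrite
# ``desc_stylize`` performs on a repository description. An input such as
#
#   [license => Apache-2.0] [lang => python] docs tooling
#
# comes back roughly as
#
#   <div class="metatag" tag="license"><a href="http://www.opensource.org/licenses/Apache-2.0">Apache-2.0</a></div>
#   <div class="metatag" tag="lang">python</div> docs tooling
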
def escaped_stylize(value):
    """
    converts tags from value into html equivalent, but escaping its value first
    """
    if not value:
        return ''

    # Using the default webhelpers escape method, but forcing the result into
    # plain unicode instead of a markup tag so it can be used in the regex
    # expressions below
    value = unicode(escape(safe_unicode(value)))

    value = re.sub(r'\[see\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]',
                   '<div class="metatag" tag="see">see =&gt; \\1 </div>', value)
    value = re.sub(r'\[license\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]',
                   '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/\\1">\\1</a></div>', value)
    value = re.sub(r'\[(requires|recommends|conflicts|base)\ \=\&gt;\ *([a-zA-Z0-9\-\/]*)\]',
                   '<div class="metatag" tag="\\1">\\1 =&gt; <a href="/\\2">\\2</a></div>', value)
    value = re.sub(r'\[(lang|language)\ \=\&gt;\ *([a-zA-Z\-\/\#\+]*)\]',
                   '<div class="metatag" tag="lang">\\2</div>', value)
    value = re.sub(r'\[([a-z]+)\]',
                   '<div class="metatag" tag="\\1">\\1</div>', value)

    return value


def bool2icon(value):
    """
    Returns boolean value of a given value, represented as html element with
    classes that will represent icons

    :param value: given value to convert to html node
    """

    if value:  # does bool conversion
        return HTML.tag('i', class_="icon-true")
    else:  # not true as bool
        return HTML.tag('i', class_="icon-false")


#==============================================================================
# PERMS
#==============================================================================
from rhodecode.lib.auth import HasPermissionAny, HasPermissionAll, \
    HasRepoPermissionAny, HasRepoPermissionAll, HasRepoGroupPermissionAll, \
    HasRepoGroupPermissionAny, HasRepoPermissionAnyApi, get_csrf_token


#==============================================================================
# GRAVATAR URL
#==============================================================================
class InitialsGravatar(object):
    def __init__(self, email_address, first_name, last_name, size=30,
                 background=None, text_color='#fff'):
        self.size = size
        self.first_name = first_name
        self.last_name = last_name
        self.email_address = email_address
        self.background = background or self.str2color(email_address)
        self.text_color = text_color

    def get_color_bank(self):
        """
        returns a predefined list of colors that gravatars can use.
        Those are randomized distinct colors that guarantee readability and
        uniqueness.

        generated with: http://phrogz.net/css/distinct-colors.html
        """
        return [
            '#bf3030', '#a67f53', '#00ff00', '#5989b3', '#392040', '#d90000',
            '#402910', '#204020', '#79baf2', '#a700b3', '#bf6060', '#7f5320',
            '#008000', '#003059', '#ee00ff', '#ff0000', '#8c4b00', '#007300',
            '#005fb3', '#de73e6', '#ff4040', '#ffaa00', '#3df255', '#203140',
            '#47004d', '#591616', '#664400', '#59b365', '#0d2133', '#83008c',
            '#592d2d', '#bf9f60', '#73e682', '#1d3f73', '#73006b', '#402020',
            '#b2862d', '#397341', '#597db3', '#e600d6', '#a60000', '#736039',
            '#00b318', '#79aaf2', '#330d30', '#ff8080', '#403010', '#16591f',
            '#002459', '#8c4688', '#e50000', '#ffbf40', '#00732e', '#102340',
            '#bf60ac', '#8c4646', '#cc8800', '#00a642', '#1d3473', '#b32d98',
            '#660e00', '#ffd580', '#80ffb2', '#7391e6', '#733967', '#d97b6c',
            '#8c5e00', '#59b389', '#3967e6', '#590047', '#73281d', '#665200',
            '#00e67a', '#2d50b3', '#8c2377', '#734139', '#b2982d', '#16593a',
            '#001859', '#ff00aa', '#a65e53', '#ffcc00', '#0d3321', '#2d3959',
            '#731d56', '#401610', '#4c3d00', '#468c6c', '#002ca6', '#d936a3',
            '#d94c36', '#403920', '#36d9a3', '#0d1733', '#592d4a', '#993626',
            '#cca300', '#00734d', '#46598c', '#8c005e', '#7f1100', '#8c7000',
            '#00a66f', '#7382e6', '#b32d74', '#d9896c', '#ffe680', '#1d7362',
            '#364cd9', '#73003d', '#d93a00', '#998a4d', '#59b3a1', '#5965b3',
            '#e5007a', '#73341d', '#665f00', '#00b38f', '#0018b3', '#59163a',
            '#b2502d', '#bfb960', '#00ffcc', '#23318c', '#a6537f', '#734939',
            '#b2a700', '#104036', '#3d3df2', '#402031', '#e56739', '#736f39',
            '#79f2ea', '#000059', '#401029', '#4c1400', '#ffee00', '#005953',
            '#101040', '#990052', '#402820', '#403d10', '#00ffee', '#0000d9',
            '#ff80c4', '#a66953', '#eeff00', '#00ccbe', '#8080ff', '#e673a1',
            '#a62c00', '#474d00', '#1a3331', '#46468c', '#733950', '#662900',
            '#858c23', '#238c85', '#0f0073', '#b20047', '#d9986c', '#becc00',
            '#396f73', '#281d73', '#ff0066', '#ff6600', '#dee673', '#59adb3',
            '#6559b3', '#590024', '#b2622d', '#98b32d', '#36ced9', '#332d59',
            '#40001a', '#733f1d', '#526600', '#005359', '#242040', '#bf6079',
            '#735039', '#cef23d', '#007780', '#5630bf', '#66001b', '#b24700',
            '#acbf60', '#1d6273', '#25008c', '#731d34', '#a67453', '#50592d',
            '#00ccff', '#6600ff', '#ff0044', '#4c1f00', '#8a994d', '#79daf2',
            '#a173e6', '#d93662', '#402310', '#aaff00', '#2d98b3', '#8c40ff',
            '#592d39', '#ff8c40', '#354020', '#103640', '#1a0040', '#331a20',
            '#331400', '#334d00', '#1d5673', '#583973', '#7f0022', '#4c3626',
            '#88cc00', '#36a3d9', '#3d0073', '#d9364c', '#33241a', '#698c23',
            '#5995b3', '#300059', '#e57382', '#7f3300', '#366600', '#00aaff',
            '#3a1659', '#733941', '#663600', '#74b32d', '#003c59', '#7f53a6',
            '#73000f', '#ff8800', '#baf279', '#79caf2', '#291040', '#a6293a',
            '#b2742d', '#587339', '#0077b3', '#632699', '#400009', '#d9a66c',
            '#294010', '#2d4a59', '#aa00ff', '#4c131b', '#b25f00', '#5ce600',
            '#267399', '#a336d9', '#990014', '#664e33', '#86bf60', '#0088ff',
            '#7700b3', '#593a16', '#073300', '#1d4b73', '#ac60bf', '#e59539',
            '#4f8c46', '#368dd9', '#5c0073'
        ]

    def rgb_to_hex_color(self, rgb_tuple):
        """
        Converts a passed rgb_tuple to a hex color.

        :param rgb_tuple: tuple of 3 ints representing an rgb color space
        """
        return '#' + ("".join(map(chr, rgb_tuple)).encode('hex'))

    def email_to_int_list(self, email_str):
        """
        Get every byte of the hex digest value of email and turn it to integer.
        It's going to be always between 0-255
        """
        digest = md5_safe(email_str.lower())
        return [int(digest[i * 2:i * 2 + 2], 16) for i in range(16)]

    def pick_color_bank_index(self, email_str, color_bank):
        return self.email_to_int_list(email_str)[0] % len(color_bank)

    def str2color(self, email_str):
        """
        Tries to map in a stable algorithm an email to color

        :param email_str:
        """
        color_bank = self.get_color_bank()
        # pick a position (modulo the bank length, so we always land inside
        # the bank even if it has fewer than 256 values)
        pos = self.pick_color_bank_index(email_str, color_bank)
        return color_bank[pos]

    def normalize_email(self, email_address):
        import unicodedata
        # default host used to fill in the fake/missing email
        default_host = u'localhost'

        if not email_address:
            email_address = u'%s@%s' % (User.DEFAULT_USER, default_host)

        email_address = safe_unicode(email_address)

        if u'@' not in email_address:
            email_address = u'%s@%s' % (email_address, default_host)

        if email_address.endswith(u'@'):
            email_address = u'%s%s' % (email_address, default_host)

        email_address = unicodedata.normalize('NFKD', email_address)\
            .encode('ascii', 'ignore')
        return email_address

    def get_initials(self):
        """
        Returns 2 letter initials calculated based on the input.
        The algorithm picks the first given email address, and takes the first
        letter of the part before @, and then the first letter of the server
        name. In case the part before @ is in a format of
        `somestring.somestring2` it replaces the server letter with the first
        letter of somestring2

        In case the function was initialized with both first and last name,
        this overrides the extraction from email by the first letter of the
        first and last name. We add special logic to that functionality: in
        case the full name is compound, like Guido Von Rossum, we use the last
        part of the last name (Von Rossum) picking `R`.

        The function also normalizes non-ascii characters to their ascii
        representation, eg Ą => A
        """
        import unicodedata
        # replace non-ascii to ascii
        first_name = unicodedata.normalize(
            'NFKD', safe_unicode(self.first_name)).encode('ascii', 'ignore')
        last_name = unicodedata.normalize(
            'NFKD', safe_unicode(self.last_name)).encode('ascii', 'ignore')

        # do NFKD encoding, and also make sure email has proper format
        email_address = self.normalize_email(self.email_address)

        # first push the email initials
        prefix, server = email_address.split('@', 1)

        # check if prefix is maybe a 'firstname.lastname' syntax
        _dot_split = prefix.rsplit('.', 1)
        if len(_dot_split) == 2:
            initials = [_dot_split[0][0], _dot_split[1][0]]
        else:
            initials = [prefix[0], server[0]]

        # then try to replace either firstname or lastname
        fn_letter = (first_name or " ")[0].strip()
        ln_letter = (last_name.split(' ', 1)[-1] or " ")[0].strip()

        if fn_letter:
            initials[0] = fn_letter

        if ln_letter:
            initials[1] = ln_letter

        return ''.join(initials).upper()

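    # Illustrative sketch (not part of the original module): a few inputs and
    # the initials the algorithm above derives from them (email first, then
    # overridden by first/last name when those are set):
    #
    #   InitialsGravatar('john.doe@example.com', '', '').get_initials()    # 'JD'
    #   InitialsGravatar('john@example.com', '', '').get_initials()        # 'JE'
    #   InitialsGravatar('x@x.com', 'Guido', 'Von Rossum').get_initials()  # 'GR'
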
    def get_img_data_by_type(self, font_family, img_type):
        default_user = """
        <svg xmlns="http://www.w3.org/2000/svg"
        version="1.1" x="0px" y="0px" width="{size}" height="{size}"
        viewBox="-15 -10 439.165 429.164"

        xml:space="preserve"
        style="background:{background};" >

        <path d="M204.583,216.671c50.664,0,91.74-48.075,
        91.74-107.378c0-82.237-41.074-107.377-91.74-107.377
        c-50.668,0-91.74,25.14-91.74,107.377C112.844,
        168.596,153.916,216.671,
        204.583,216.671z" fill="{text_color}"/>
        <path d="M407.164,374.717L360.88,
        270.454c-2.117-4.771-5.836-8.728-10.465-11.138l-71.83-37.392
        c-1.584-0.823-3.502-0.663-4.926,0.415c-20.316,
        15.366-44.203,23.488-69.076,23.488c-24.877,
        0-48.762-8.122-69.078-23.488
        c-1.428-1.078-3.346-1.238-4.93-0.415L58.75,
        259.316c-4.631,2.41-8.346,6.365-10.465,11.138L2.001,374.717
        c-3.191,7.188-2.537,15.412,1.75,22.005c4.285,
        6.592,11.537,10.526,19.4,10.526h362.861c7.863,0,15.117-3.936,
        19.402-10.527 C409.699,390.129,
        410.355,381.902,407.164,374.717z" fill="{text_color}"/>
        </svg>""".format(
            size=self.size,
            background='#979797',  # @grey4
            text_color=self.text_color,
            font_family=font_family)

        return {
            "default_user": default_user
        }[img_type]

    def get_img_data(self, svg_type=None):
        """
        generates the svg metadata for image
        """

        font_family = ','.join([
            'proximanovaregular',
            'Proxima Nova Regular',
            'Proxima Nova',
            'Arial',
            'Lucida Grande',
            'sans-serif'
        ])
        if svg_type:
            return self.get_img_data_by_type(font_family, svg_type)

        initials = self.get_initials()
        img_data = """
        <svg xmlns="http://www.w3.org/2000/svg" pointer-events="none"
             width="{size}" height="{size}"
             style="width: 100%; height: 100%; background-color: {background}"
             viewBox="0 0 {size} {size}">
            <text text-anchor="middle" y="50%" x="50%" dy="0.35em"
                  pointer-events="auto" fill="{text_color}"
                  font-family="{font_family}"
                  style="font-weight: 400; font-size: {f_size}px;">{text}
            </text>
        </svg>""".format(
            size=self.size,
            f_size=self.size/1.85,  # scale the text inside the box nicely
            background=self.background,
            text_color=self.text_color,
            text=initials.upper(),
            font_family=font_family)

        return img_data

    def generate_svg(self, svg_type=None):
        img_data = self.get_img_data(svg_type)
        return "data:image/svg+xml;base64,%s" % img_data.encode('base64')


def initials_gravatar(email_address, first_name, last_name, size=30):
    svg_type = None
    if email_address == User.DEFAULT_USER_EMAIL:
        svg_type = 'default_user'
    klass = InitialsGravatar(email_address, first_name, last_name, size)
    return klass.generate_svg(svg_type=svg_type)


def gravatar_url(email_address, size=30):
    # doh, we need to re-import those to mock it later
    from pylons import tmpl_context as c

    _use_gravatar = c.visual.use_gravatar
    _gravatar_url = c.visual.gravatar_url or User.DEFAULT_GRAVATAR_URL

    email_address = email_address or User.DEFAULT_USER_EMAIL
    if isinstance(email_address, unicode):
        # hashlib crashes on unicode items
        email_address = safe_str(email_address)

    # empty email or default user
    if not email_address or email_address == User.DEFAULT_USER_EMAIL:
        return initials_gravatar(User.DEFAULT_USER_EMAIL, '', '', size=size)

    if _use_gravatar:
        # TODO: Disuse pyramid thread locals. Think about another solution to
        # get the host and schema here.
        request = get_current_request()
        tmpl = safe_str(_gravatar_url)
        tmpl = tmpl.replace('{email}', email_address)\
            .replace('{md5email}', md5_safe(email_address.lower())) \
            .replace('{netloc}', request.host)\
            .replace('{scheme}', request.scheme)\
            .replace('{size}', safe_str(size))
        return tmpl
    else:
        return initials_gravatar(email_address, '', '', size=size)


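# Illustrative sketch (not part of the original module): with a gravatar
# template along the lines of
#
#   https://secure.gravatar.com/avatar/{md5email}?d=identicon&s={size}
#
# the placeholder substitution in ``gravatar_url`` above yields a URL whose
# path segment is the md5 of the lowercased email and whose ``s`` parameter is
# the requested size, while an empty address or the default (anonymous) user
# falls back to the generated initials SVG from ``initials_gravatar``.
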
class Page(_Page):
    """
    Custom pager to match rendering style with paginator
    """

    def _get_pos(self, cur_page, max_page, items):
        edge = (items / 2) + 1
        if (cur_page <= edge):
            radius = max(items / 2, items - cur_page)
        elif (max_page - cur_page) < edge:
            radius = (items - 1) - (max_page - cur_page)
        else:
            radius = items / 2

        left = max(1, (cur_page - (radius)))
        right = min(max_page, cur_page + (radius))
        return left, cur_page, right

    def _range(self, regexp_match):
        """
        Return range of linked pages (e.g. '1 2 [3] 4 5 6 7 8').

        Arguments:

        regexp_match
            A "re" (regular expressions) match object containing the
            radius of linked pages around the current page in
            regexp_match.group(1) as a string

        This function is supposed to be called as a callable in
        re.sub.

        """
        radius = int(regexp_match.group(1))

        # Compute the first and last page number within the radius
        # e.g. '1 .. 5 6 [7] 8 9 .. 12'
        # -> leftmost_page = 5
        # -> rightmost_page = 9
        leftmost_page, _cur, rightmost_page = self._get_pos(self.page,
                                                            self.last_page,
                                                            (radius * 2) + 1)
        nav_items = []

        # Create a link to the first page (unless we are on the first page
        # or there would be no need to insert '..' spacers)
        if self.page != self.first_page and self.first_page < leftmost_page:
            nav_items.append(self._pagerlink(self.first_page, self.first_page))

        # Insert dots if there are pages between the first page
        # and the currently displayed page range
        if leftmost_page - self.first_page > 1:
            # Wrap in a SPAN tag if nolink_attr is set
            text = '..'
            if self.dotdot_attr:
                text = HTML.span(c=text, **self.dotdot_attr)
            nav_items.append(text)

        for thispage in xrange(leftmost_page, rightmost_page + 1):
            # Highlight the current page number and do not use a link
            if thispage == self.page:
                text = '%s' % (thispage,)
                # Wrap in a SPAN tag if nolink_attr is set
                if self.curpage_attr:
                    text = HTML.span(c=text, **self.curpage_attr)
                nav_items.append(text)
            # Otherwise create just a link to that page
            else:
                text = '%s' % (thispage,)
                nav_items.append(self._pagerlink(thispage, text))

        # Insert dots if there are pages between the displayed
        # page numbers and the end of the page range
        if self.last_page - rightmost_page > 1:
            text = '..'
            # Wrap in a SPAN tag if nolink_attr is set
            if self.dotdot_attr:
                text = HTML.span(c=text, **self.dotdot_attr)
            nav_items.append(text)

        # Create a link to the very last page (unless we are on the last
        # page or there would be no need to insert '..' spacers)
        if self.page != self.last_page and rightmost_page < self.last_page:
            nav_items.append(self._pagerlink(self.last_page, self.last_page))

        ## prerender links
        #_page_link = url.current()
        #nav_items.append(literal('<link rel="prerender" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
        #nav_items.append(literal('<link rel="prefetch" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
        return self.separator.join(nav_items)

    def pager(self, format='~2~', page_param='page', partial_param='partial',
              show_if_single_page=False, separator=' ', onclick=None,
              symbol_first='<<', symbol_last='>>',
              symbol_previous='<', symbol_next='>',
              link_attr={'class': 'pager_link', 'rel': 'prerender'},
              curpage_attr={'class': 'pager_curpage'},
              dotdot_attr={'class': 'pager_dotdot'}, **kwargs):

        self.curpage_attr = curpage_attr
        self.separator = separator
        self.pager_kwargs = kwargs
        self.page_param = page_param
        self.partial_param = partial_param
        self.onclick = onclick
        self.link_attr = link_attr
        self.dotdot_attr = dotdot_attr

        # Don't show navigator if there is no more than one page
        if self.page_count == 0 or (self.page_count == 1 and not show_if_single_page):
            return ''

        from string import Template
        # Replace ~...~ in token format by range of pages
        result = re.sub(r'~(\d+)~', self._range, format)

        # Interpolate '$' variables
        result = Template(result).safe_substitute({
            'first_page': self.first_page,
            'last_page': self.last_page,
            'page': self.page,
            'page_count': self.page_count,
            'items_per_page': self.items_per_page,
            'first_item': self.first_item,
            'last_item': self.last_item,
            'item_count': self.item_count,
            'link_first': self.page > self.first_page and \
                self._pagerlink(self.first_page, symbol_first) or '',
            'link_last': self.page < self.last_page and \
                self._pagerlink(self.last_page, symbol_last) or '',
            'link_previous': self.previous_page and \
                self._pagerlink(self.previous_page, symbol_previous) \
                or HTML.span(symbol_previous, class_="pg-previous disabled"),
            'link_next': self.next_page and \
                self._pagerlink(self.next_page, symbol_next) \
                or HTML.span(symbol_next, class_="pg-next disabled")
        })

        return literal(result)


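# Illustrative sketch (not part of the original module): the ``format`` string
# understood by ``pager()`` mixes the ~radius~ page-range token with the
# $-style variables substituted above, e.g.
#
#   page.pager('$link_previous ~2~ $link_next')
#
# renders previous/next links around a window of two pages on each side of the
# current one, along the lines of '1 .. 5 6 [7] 8 9 .. 12'.
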
1338 #==============================================================================
1338 #==============================================================================
1339 # REPO PAGER, PAGER FOR REPOSITORY
1339 # REPO PAGER, PAGER FOR REPOSITORY
1340 #==============================================================================
1340 #==============================================================================
1341 class RepoPage(Page):
1341 class RepoPage(Page):
1342
1342
1343 def __init__(self, collection, page=1, items_per_page=20,
1343 def __init__(self, collection, page=1, items_per_page=20,
1344 item_count=None, url=None, **kwargs):
1344 item_count=None, url=None, **kwargs):
1345
1345
1346 """Create a "RepoPage" instance. special pager for paging
1346 """Create a "RepoPage" instance. special pager for paging
1347 repository
1347 repository
1348 """
1348 """
1349 self._url_generator = url
1349 self._url_generator = url
1350
1350
1351 # Safe the kwargs class-wide so they can be used in the pager() method
1351 # Safe the kwargs class-wide so they can be used in the pager() method
1352 self.kwargs = kwargs
1352 self.kwargs = kwargs
1353
1353
1354 # Save a reference to the collection
1354 # Save a reference to the collection
1355 self.original_collection = collection
1355 self.original_collection = collection
1356
1356
1357 self.collection = collection
1357 self.collection = collection
1358
1358
1359 # The self.page is the number of the current page.
1359 # The self.page is the number of the current page.
1360 # The first page has the number 1!
1360 # The first page has the number 1!
1361 try:
1361 try:
1362 self.page = int(page) # make it int() if we get it as a string
1362 self.page = int(page) # make it int() if we get it as a string
1363 except (ValueError, TypeError):
1363 except (ValueError, TypeError):
1364 self.page = 1
1364 self.page = 1
1365
1365
1366 self.items_per_page = items_per_page
1366 self.items_per_page = items_per_page
1367
1367
1368 # Unless the user tells us how many items the collections has
1368 # Unless the user tells us how many items the collections has
1369 # we calculate that ourselves.
1369 # we calculate that ourselves.
1370 if item_count is not None:
1370 if item_count is not None:
1371 self.item_count = item_count
1371 self.item_count = item_count
1372 else:
1372 else:
1373 self.item_count = len(self.collection)
1373 self.item_count = len(self.collection)
1374
1374
1375 # Compute the number of the first and last available page
1375 # Compute the number of the first and last available page
1376 if self.item_count > 0:
1376 if self.item_count > 0:
1377 self.first_page = 1
1377 self.first_page = 1
1378 self.page_count = int(math.ceil(float(self.item_count) /
1378 self.page_count = int(math.ceil(float(self.item_count) /
1379 self.items_per_page))
1379 self.items_per_page))
1380 self.last_page = self.first_page + self.page_count - 1
1380 self.last_page = self.first_page + self.page_count - 1
1381
1381
1382 # Make sure that the requested page number is the range of
1382 # Make sure that the requested page number is the range of
1383 # valid pages
1383 # valid pages
1384 if self.page > self.last_page:
1384 if self.page > self.last_page:
1385 self.page = self.last_page
1385 self.page = self.last_page
1386 elif self.page < self.first_page:
1386 elif self.page < self.first_page:
1387 self.page = self.first_page
1387 self.page = self.first_page
1388
1388
1389 # Note: the number of items on this page can be less than
1389 # Note: the number of items on this page can be less than
1390 # items_per_page if the last page is not full
1390 # items_per_page if the last page is not full
1391 self.first_item = max(0, (self.item_count) - (self.page *
1391 self.first_item = max(0, (self.item_count) - (self.page *
1392 items_per_page))
1392 items_per_page))
1393 self.last_item = ((self.item_count - 1) - items_per_page *
1393 self.last_item = ((self.item_count - 1) - items_per_page *
1394 (self.page - 1))
1394 (self.page - 1))
1395
1395
1396 self.items = list(self.collection[self.first_item:self.last_item + 1])
1396 self.items = list(self.collection[self.first_item:self.last_item + 1])
1397
1397
1398 # Links to previous and next page
1398 # Links to previous and next page
1399 if self.page > self.first_page:
1399 if self.page > self.first_page:
1400 self.previous_page = self.page - 1
1400 self.previous_page = self.page - 1
1401 else:
1401 else:
1402 self.previous_page = None
1402 self.previous_page = None
1403
1403
1404 if self.page < self.last_page:
1404 if self.page < self.last_page:
1405 self.next_page = self.page + 1
1405 self.next_page = self.page + 1
1406 else:
1406 else:
1407 self.next_page = None
1407 self.next_page = None
1408
1408
1409 # No items available
1409 # No items available
1410 else:
1410 else:
1411 self.first_page = None
1411 self.first_page = None
1412 self.page_count = 0
1412 self.page_count = 0
1413 self.last_page = None
1413 self.last_page = None
1414 self.first_item = None
1414 self.first_item = None
1415 self.last_item = None
1415 self.last_item = None
1416 self.previous_page = None
1416 self.previous_page = None
1417 self.next_page = None
1417 self.next_page = None
1418 self.items = []
1418 self.items = []
1419
1419
1420 # This is a subclass of the 'list' type. Initialise the list now.
1420 # This is a subclass of the 'list' type. Initialise the list now.
1421 list.__init__(self, reversed(self.items))
1421 list.__init__(self, reversed(self.items))
1422
1422
1423
1423
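As a quick illustration of the arithmetic above, here is a minimal standalone sketch (not RhodeCode code; the counts are invented) of how RepoPage slices a collection so that the newest items come first:

# Standalone sketch of RepoPage's slicing arithmetic (hypothetical counts).
item_count = 45
items_per_page = 20
page = 1

# The slice is taken from the end of the collection...
first_item = max(0, item_count - (page * items_per_page))     # -> 25
last_item = (item_count - 1) - items_per_page * (page - 1)    # -> 44
page_items = list(range(item_count))[first_item:last_item + 1]

# ...and then reversed, so page 1 shows the 20 newest items.
print(list(reversed(page_items)))   # [44, 43, ..., 25]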
1424 def changed_tooltip(nodes):
1424 def changed_tooltip(nodes):
1425 """
1425 """
1426 Generates an HTML string for changed nodes on the commit page.
1426 Generates an HTML string for changed nodes on the commit page.
1427 The output is limited to 30 entries.
1427 The output is limited to 30 entries.
1428
1428
1429 :param nodes: LazyNodesGenerator
1429 :param nodes: LazyNodesGenerator
1430 """
1430 """
1431 if nodes:
1431 if nodes:
1432 pref = ': <br/> '
1432 pref = ': <br/> '
1433 suf = ''
1433 suf = ''
1434 if len(nodes) > 30:
1434 if len(nodes) > 30:
1435 suf = '<br/>' + _(' and %s more') % (len(nodes) - 30)
1435 suf = '<br/>' + _(' and %s more') % (len(nodes) - 30)
1436 return literal(pref + '<br/> '.join([safe_unicode(x.path)
1436 return literal(pref + '<br/> '.join([safe_unicode(x.path)
1437 for x in nodes[:30]]) + suf)
1437 for x in nodes[:30]]) + suf)
1438 else:
1438 else:
1439 return ': ' + _('No Files')
1439 return ': ' + _('No Files')
1440
1440
1441
1441
1442 def breadcrumb_repo_link(repo):
1442 def breadcrumb_repo_link(repo):
1443 """
1443 """
1444 Makes a breadcrumb path link to a repo
1444 Makes a breadcrumb path link to a repo
1445
1445
1446 ex::
1446 ex::
1447 group >> subgroup >> repo
1447 group >> subgroup >> repo
1448
1448
1449 :param repo: a Repository instance
1449 :param repo: a Repository instance
1450 """
1450 """
1451
1451
1452 path = [
1452 path = [
1453 link_to(group.name, url('repo_group_home', group_name=group.group_name))
1453 link_to(group.name, url('repo_group_home', group_name=group.group_name))
1454 for group in repo.groups_with_parents
1454 for group in repo.groups_with_parents
1455 ] + [
1455 ] + [
1456 link_to(repo.just_name, url('summary_home', repo_name=repo.repo_name))
1456 link_to(repo.just_name, url('summary_home', repo_name=repo.repo_name))
1457 ]
1457 ]
1458
1458
1459 return literal(' &raquo; '.join(path))
1459 return literal(' &raquo; '.join(path))
1460
1460
1461
1461
1462 def format_byte_size_binary(file_size):
1462 def format_byte_size_binary(file_size):
1463 """
1463 """
1464 Formats file/folder sizes using the binary (base-2) standard.
1464 Formats file/folder sizes using the binary (base-2) standard.
1465 """
1465 """
1466 formatted_size = format_byte_size(file_size, binary=True)
1466 formatted_size = format_byte_size(file_size, binary=True)
1467 return formatted_size
1467 return formatted_size
1468
1468
1469
1469
1470 def fancy_file_stats(stats):
1470 def fancy_file_stats(stats):
1471 """
1471 """
1472 Displays a fancy two-colored bar for the number of added/deleted
1472 Displays a fancy two-colored bar for the number of added/deleted
1473 lines of code in a file
1473 lines of code in a file
1474
1474
1475 :param stats: dict with 'added', 'deleted', 'binary' and 'ops' info for the file
1475 :param stats: dict with 'added', 'deleted', 'binary' and 'ops' info for the file
1476 """
1476 """
1477 from rhodecode.lib.diffs import NEW_FILENODE, DEL_FILENODE, \
1477 from rhodecode.lib.diffs import NEW_FILENODE, DEL_FILENODE, \
1478 MOD_FILENODE, RENAMED_FILENODE, CHMOD_FILENODE, BIN_FILENODE
1478 MOD_FILENODE, RENAMED_FILENODE, CHMOD_FILENODE, BIN_FILENODE
1479
1479
1480 def cgen(l_type, a_v, d_v):
1480 def cgen(l_type, a_v, d_v):
1481 mapping = {'tr': 'top-right-rounded-corner-mid',
1481 mapping = {'tr': 'top-right-rounded-corner-mid',
1482 'tl': 'top-left-rounded-corner-mid',
1482 'tl': 'top-left-rounded-corner-mid',
1483 'br': 'bottom-right-rounded-corner-mid',
1483 'br': 'bottom-right-rounded-corner-mid',
1484 'bl': 'bottom-left-rounded-corner-mid'}
1484 'bl': 'bottom-left-rounded-corner-mid'}
1485 map_getter = lambda x: mapping[x]
1485 map_getter = lambda x: mapping[x]
1486
1486
1487 if l_type == 'a' and d_v:
1487 if l_type == 'a' and d_v:
1488 # case when added and deleted are present
1488 # case when added and deleted are present
1489 return ' '.join(map(map_getter, ['tl', 'bl']))
1489 return ' '.join(map(map_getter, ['tl', 'bl']))
1490
1490
1491 if l_type == 'a' and not d_v:
1491 if l_type == 'a' and not d_v:
1492 return ' '.join(map(map_getter, ['tr', 'br', 'tl', 'bl']))
1492 return ' '.join(map(map_getter, ['tr', 'br', 'tl', 'bl']))
1493
1493
1494 if l_type == 'd' and a_v:
1494 if l_type == 'd' and a_v:
1495 return ' '.join(map(map_getter, ['tr', 'br']))
1495 return ' '.join(map(map_getter, ['tr', 'br']))
1496
1496
1497 if l_type == 'd' and not a_v:
1497 if l_type == 'd' and not a_v:
1498 return ' '.join(map(map_getter, ['tr', 'br', 'tl', 'bl']))
1498 return ' '.join(map(map_getter, ['tr', 'br', 'tl', 'bl']))
1499
1499
1500 a, d = stats['added'], stats['deleted']
1500 a, d = stats['added'], stats['deleted']
1501 width = 100
1501 width = 100
1502
1502
1503 if stats['binary']: # binary operations like chmod/rename etc
1503 if stats['binary']: # binary operations like chmod/rename etc
1504 lbl = []
1504 lbl = []
1505 bin_op = 0 # undefined
1505 bin_op = 0 # undefined
1506
1506
1507 # prefix with bin for binary files
1507 # prefix with bin for binary files
1508 if BIN_FILENODE in stats['ops']:
1508 if BIN_FILENODE in stats['ops']:
1509 lbl += ['bin']
1509 lbl += ['bin']
1510
1510
1511 if NEW_FILENODE in stats['ops']:
1511 if NEW_FILENODE in stats['ops']:
1512 lbl += [_('new file')]
1512 lbl += [_('new file')]
1513 bin_op = NEW_FILENODE
1513 bin_op = NEW_FILENODE
1514 elif MOD_FILENODE in stats['ops']:
1514 elif MOD_FILENODE in stats['ops']:
1515 lbl += [_('mod')]
1515 lbl += [_('mod')]
1516 bin_op = MOD_FILENODE
1516 bin_op = MOD_FILENODE
1517 elif DEL_FILENODE in stats['ops']:
1517 elif DEL_FILENODE in stats['ops']:
1518 lbl += [_('del')]
1518 lbl += [_('del')]
1519 bin_op = DEL_FILENODE
1519 bin_op = DEL_FILENODE
1520 elif RENAMED_FILENODE in stats['ops']:
1520 elif RENAMED_FILENODE in stats['ops']:
1521 lbl += [_('rename')]
1521 lbl += [_('rename')]
1522 bin_op = RENAMED_FILENODE
1522 bin_op = RENAMED_FILENODE
1523
1523
1524 # chmod can go with other operations, so we add a + to lbl if needed
1524 # chmod can go with other operations, so we add a + to lbl if needed
1525 if CHMOD_FILENODE in stats['ops']:
1525 if CHMOD_FILENODE in stats['ops']:
1526 lbl += [_('chmod')]
1526 lbl += [_('chmod')]
1527 if bin_op == 0:
1527 if bin_op == 0:
1528 bin_op = CHMOD_FILENODE
1528 bin_op = CHMOD_FILENODE
1529
1529
1530 lbl = '+'.join(lbl)
1530 lbl = '+'.join(lbl)
1531 b_a = '<div class="bin bin%s %s" style="width:100%%">%s</div>' \
1531 b_a = '<div class="bin bin%s %s" style="width:100%%">%s</div>' \
1532 % (bin_op, cgen('a', a_v='', d_v=0), lbl)
1532 % (bin_op, cgen('a', a_v='', d_v=0), lbl)
1533 b_d = '<div class="bin bin1" style="width:0%%"></div>'
1533 b_d = '<div class="bin bin1" style="width:0%%"></div>'
1534 return literal('<div style="width:%spx">%s%s</div>' % (width, b_a, b_d))
1534 return literal('<div style="width:%spx">%s%s</div>' % (width, b_a, b_d))
1535
1535
1536 t = stats['added'] + stats['deleted']
1536 t = stats['added'] + stats['deleted']
1537 unit = float(width) / (t or 1)
1537 unit = float(width) / (t or 1)
1538
1538
1539 # needs > 9% of width to be visible or 0 to be hidden
1539 # needs > 9% of width to be visible or 0 to be hidden
1540 a_p = max(9, unit * a) if a > 0 else 0
1540 a_p = max(9, unit * a) if a > 0 else 0
1541 d_p = max(9, unit * d) if d > 0 else 0
1541 d_p = max(9, unit * d) if d > 0 else 0
1542 p_sum = a_p + d_p
1542 p_sum = a_p + d_p
1543
1543
1544 if p_sum > width:
1544 if p_sum > width:
1545 # adjust the percentages to sum to 100% since small values were clamped to 9
1545 # adjust the percentages to sum to 100% since small values were clamped to 9
1546 if a_p > d_p:
1546 if a_p > d_p:
1547 a_p = a_p - (p_sum - width)
1547 a_p = a_p - (p_sum - width)
1548 else:
1548 else:
1549 d_p = d_p - (p_sum - width)
1549 d_p = d_p - (p_sum - width)
1550
1550
1551 a_v = a if a > 0 else ''
1551 a_v = a if a > 0 else ''
1552 d_v = d if d > 0 else ''
1552 d_v = d if d > 0 else ''
1553
1553
1554 d_a = '<div class="added %s" style="width:%s%%">%s</div>' % (
1554 d_a = '<div class="added %s" style="width:%s%%">%s</div>' % (
1555 cgen('a', a_v, d_v), a_p, a_v
1555 cgen('a', a_v, d_v), a_p, a_v
1556 )
1556 )
1557 d_d = '<div class="deleted %s" style="width:%s%%">%s</div>' % (
1557 d_d = '<div class="deleted %s" style="width:%s%%">%s</div>' % (
1558 cgen('d', a_v, d_v), d_p, d_v
1558 cgen('d', a_v, d_v), d_p, d_v
1559 )
1559 )
1560 return literal('<div style="width:%spx">%s%s</div>' % (width, d_a, d_d))
1560 return literal('<div style="width:%spx">%s%s</div>' % (width, d_a, d_d))
1561
1561
1562
1562
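To make the width math above concrete, a small worked example with invented line counts, showing how the 9% minimum and the later re-normalisation interact:

# Worked example of the bar-width math in fancy_file_stats (invented counts).
width = 100
added, deleted = 100, 1

unit = float(width) / (added + deleted)             # ~0.99 width units per line
a_p = max(9, unit * added) if added > 0 else 0      # ~99.0
d_p = max(9, unit * deleted) if deleted > 0 else 0  # clamped up to 9
p_sum = a_p + d_p                                   # ~108, overflows the bar

if p_sum > width:                                   # shrink the larger segment
    if a_p > d_p:
        a_p = a_p - (p_sum - width)                 # ~91.0
    else:
        d_p = d_p - (p_sum - width)

print(round(a_p, 1), d_p)                           # 91.0 9 -> widths sum to 100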
1563 def urlify_text(text_, safe=True):
1563 def urlify_text(text_, safe=True):
1564 """
1564 """
1565 Extract URLs from text and make HTML links out of them
1565 Extract URLs from text and make HTML links out of them
1566
1566
1567 :param text_:
1567 :param text_:
1568 """
1568 """
1569
1569
1570 url_pat = re.compile(r'''(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@#.&+]'''
1570 url_pat = re.compile(r'''(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@#.&+]'''
1571 '''|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)''')
1571 '''|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)''')
1572
1572
1573 def url_func(match_obj):
1573 def url_func(match_obj):
1574 url_full = match_obj.groups()[0]
1574 url_full = match_obj.groups()[0]
1575 return '<a href="%(url)s">%(url)s</a>' % ({'url': url_full})
1575 return '<a href="%(url)s">%(url)s</a>' % ({'url': url_full})
1576 _newtext = url_pat.sub(url_func, text_)
1576 _newtext = url_pat.sub(url_func, text_)
1577 if safe:
1577 if safe:
1578 return literal(_newtext)
1578 return literal(_newtext)
1579 return _newtext
1579 return _newtext
1580
1580
1581
1581
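For reference, the same pattern run outside the helper (rewritten as raw strings, with an invented input); this is roughly what urlify_text produces before wrapping the result in literal():

import re

# Equivalent URL pattern to the one in urlify_text above; the text is made up.
url_pat = re.compile(r'(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@#.&+]'
                     r'|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)')

text = 'docs live at https://rhodecode.com/docs now'
html = url_pat.sub(
    lambda m: '<a href="%(url)s">%(url)s</a>' % {'url': m.group(1)}, text)
print(html)
# docs live at <a href="https://rhodecode.com/docs">https://rhodecode.com/docs</a> now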
1582 def urlify_commits(text_, repository):
1582 def urlify_commits(text_, repository):
1583 """
1583 """
1584 Extract commit ids from text and make links from them
1584 Extract commit ids from text and make links from them
1585
1585
1586 :param text_:
1586 :param text_:
1587 :param repository: repo name to build the URL with
1587 :param repository: repo name to build the URL with
1588 """
1588 """
1589 from pylons import url # doh, we need to re-import url to mock it later
1589 from pylons import url # doh, we need to re-import url to mock it later
1590 URL_PAT = re.compile(r'(^|\s)([0-9a-fA-F]{12,40})($|\s)')
1590 URL_PAT = re.compile(r'(^|\s)([0-9a-fA-F]{12,40})($|\s)')
1591
1591
1592 def url_func(match_obj):
1592 def url_func(match_obj):
1593 commit_id = match_obj.groups()[1]
1593 commit_id = match_obj.groups()[1]
1594 pref = match_obj.groups()[0]
1594 pref = match_obj.groups()[0]
1595 suf = match_obj.groups()[2]
1595 suf = match_obj.groups()[2]
1596
1596
1597 tmpl = (
1597 tmpl = (
1598 '%(pref)s<a class="%(cls)s" href="%(url)s">'
1598 '%(pref)s<a class="%(cls)s" href="%(url)s">'
1599 '%(commit_id)s</a>%(suf)s'
1599 '%(commit_id)s</a>%(suf)s'
1600 )
1600 )
1601 return tmpl % {
1601 return tmpl % {
1602 'pref': pref,
1602 'pref': pref,
1603 'cls': 'revision-link',
1603 'cls': 'revision-link',
1604 'url': url('changeset_home', repo_name=repository,
1604 'url': url('changeset_home', repo_name=repository,
1605 revision=commit_id),
1605 revision=commit_id),
1606 'commit_id': commit_id,
1606 'commit_id': commit_id,
1607 'suf': suf
1607 'suf': suf
1608 }
1608 }
1609
1609
1610 newtext = URL_PAT.sub(url_func, text_)
1610 newtext = URL_PAT.sub(url_func, text_)
1611
1611
1612 return newtext
1612 return newtext
1613
1613
1614
1614
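A tiny, hedged demo of the commit-id detection (the hash and message are invented); in urlify_commits the matched id is then wrapped in a changeset_home link:

import re

# Same commit-id pattern as URL_PAT above; hash and message are made up.
commit_pat = re.compile(r'(^|\s)([0-9a-fA-F]{12,40})($|\s)')

msg = 'fix pager regression introduced in deadbeefcafe1234 yesterday'
match = commit_pat.search(msg)
print(match.group(2))   # 'deadbeefcafe1234' -> rendered as a revision-link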
1615 def _process_url_func(match_obj, repo_name, uid, entry):
1615 def _process_url_func(match_obj, repo_name, uid, entry):
1616 pref = ''
1616 pref = ''
1617 if match_obj.group().startswith(' '):
1617 if match_obj.group().startswith(' '):
1618 pref = ' '
1618 pref = ' '
1619
1619
1620 issue_id = ''.join(match_obj.groups())
1620 issue_id = ''.join(match_obj.groups())
1621 tmpl = (
1621 tmpl = (
1622 '%(pref)s<a class="%(cls)s" href="%(url)s">'
1622 '%(pref)s<a class="%(cls)s" href="%(url)s">'
1623 '%(issue-prefix)s%(id-repr)s'
1623 '%(issue-prefix)s%(id-repr)s'
1624 '</a>')
1624 '</a>')
1625
1625
1626 (repo_name_cleaned,
1626 (repo_name_cleaned,
1627 parent_group_name) = RepoGroupModel().\
1627 parent_group_name) = RepoGroupModel().\
1628 _get_group_name_and_parent(repo_name)
1628 _get_group_name_and_parent(repo_name)
1629
1629
1630 # variables replacement
1630 # variables replacement
1631 named_vars = {
1631 named_vars = {
1632 'id': issue_id,
1632 'id': issue_id,
1633 'repo': repo_name,
1633 'repo': repo_name,
1634 'repo_name': repo_name_cleaned,
1634 'repo_name': repo_name_cleaned,
1635 'group_name': parent_group_name
1635 'group_name': parent_group_name
1636 }
1636 }
1637 # named regex variables
1637 # named regex variables
1638 named_vars.update(match_obj.groupdict())
1638 named_vars.update(match_obj.groupdict())
1639 _url = string.Template(entry['url']).safe_substitute(**named_vars)
1639 _url = string.Template(entry['url']).safe_substitute(**named_vars)
1640
1640
1641 return tmpl % {
1641 return tmpl % {
1642 'pref': pref,
1642 'pref': pref,
1643 'cls': 'issue-tracker-link',
1643 'cls': 'issue-tracker-link',
1644 'url': _url,
1644 'url': _url,
1645 'id-repr': issue_id,
1645 'id-repr': issue_id,
1646 'issue-prefix': entry['pref'],
1646 'issue-prefix': entry['pref'],
1647 'serv': entry['url'],
1647 'serv': entry['url'],
1648 }
1648 }
1649
1649
1650
1650
1651 def process_patterns(text_string, repo_name, config):
1651 def process_patterns(text_string, repo_name, config):
1652 repo = None
1652 repo = None
1653 if repo_name:
1653 if repo_name:
1654 # Retrieve the repo so an invalid repo_name does not explode in
1654 # Retrieve the repo so an invalid repo_name does not explode in
1655 # IssueTrackerSettingsModel, while still passing the invalid name further down
1655 # IssueTrackerSettingsModel, while still passing the invalid name further down
1656 repo = Repository.get_by_repo_name(repo_name, cache=True)
1656 repo = Repository.get_by_repo_name(repo_name, cache=True)
1657
1657
1658 settings_model = IssueTrackerSettingsModel(repo=repo)
1658 settings_model = IssueTrackerSettingsModel(repo=repo)
1659 active_entries = settings_model.get_settings(cache=True)
1659 active_entries = settings_model.get_settings(cache=True)
1660
1660
1661 newtext = text_string
1661 newtext = text_string
1662 for uid, entry in active_entries.items():
1662 for uid, entry in active_entries.items():
1663 url_func = partial(
1663 url_func = partial(
1664 _process_url_func, repo_name=repo_name, entry=entry, uid=uid)
1664 _process_url_func, repo_name=repo_name, entry=entry, uid=uid)
1665
1665
1666 log.debug('found issue tracker entry with uid %s' % (uid,))
1666 log.debug('found issue tracker entry with uid %s' % (uid,))
1667
1667
1668 if not (entry['pat'] and entry['url']):
1668 if not (entry['pat'] and entry['url']):
1669 log.debug('skipping due to missing data')
1669 log.debug('skipping due to missing data')
1670 continue
1670 continue
1671
1671
1672 log.debug('issue tracker entry: uid: `%s` PAT:%s URL:%s PREFIX:%s'
1672 log.debug('issue tracker entry: uid: `%s` PAT:%s URL:%s PREFIX:%s'
1673 % (uid, entry['pat'], entry['url'], entry['pref']))
1673 % (uid, entry['pat'], entry['url'], entry['pref']))
1674
1674
1675 try:
1675 try:
1676 pattern = re.compile(r'%s' % entry['pat'])
1676 pattern = re.compile(r'%s' % entry['pat'])
1677 except re.error:
1677 except re.error:
1678 log.exception(
1678 log.exception(
1679 'issue tracker pattern: `%s` failed to compile',
1679 'issue tracker pattern: `%s` failed to compile',
1680 entry['pat'])
1680 entry['pat'])
1681 continue
1681 continue
1682
1682
1683 newtext = pattern.sub(url_func, newtext)
1683 newtext = pattern.sub(url_func, newtext)
1684 log.debug('processed prefix:uid `%s`' % (uid,))
1684 log.debug('processed prefix:uid `%s`' % (uid,))
1685
1685
1686 return newtext
1686 return newtext
1687
1687
1688
1688
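To show the substitution flow above end to end, a hypothetical issue-tracker entry (the pattern, URL and prefix are invented, not RhodeCode defaults) pushed through the same re.sub + string.Template logic:

import re
import string

# Hypothetical issue tracker entry; pattern, URL and prefix are invented.
entry = {'pat': r'#(?P<issue_id>\d+)',
         'url': 'https://tracker.example.com/${repo}/issue/${issue_id}',
         'pref': '#'}

def url_func(match_obj, repo_name='my-repo'):
    named_vars = {'repo': repo_name}
    named_vars.update(match_obj.groupdict())   # adds 'issue_id' from the regex
    link = string.Template(entry['url']).safe_substitute(**named_vars)
    return '<a class="issue-tracker-link" href="%s">%s%s</a>' % (
        link, entry['pref'], match_obj.group('issue_id'))

print(re.compile(entry['pat']).sub(url_func, 'readme ordering, fixes #4050'))
# readme ordering, fixes <a class="issue-tracker-link"
#   href="https://tracker.example.com/my-repo/issue/4050">#4050</a>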
1689 def urlify_commit_message(commit_text, repository=None):
1689 def urlify_commit_message(commit_text, repository=None):
1690 """
1690 """
1691 Parses given text message and makes proper links.
1691 Parses given text message and makes proper links.
1692 Issues are linked to the given issue server, and the rest become commit links.
1692 Issues are linked to the given issue server, and the rest become commit links.
1693
1693
1694 :param commit_text:
1694 :param commit_text:
1695 :param repository:
1695 :param repository:
1696 """
1696 """
1697 from pylons import url # doh, we need to re-import url to mock it later
1697 from pylons import url # doh, we need to re-import url to mock it later
1698 from rhodecode import CONFIG
1698 from rhodecode import CONFIG
1699
1699
1700 def escaper(string):
1700 def escaper(string):
1701 return string.replace('<', '&lt;').replace('>', '&gt;')
1701 return string.replace('<', '&lt;').replace('>', '&gt;')
1702
1702
1703 newtext = escaper(commit_text)
1703 newtext = escaper(commit_text)
1704 # urlify commits - extract commit ids and make link out of them, if we have
1704 # urlify commits - extract commit ids and make link out of them, if we have
1705 # the scope of repository present.
1705 # the scope of repository present.
1706 if repository:
1706 if repository:
1707 newtext = urlify_commits(newtext, repository)
1707 newtext = urlify_commits(newtext, repository)
1708
1708
1709 # extract http/https links and make them real urls
1709 # extract http/https links and make them real urls
1710 newtext = urlify_text(newtext, safe=False)
1710 newtext = urlify_text(newtext, safe=False)
1711
1711
1712 # process issue tracker patterns
1712 # process issue tracker patterns
1713 newtext = process_patterns(newtext, repository or '', CONFIG)
1713 newtext = process_patterns(newtext, repository or '', CONFIG)
1714
1714
1715 return literal(newtext)
1715 return literal(newtext)
1716
1716
1717
1717
1718 def rst(source, mentions=False):
1718 def rst(source, mentions=False):
1719 return literal('<div class="rst-block">%s</div>' %
1719 return literal('<div class="rst-block">%s</div>' %
1720 MarkupRenderer.rst(source, mentions=mentions))
1720 MarkupRenderer.rst(source, mentions=mentions))
1721
1721
1722
1722
1723 def markdown(source, mentions=False):
1723 def markdown(source, mentions=False):
1724 return literal('<div class="markdown-block">%s</div>' %
1724 return literal('<div class="markdown-block">%s</div>' %
1725 MarkupRenderer.markdown(source, flavored=True,
1725 MarkupRenderer.markdown(source, flavored=True,
1726 mentions=mentions))
1726 mentions=mentions))
1727
1727
1728 def renderer_from_filename(filename, exclude=None):
1728 def renderer_from_filename(filename, exclude=None):
1729 from rhodecode.config.conf import MARKDOWN_EXTS, RST_EXTS
1729 return MarkupRenderer.renderer_from_filename(filename, exclude=exclude)
1730
1731 def _filter(elements):
1732 if isinstance(exclude, (list, tuple)):
1733 return [x for x in elements if x not in exclude]
1734 return elements
1735
1736 if filename.endswith(tuple(_filter([x[0] for x in MARKDOWN_EXTS if x[0]]))):
1737 return 'markdown'
1738 if filename.endswith(tuple(_filter([x[0] for x in RST_EXTS if x[0]]))):
1739 return 'rst'
1740
1730
1741
1731
1742 def render(source, renderer='rst', mentions=False):
1732 def render(source, renderer='rst', mentions=False):
1743 if renderer == 'rst':
1733 if renderer == 'rst':
1744 return rst(source, mentions=mentions)
1734 return rst(source, mentions=mentions)
1745 if renderer == 'markdown':
1735 if renderer == 'markdown':
1746 return markdown(source, mentions=mentions)
1736 return markdown(source, mentions=mentions)
1747
1737
1748
1738
1749 def commit_status(repo, commit_id):
1739 def commit_status(repo, commit_id):
1750 return ChangesetStatusModel().get_status(repo, commit_id)
1740 return ChangesetStatusModel().get_status(repo, commit_id)
1751
1741
1752
1742
1753 def commit_status_lbl(commit_status):
1743 def commit_status_lbl(commit_status):
1754 return dict(ChangesetStatus.STATUSES).get(commit_status)
1744 return dict(ChangesetStatus.STATUSES).get(commit_status)
1755
1745
1756
1746
1757 def commit_time(repo_name, commit_id):
1747 def commit_time(repo_name, commit_id):
1758 repo = Repository.get_by_repo_name(repo_name)
1748 repo = Repository.get_by_repo_name(repo_name)
1759 commit = repo.get_commit(commit_id=commit_id)
1749 commit = repo.get_commit(commit_id=commit_id)
1760 return commit.date
1750 return commit.date
1761
1751
1762
1752
1763 def get_permission_name(key):
1753 def get_permission_name(key):
1764 return dict(Permission.PERMS).get(key)
1754 return dict(Permission.PERMS).get(key)
1765
1755
1766
1756
1767 def journal_filter_help():
1757 def journal_filter_help():
1768 return _(
1758 return _(
1769 'Example filter terms:\n' +
1759 'Example filter terms:\n' +
1770 ' repository:vcs\n' +
1760 ' repository:vcs\n' +
1771 ' username:marcin\n' +
1761 ' username:marcin\n' +
1772 ' action:*push*\n' +
1762 ' action:*push*\n' +
1773 ' ip:127.0.0.1\n' +
1763 ' ip:127.0.0.1\n' +
1774 ' date:20120101\n' +
1764 ' date:20120101\n' +
1775 ' date:[20120101100000 TO 20120102]\n' +
1765 ' date:[20120101100000 TO 20120102]\n' +
1776 '\n' +
1766 '\n' +
1777 'Generate wildcards using \'*\' character:\n' +
1767 'Generate wildcards using \'*\' character:\n' +
1778 ' "repository:vcs*" - search everything starting with \'vcs\'\n' +
1768 ' "repository:vcs*" - search everything starting with \'vcs\'\n' +
1779 ' "repository:*vcs*" - search for repository containing \'vcs\'\n' +
1769 ' "repository:*vcs*" - search for repository containing \'vcs\'\n' +
1780 '\n' +
1770 '\n' +
1781 'Optional AND / OR operators in queries\n' +
1771 'Optional AND / OR operators in queries\n' +
1782 ' "repository:vcs OR repository:test"\n' +
1772 ' "repository:vcs OR repository:test"\n' +
1783 ' "username:test AND repository:test*"\n'
1773 ' "username:test AND repository:test*"\n'
1784 )
1774 )
1785
1775
1786
1776
1787 def not_mapped_error(repo_name):
1777 def not_mapped_error(repo_name):
1788 flash(_('%s repository is not mapped to db perhaps'
1778 flash(_('%s repository is not mapped to db perhaps'
1789 ' it was created or renamed from the filesystem'
1779 ' it was created or renamed from the filesystem'
1790 ' please run the application again'
1780 ' please run the application again'
1791 ' in order to rescan repositories') % repo_name, category='error')
1781 ' in order to rescan repositories') % repo_name, category='error')
1792
1782
1793
1783
1794 def ip_range(ip_addr):
1784 def ip_range(ip_addr):
1795 from rhodecode.model.db import UserIpMap
1785 from rhodecode.model.db import UserIpMap
1796 s, e = UserIpMap._get_ip_range(ip_addr)
1786 s, e = UserIpMap._get_ip_range(ip_addr)
1797 return '%s - %s' % (s, e)
1787 return '%s - %s' % (s, e)
1798
1788
1799
1789
1800 def form(url, method='post', needs_csrf_token=True, **attrs):
1790 def form(url, method='post', needs_csrf_token=True, **attrs):
1801 """Wrapper around webhelpers.tags.form to prevent CSRF attacks."""
1791 """Wrapper around webhelpers.tags.form to prevent CSRF attacks."""
1802 if method.lower() != 'get' and needs_csrf_token:
1792 if method.lower() != 'get' and needs_csrf_token:
1803 raise Exception(
1793 raise Exception(
1804 'Forms to POST/PUT/DELETE endpoints should have (in general) a ' +
1794 'Forms to POST/PUT/DELETE endpoints should have (in general) a ' +
1805 'CSRF token. If the endpoint does not require such token you can ' +
1795 'CSRF token. If the endpoint does not require such token you can ' +
1806 'explicitly set the parameter needs_csrf_token to false.')
1796 'explicitly set the parameter needs_csrf_token to false.')
1807
1797
1808 return wh_form(url, method=method, **attrs)
1798 return wh_form(url, method=method, **attrs)
1809
1799
1810
1800
1811 def secure_form(url, method="POST", multipart=False, **attrs):
1801 def secure_form(url, method="POST", multipart=False, **attrs):
1812 """Start a form tag that points the action to a URL. This
1802 """Start a form tag that points the action to a URL. This
1813 form tag will also include the hidden field containing
1803 form tag will also include the hidden field containing
1814 the auth token.
1804 the auth token.
1815
1805
1816 The url options should be given either as a string, or as a
1806 The url options should be given either as a string, or as a
1817 ``url()`` function. The method for the form defaults to POST.
1807 ``url()`` function. The method for the form defaults to POST.
1818
1808
1819 Options:
1809 Options:
1820
1810
1821 ``multipart``
1811 ``multipart``
1822 If set to True, the enctype is set to "multipart/form-data".
1812 If set to True, the enctype is set to "multipart/form-data".
1823 ``method``
1813 ``method``
1824 The method to use when submitting the form, usually either
1814 The method to use when submitting the form, usually either
1825 "GET" or "POST". If "PUT", "DELETE", or another verb is used, a
1815 "GET" or "POST". If "PUT", "DELETE", or another verb is used, a
1826 hidden input with name _method is added to simulate the verb
1816 hidden input with name _method is added to simulate the verb
1827 over POST.
1817 over POST.
1828
1818
1829 """
1819 """
1830 from webhelpers.pylonslib.secure_form import insecure_form
1820 from webhelpers.pylonslib.secure_form import insecure_form
1831 from rhodecode.lib.auth import get_csrf_token, csrf_token_key
1821 from rhodecode.lib.auth import get_csrf_token, csrf_token_key
1832 form = insecure_form(url, method, multipart, **attrs)
1822 form = insecure_form(url, method, multipart, **attrs)
1833 token = HTML.div(hidden(csrf_token_key, get_csrf_token()), style="display: none;")
1823 token = HTML.div(hidden(csrf_token_key, get_csrf_token()), style="display: none;")
1834 return literal("%s\n%s" % (form, token))
1824 return literal("%s\n%s" % (form, token))
1835
1825
1836 def dropdownmenu(name, selected, options, enable_filter=False, **attrs):
1826 def dropdownmenu(name, selected, options, enable_filter=False, **attrs):
1837 select_html = select(name, selected, options, **attrs)
1827 select_html = select(name, selected, options, **attrs)
1838 select2 = """
1828 select2 = """
1839 <script>
1829 <script>
1840 $(document).ready(function() {
1830 $(document).ready(function() {
1841 $('#%s').select2({
1831 $('#%s').select2({
1842 containerCssClass: 'drop-menu',
1832 containerCssClass: 'drop-menu',
1843 dropdownCssClass: 'drop-menu-dropdown',
1833 dropdownCssClass: 'drop-menu-dropdown',
1844 dropdownAutoWidth: true%s
1834 dropdownAutoWidth: true%s
1845 });
1835 });
1846 });
1836 });
1847 </script>
1837 </script>
1848 """
1838 """
1849 filter_option = """,
1839 filter_option = """,
1850 minimumResultsForSearch: -1
1840 minimumResultsForSearch: -1
1851 """
1841 """
1852 input_id = attrs.get('id') or name
1842 input_id = attrs.get('id') or name
1853 filter_enabled = "" if enable_filter else filter_option
1843 filter_enabled = "" if enable_filter else filter_option
1854 select_script = literal(select2 % (input_id, filter_enabled))
1844 select_script = literal(select2 % (input_id, filter_enabled))
1855
1845
1856 return literal(select_html+select_script)
1846 return literal(select_html+select_script)
1857
1847
1858
1848
1859 def get_visual_attr(tmpl_context_var, attr_name):
1849 def get_visual_attr(tmpl_context_var, attr_name):
1860 """
1850 """
1861 A safe way to get an attribute from the `visual` variable of the template context
1851 A safe way to get an attribute from the `visual` variable of the template context
1862
1852
1863 :param tmpl_context_var: instance of tmpl_context, usually present as `c`
1853 :param tmpl_context_var: instance of tmpl_context, usually present as `c`
1864 :param attr_name: name of the attribute we fetch from the c.visual
1854 :param attr_name: name of the attribute we fetch from the c.visual
1865 """
1855 """
1866 visual = getattr(tmpl_context_var, 'visual', None)
1856 visual = getattr(tmpl_context_var, 'visual', None)
1867 if not visual:
1857 if not visual:
1868 return
1858 return
1869 else:
1859 else:
1870 return getattr(visual, attr_name, None)
1860 return getattr(visual, attr_name, None)
1871
1861
1872
1862
1873 def get_last_path_part(file_node):
1863 def get_last_path_part(file_node):
1874 if not file_node.path:
1864 if not file_node.path:
1875 return u''
1865 return u''
1876
1866
1877 path = safe_unicode(file_node.path.split('/')[-1])
1867 path = safe_unicode(file_node.path.split('/')[-1])
1878 return u'../' + path
1868 return u'../' + path
1879
1869
1880
1870
1881 def route_path(*args, **kwds):
1871 def route_path(*args, **kwds):
1882 """
1872 """
1883 Wrapper around pyramid's `route_path` function. It is used to generate
1873 Wrapper around pyramid's `route_path` function. It is used to generate
1884 URLs from within pylons views or templates. This will be removed when
1874 URLs from within pylons views or templates. This will be removed when
1885 the pyramid migration is finished.
1875 the pyramid migration is finished.
1886 """
1876 """
1887 req = get_current_request()
1877 req = get_current_request()
1888 return req.route_path(*args, **kwds)
1878 return req.route_path(*args, **kwds)
1889
1879
1890
1880
1891 def resource_path(*args, **kwds):
1881 def resource_path(*args, **kwds):
1892 """
1882 """
1893 Wrapper around pyramid's `resource_path` function. It is used to generate
1883 Wrapper around pyramid's `resource_path` function. It is used to generate
1894 URLs from within pylons views or templates. This will be removed when
1884 URLs from within pylons views or templates. This will be removed when
1895 the pyramid migration is finished.
1885 the pyramid migration is finished.
1896 """
1886 """
1897 req = get_current_request()
1887 req = get_current_request()
1898 return req.resource_path(*args, **kwds)
1888 return req.resource_path(*args, **kwds)
@@ -1,231 +1,305 b''
1 # -*- coding: utf-8 -*-
1 # -*- coding: utf-8 -*-
2
2
3 # Copyright (C) 2011-2016 RhodeCode GmbH
3 # Copyright (C) 2011-2016 RhodeCode GmbH
4 #
4 #
5 # This program is free software: you can redistribute it and/or modify
5 # This program is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU Affero General Public License, version 3
6 # it under the terms of the GNU Affero General Public License, version 3
7 # (only), as published by the Free Software Foundation.
7 # (only), as published by the Free Software Foundation.
8 #
8 #
9 # This program is distributed in the hope that it will be useful,
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU General Public License for more details.
12 # GNU General Public License for more details.
13 #
13 #
14 # You should have received a copy of the GNU Affero General Public License
14 # You should have received a copy of the GNU Affero General Public License
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 #
16 #
17 # This program is dual-licensed. If you wish to learn more about the
17 # This program is dual-licensed. If you wish to learn more about the
18 # RhodeCode Enterprise Edition, including its added features, Support services,
18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 # and proprietary license terms, please see https://rhodecode.com/licenses/
19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20
20
21
21
22 """
22 """
23 Renderer for markup languages with ability to parse using rst or markdown
23 Renderer for markup languages with ability to parse using rst or markdown
24 """
24 """
25
25
26
27 import re
26 import re
28 import os
27 import os
29 import logging
28 import logging
29 import itertools
30
30 from mako.lookup import TemplateLookup
31 from mako.lookup import TemplateLookup
31
32
32 from docutils.core import publish_parts
33 from docutils.core import publish_parts
33 from docutils.parsers.rst import directives
34 from docutils.parsers.rst import directives
34 import markdown
35 import markdown
35
36
36 from rhodecode.lib.markdown_ext import (
37 from rhodecode.lib.markdown_ext import (
37 UrlizeExtension, GithubFlavoredMarkdownExtension)
38 UrlizeExtension, GithubFlavoredMarkdownExtension)
38 from rhodecode.lib.utils2 import safe_unicode, md5_safe, MENTIONS_REGEX
39 from rhodecode.lib.utils2 import safe_unicode, md5_safe, MENTIONS_REGEX
39
40
40 log = logging.getLogger(__name__)
41 log = logging.getLogger(__name__)
41
42
42 # default renderer used to generate automated comments
43 # default renderer used to generate automated comments
43 DEFAULT_COMMENTS_RENDERER = 'rst'
44 DEFAULT_COMMENTS_RENDERER = 'rst'
44
45
45
46
46 class MarkupRenderer(object):
47 class MarkupRenderer(object):
47 RESTRUCTUREDTEXT_DISALLOWED_DIRECTIVES = ['include', 'meta', 'raw']
48 RESTRUCTUREDTEXT_DISALLOWED_DIRECTIVES = ['include', 'meta', 'raw']
48
49
49 MARKDOWN_PAT = re.compile(r'\.(md|mkdn?|mdown|markdown)$', re.IGNORECASE)
50 MARKDOWN_PAT = re.compile(r'\.(md|mkdn?|mdown|markdown)$', re.IGNORECASE)
50 RST_PAT = re.compile(r'\.re?st$', re.IGNORECASE)
51 RST_PAT = re.compile(r'\.re?st$', re.IGNORECASE)
51 PLAIN_PAT = re.compile(r'^readme$', re.IGNORECASE)
52 PLAIN_PAT = re.compile(r'^readme$', re.IGNORECASE)
52
53
54 # list of readme files to search in file tree and display in summary
55 # attached weights define the search order; lower is first
56 ALL_READMES = [
57 ('readme', 0), ('README', 0), ('Readme', 0),
58 ('doc/readme', 1), ('doc/README', 1), ('doc/Readme', 1),
59 ('Docs/readme', 2), ('Docs/README', 2), ('Docs/Readme', 2),
60 ('DOCS/readme', 2), ('DOCS/README', 2), ('DOCS/Readme', 2),
61 ('docs/readme', 2), ('docs/README', 2), ('docs/Readme', 2),
62 ]
63 # extensions together with weights; lower comes first. This controls how
64 # extensions are attached to the readme names above.
65 PLAIN_EXTS = [
66 ('', 0), # special case that renders README names without an extension
67 ('.text', 2), ('.TEXT', 2),
68 ('.txt', 3), ('.TXT', 3)
69 ]
70
71 RST_EXTS = [
72 ('.rst', 1), ('.rest', 1),
73 ('.RST', 2), ('.REST', 2)
74 ]
75
76 MARKDOWN_EXTS = [
77 ('.md', 1), ('.MD', 1),
78 ('.mkdn', 2), ('.MKDN', 2),
79 ('.mdown', 3), ('.MDOWN', 3),
80 ('.markdown', 4), ('.MARKDOWN', 4)
81 ]
82
83 ALL_EXTS = PLAIN_EXTS + MARKDOWN_EXTS + RST_EXTS
84
53 def _detect_renderer(self, source, filename=None):
85 def _detect_renderer(self, source, filename=None):
54 """
86 """
55 Runs detection of which renderer should be used for generating HTML
87 Runs detection of which renderer should be used for generating HTML
56 from a markup language.
88 from a markup language.
57
89
58 The filename can also explicitly be a renderer name.
90 The filename can also explicitly be a renderer name.
59
91
60 :param source:
92 :param source:
61 :param filename:
93 :param filename:
62 """
94 """
63
95
64 if MarkupRenderer.MARKDOWN_PAT.findall(filename):
96 if MarkupRenderer.MARKDOWN_PAT.findall(filename):
65 detected_renderer = 'markdown'
97 detected_renderer = 'markdown'
66 elif MarkupRenderer.RST_PAT.findall(filename):
98 elif MarkupRenderer.RST_PAT.findall(filename):
67 detected_renderer = 'rst'
99 detected_renderer = 'rst'
68 elif MarkupRenderer.PLAIN_PAT.findall(filename):
100 elif MarkupRenderer.PLAIN_PAT.findall(filename):
69 detected_renderer = 'rst'
101 detected_renderer = 'rst'
70 else:
102 else:
71 detected_renderer = 'plain'
103 detected_renderer = 'plain'
72
104
73 return getattr(MarkupRenderer, detected_renderer)
105 return getattr(MarkupRenderer, detected_renderer)
74
106
107 @classmethod
108 def renderer_from_filename(cls, filename, exclude):
109 """
110 Detect the renderer from a filename and optionally use the exclude list
111 to remove some options. This is mostly used in helpers.
112 """
113 def _filter(elements):
114 if isinstance(exclude, (list, tuple)):
115 return [x for x in elements if x not in exclude]
116 return elements
117
118 if filename.endswith(
119 tuple(_filter([x[0] for x in cls.MARKDOWN_EXTS if x[0]]))):
120 return 'markdown'
121 if filename.endswith(tuple(_filter([x[0] for x in cls.RST_EXTS if x[0]]))):
122 return 'rst'
123
124 return 'plain'
125
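Assuming a RhodeCode environment where this module is importable, the detection above behaves roughly as follows (the file names are invented):

# Hypothetical calls against the classmethod above.
from rhodecode.lib.markup_renderer import MarkupRenderer

MarkupRenderer.renderer_from_filename('README.md', exclude=None)       # 'markdown'
MarkupRenderer.renderer_from_filename('docs/index.rst', exclude=None)  # 'rst'
MarkupRenderer.renderer_from_filename('notes.txt', exclude=None)       # 'plain'
# excluding '.md' drops it from consideration, so detection falls through
MarkupRenderer.renderer_from_filename('README.md', exclude=['.md'])    # 'plain'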
126 @classmethod
127 def generate_readmes(cls, all_readmes, extensions):
128 combined = itertools.product(all_readmes, extensions)
129 # sort by filename weight(y[0][1]) + extensions weight(y[1][1])
130 prioritized_readmes = sorted(combined, key=lambda y: y[0][1] + y[1][1])
131 # filename, extension
132 return [''.join([x[0][0], x[1][0]]) for x in prioritized_readmes]
133
134 def pick_readme_order(self, default_renderer):
135
136 if default_renderer == 'markdown':
137 markdown = self.generate_readmes(self.ALL_READMES, self.MARKDOWN_EXTS)
138 readme_order = markdown + self.generate_readmes(
139 self.ALL_READMES, self.RST_EXTS + self.PLAIN_EXTS)
140 elif default_renderer == 'rst':
141 markdown = self.generate_readmes(self.ALL_READMES, self.RST_EXTS)
142 readme_order = markdown + self.generate_readmes(
143 self.ALL_READMES, self.MARKDOWN_EXTS + self.PLAIN_EXTS)
144 else:
145 readme_order = self.generate_readmes(self.ALL_READMES, self.ALL_EXTS)
146
147 return readme_order
148
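The ordering logic above is the core of the #4050 fix: generate_readmes sorts the filename/extension product by the sum of both weights, and pick_readme_order puts the extensions of the repository's default renderer first (note the local variable is named markdown in both branches; only the extension priority differs). A hedged example, mirroring the parametrized test further down:

# Assumes a RhodeCode environment where this module is importable.
from rhodecode.lib.markup_renderer import MarkupRenderer

# Combined weights decide the order: ('readme1', 0) + ('.txt', 1) -> weight 1.
MarkupRenderer.generate_readmes(
    [('readme1', 0), ('text1', 1)], [('.ext', 0), ('.txt', 1)])
# -> ['readme1.ext', 'readme1.txt', 'text1.ext', 'text1.txt']

# With 'markdown' as the default renderer, markdown extensions are tried first.
order = MarkupRenderer().pick_readme_order('markdown')
print(order[:4])   # ['readme.md', 'readme.MD', 'README.md', 'README.MD']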
75 def render(self, source, filename=None):
149 def render(self, source, filename=None):
76 """
150 """
77 Renders the given source using the detected renderer.
151 Renders the given source using the detected renderer.
78 Renderers are detected based on file extension or mimetype.
152 Renderers are detected based on file extension or mimetype.
79 As a last resort it falls back to simple HTML, replacing new lines with <br/>
153 As a last resort it falls back to simple HTML, replacing new lines with <br/>
80
154
81 :param filename:
155 :param filename:
82 :param source:
156 :param source:
83 """
157 """
84
158
85 renderer = self._detect_renderer(source, filename)
159 renderer = self._detect_renderer(source, filename)
86 readme_data = renderer(source)
160 readme_data = renderer(source)
87 return readme_data
161 return readme_data
88
162
89 @classmethod
163 @classmethod
90 def _flavored_markdown(cls, text):
164 def _flavored_markdown(cls, text):
91 """
165 """
92 Github style flavored markdown
166 Github style flavored markdown
93
167
94 :param text:
168 :param text:
95 """
169 """
96
170
97 # Extract pre blocks.
171 # Extract pre blocks.
98 extractions = {}
172 extractions = {}
99
173
100 def pre_extraction_callback(matchobj):
174 def pre_extraction_callback(matchobj):
101 digest = md5_safe(matchobj.group(0))
175 digest = md5_safe(matchobj.group(0))
102 extractions[digest] = matchobj.group(0)
176 extractions[digest] = matchobj.group(0)
103 return "{gfm-extraction-%s}" % digest
177 return "{gfm-extraction-%s}" % digest
104 pattern = re.compile(r'<pre>.*?</pre>', re.MULTILINE | re.DOTALL)
178 pattern = re.compile(r'<pre>.*?</pre>', re.MULTILINE | re.DOTALL)
105 text = re.sub(pattern, pre_extraction_callback, text)
179 text = re.sub(pattern, pre_extraction_callback, text)
106
180
107 # Prevent foo_bar_baz from ending up with an italic word in the middle.
181 # Prevent foo_bar_baz from ending up with an italic word in the middle.
108 def italic_callback(matchobj):
182 def italic_callback(matchobj):
109 s = matchobj.group(0)
183 s = matchobj.group(0)
110 if list(s).count('_') >= 2:
184 if list(s).count('_') >= 2:
111 return s.replace('_', r'\_')
185 return s.replace('_', r'\_')
112 return s
186 return s
113 text = re.sub(r'^(?! {4}|\t)\w+_\w+_\w[\w_]*', italic_callback, text)
187 text = re.sub(r'^(?! {4}|\t)\w+_\w+_\w[\w_]*', italic_callback, text)
114
188
115 # Insert pre block extractions.
189 # Insert pre block extractions.
116 def pre_insert_callback(matchobj):
190 def pre_insert_callback(matchobj):
117 return '\n\n' + extractions[matchobj.group(1)]
191 return '\n\n' + extractions[matchobj.group(1)]
118 text = re.sub(r'\{gfm-extraction-([0-9a-f]{32})\}',
192 text = re.sub(r'\{gfm-extraction-([0-9a-f]{32})\}',
119 pre_insert_callback, text)
193 pre_insert_callback, text)
120
194
121 return text
195 return text
122
196
123 @classmethod
197 @classmethod
124 def urlify_text(cls, text):
198 def urlify_text(cls, text):
125 url_pat = re.compile(r'(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]'
199 url_pat = re.compile(r'(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]'
126 r'|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)')
200 r'|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)')
127
201
128 def url_func(match_obj):
202 def url_func(match_obj):
129 url_full = match_obj.groups()[0]
203 url_full = match_obj.groups()[0]
130 return '<a href="%(url)s">%(url)s</a>' % ({'url': url_full})
204 return '<a href="%(url)s">%(url)s</a>' % ({'url': url_full})
131
205
132 return url_pat.sub(url_func, text)
206 return url_pat.sub(url_func, text)
133
207
134 @classmethod
208 @classmethod
135 def plain(cls, source, universal_newline=True):
209 def plain(cls, source, universal_newline=True):
136 source = safe_unicode(source)
210 source = safe_unicode(source)
137 if universal_newline:
211 if universal_newline:
138 newline = '\n'
212 newline = '\n'
139 source = newline.join(source.splitlines())
213 source = newline.join(source.splitlines())
140
214
141 source = cls.urlify_text(source)
215 source = cls.urlify_text(source)
142 return '<br />' + source.replace("\n", '<br />')
216 return '<br />' + source.replace("\n", '<br />')
143
217
144 @classmethod
218 @classmethod
145 def markdown(cls, source, safe=True, flavored=True, mentions=False):
219 def markdown(cls, source, safe=True, flavored=True, mentions=False):
146 # Inline HTML is not allowed. When HTML tags are present, they are
220 # Inline HTML is not allowed. When HTML tags are present, they are
147 # replaced with [HTML_REMOVED] instead. This is controlled by
221 # replaced with [HTML_REMOVED] instead. This is controlled by
148 # the safe_mode=True parameter of the markdown method.
222 # the safe_mode=True parameter of the markdown method.
149 extensions = ['codehilite', 'extra', 'def_list', 'sane_lists']
223 extensions = ['codehilite', 'extra', 'def_list', 'sane_lists']
150 if flavored:
224 if flavored:
151 extensions.append(GithubFlavoredMarkdownExtension())
225 extensions.append(GithubFlavoredMarkdownExtension())
152
226
153 if mentions:
227 if mentions:
154 mention_pat = re.compile(MENTIONS_REGEX)
228 mention_pat = re.compile(MENTIONS_REGEX)
155
229
156 def wrapp(match_obj):
230 def wrapp(match_obj):
157 uname = match_obj.groups()[0]
231 uname = match_obj.groups()[0]
158 return ' **@%(uname)s** ' % {'uname': uname}
232 return ' **@%(uname)s** ' % {'uname': uname}
159 mention_hl = mention_pat.sub(wrapp, source).strip()
233 mention_hl = mention_pat.sub(wrapp, source).strip()
160 # mentions were wrapped above; render again with mentions=False
234 # mentions were wrapped above; render again with mentions=False
161 return cls.markdown(mention_hl, safe=safe, flavored=flavored,
235 return cls.markdown(mention_hl, safe=safe, flavored=flavored,
162 mentions=False)
236 mentions=False)
163
237
164 source = safe_unicode(source)
238 source = safe_unicode(source)
165 try:
239 try:
166 if flavored:
240 if flavored:
167 source = cls._flavored_markdown(source)
241 source = cls._flavored_markdown(source)
168 return markdown.markdown(
242 return markdown.markdown(
169 source, extensions, safe_mode=True, enable_attributes=False)
243 source, extensions, safe_mode=True, enable_attributes=False)
170 except Exception:
244 except Exception:
171 log.exception('Error when rendering Markdown')
245 log.exception('Error when rendering Markdown')
172 if safe:
246 if safe:
173 log.debug('Fallback to render in plain mode')
247 log.debug('Fallback to render in plain mode')
174 return cls.plain(source)
248 return cls.plain(source)
175 else:
249 else:
176 raise
250 raise
177
251
178 @classmethod
252 @classmethod
179 def rst(cls, source, safe=True, mentions=False):
253 def rst(cls, source, safe=True, mentions=False):
180 if mentions:
254 if mentions:
181 mention_pat = re.compile(MENTIONS_REGEX)
255 mention_pat = re.compile(MENTIONS_REGEX)
182
256
183 def wrapp(match_obj):
257 def wrapp(match_obj):
184 uname = match_obj.groups()[0]
258 uname = match_obj.groups()[0]
185 return ' **@%(uname)s** ' % {'uname': uname}
259 return ' **@%(uname)s** ' % {'uname': uname}
186 mention_hl = mention_pat.sub(wrapp, source).strip()
260 mention_hl = mention_pat.sub(wrapp, source).strip()
187 # mentions were wrapped above; render again with mentions=False
261 # mentions were wrapped above; render again with mentions=False
188 return cls.rst(mention_hl, safe=safe, mentions=False)
262 return cls.rst(mention_hl, safe=safe, mentions=False)
189
263
190 source = safe_unicode(source)
264 source = safe_unicode(source)
191 try:
265 try:
192 docutils_settings = dict(
266 docutils_settings = dict(
193 [(alias, None) for alias in
267 [(alias, None) for alias in
194 cls.RESTRUCTUREDTEXT_DISALLOWED_DIRECTIVES])
268 cls.RESTRUCTUREDTEXT_DISALLOWED_DIRECTIVES])
195
269
196 docutils_settings.update({'input_encoding': 'unicode',
270 docutils_settings.update({'input_encoding': 'unicode',
197 'report_level': 4})
271 'report_level': 4})
198
272
199 for k, v in docutils_settings.iteritems():
273 for k, v in docutils_settings.iteritems():
200 directives.register_directive(k, v)
274 directives.register_directive(k, v)
201
275
202 parts = publish_parts(source=source,
276 parts = publish_parts(source=source,
203 writer_name="html4css1",
277 writer_name="html4css1",
204 settings_overrides=docutils_settings)
278 settings_overrides=docutils_settings)
205
279
206 return parts['html_title'] + parts["fragment"]
280 return parts['html_title'] + parts["fragment"]
207 except Exception:
281 except Exception:
208 log.exception('Error when rendering RST')
282 log.exception('Error when rendering RST')
209 if safe:
283 if safe:
210 log.debug('Falling back to render in plain mode')
284 log.debug('Falling back to render in plain mode')
211 return cls.plain(source)
285 return cls.plain(source)
212 else:
286 else:
213 raise
287 raise
214
288
215
289
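A short usage sketch of the renderer (it assumes a working RhodeCode checkout on the Python path); with safe=True both renderers fall back to plain() on errors, as implemented above:

# Hypothetical rendering calls; the inputs are invented.
from rhodecode.lib.markup_renderer import MarkupRenderer

md_html = MarkupRenderer.markdown(u'# Title\n\nping @marcin', mentions=True)
rst_html = MarkupRenderer.rst(u'`RhodeCode <https://rhodecode.com>`_')
plain_html = MarkupRenderer.plain(u'see https://rhodecode.com')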
216 class RstTemplateRenderer(object):
290 class RstTemplateRenderer(object):
217
291
218 def __init__(self):
292 def __init__(self):
219 base = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
293 base = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
220 rst_template_dirs = [os.path.join(base, 'templates', 'rst_templates')]
294 rst_template_dirs = [os.path.join(base, 'templates', 'rst_templates')]
221 self.template_store = TemplateLookup(
295 self.template_store = TemplateLookup(
222 directories=rst_template_dirs,
296 directories=rst_template_dirs,
223 input_encoding='utf-8',
297 input_encoding='utf-8',
224 imports=['from rhodecode.lib import helpers as h'])
298 imports=['from rhodecode.lib import helpers as h'])
225
299
226 def _get_template(self, templatename):
300 def _get_template(self, templatename):
227 return self.template_store.get_template(templatename)
301 return self.template_store.get_template(templatename)
228
302
229 def render(self, template_name, **kwargs):
303 def render(self, template_name, **kwargs):
230 template = self._get_template(template_name)
304 template = self._get_template(template_name)
231 return template.render(**kwargs)
305 return template.render(**kwargs)
@@ -1,179 +1,213 b''
1 # -*- coding: utf-8 -*-
1 # -*- coding: utf-8 -*-
2
2
3 # Copyright (C) 2010-2016 RhodeCode GmbH
3 # Copyright (C) 2010-2016 RhodeCode GmbH
4 #
4 #
5 # This program is free software: you can redistribute it and/or modify
5 # This program is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU Affero General Public License, version 3
6 # it under the terms of the GNU Affero General Public License, version 3
7 # (only), as published by the Free Software Foundation.
7 # (only), as published by the Free Software Foundation.
8 #
8 #
9 # This program is distributed in the hope that it will be useful,
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU General Public License for more details.
12 # GNU General Public License for more details.
13 #
13 #
14 # You should have received a copy of the GNU Affero General Public License
14 # You should have received a copy of the GNU Affero General Public License
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 #
16 #
17 # This program is dual-licensed. If you wish to learn more about the
17 # This program is dual-licensed. If you wish to learn more about the
18 # RhodeCode Enterprise Edition, including its added features, Support services,
18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 # and proprietary license terms, please see https://rhodecode.com/licenses/
19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20
20
21 import pytest
21 import pytest
22
22
23 from rhodecode.lib.markup_renderer import MarkupRenderer, RstTemplateRenderer
23 from rhodecode.lib.markup_renderer import MarkupRenderer, RstTemplateRenderer
24
24
25
25
26 @pytest.mark.parametrize(
26 @pytest.mark.parametrize(
27 "filename, expected_renderer",
27 "filename, expected_renderer",
28 [
28 [
29 ('readme.md', 'markdown'),
29 ('readme.md', 'markdown'),
30 ('readme.Md', 'markdown'),
30 ('readme.Md', 'markdown'),
31 ('readme.MdoWn', 'markdown'),
31 ('readme.MdoWn', 'markdown'),
32 ('readme.rst', 'rst'),
32 ('readme.rst', 'rst'),
33 ('readme.Rst', 'rst'),
33 ('readme.Rst', 'rst'),
34 ('readme.rest', 'rst'),
34 ('readme.rest', 'rst'),
35 ('readme.rest', 'rst'),
35 ('readme.rest', 'rst'),
36 ('readme', 'rst'),
36 ('readme', 'rst'),
37 ('README', 'rst'),
37 ('README', 'rst'),
38
38
39 ('markdown.xml', 'plain'),
39 ('markdown.xml', 'plain'),
40 ('rest.xml', 'plain'),
40 ('rest.xml', 'plain'),
41 ('readme.xml', 'plain'),
41 ('readme.xml', 'plain'),
42
42
43 ('readme.mdx', 'plain'),
43 ('readme.mdx', 'plain'),
44 ('readme.rstx', 'plain'),
44 ('readme.rstx', 'plain'),
45 ('readmex', 'plain'),
45 ('readmex', 'plain'),
46 ])
46 ])
47 def test_detect_renderer(filename, expected_renderer):
47 def test_detect_renderer(filename, expected_renderer):
48 detected_renderer = MarkupRenderer()._detect_renderer(
48 detected_renderer = MarkupRenderer()._detect_renderer(
49 '', filename=filename).__name__
49 '', filename=filename).__name__
50 assert expected_renderer == detected_renderer
50 assert expected_renderer == detected_renderer
51
51
52
52
53 def test_markdown_xss_link():
53 def test_markdown_xss_link():
54 xss_md = "[link](javascript:alert('XSS: pwned!'))"
54 xss_md = "[link](javascript:alert('XSS: pwned!'))"
55 rendered_html = MarkupRenderer.markdown(xss_md)
55 rendered_html = MarkupRenderer.markdown(xss_md)
56 assert 'href="javascript:alert(\'XSS: pwned!\')"' not in rendered_html
56 assert 'href="javascript:alert(\'XSS: pwned!\')"' not in rendered_html
57
57
58
58
59 def test_markdown_xss_inline_html():
59 def test_markdown_xss_inline_html():
60 xss_md = '\n'.join([
60 xss_md = '\n'.join([
61 '> <a name="n"',
61 '> <a name="n"',
62 '> href="javascript:alert(\'XSS: pwned!\')">link</a>'])
62 '> href="javascript:alert(\'XSS: pwned!\')">link</a>'])
63 rendered_html = MarkupRenderer.markdown(xss_md)
63 rendered_html = MarkupRenderer.markdown(xss_md)
64 assert 'href="javascript:alert(\'XSS: pwned!\')">' not in rendered_html
64 assert 'href="javascript:alert(\'XSS: pwned!\')">' not in rendered_html
65
65
66
66
67 def test_markdown_inline_html():
67 def test_markdown_inline_html():
68 xss_md = '\n'.join(['> <a name="n"',
68 xss_md = '\n'.join(['> <a name="n"',
69 '> href="https://rhodecode.com">link</a>'])
69 '> href="https://rhodecode.com">link</a>'])
70 rendered_html = MarkupRenderer.markdown(xss_md)
70 rendered_html = MarkupRenderer.markdown(xss_md)
71 assert '[HTML_REMOVED]link[HTML_REMOVED]' in rendered_html
71 assert '[HTML_REMOVED]link[HTML_REMOVED]' in rendered_html
72
72
73
73
74 def test_rst_xss_link():
74 def test_rst_xss_link():
75 xss_rst = "`Link<javascript:alert('XSS: pwned!')>`_"
75 xss_rst = "`Link<javascript:alert('XSS: pwned!')>`_"
76 rendered_html = MarkupRenderer.rst(xss_rst)
76 rendered_html = MarkupRenderer.rst(xss_rst)
77 assert "href=javascript:alert('XSS: pwned!')" not in rendered_html
77 assert "href=javascript:alert('XSS: pwned!')" not in rendered_html
78
78
79
79
80 @pytest.mark.xfail(reason='Bug in docutils. Waiting answer from the author')
80 @pytest.mark.xfail(reason='Bug in docutils. Waiting answer from the author')
81 def test_rst_xss_inline_html():
81 def test_rst_xss_inline_html():
82 xss_rst = '<a href="javascript:alert(\'XSS: pwned!\')">link</a>'
82 xss_rst = '<a href="javascript:alert(\'XSS: pwned!\')">link</a>'
83 rendered_html = MarkupRenderer.rst(xss_rst)
83 rendered_html = MarkupRenderer.rst(xss_rst)
84 assert 'href="javascript:alert(' not in rendered_html
84 assert 'href="javascript:alert(' not in rendered_html
85
85
86
86
87 def test_rst_xss_raw_directive():
87 def test_rst_xss_raw_directive():
88 xss_rst = '\n'.join([
88 xss_rst = '\n'.join([
89 '.. raw:: html',
89 '.. raw:: html',
90 '',
90 '',
91 ' <a href="javascript:alert(\'XSS: pwned!\')">link</a>'])
91 ' <a href="javascript:alert(\'XSS: pwned!\')">link</a>'])
92 rendered_html = MarkupRenderer.rst(xss_rst)
92 rendered_html = MarkupRenderer.rst(xss_rst)
93 assert 'href="javascript:alert(' not in rendered_html
93 assert 'href="javascript:alert(' not in rendered_html
94
94
95
95
def test_render_rst_template_without_files():
    expected = u'''\
Auto status change to |under_review|

.. role:: added
.. role:: removed
.. parsed-literal::

Changed commits:
* :added:`2 added`
* :removed:`3 removed`

No file changes found

.. |under_review| replace:: *"NEW STATUS"*'''

    params = {
        'under_review_label': 'NEW STATUS',
        'added_commits': ['a', 'b'],
        'removed_commits': ['a', 'b', 'c'],
        'changed_files': [],
        'added_files': [],
        'modified_files': [],
        'removed_files': [],
    }
    renderer = RstTemplateRenderer()
    rendered = renderer.render('pull_request_update.mako', **params)
    assert expected == rendered


def test_render_rst_template_with_files():
    expected = u'''\
Auto status change to |under_review|

.. role:: added
.. role:: removed
.. parsed-literal::

Changed commits:
* :added:`1 added`
* :removed:`3 removed`

Changed files:
* `A /path/a.py <#a_c--68ed34923b68>`_
* `A /path/b.js <#a_c--64f90608b607>`_
* `M /path/d.js <#a_c--85842bf30c6e>`_
* `M /path/Δ™.py <#a_c--d713adf009cd>`_
* R /path/ΕΊ.py

.. |under_review| replace:: *"NEW STATUS"*'''

    added = ['/path/a.py', '/path/b.js']
    modified = ['/path/d.js', u'/path/Δ™.py']
    removed = [u'/path/ΕΊ.py']

    params = {
        'under_review_label': 'NEW STATUS',
        'added_commits': ['a'],
        'removed_commits': ['a', 'b', 'c'],
        'changed_files': added + modified + removed,
        'added_files': added,
        'modified_files': modified,
        'removed_files': removed,
    }
    renderer = RstTemplateRenderer()
    rendered = renderer.render('pull_request_update.mako', **params)

    assert expected == rendered


def test_render_rst_auto_status_template():
    expected = u'''\
Auto status change to |new_status|

.. |new_status| replace:: *"NEW STATUS"*'''

    params = {
        'new_status_label': 'NEW STATUS',
        'pull_request': None,
        'commit_id': None,
    }
    renderer = RstTemplateRenderer()
    rendered = renderer.render('auto_status_change.mako', **params)
    assert expected == rendered


@pytest.mark.parametrize(
    "readmes, exts, order",
    [
        ([], [], []),

        ([('readme1', 0), ('text1', 1)], [('.ext', 0), ('.txt', 1)],
         ['readme1.ext', 'readme1.txt', 'text1.ext', 'text1.txt']),

        ([('readme2', 0), ('text2', 1)], [('.ext', 2), ('.txt', 1)],
         ['readme2.txt', 'readme2.ext', 'text2.txt', 'text2.ext']),

        ([('readme3', 0), ('text3', 1)], [('.XXX', 1)],
         ['readme3.XXX', 'text3.XXX']),
    ])
def test_generate_readmes(readmes, exts, order):
    assert order == MarkupRenderer.generate_readmes(readmes, exts)
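# A minimal sketch (not part of the original test module) of an ordering that
# satisfies the parametrized cases above: every readme name is combined with
# every extension, sorted first by the readme weight and then by the extension
# weight. The real MarkupRenderer.generate_readmes() may be implemented differently.
def _generate_readmes_sketch(readmes, exts):
    import itertools
    combined = itertools.product(readmes, exts)
    ordered = sorted(combined, key=lambda pair: (pair[0][1], pair[1][1]))
    return [name + ext for (name, _), (ext, _) in ordered]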
@pytest.mark.parametrize(
    "renderer, expected_order",
    [
        ('plain', ['readme', 'README', 'Readme']),
        ('text', ['readme', 'README', 'Readme']),
        ('markdown', MarkupRenderer.generate_readmes(
            MarkupRenderer.ALL_READMES, MarkupRenderer.MARKDOWN_EXTS)),
        ('rst', MarkupRenderer.generate_readmes(
            MarkupRenderer.ALL_READMES, MarkupRenderer.RST_EXTS)),
    ])
def test_order_of_readme_generation(renderer, expected_order):
    mkd_renderer = MarkupRenderer()
    assert expected_order == mkd_renderer.pick_readme_order(
        renderer)[:len(expected_order)]
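# A heavily hedged sketch (not part of the original test module) of what
# pick_readme_order() could look like given the expectations above: markdown
# and rst renderers use their own extension weights, while plain/text
# renderers fall back to a list whose first entries are the bare readme names.
# The ALL_EXTS fallback (and its presence on MarkupRenderer) is an assumption,
# not the actual implementation; the test only checks a prefix of the result.
def _pick_readme_order_sketch(renderer):
    ext_map = {
        'markdown': MarkupRenderer.MARKDOWN_EXTS,
        'rst': MarkupRenderer.RST_EXTS,
    }
    exts = ext_map.get(renderer, MarkupRenderer.ALL_EXTS)
    return MarkupRenderer.generate_readmes(MarkupRenderer.ALL_READMES, exts)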