search: added basic example query block.
marcink - r1684:21d2623d default
@@ -1,111 +1,112 b''
# -*- coding: utf-8 -*-

# Copyright (C) 2010-2017 RhodeCode GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License, version 3
# (only), as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This program is dual-licensed. If you wish to learn more about the
# RhodeCode Enterprise Edition, including its added features, Support services,
# and proprietary license terms, please see https://rhodecode.com/licenses/

"""
Search controller for RhodeCode
"""

import logging
import urllib

from pylons import request, config, tmpl_context as c

from webhelpers.util import update_params

from rhodecode.lib.auth import LoginRequired, AuthUser
from rhodecode.lib.base import BaseRepoController, render
from rhodecode.lib.helpers import Page
from rhodecode.lib.utils2 import safe_str, safe_int
from rhodecode.lib.index import searcher_from_config
from rhodecode.model import validation_schema
from rhodecode.model.validation_schema.schemas import search_schema

log = logging.getLogger(__name__)


class SearchController(BaseRepoController):

    @LoginRequired()
    def index(self, repo_name=None):

        searcher = searcher_from_config(config)
        formatted_results = []
        execution_time = ''

        schema = search_schema.SearchParamsSchema()

        search_params = {}
        errors = []
        try:
            search_params = schema.deserialize(
                dict(search_query=request.GET.get('q'),
                     search_type=request.GET.get('type'),
                     search_sort=request.GET.get('sort'),
                     page_limit=request.GET.get('page_limit'),
                     requested_page=request.GET.get('page'))
            )
        except validation_schema.Invalid as e:
            errors = e.children

        def url_generator(**kw):
            q = urllib.quote(safe_str(search_query))
            return update_params(
                "?q=%s&type=%s" % (q, safe_str(search_type)), **kw)

        search_query = search_params.get('search_query')
        search_type = search_params.get('search_type')
        search_sort = search_params.get('search_sort')
        if search_params.get('search_query'):
            page_limit = search_params['page_limit']
            requested_page = search_params['requested_page']

            c.perm_user = AuthUser(user_id=c.rhodecode_user.user_id,
                                   ip_addr=self.ip_addr)

            try:
                search_result = searcher.search(
                    search_query, search_type, c.perm_user, repo_name,
                    requested_page, page_limit, search_sort)

                formatted_results = Page(
                    search_result['results'], page=requested_page,
                    item_count=search_result['count'],
                    items_per_page=page_limit, url=url_generator)
            finally:
                searcher.cleanup()

            if not search_result['error']:
                execution_time = '%s results (%.3f seconds)' % (
                    search_result['count'],
                    search_result['runtime'])
            elif not errors:
                node = schema['search_query']
                errors = [
                    validation_schema.Invalid(node, search_result['error'])]

        c.sort = search_sort
        c.url_generator = url_generator
        c.errors = errors
        c.formatted_results = formatted_results
        c.runtime = execution_time
        c.cur_query = search_query
        c.search_type = search_type
        c.searcher = searcher
        # Return a rendered template
        return render('/search/search.mako')
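
A minimal usage sketch (not part of the diff) of the schema step above, assuming
colander-style deserialize semantics for SearchParamsSchema; the parameter
values shown are illustrative, only the key names come from the controller code:

    schema = search_schema.SearchParamsSchema()
    search_params = schema.deserialize({
        'search_query': 'def index',   # comes from the ?q= GET parameter
        'search_type': 'content',      # ?type= (assumed value)
        'search_sort': 'newfirst',     # ?sort= (assumed value)
        'page_limit': 10,              # ?page_limit=
        'requested_page': 1,           # ?page=
    })
    # Invalid input raises validation_schema.Invalid, which the controller
    # collects into `errors` and renders alongside the results.
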
@@ -1,1987 +1,2005 b''
# -*- coding: utf-8 -*-

# Copyright (C) 2010-2017 RhodeCode GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License, version 3
# (only), as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This program is dual-licensed. If you wish to learn more about the
# RhodeCode Enterprise Edition, including its added features, Support services,
# and proprietary license terms, please see https://rhodecode.com/licenses/

"""
Helper functions

Consists of functions typically used within templates, but also available
to Controllers. This module is available to both as 'h'.
"""

import random
import hashlib
import StringIO
import urllib
import math
import logging
import re
import urlparse
import time
import string
import hashlib
import pygments
import itertools
import fnmatch

from datetime import datetime
from functools import partial
from pygments.formatters.html import HtmlFormatter
from pygments import highlight as code_highlight
from pygments.lexers import (
    get_lexer_by_name, get_lexer_for_filename, get_lexer_for_mimetype)
from pylons import url as pylons_url
from pylons.i18n.translation import _, ungettext
from pyramid.threadlocal import get_current_request

from webhelpers.html import literal, HTML, escape
from webhelpers.html.tools import *
from webhelpers.html.builder import make_tag
from webhelpers.html.tags import auto_discovery_link, checkbox, css_classes, \
    end_form, file, form as wh_form, hidden, image, javascript_link, link_to, \
    link_to_if, link_to_unless, ol, required_legend, select, stylesheet_link, \
    submit, text, password, textarea, title, ul, xml_declaration, radio
from webhelpers.html.tools import auto_link, button_to, highlight, \
    js_obfuscate, mail_to, strip_links, strip_tags, tag_re
from webhelpers.pylonslib import Flash as _Flash
from webhelpers.text import chop_at, collapse, convert_accented_entities, \
    convert_misc_entities, lchop, plural, rchop, remove_formatting, \
    replace_whitespace, urlify, truncate, wrap_paragraphs
from webhelpers.date import time_ago_in_words
from webhelpers.paginate import Page as _Page
from webhelpers.html.tags import _set_input_attrs, _set_id_attr, \
    convert_boolean_attrs, NotGiven, _make_safe_id_component
from webhelpers2.number import format_byte_size

from rhodecode.lib.action_parser import action_parser
from rhodecode.lib.ext_json import json
from rhodecode.lib.utils import repo_name_slug, get_custom_lexer
from rhodecode.lib.utils2 import str2bool, safe_unicode, safe_str, \
    get_commit_safe, datetime_to_time, time_to_datetime, time_to_utcdatetime, \
    AttributeDict, safe_int, md5, md5_safe
from rhodecode.lib.markup_renderer import MarkupRenderer, relative_links
from rhodecode.lib.vcs.exceptions import CommitDoesNotExistError
from rhodecode.lib.vcs.backends.base import BaseChangeset, EmptyCommit
from rhodecode.config.conf import DATE_FORMAT, DATETIME_FORMAT
from rhodecode.model.changeset_status import ChangesetStatusModel
from rhodecode.model.db import Permission, User, Repository
from rhodecode.model.repo_group import RepoGroupModel
from rhodecode.model.settings import IssueTrackerSettingsModel

log = logging.getLogger(__name__)


DEFAULT_USER = User.DEFAULT_USER
DEFAULT_USER_EMAIL = User.DEFAULT_USER_EMAIL


def url(*args, **kw):
    return pylons_url(*args, **kw)


def pylons_url_current(*args, **kw):
    """
    This function overrides pylons.url.current() which returns the current
    path so that it will also work from a pyramid only context. This
    should be removed once port to pyramid is complete.
    """
    if not args and not kw:
        request = get_current_request()
        return request.path
    return pylons_url.current(*args, **kw)

url.current = pylons_url_current


def url_replace(**qargs):
    """ Returns the current request url while replacing query string args """

    request = get_current_request()
    new_args = request.GET.mixed()
    new_args.update(qargs)
    return url('', **new_args)


def asset(path, ver=None, **kwargs):
    """
    Helper to generate a static asset file path for rhodecode assets

    eg. h.asset('images/image.png', ver='3923')

    :param path: path of asset
    :param ver: optional version query param to append as ?ver=
    """
    request = get_current_request()
    query = {}
    query.update(kwargs)
    if ver:
        query = {'ver': ver}
    return request.static_path(
        'rhodecode:public/{}'.format(path), _query=query)


default_html_escape_table = {
    ord('&'): u'&amp;',
    ord('<'): u'&lt;',
    ord('>'): u'&gt;',
    ord('"'): u'&quot;',
    ord("'"): u'&#39;',
}


def html_escape(text, html_escape_table=default_html_escape_table):
    """Produce entities within text."""
    return text.translate(html_escape_table)
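# Example (illustrative): with the default table,
#   html_escape(u'<b>"a" & b</b>') == u'&lt;b&gt;&quot;a&quot; &amp; b&lt;/b&gt;'
# Note the input should be unicode, since dict-based translate() only works on
# unicode strings in Python 2.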


def chop_at_smart(s, sub, inclusive=False, suffix_if_chopped=None):
    """
    Truncate string ``s`` at the first occurrence of ``sub``.

    If ``inclusive`` is true, truncate just after ``sub`` rather than at it.
    """
    suffix_if_chopped = suffix_if_chopped or ''
    pos = s.find(sub)
    if pos == -1:
        return s

    if inclusive:
        pos += len(sub)

    chopped = s[:pos]
    left = s[pos:].strip()

    if left and suffix_if_chopped:
        chopped += suffix_if_chopped

    return chopped
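# Illustrative behaviour (assumed inputs):
#   chop_at_smart('plain text; more text', ';')                          -> 'plain text'
#   chop_at_smart('plain text; more text', ';', suffix_if_chopped='...') -> 'plain text...'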


def shorter(text, size=20):
    postfix = '...'
    if len(text) > size:
        return text[:size - len(postfix)] + postfix
    return text


def _reset(name, value=None, id=NotGiven, type="reset", **attrs):
    """
    Reset button
    """
    _set_input_attrs(attrs, type, name, value)
    _set_id_attr(attrs, id, name)
    convert_boolean_attrs(attrs, ["disabled"])
    return HTML.input(**attrs)

reset = _reset
safeid = _make_safe_id_component


def branding(name, length=40):
    return truncate(name, length, indicator="")


def FID(raw_id, path):
    """
    Creates a unique ID for a filenode based on its commit hash and path;
    it's safe to use in URLs.

    :param raw_id:
    :param path:
    """

    return 'c-%s-%s' % (short_id(raw_id), md5_safe(path)[:12])
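# Illustrative shape of the result (arguments assumed):
#   FID('9c390eb52cd6abc123de', 'docs/index.rst')
#   -> 'c-9c390eb52cd6-' + md5_safe('docs/index.rst')[:12]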


class _GetError(object):
    """Get error from form_errors, and represent it as span wrapped error
    message

    :param field_name: field to fetch errors for
    :param form_errors: form errors dict
    """

    def __call__(self, field_name, form_errors):
        tmpl = """<span class="error_msg">%s</span>"""
        if form_errors and field_name in form_errors:
            return literal(tmpl % form_errors.get(field_name))

get_error = _GetError()


class _ToolTip(object):

    def __call__(self, tooltip_title, trim_at=50):
        """
        Special function to wrap text into nicely formatted,
        auto-wrapped tooltip text

        :param tooltip_title:
        """
        tooltip_title = escape(tooltip_title)
        tooltip_title = tooltip_title.replace('<', '&lt;').replace('>', '&gt;')
        return tooltip_title
tooltip = _ToolTip()


def files_breadcrumbs(repo_name, commit_id, file_path):
    if isinstance(file_path, str):
        file_path = safe_unicode(file_path)

    # TODO: johbo: Is this always a url like path, or is this operating
    # system dependent?
    path_segments = file_path.split('/')

    repo_name_html = escape(repo_name)
    if len(path_segments) == 1 and path_segments[0] == '':
        url_segments = [repo_name_html]
    else:
        url_segments = [
            link_to(
                repo_name_html,
                url('files_home',
                    repo_name=repo_name,
                    revision=commit_id,
                    f_path=''),
                class_='pjax-link')]

    last_cnt = len(path_segments) - 1
    for cnt, segment in enumerate(path_segments):
        if not segment:
            continue
        segment_html = escape(segment)

        if cnt != last_cnt:
            url_segments.append(
                link_to(
                    segment_html,
                    url('files_home',
                        repo_name=repo_name,
                        revision=commit_id,
                        f_path='/'.join(path_segments[:cnt + 1])),
                    class_='pjax-link'))
        else:
            url_segments.append(segment_html)

    return literal('/'.join(url_segments))


class CodeHtmlFormatter(HtmlFormatter):
    """
    My code Html Formatter for source codes
    """

    def wrap(self, source, outfile):
        return self._wrap_div(self._wrap_pre(self._wrap_code(source)))

    def _wrap_code(self, source):
        for cnt, it in enumerate(source):
            i, t = it
            t = '<div id="L%s">%s</div>' % (cnt + 1, t)
            yield i, t

    def _wrap_tablelinenos(self, inner):
        dummyoutfile = StringIO.StringIO()
        lncount = 0
        for t, line in inner:
            if t:
                lncount += 1
            dummyoutfile.write(line)

        fl = self.linenostart
        mw = len(str(lncount + fl - 1))
        sp = self.linenospecial
        st = self.linenostep
        la = self.lineanchors
        aln = self.anchorlinenos
        nocls = self.noclasses
        if sp:
            lines = []

            for i in range(fl, fl + lncount):
                if i % st == 0:
                    if i % sp == 0:
                        if aln:
                            lines.append('<a href="#%s%d" class="special">%*d</a>' %
                                         (la, i, mw, i))
                        else:
                            lines.append('<span class="special">%*d</span>' % (mw, i))
                    else:
                        if aln:
                            lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
                        else:
                            lines.append('%*d' % (mw, i))
                else:
                    lines.append('')
            ls = '\n'.join(lines)
        else:
            lines = []
            for i in range(fl, fl + lncount):
                if i % st == 0:
                    if aln:
                        lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
                    else:
                        lines.append('%*d' % (mw, i))
                else:
                    lines.append('')
            ls = '\n'.join(lines)

        # in case you wonder about the seemingly redundant <div> here: since the
        # content in the other cell also is wrapped in a div, some browsers in
        # some configurations seem to mess up the formatting...
        if nocls:
            yield 0, ('<table class="%stable">' % self.cssclass +
                      '<tr><td><div class="linenodiv" '
                      'style="background-color: #f0f0f0; padding-right: 10px">'
                      '<pre style="line-height: 125%">' +
                      ls + '</pre></div></td><td id="hlcode" class="code">')
        else:
            yield 0, ('<table class="%stable">' % self.cssclass +
                      '<tr><td class="linenos"><div class="linenodiv"><pre>' +
                      ls + '</pre></div></td><td id="hlcode" class="code">')
        yield 0, dummyoutfile.getvalue()
        yield 0, '</td></tr></table>'


class SearchContentCodeHtmlFormatter(CodeHtmlFormatter):
    def __init__(self, **kw):
        # only show these line numbers if set
        self.only_lines = kw.pop('only_line_numbers', [])
        self.query_terms = kw.pop('query_terms', [])
        self.max_lines = kw.pop('max_lines', 5)
        self.line_context = kw.pop('line_context', 3)
        self.url = kw.pop('url', None)

        super(CodeHtmlFormatter, self).__init__(**kw)

    def _wrap_code(self, source):
        for cnt, it in enumerate(source):
            i, t = it
            t = '<pre>%s</pre>' % t
            yield i, t

    def _wrap_tablelinenos(self, inner):
        yield 0, '<table class="code-highlight %stable">' % self.cssclass

        last_shown_line_number = 0
        current_line_number = 1

        for t, line in inner:
            if not t:
                yield t, line
                continue

            if current_line_number in self.only_lines:
                if last_shown_line_number + 1 != current_line_number:
                    yield 0, '<tr>'
                    yield 0, '<td class="line">...</td>'
                    yield 0, '<td id="hlcode" class="code"></td>'
                    yield 0, '</tr>'

                yield 0, '<tr>'
                if self.url:
                    yield 0, '<td class="line"><a href="%s#L%i">%i</a></td>' % (
                        self.url, current_line_number, current_line_number)
                else:
                    yield 0, '<td class="line"><a href="">%i</a></td>' % (
                        current_line_number)
                yield 0, '<td id="hlcode" class="code">' + line + '</td>'
                yield 0, '</tr>'

                last_shown_line_number = current_line_number

            current_line_number += 1


        yield 0, '</table>'


def extract_phrases(text_query):
    """
    Extracts phrases from a search term string, making sure phrases contained
    in double quotes are kept together, and discarding empty or
    whitespace-only values, eg.

    'some text "a phrase" more' => ['some', 'text', 'a phrase', 'more']

    """

    in_phrase = False
    buf = ''
    phrases = []
    for char in text_query:
        if in_phrase:
            if char == '"':  # end phrase
                phrases.append(buf)
                buf = ''
                in_phrase = False
                continue
            else:
                buf += char
                continue
        else:
            if char == '"':  # start phrase
                in_phrase = True
                phrases.append(buf)
                buf = ''
                continue
            elif char == ' ':
                phrases.append(buf)
                buf = ''
                continue
            else:
                buf += char

    phrases.append(buf)
    phrases = [phrase.strip() for phrase in phrases if phrase.strip()]
    return phrases
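# A quoted phrase is kept intact while the rest splits on whitespace, eg.
#   extract_phrases('author:"John Doe" fix')  =>  ['author:', 'John Doe', 'fix']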


def get_matching_offsets(text, phrases):
    """
    Returns a list of string offsets in `text` that the list of `phrases` match

    >>> get_matching_offsets('some text here', ['some', 'here'])
    [(0, 4), (10, 14)]

    """
    offsets = []
    for phrase in phrases:
        for match in re.finditer(phrase, text):
            offsets.append((match.start(), match.end()))

    return offsets


def normalize_text_for_matching(x):
    """
    Replaces all non-word characters (regex ``\w``) with spaces and
    lower-cases the string, useful for comparing two text strings without
    punctuation
    """
    return re.sub(r'[^\w]', ' ', x.lower())
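# Example: underscores count as word characters, so they are preserved:
#   normalize_text_for_matching('Foo-Bar_baz!')  ->  'foo bar_baz '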


def get_matching_line_offsets(lines, terms):
    """ Return a dict of `lines` indices (starting from 1) matching a
    text search query, along with the match offsets within each matching line

    :param lines: list of strings representing lines
    :param terms: search term string to match in lines eg. 'some text'

    eg.

    text = '''
    words words words
    words words words
    some text some
    words words words
    words words words
    text here what
    '''
    get_matching_line_offsets(text, 'text')
    {3: [(5, 9)], 6: [(0, 4)]}

    """
    matching_lines = {}
    phrases = [normalize_text_for_matching(phrase)
               for phrase in extract_phrases(terms)]

    for line_index, line in enumerate(lines, start=1):
        match_offsets = get_matching_offsets(
            normalize_text_for_matching(line), phrases)
        if match_offsets:
            matching_lines[line_index] = match_offsets

    return matching_lines
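# Usage sketch (assumed): the resulting mapping is what can drive
# SearchContentCodeHtmlFormatter(only_line_numbers=...) above, e.g.
#   get_matching_line_offsets(
#       ['words words', 'some text some', 'more words'], 'text')
#   -> {2: [(5, 9)]}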


def hsv_to_rgb(h, s, v):
    """ Convert hsv color values to rgb """

    if s == 0.0:
        return v, v, v
    i = int(h * 6.0)  # XXX assume int() truncates!
    f = (h * 6.0) - i
    p = v * (1.0 - s)
    q = v * (1.0 - s * f)
    t = v * (1.0 - s * (1.0 - f))
    i = i % 6
    if i == 0:
        return v, t, p
    if i == 1:
        return q, v, p
    if i == 2:
        return p, v, t
    if i == 3:
        return p, q, v
    if i == 4:
        return t, p, v
    if i == 5:
        return v, p, q
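# Worked example: hsv_to_rgb(0.0, 0.5, 1.0) -> (1.0, 0.5, 0.5), a light red.
# All values are fractions in the 0.0-1.0 range, not 0-255.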


def unique_color_generator(n=10000, saturation=0.10, lightness=0.95):
    """
    Generator yielding n evenly distributed colors using the hsv color model
    and the golden ratio. It always returns the same order of colors.

    :param n: number of colors to generate
    :param saturation: saturation of returned colors
    :param lightness: lightness of returned colors
    :returns: RGB tuple
    """

    golden_ratio = 0.618033988749895
    h = 0.22717784590367374

    for _ in xrange(n):
        h += golden_ratio
        h %= 1
        HSV_tuple = [h, saturation, lightness]
        RGB_tuple = hsv_to_rgb(*HSV_tuple)
        yield map(lambda x: str(int(x * 256)), RGB_tuple)
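# Usage sketch: the sequence is deterministic, so callers see a stable palette:
#   gen = unique_color_generator()
#   gen.next()  # an ['R', 'G', 'B'] list of stringified 8-bit channel values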


def color_hasher(n=10000, saturation=0.10, lightness=0.95):
    """
    Returns a function which, when called with an argument, returns a unique
    color for that argument, eg.

    :param n: number of colors to generate
    :param saturation: saturation of returned colors
    :param lightness: lightness of returned colors
    :returns: css RGB string

    >>> color_hash = color_hasher()
    >>> color_hash('hello')
    'rgb(34, 12, 59)'
    >>> color_hash('hello')
    'rgb(34, 12, 59)'
    >>> color_hash('other')
    'rgb(90, 224, 159)'
    """

    color_dict = {}
    cgenerator = unique_color_generator(
        saturation=saturation, lightness=lightness)

    def get_color_string(thing):
        if thing in color_dict:
            col = color_dict[thing]
        else:
            col = color_dict[thing] = cgenerator.next()
        return "rgb(%s)" % (', '.join(col))

    return get_color_string


def get_lexer_safe(mimetype=None, filepath=None):
    """
    Tries to return a relevant pygments lexer using mimetype/filepath name,
    defaulting to plain text if none could be found
    """
    lexer = None
    try:
        if mimetype:
            lexer = get_lexer_for_mimetype(mimetype)
        if not lexer:
            lexer = get_lexer_for_filename(filepath)
    except pygments.util.ClassNotFound:
        pass

    if not lexer:
        lexer = get_lexer_by_name('text')

    return lexer


def get_lexer_for_filenode(filenode):
    lexer = get_custom_lexer(filenode.extension) or filenode.lexer
    return lexer


def pygmentize(filenode, **kwargs):
    """
    pygmentize function using pygments

    :param filenode:
    """
    lexer = get_lexer_for_filenode(filenode)
    return literal(code_highlight(filenode.content, lexer,
                                  CodeHtmlFormatter(**kwargs)))


def is_following_repo(repo_name, user_id):
    from rhodecode.model.scm import ScmModel
    return ScmModel().is_following_repo(repo_name, user_id)


class _Message(object):
    """A message returned by ``Flash.pop_messages()``.

    Converting the message to a string returns the message text. Instances
    also have the following attributes:

    * ``message``: the message text.
    * ``category``: the category specified when the message was created.
    """

    def __init__(self, category, message):
        self.category = category
        self.message = message

    def __str__(self):
        return self.message

    __unicode__ = __str__

    def __html__(self):
        return escape(safe_unicode(self.message))


class Flash(_Flash):

    def pop_messages(self):
        """Return all accumulated messages and delete them from the session.

        The return value is a list of ``Message`` objects.
        """
        from pylons import session

        messages = []

        # Pop the 'old' pylons flash messages. They are tuples of the form
        # (category, message)
        for cat, msg in session.pop(self.session_key, []):
            messages.append(_Message(cat, msg))

        # Pop the 'new' pyramid flash messages for each category as list
        # of strings.
        for cat in self.categories:
            for msg in session.pop_flash(queue=cat):
                messages.append(_Message(cat, msg))
        # Map messages from the default queue to the 'notice' category.
        for msg in session.pop_flash():
            messages.append(_Message('notice', msg))

        session.save()
        return messages

    def json_alerts(self):
        payloads = []
        messages = flash.pop_messages()
        if messages:
            for message in messages:
                subdata = {}
                if hasattr(message.message, 'rsplit'):
                    flash_data = message.message.rsplit('|DELIM|', 1)
                    org_message = flash_data[0]
                    if len(flash_data) > 1:
                        subdata = json.loads(flash_data[1])
                else:
                    org_message = message.message
                payloads.append({
                    'message': {
                        'message': u'{}'.format(org_message),
                        'level': message.category,
                        'force': True,
                        'subdata': subdata
                    }
                })
        return json.dumps(payloads)
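# Flash message convention (as implemented above): a message may carry extra
# JSON after a '|DELIM|' marker, e.g. (payload key purely illustrative)
#   'Repository created|DELIM|{"clone_url": "..."}'
# json_alerts() splits this into the visible text and a `subdata` dict.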
709
709
710 flash = Flash()
710 flash = Flash()
711
711
712 #==============================================================================
712 #==============================================================================
713 # SCM FILTERS available via h.
713 # SCM FILTERS available via h.
714 #==============================================================================
714 #==============================================================================
715 from rhodecode.lib.vcs.utils import author_name, author_email
715 from rhodecode.lib.vcs.utils import author_name, author_email
716 from rhodecode.lib.utils2 import credentials_filter, age as _age
716 from rhodecode.lib.utils2 import credentials_filter, age as _age
717 from rhodecode.model.db import User, ChangesetStatus
717 from rhodecode.model.db import User, ChangesetStatus
718
718
719 age = _age
719 age = _age
720 capitalize = lambda x: x.capitalize()
720 capitalize = lambda x: x.capitalize()
721 email = author_email
721 email = author_email
722 short_id = lambda x: x[:12]
722 short_id = lambda x: x[:12]
723 hide_credentials = lambda x: ''.join(credentials_filter(x))
723 hide_credentials = lambda x: ''.join(credentials_filter(x))
724
724
725
725
726 def age_component(datetime_iso, value=None, time_is_local=False):
726 def age_component(datetime_iso, value=None, time_is_local=False):
727 title = value or format_date(datetime_iso)
727 title = value or format_date(datetime_iso)
728 tzinfo = '+00:00'
728 tzinfo = '+00:00'
729
729
730 # detect if we have a timezone info, otherwise, add it
730 # detect if we have a timezone info, otherwise, add it
731 if isinstance(datetime_iso, datetime) and not datetime_iso.tzinfo:
731 if isinstance(datetime_iso, datetime) and not datetime_iso.tzinfo:
732 if time_is_local:
732 if time_is_local:
733 tzinfo = time.strftime("+%H:%M",
733 tzinfo = time.strftime("+%H:%M",
734 time.gmtime(
734 time.gmtime(
735 (datetime.now() - datetime.utcnow()).seconds + 1
735 (datetime.now() - datetime.utcnow()).seconds + 1
736 )
736 )
737 )
737 )
738
738
739 return literal(
739 return literal(
740 '<time class="timeago tooltip" '
740 '<time class="timeago tooltip" '
741 'title="{1}{2}" datetime="{0}{2}">{1}</time>'.format(
741 'title="{1}{2}" datetime="{0}{2}">{1}</time>'.format(
742 datetime_iso, title, tzinfo))
742 datetime_iso, title, tzinfo))
743
743
744
744
745 def _shorten_commit_id(commit_id):
745 def _shorten_commit_id(commit_id):
746 from rhodecode import CONFIG
746 from rhodecode import CONFIG
747 def_len = safe_int(CONFIG.get('rhodecode_show_sha_length', 12))
747 def_len = safe_int(CONFIG.get('rhodecode_show_sha_length', 12))
748 return commit_id[:def_len]
748 return commit_id[:def_len]
749
749
750
750
751 def show_id(commit):
751 def show_id(commit):
752 """
752 """
753 Configurable function that shows ID
753 Configurable function that shows ID
754 by default it's r123:fffeeefffeee
754 by default it's r123:fffeeefffeee
755
755
756 :param commit: commit instance
756 :param commit: commit instance
757 """
757 """
758 from rhodecode import CONFIG
758 from rhodecode import CONFIG
759 show_idx = str2bool(CONFIG.get('rhodecode_show_revision_number', True))
759 show_idx = str2bool(CONFIG.get('rhodecode_show_revision_number', True))
760
760
761 raw_id = _shorten_commit_id(commit.raw_id)
761 raw_id = _shorten_commit_id(commit.raw_id)
762 if show_idx:
762 if show_idx:
763 return 'r%s:%s' % (commit.idx, raw_id)
763 return 'r%s:%s' % (commit.idx, raw_id)
764 else:
764 else:
765 return '%s' % (raw_id, )
765 return '%s' % (raw_id, )
766
766
767
767
768 def format_date(date):
768 def format_date(date):
769 """
769 """
770 use a standardized formatting for dates used in RhodeCode
770 use a standardized formatting for dates used in RhodeCode
771
771
772 :param date: date/datetime object
772 :param date: date/datetime object
773 :return: formatted date
773 :return: formatted date
774 """
774 """
775
775
776 if date:
776 if date:
777 _fmt = "%a, %d %b %Y %H:%M:%S"
777 _fmt = "%a, %d %b %Y %H:%M:%S"
778 return safe_unicode(date.strftime(_fmt))
778 return safe_unicode(date.strftime(_fmt))
779
779
780 return u""
780 return u""
781
781
782
782
783 class _RepoChecker(object):
783 class _RepoChecker(object):
784
784
785 def __init__(self, backend_alias):
785 def __init__(self, backend_alias):
786 self._backend_alias = backend_alias
786 self._backend_alias = backend_alias
787
787
788 def __call__(self, repository):
788 def __call__(self, repository):
789 if hasattr(repository, 'alias'):
789 if hasattr(repository, 'alias'):
790 _type = repository.alias
790 _type = repository.alias
791 elif hasattr(repository, 'repo_type'):
791 elif hasattr(repository, 'repo_type'):
792 _type = repository.repo_type
792 _type = repository.repo_type
793 else:
793 else:
794 _type = repository
794 _type = repository
795 return _type == self._backend_alias
795 return _type == self._backend_alias
796
796
797 is_git = _RepoChecker('git')
797 is_git = _RepoChecker('git')
798 is_hg = _RepoChecker('hg')
798 is_hg = _RepoChecker('hg')
799 is_svn = _RepoChecker('svn')
799 is_svn = _RepoChecker('svn')
800
800
801
801
802 def get_repo_type_by_name(repo_name):
802 def get_repo_type_by_name(repo_name):
803 repo = Repository.get_by_repo_name(repo_name)
803 repo = Repository.get_by_repo_name(repo_name)
804 return repo.repo_type
804 return repo.repo_type
805
805
806
806
807 def is_svn_without_proxy(repository):
807 def is_svn_without_proxy(repository):
808 if is_svn(repository):
808 if is_svn(repository):
809 from rhodecode.model.settings import VcsSettingsModel
809 from rhodecode.model.settings import VcsSettingsModel
810 conf = VcsSettingsModel().get_ui_settings_as_config_obj()
810 conf = VcsSettingsModel().get_ui_settings_as_config_obj()
811 return not str2bool(conf.get('vcs_svn_proxy', 'http_requests_enabled'))
811 return not str2bool(conf.get('vcs_svn_proxy', 'http_requests_enabled'))
812 return False
812 return False
813
813
814
814
815 def discover_user(author):
815 def discover_user(author):
816 """
816 """
817 Tries to discover RhodeCode User based on the autho string. Author string
817 Tries to discover RhodeCode User based on the autho string. Author string
818 is typically `FirstName LastName <email@address.com>`
818 is typically `FirstName LastName <email@address.com>`
819 """
819 """
820
820
821 # if author is already an instance use it for extraction
821 # if author is already an instance use it for extraction
822 if isinstance(author, User):
822 if isinstance(author, User):
823 return author
823 return author
824
824
825 # Valid email in the attribute passed, see if they're in the system
825 # Valid email in the attribute passed, see if they're in the system
826 _email = author_email(author)
826 _email = author_email(author)
827 if _email != '':
827 if _email != '':
828 user = User.get_by_email(_email, case_insensitive=True, cache=True)
828 user = User.get_by_email(_email, case_insensitive=True, cache=True)
829 if user is not None:
829 if user is not None:
830 return user
830 return user
831
831
832 # Maybe it's a username, we try to extract it and fetch by username ?
832 # Maybe it's a username, we try to extract it and fetch by username ?
833 _author = author_name(author)
833 _author = author_name(author)
834 user = User.get_by_username(_author, case_insensitive=True, cache=True)
834 user = User.get_by_username(_author, case_insensitive=True, cache=True)
835 if user is not None:
835 if user is not None:
836 return user
836 return user
837
837
838 return None
838 return None


def email_or_none(author):
    # extract email from the commit string
    _email = author_email(author)

    # If we have an email, use it, otherwise
    # see if it contains a username we can get an email from
    if _email != '':
        return _email
    else:
        user = User.get_by_username(
            author_name(author), case_insensitive=True, cache=True)

        if user is not None:
            return user.email

    # No valid email, not a valid user in the system, none!
    return None


def link_to_user(author, length=0, **kwargs):
    user = discover_user(author)
    # user can be None, but if we have it already it means we can re-use it
    # in the person() function, so we save one intensive query
    if user:
        author = user

    display_person = person(author, 'username_or_name_or_email')
    if length:
        display_person = shorter(display_person, length)

    if user:
        return link_to(
            escape(display_person),
            route_path('user_profile', username=user.username),
            **kwargs)
    else:
        return escape(display_person)


def person(author, show_attr="username_and_name"):
    user = discover_user(author)
    if user:
        return getattr(user, show_attr)
    else:
        _author = author_name(author)
        _email = email(author)
        return _author or _email


def author_string(email):
    if email:
        user = User.get_by_email(email, case_insensitive=True, cache=True)
        if user:
            if user.firstname or user.lastname:
                return '%s %s &lt;%s&gt;' % (user.firstname, user.lastname, email)
            else:
                return email
        else:
            return email
    else:
        return None


def person_by_id(id_, show_attr="username_and_name"):
    # attr to return from fetched user
    person_getter = lambda usr: getattr(usr, show_attr)

    # maybe it's an ID?
    if str(id_).isdigit() or isinstance(id_, int):
        id_ = int(id_)
        user = User.get(id_)
        if user is not None:
            return person_getter(user)
    return id_
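
# Illustrative sketch (assumed values): person_by_id() only resolves numeric
# ids and otherwise returns the value it was given, e.g.
#
#   person_by_id(2)          # -> the "username_and_name" of user 2, if it exists
#   person_by_id('foo')      # -> 'foo' (not a digit, returned unchanged)
#   person_by_id(999999)     # -> 999999 (no such user, returned unchanged)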


def gravatar_with_user(author, show_disabled=False):
    from rhodecode.lib.utils import PartialRenderer
    _render = PartialRenderer('base/base.mako')
    return _render('gravatar_with_user', author, show_disabled=show_disabled)


def desc_stylize(value):
    """
    converts tags from value into html equivalent

    :param value:
    """
    if not value:
        return ''

    value = re.sub(r'\[see\ \=\>\ *([a-zA-Z0-9\/\=\?\&\ \:\/\.\-]*)\]',
                   '<div class="metatag" tag="see">see =&gt; \\1 </div>', value)
    value = re.sub(r'\[license\ \=\>\ *([a-zA-Z0-9\/\=\?\&\ \:\/\.\-]*)\]',
                   '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/\\1">\\1</a></div>', value)
    value = re.sub(r'\[(requires|recommends|conflicts|base)\ \=\>\ *([a-zA-Z0-9\-\/]*)\]',
                   '<div class="metatag" tag="\\1">\\1 =&gt; <a href="/\\2">\\2</a></div>', value)
    value = re.sub(r'\[(lang|language)\ \=\>\ *([a-zA-Z\-\/\#\+]*)\]',
                   '<div class="metatag" tag="lang">\\2</div>', value)
    value = re.sub(r'\[([a-z]+)\]',
                   '<div class="metatag" tag="\\1">\\1</div>', value)

    return value
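
# Illustrative example (hypothetical description text): the metatag syntax
# used in repository descriptions is turned into styled <div> elements, e.g.
#
#   desc_stylize('[license => MIT] [lang => python] [stable]')
#   # -> '<div class="metatag" tag="license">...MIT...</div> '
#   #    '<div class="metatag" tag="lang">python</div> '
#   #    '<div class="metatag" tag="stable">stable</div>'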


def escaped_stylize(value):
    """
    converts tags from value into html equivalent, but escaping its value first
    """
    if not value:
        return ''

    # Using default webhelper escape method, but has to force it as a
    # plain unicode instead of a markup tag to be used in regex expressions
    value = unicode(escape(safe_unicode(value)))

    value = re.sub(r'\[see\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]',
                   '<div class="metatag" tag="see">see =&gt; \\1 </div>', value)
    value = re.sub(r'\[license\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]',
                   '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/\\1">\\1</a></div>', value)
    value = re.sub(r'\[(requires|recommends|conflicts|base)\ \=\&gt;\ *([a-zA-Z0-9\-\/]*)\]',
                   '<div class="metatag" tag="\\1">\\1 =&gt; <a href="/\\2">\\2</a></div>', value)
    value = re.sub(r'\[(lang|language)\ \=\&gt;\ *([a-zA-Z\-\/\#\+]*)\]',
                   '<div class="metatag" tag="lang">\\2</div>', value)
    value = re.sub(r'\[([a-z]+)\]',
                   '<div class="metatag" tag="\\1">\\1</div>', value)

    return value


def bool2icon(value):
    """
    Returns boolean value of a given value, represented as html element with
    classes that will represent icons

    :param value: given value to convert to html node
    """

    if value:  # does bool conversion
        return HTML.tag('i', class_="icon-true")
    else:  # not true as bool
        return HTML.tag('i', class_="icon-false")


#==============================================================================
# PERMS
#==============================================================================
from rhodecode.lib.auth import HasPermissionAny, HasPermissionAll, \
    HasRepoPermissionAny, HasRepoPermissionAll, HasRepoGroupPermissionAll, \
    HasRepoGroupPermissionAny, HasRepoPermissionAnyApi, get_csrf_token, \
    csrf_token_key


#==============================================================================
# GRAVATAR URL
#==============================================================================
class InitialsGravatar(object):
    def __init__(self, email_address, first_name, last_name, size=30,
                 background=None, text_color='#fff'):
        self.size = size
        self.first_name = first_name
        self.last_name = last_name
        self.email_address = email_address
        self.background = background or self.str2color(email_address)
        self.text_color = text_color

    def get_color_bank(self):
        """
        returns a predefined list of colors that gravatars can use.
        Those are randomized distinct colors that guarantee readability and
        uniqueness.

        generated with: http://phrogz.net/css/distinct-colors.html
        """
        return [
            '#bf3030', '#a67f53', '#00ff00', '#5989b3', '#392040', '#d90000',
            '#402910', '#204020', '#79baf2', '#a700b3', '#bf6060', '#7f5320',
            '#008000', '#003059', '#ee00ff', '#ff0000', '#8c4b00', '#007300',
            '#005fb3', '#de73e6', '#ff4040', '#ffaa00', '#3df255', '#203140',
            '#47004d', '#591616', '#664400', '#59b365', '#0d2133', '#83008c',
            '#592d2d', '#bf9f60', '#73e682', '#1d3f73', '#73006b', '#402020',
            '#b2862d', '#397341', '#597db3', '#e600d6', '#a60000', '#736039',
            '#00b318', '#79aaf2', '#330d30', '#ff8080', '#403010', '#16591f',
            '#002459', '#8c4688', '#e50000', '#ffbf40', '#00732e', '#102340',
            '#bf60ac', '#8c4646', '#cc8800', '#00a642', '#1d3473', '#b32d98',
            '#660e00', '#ffd580', '#80ffb2', '#7391e6', '#733967', '#d97b6c',
            '#8c5e00', '#59b389', '#3967e6', '#590047', '#73281d', '#665200',
            '#00e67a', '#2d50b3', '#8c2377', '#734139', '#b2982d', '#16593a',
            '#001859', '#ff00aa', '#a65e53', '#ffcc00', '#0d3321', '#2d3959',
            '#731d56', '#401610', '#4c3d00', '#468c6c', '#002ca6', '#d936a3',
            '#d94c36', '#403920', '#36d9a3', '#0d1733', '#592d4a', '#993626',
            '#cca300', '#00734d', '#46598c', '#8c005e', '#7f1100', '#8c7000',
            '#00a66f', '#7382e6', '#b32d74', '#d9896c', '#ffe680', '#1d7362',
            '#364cd9', '#73003d', '#d93a00', '#998a4d', '#59b3a1', '#5965b3',
            '#e5007a', '#73341d', '#665f00', '#00b38f', '#0018b3', '#59163a',
            '#b2502d', '#bfb960', '#00ffcc', '#23318c', '#a6537f', '#734939',
            '#b2a700', '#104036', '#3d3df2', '#402031', '#e56739', '#736f39',
            '#79f2ea', '#000059', '#401029', '#4c1400', '#ffee00', '#005953',
            '#101040', '#990052', '#402820', '#403d10', '#00ffee', '#0000d9',
            '#ff80c4', '#a66953', '#eeff00', '#00ccbe', '#8080ff', '#e673a1',
            '#a62c00', '#474d00', '#1a3331', '#46468c', '#733950', '#662900',
            '#858c23', '#238c85', '#0f0073', '#b20047', '#d9986c', '#becc00',
            '#396f73', '#281d73', '#ff0066', '#ff6600', '#dee673', '#59adb3',
            '#6559b3', '#590024', '#b2622d', '#98b32d', '#36ced9', '#332d59',
            '#40001a', '#733f1d', '#526600', '#005359', '#242040', '#bf6079',
            '#735039', '#cef23d', '#007780', '#5630bf', '#66001b', '#b24700',
            '#acbf60', '#1d6273', '#25008c', '#731d34', '#a67453', '#50592d',
            '#00ccff', '#6600ff', '#ff0044', '#4c1f00', '#8a994d', '#79daf2',
            '#a173e6', '#d93662', '#402310', '#aaff00', '#2d98b3', '#8c40ff',
            '#592d39', '#ff8c40', '#354020', '#103640', '#1a0040', '#331a20',
            '#331400', '#334d00', '#1d5673', '#583973', '#7f0022', '#4c3626',
            '#88cc00', '#36a3d9', '#3d0073', '#d9364c', '#33241a', '#698c23',
            '#5995b3', '#300059', '#e57382', '#7f3300', '#366600', '#00aaff',
            '#3a1659', '#733941', '#663600', '#74b32d', '#003c59', '#7f53a6',
            '#73000f', '#ff8800', '#baf279', '#79caf2', '#291040', '#a6293a',
            '#b2742d', '#587339', '#0077b3', '#632699', '#400009', '#d9a66c',
            '#294010', '#2d4a59', '#aa00ff', '#4c131b', '#b25f00', '#5ce600',
            '#267399', '#a336d9', '#990014', '#664e33', '#86bf60', '#0088ff',
            '#7700b3', '#593a16', '#073300', '#1d4b73', '#ac60bf', '#e59539',
            '#4f8c46', '#368dd9', '#5c0073'
        ]

    def rgb_to_hex_color(self, rgb_tuple):
        """
        Converts a passed rgb_tuple to a hex color.

        :param rgb_tuple: tuple with 3 ints representing an rgb color space
        """
        return '#' + ("".join(map(chr, rgb_tuple)).encode('hex'))

    def email_to_int_list(self, email_str):
        """
        Get every byte of the hex digest value of email and turn it into an
        integer. It's always going to be between 0-255
        """
        digest = md5_safe(email_str.lower())
        return [int(digest[i * 2:i * 2 + 2], 16) for i in range(16)]

    def pick_color_bank_index(self, email_str, color_bank):
        return self.email_to_int_list(email_str)[0] % len(color_bank)

    def str2color(self, email_str):
        """
        Tries to map an email to a color using a stable algorithm

        :param email_str:
        """
        color_bank = self.get_color_bank()
        # pick position (modulo its length so we always find it in the
        # bank even if it's smaller than 256 values)
        pos = self.pick_color_bank_index(email_str, color_bank)
        return color_bank[pos]
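
    # Worked sketch (hypothetical address; exact colour depends on the bank
    # above): str2color() is stable because it only uses the first byte of
    # md5(email), e.g.
    #
    #   first_byte = int(md5_safe('jane@example.com'.lower())[0:2], 16)  # 0..255
    #   colour = color_bank[first_byte % len(color_bank)]  # same input -> same colour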

    def normalize_email(self, email_address):
        import unicodedata
        # default host used to fill in the fake/missing email
        default_host = u'localhost'

        if not email_address:
            email_address = u'%s@%s' % (User.DEFAULT_USER, default_host)

        email_address = safe_unicode(email_address)

        if u'@' not in email_address:
            email_address = u'%s@%s' % (email_address, default_host)

        if email_address.endswith(u'@'):
            email_address = u'%s%s' % (email_address, default_host)

        email_address = unicodedata.normalize('NFKD', email_address)\
            .encode('ascii', 'ignore')
        return email_address

    def get_initials(self):
        """
        Returns 2 letter initials calculated based on the input.
        The algorithm picks the first given email address, and takes the first
        letter of the part before @, and then the first letter of the server
        name. In case the part before @ is in a `somestring.somestring2`
        format, it replaces the server letter with the first letter of
        somestring2.

        In case the function was initialized with both first and last name,
        this overrides the extraction from email with the first letter of the
        first and last name. We add special logic to that functionality: in
        case the full name is compound, like Guido Von Rossum, we use the last
        part of the last name (Von Rossum), picking `R`.

        The function also normalizes non-ascii characters to their ascii
        representation, eg Ą => A
        """
        import unicodedata
        # replace non-ascii to ascii
        first_name = unicodedata.normalize(
            'NFKD', safe_unicode(self.first_name)).encode('ascii', 'ignore')
        last_name = unicodedata.normalize(
            'NFKD', safe_unicode(self.last_name)).encode('ascii', 'ignore')

        # do NFKD encoding, and also make sure email has proper format
        email_address = self.normalize_email(self.email_address)

        # first push the email initials
        prefix, server = email_address.split('@', 1)

        # check if prefix is maybe a 'firstname.lastname' syntax
        _dot_split = prefix.rsplit('.', 1)
        if len(_dot_split) == 2:
            initials = [_dot_split[0][0], _dot_split[1][0]]
        else:
            initials = [prefix[0], server[0]]

        # then try to replace either firstname or lastname
        fn_letter = (first_name or " ")[0].strip()
        ln_letter = (last_name.split(' ', 1)[-1] or " ")[0].strip()

        if fn_letter:
            initials[0] = fn_letter

        if ln_letter:
            initials[1] = ln_letter

        return ''.join(initials).upper()
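
    # Illustrative examples (hypothetical inputs) of the initials algorithm above:
    #
    #   InitialsGravatar('john.doe@example.com', '', '').get_initials()    # -> 'JD'
    #   InitialsGravatar('admin@server.com', '', '').get_initials()        # -> 'AS'
    #   InitialsGravatar('x@y.org', 'Guido', 'Von Rossum').get_initials()  # -> 'GR'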

    def get_img_data_by_type(self, font_family, img_type):
        default_user = """
        <svg xmlns="http://www.w3.org/2000/svg"
        version="1.1" x="0px" y="0px" width="{size}" height="{size}"
        viewBox="-15 -10 439.165 429.164"

        xml:space="preserve"
        style="background:{background};" >

        <path d="M204.583,216.671c50.664,0,91.74-48.075,
        91.74-107.378c0-82.237-41.074-107.377-91.74-107.377
        c-50.668,0-91.74,25.14-91.74,107.377C112.844,
        168.596,153.916,216.671,
        204.583,216.671z" fill="{text_color}"/>
        <path d="M407.164,374.717L360.88,
        270.454c-2.117-4.771-5.836-8.728-10.465-11.138l-71.83-37.392
        c-1.584-0.823-3.502-0.663-4.926,0.415c-20.316,
        15.366-44.203,23.488-69.076,23.488c-24.877,
        0-48.762-8.122-69.078-23.488
        c-1.428-1.078-3.346-1.238-4.93-0.415L58.75,
        259.316c-4.631,2.41-8.346,6.365-10.465,11.138L2.001,374.717
        c-3.191,7.188-2.537,15.412,1.75,22.005c4.285,
        6.592,11.537,10.526,19.4,10.526h362.861c7.863,0,15.117-3.936,
        19.402-10.527 C409.699,390.129,
        410.355,381.902,407.164,374.717z" fill="{text_color}"/>
        </svg>""".format(
            size=self.size,
            background='#979797',  # @grey4
            text_color=self.text_color,
            font_family=font_family)

        return {
            "default_user": default_user
        }[img_type]

    def get_img_data(self, svg_type=None):
        """
        generates the svg metadata for image
        """

        font_family = ','.join([
            'proximanovaregular',
            'Proxima Nova Regular',
            'Proxima Nova',
            'Arial',
            'Lucida Grande',
            'sans-serif'
        ])
        if svg_type:
            return self.get_img_data_by_type(font_family, svg_type)

        initials = self.get_initials()
        img_data = """
        <svg xmlns="http://www.w3.org/2000/svg" pointer-events="none"
             width="{size}" height="{size}"
             style="width: 100%; height: 100%; background-color: {background}"
             viewBox="0 0 {size} {size}">
            <text text-anchor="middle" y="50%" x="50%" dy="0.35em"
                  pointer-events="auto" fill="{text_color}"
                  font-family="{font_family}"
                  style="font-weight: 400; font-size: {f_size}px;">{text}
            </text>
        </svg>""".format(
            size=self.size,
            f_size=self.size/1.85,  # scale the text inside the box nicely
            background=self.background,
            text_color=self.text_color,
            text=initials.upper(),
            font_family=font_family)

        return img_data

    def generate_svg(self, svg_type=None):
        img_data = self.get_img_data(svg_type)
        return "data:image/svg+xml;base64,%s" % img_data.encode('base64')


def initials_gravatar(email_address, first_name, last_name, size=30):
    svg_type = None
    if email_address == User.DEFAULT_USER_EMAIL:
        svg_type = 'default_user'
    klass = InitialsGravatar(email_address, first_name, last_name, size)
    return klass.generate_svg(svg_type=svg_type)


def gravatar_url(email_address, size=30):
    # doh, we need to re-import those to mock it later
    from pylons import tmpl_context as c

    _use_gravatar = c.visual.use_gravatar
    _gravatar_url = c.visual.gravatar_url or User.DEFAULT_GRAVATAR_URL

    email_address = email_address or User.DEFAULT_USER_EMAIL
    if isinstance(email_address, unicode):
        # hashlib crashes on unicode items
        email_address = safe_str(email_address)

    # empty email or default user
    if not email_address or email_address == User.DEFAULT_USER_EMAIL:
        return initials_gravatar(User.DEFAULT_USER_EMAIL, '', '', size=size)

    if _use_gravatar:
        # TODO: Disuse pyramid thread locals. Think about another solution to
        # get the host and schema here.
        request = get_current_request()
        tmpl = safe_str(_gravatar_url)
        tmpl = tmpl.replace('{email}', email_address)\
            .replace('{md5email}', md5_safe(email_address.lower())) \
            .replace('{netloc}', request.host)\
            .replace('{scheme}', request.scheme)\
            .replace('{size}', safe_str(size))
        return tmpl
    else:
        return initials_gravatar(email_address, '', '', size=size)
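
# Illustrative sketch (assumed settings and template): with use_gravatar
# enabled and a URL template along the lines of
#
#   'https://secure.gravatar.com/avatar/{md5email}?d=identicon&s={size}'
#
# the placeholders are expanded to something like
# 'https://secure.gravatar.com/avatar/<md5 of the e-mail>?d=identicon&s=30';
# otherwise an inline SVG with the user's initials is returned instead.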


class Page(_Page):
    """
    Custom pager to match rendering style with paginator
    """

    def _get_pos(self, cur_page, max_page, items):
        edge = (items / 2) + 1
        if (cur_page <= edge):
            radius = max(items / 2, items - cur_page)
        elif (max_page - cur_page) < edge:
            radius = (items - 1) - (max_page - cur_page)
        else:
            radius = items / 2

        left = max(1, (cur_page - (radius)))
        right = min(max_page, cur_page + (radius))
        return left, cur_page, right
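
    # Worked example for _get_pos() (assumed values): with items=5 visible
    # slots, max_page=12 and cur_page=7, edge is 3, radius works out to 2 and
    # the returned window is (5, 7, 9) -- i.e. '1 .. 5 6 [7] 8 9 .. 12'.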

    def _range(self, regexp_match):
        """
        Return range of linked pages (e.g. '1 2 [3] 4 5 6 7 8').

        Arguments:

        regexp_match
            A "re" (regular expressions) match object containing the
            radius of linked pages around the current page in
            regexp_match.group(1) as a string

        This function is supposed to be called as a callable in
        re.sub.

        """
        radius = int(regexp_match.group(1))

        # Compute the first and last page number within the radius
        # e.g. '1 .. 5 6 [7] 8 9 .. 12'
        # -> leftmost_page = 5
        # -> rightmost_page = 9
        leftmost_page, _cur, rightmost_page = self._get_pos(self.page,
                                                            self.last_page,
                                                            (radius * 2) + 1)
        nav_items = []

        # Create a link to the first page (unless we are on the first page
        # or there would be no need to insert '..' spacers)
        if self.page != self.first_page and self.first_page < leftmost_page:
            nav_items.append(self._pagerlink(self.first_page, self.first_page))

        # Insert dots if there are pages between the first page
        # and the currently displayed page range
        if leftmost_page - self.first_page > 1:
            # Wrap in a SPAN tag if nolink_attr is set
            text = '..'
            if self.dotdot_attr:
                text = HTML.span(c=text, **self.dotdot_attr)
            nav_items.append(text)

        for thispage in xrange(leftmost_page, rightmost_page + 1):
            # Highlight the current page number and do not use a link
            if thispage == self.page:
                text = '%s' % (thispage,)
                # Wrap in a SPAN tag if nolink_attr is set
                if self.curpage_attr:
                    text = HTML.span(c=text, **self.curpage_attr)
                nav_items.append(text)
            # Otherwise create just a link to that page
            else:
                text = '%s' % (thispage,)
                nav_items.append(self._pagerlink(thispage, text))

        # Insert dots if there are pages between the displayed
        # page numbers and the end of the page range
        if self.last_page - rightmost_page > 1:
            text = '..'
            # Wrap in a SPAN tag if nolink_attr is set
            if self.dotdot_attr:
                text = HTML.span(c=text, **self.dotdot_attr)
            nav_items.append(text)

        # Create a link to the very last page (unless we are on the last
        # page or there would be no need to insert '..' spacers)
        if self.page != self.last_page and rightmost_page < self.last_page:
            nav_items.append(self._pagerlink(self.last_page, self.last_page))

        ## prerender links
        #_page_link = url.current()
        #nav_items.append(literal('<link rel="prerender" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
        #nav_items.append(literal('<link rel="prefetch" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
        return self.separator.join(nav_items)

    def pager(self, format='~2~', page_param='page', partial_param='partial',
              show_if_single_page=False, separator=' ', onclick=None,
              symbol_first='<<', symbol_last='>>',
              symbol_previous='<', symbol_next='>',
              link_attr={'class': 'pager_link', 'rel': 'prerender'},
              curpage_attr={'class': 'pager_curpage'},
              dotdot_attr={'class': 'pager_dotdot'}, **kwargs):

        self.curpage_attr = curpage_attr
        self.separator = separator
        self.pager_kwargs = kwargs
        self.page_param = page_param
        self.partial_param = partial_param
        self.onclick = onclick
        self.link_attr = link_attr
        self.dotdot_attr = dotdot_attr

        # Don't show navigator if there is no more than one page
        if self.page_count == 0 or (self.page_count == 1 and not show_if_single_page):
            return ''

        from string import Template
        # Replace ~...~ in token format by range of pages
        result = re.sub(r'~(\d+)~', self._range, format)

        # Interpolate '%' variables
        result = Template(result).safe_substitute({
            'first_page': self.first_page,
            'last_page': self.last_page,
            'page': self.page,
            'page_count': self.page_count,
            'items_per_page': self.items_per_page,
            'first_item': self.first_item,
            'last_item': self.last_item,
            'item_count': self.item_count,
            'link_first': self.page > self.first_page and \
                self._pagerlink(self.first_page, symbol_first) or '',
            'link_last': self.page < self.last_page and \
                self._pagerlink(self.last_page, symbol_last) or '',
            'link_previous': self.previous_page and \
                self._pagerlink(self.previous_page, symbol_previous) \
                or HTML.span(symbol_previous, class_="pg-previous disabled"),
            'link_next': self.next_page and \
                self._pagerlink(self.next_page, symbol_next) \
                or HTML.span(symbol_next, class_="pg-next disabled")
        })

        return literal(result)


#==============================================================================
# REPO PAGER, PAGER FOR REPOSITORY
#==============================================================================
class RepoPage(Page):

    def __init__(self, collection, page=1, items_per_page=20,
                 item_count=None, url=None, **kwargs):

        """Create a "RepoPage" instance. A special pager for paging a
        repository.
        """
        self._url_generator = url

        # Save the kwargs class-wide so they can be used in the pager() method
        self.kwargs = kwargs

        # Save a reference to the collection
        self.original_collection = collection

        self.collection = collection

        # The self.page is the number of the current page.
        # The first page has the number 1!
        try:
            self.page = int(page)  # make it int() if we get it as a string
        except (ValueError, TypeError):
            self.page = 1

        self.items_per_page = items_per_page

        # Unless the user tells us how many items the collection has
        # we calculate that ourselves.
        if item_count is not None:
            self.item_count = item_count
        else:
            self.item_count = len(self.collection)

        # Compute the number of the first and last available page
        if self.item_count > 0:
            self.first_page = 1
            self.page_count = int(math.ceil(float(self.item_count) /
                                            self.items_per_page))
            self.last_page = self.first_page + self.page_count - 1

            # Make sure that the requested page number is in the range of
            # valid pages
            if self.page > self.last_page:
                self.page = self.last_page
            elif self.page < self.first_page:
                self.page = self.first_page

            # Note: the number of items on this page can be less than
            #       items_per_page if the last page is not full
            self.first_item = max(0, (self.item_count) - (self.page *
                                                          items_per_page))
            self.last_item = ((self.item_count - 1) - items_per_page *
                              (self.page - 1))

            self.items = list(self.collection[self.first_item:self.last_item + 1])

            # Links to previous and next page
            if self.page > self.first_page:
                self.previous_page = self.page - 1
            else:
                self.previous_page = None

            if self.page < self.last_page:
                self.next_page = self.page + 1
            else:
                self.next_page = None

        # No items available
        else:
            self.first_page = None
            self.page_count = 0
            self.last_page = None
            self.first_item = None
            self.last_item = None
            self.previous_page = None
            self.next_page = None
            self.items = []

        # This is a subclass of the 'list' type. Initialise the list now.
        list.__init__(self, reversed(self.items))
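
# Worked example (assumed numbers): for a RepoPage over 45 items with
# items_per_page=20 and page=1, first_item = max(0, 45 - 20) = 25 and
# last_item = 44, so items = collection[25:45]; after reversed() the newest
# entries come first on page 1, assuming the collection is ordered oldest to
# newest.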


def changed_tooltip(nodes):
    """
    Generates an html string for changed nodes in commit page.
    It limits the output to 30 entries

    :param nodes: LazyNodesGenerator
    """
    if nodes:
        pref = ': <br/> '
        suf = ''
        if len(nodes) > 30:
            suf = '<br/>' + _(' and %s more') % (len(nodes) - 30)
        return literal(pref + '<br/> '.join([safe_unicode(x.path)
                                             for x in nodes[:30]]) + suf)
    else:
        return ': ' + _('No Files')


def breadcrumb_repo_link(repo):
    """
    Makes a breadcrumbs path link to repo

    ex::
        group >> subgroup >> repo

    :param repo: a Repository instance
    """

    path = [
        link_to(group.name, url('repo_group_home', group_name=group.group_name))
        for group in repo.groups_with_parents
    ] + [
        link_to(repo.just_name, url('summary_home', repo_name=repo.repo_name))
    ]

    return literal(' &raquo; '.join(path))


def format_byte_size_binary(file_size):
    """
    Formats file/folder sizes to standard.
    """
    formatted_size = format_byte_size(file_size, binary=True)
    return formatted_size


def urlify_text(text_, safe=True):
    """
    Extract urls from text and make html links out of them

    :param text_:
    """

    url_pat = re.compile(r'''(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@#.&+]'''
                         '''|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)''')

    def url_func(match_obj):
        url_full = match_obj.groups()[0]
        return '<a href="%(url)s">%(url)s</a>' % ({'url': url_full})
    _newtext = url_pat.sub(url_func, text_)
    if safe:
        return literal(_newtext)
    return _newtext
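
# Illustrative example (hypothetical text):
#
#   urlify_text('docs live at https://docs.example.com/intro')
#   # -> literal('docs live at <a href="https://docs.example.com/intro">'
#   #            'https://docs.example.com/intro</a>')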


def urlify_commits(text_, repository):
    """
    Extract commit ids from text and make links from them

    :param text_:
    :param repository: repo name to build the URL with
    """
    from pylons import url  # doh, we need to re-import url to mock it later
    URL_PAT = re.compile(r'(^|\s)([0-9a-fA-F]{12,40})($|\s)')

    def url_func(match_obj):
        commit_id = match_obj.groups()[1]
        pref = match_obj.groups()[0]
        suf = match_obj.groups()[2]

        tmpl = (
            '%(pref)s<a class="%(cls)s" href="%(url)s">'
            '%(commit_id)s</a>%(suf)s'
        )
        return tmpl % {
            'pref': pref,
            'cls': 'revision-link',
            'url': url('changeset_home', repo_name=repository,
                       revision=commit_id, qualified=True),
            'commit_id': commit_id,
            'suf': suf
        }

    newtext = URL_PAT.sub(url_func, text_)

    return newtext
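
# Illustrative example (hypothetical hash and repo name): a bare 12-40 char
# hex string becomes a changeset link, e.g.
#
#   urlify_commits('fixes regression from deadbeefcafe', 'my-repo')
#   # -> 'fixes regression from <a class="revision-link" href="...">'
#   #    'deadbeefcafe</a>'  (href is the fully qualified changeset_home URL)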


def _process_url_func(match_obj, repo_name, uid, entry,
                      return_raw_data=False, link_format='html'):
    pref = ''
    if match_obj.group().startswith(' '):
        pref = ' '

    issue_id = ''.join(match_obj.groups())

    if link_format == 'html':
        tmpl = (
            '%(pref)s<a class="%(cls)s" href="%(url)s">'
            '%(issue-prefix)s%(id-repr)s'
            '</a>')
    elif link_format == 'rst':
        tmpl = '`%(issue-prefix)s%(id-repr)s <%(url)s>`_'
    elif link_format == 'markdown':
        tmpl = '[%(issue-prefix)s%(id-repr)s](%(url)s)'
    else:
        raise ValueError('Bad link_format:{}'.format(link_format))

    (repo_name_cleaned,
     parent_group_name) = RepoGroupModel().\
        _get_group_name_and_parent(repo_name)

    # variables replacement
    named_vars = {
        'id': issue_id,
        'repo': repo_name,
        'repo_name': repo_name_cleaned,
        'group_name': parent_group_name
    }
    # named regex variables
    named_vars.update(match_obj.groupdict())
    _url = string.Template(entry['url']).safe_substitute(**named_vars)

    data = {
        'pref': pref,
        'cls': 'issue-tracker-link',
        'url': _url,
        'id-repr': issue_id,
        'issue-prefix': entry['pref'],
        'serv': entry['url'],
    }
    if return_raw_data:
        return {
            'id': issue_id,
            'url': _url
        }
    return tmpl % data


def process_patterns(text_string, repo_name, link_format='html'):
    allowed_formats = ['html', 'rst', 'markdown']
    if link_format not in allowed_formats:
        raise ValueError('Link format can be only one of:{} got {}'.format(
            allowed_formats, link_format))

    repo = None
    if repo_name:
        # Retrieve the repo to keep an invalid repo_name from exploding in
        # IssueTrackerSettingsModel, but still pass the invalid name further down
        repo = Repository.get_by_repo_name(repo_name, cache=True)

    settings_model = IssueTrackerSettingsModel(repo=repo)
    active_entries = settings_model.get_settings(cache=True)

    issues_data = []
    newtext = text_string

    for uid, entry in active_entries.items():
        log.debug('found issue tracker entry with uid %s' % (uid,))

        if not (entry['pat'] and entry['url']):
            log.debug('skipping due to missing data')
            continue

        log.debug('issue tracker entry: uid: `%s` PAT:%s URL:%s PREFIX:%s'
                  % (uid, entry['pat'], entry['url'], entry['pref']))

        try:
            pattern = re.compile(r'%s' % entry['pat'])
        except re.error:
            log.exception(
                'issue tracker pattern: `%s` failed to compile',
                entry['pat'])
            continue

        data_func = partial(
            _process_url_func, repo_name=repo_name, entry=entry, uid=uid,
            return_raw_data=True)

        for match_obj in pattern.finditer(text_string):
            issues_data.append(data_func(match_obj))

        url_func = partial(
            _process_url_func, repo_name=repo_name, entry=entry, uid=uid,
            link_format=link_format)

        newtext = pattern.sub(url_func, newtext)
        log.debug('processed prefix:uid `%s`' % (uid,))

    return newtext, issues_data
1706
1706
1707
1707
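As an aside, the pattern-to-link pipeline above boils down to named regex groups feeding a string.Template. The standalone sketch below shows the idea with only the standard library; the tracker entry, repository name, and commit message are hypothetical, and the real helpers additionally read entries from IssueTrackerSettingsModel and support rst/markdown output.

import re
import string

# hypothetical issue-tracker entry, mirroring the {'pat': ..., 'url': ..., 'pref': ...} shape
entry = {
    'pat': r'#(?P<issue_id>\d+)',
    'url': 'https://tracker.example.com/${repo}/issue/${issue_id}',
    'pref': '#',
}

def make_link(match_obj, repo_name):
    # named regex groups become template variables, as in _process_url_func
    named_vars = {'repo': repo_name}
    named_vars.update(match_obj.groupdict())
    url = string.Template(entry['url']).safe_substitute(**named_vars)
    return '<a class="issue-tracker-link" href="%s">%s%s</a>' % (
        url, entry['pref'], match_obj.group('issue_id'))

text = 'fixed login bug, closes #42'
pattern = re.compile(entry['pat'])
print(pattern.sub(lambda m: make_link(m, 'acme/backend'), text))
# -> fixed login bug, closes
#    <a class="issue-tracker-link" href="https://tracker.example.com/acme/backend/issue/42">#42</a>
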
def urlify_commit_message(commit_text, repository=None):
    """
    Parses the given text message and makes proper links.
    Issues are linked to the given issue tracker, and the rest become
    commit links.

    :param commit_text:
    :param repository:
    """
    from pylons import url  # doh, we need to re-import url to mock it later

    def escaper(string):
        return string.replace('<', '&lt;').replace('>', '&gt;')

    newtext = escaper(commit_text)

    # extract http/https links and make them real urls
    newtext = urlify_text(newtext, safe=False)

    # urlify commits - extract commit ids and make link out of them, if we have
    # the scope of repository present.
    if repository:
        newtext = urlify_commits(newtext, repository)

    # process issue tracker patterns
    newtext, issues = process_patterns(newtext, repository or '')

    return literal(newtext)

def render_binary(repo_name, file_obj):
    """
    Choose how to render a binary file
    """
    filename = file_obj.name

    # images
    for ext in ['*.png', '*.jpg', '*.ico', '*.gif']:
        if fnmatch.fnmatch(filename, pat=ext):
            alt = filename
            src = url('files_raw_home', repo_name=repo_name,
                      revision=file_obj.commit.raw_id, f_path=file_obj.path)
            return literal('<img class="rendered-binary" alt="{}" src="{}">'.format(alt, src))


def renderer_from_filename(filename, exclude=None):
    """
    choose a renderer based on filename, this works only for text based files
    """

    # ipython
    for ext in ['*.ipynb']:
        if fnmatch.fnmatch(filename, pat=ext):
            return 'jupyter'

    is_markup = MarkupRenderer.renderer_from_filename(filename, exclude=exclude)
    if is_markup:
        return is_markup
    return None


def render(source, renderer='rst', mentions=False, relative_url=None,
           repo_name=None):

    def maybe_convert_relative_links(html_source):
        if relative_url:
            return relative_links(html_source, relative_url)
        return html_source

    if renderer == 'rst':
        if repo_name:
            # process patterns on comments if we pass in repo name
            source, issues = process_patterns(
                source, repo_name, link_format='rst')

        return literal(
            '<div class="rst-block">%s</div>' %
            maybe_convert_relative_links(
                MarkupRenderer.rst(source, mentions=mentions)))
    elif renderer == 'markdown':
        if repo_name:
            # process patterns on comments if we pass in repo name
            source, issues = process_patterns(
                source, repo_name, link_format='markdown')

        return literal(
            '<div class="markdown-block">%s</div>' %
            maybe_convert_relative_links(
                MarkupRenderer.markdown(source, flavored=True,
                                        mentions=mentions)))
    elif renderer == 'jupyter':
        return literal(
            '<div class="ipynb">%s</div>' %
            maybe_convert_relative_links(
                MarkupRenderer.jupyter(source)))

    # None means just show the file-source
    return None

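For context, renderer selection is a plain glob match on the filename. A minimal sketch follows, assuming an illustrative rst/markdown mapping (the real mapping for markup files lives inside MarkupRenderer):

import fnmatch

def pick_renderer(filename):
    # notebooks are handled by the dedicated jupyter renderer
    if fnmatch.fnmatch(filename, '*.ipynb'):
        return 'jupyter'
    # hypothetical markup mapping, for illustration only
    for pat, renderer in [('*.rst', 'rst'), ('*.md', 'markdown')]:
        if fnmatch.fnmatch(filename, pat):
            return renderer
    return None  # None means: just show the file source

print(pick_renderer('notebook.ipynb'))  # jupyter
print(pick_renderer('README.md'))       # markdown
print(pick_renderer('main.c'))          # None
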
def commit_status(repo, commit_id):
    return ChangesetStatusModel().get_status(repo, commit_id)


def commit_status_lbl(commit_status):
    return dict(ChangesetStatus.STATUSES).get(commit_status)


def commit_time(repo_name, commit_id):
    repo = Repository.get_by_repo_name(repo_name)
    commit = repo.get_commit(commit_id=commit_id)
    return commit.date


def get_permission_name(key):
    return dict(Permission.PERMS).get(key)


def journal_filter_help():
    return _(
        'Example filter terms:\n' +
        ' repository:vcs\n' +
        ' username:marcin\n' +
        ' action:*push*\n' +
        ' ip:127.0.0.1\n' +
        ' date:20120101\n' +
        ' date:[20120101100000 TO 20120102]\n' +
        '\n' +
        'Generate wildcards using \'*\' character:\n' +
        ' "repository:vcs*" - search everything starting with \'vcs\'\n' +
        ' "repository:*vcs*" - search for repository containing \'vcs\'\n' +
        '\n' +
        'Optional AND / OR operators in queries\n' +
        ' "repository:vcs OR repository:test"\n' +
        ' "username:test AND repository:test*"\n'
    )

def search_filter_help(searcher):

    terms = ''
    return _(
        'Example filter terms for `{searcher}` search:\n' +
        '{terms}\n' +
        'Generate wildcards using \'*\' character:\n' +
        ' "repo_name:vcs*" - search everything starting with \'vcs\'\n' +
        ' "repo_name:*vcs*" - search for repository containing \'vcs\'\n' +
        '\n' +
        'Optional AND / OR operators in queries\n' +
        ' "repo_name:vcs OR repo_name:test"\n' +
        ' "owner:test AND repo_name:test*"\n' +
        'More: {search_doc}'
    ).format(searcher=searcher.name,
             terms=terms, search_doc=searcher.query_lang_doc)

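Because search_filter_help only interpolates the searcher's name and documentation link, the rendered "example queries" block can be previewed with a stand-in searcher object. The namedtuple below is a made-up stand-in; the template string mirrors the helper above, with the _() translation wrapper dropped.

import collections

FakeSearcher = collections.namedtuple('FakeSearcher', 'name query_lang_doc')
searcher = FakeSearcher(
    name='whoosh',
    query_lang_doc='http://whoosh.readthedocs.io/en/latest/querylang.html')

# same template as search_filter_help, minus the _() translation wrapper
help_text = (
    'Example filter terms for `{searcher}` search:\n'
    '{terms}\n'
    'Generate wildcards using \'*\' character:\n'
    ' "repo_name:vcs*" - search everything starting with \'vcs\'\n'
    ' "repo_name:*vcs*" - search for repository containing \'vcs\'\n'
    '\n'
    'Optional AND / OR operators in queries\n'
    ' "repo_name:vcs OR repo_name:test"\n'
    ' "owner:test AND repo_name:test*"\n'
    'More: {search_doc}'
).format(searcher=searcher.name, terms='', search_doc=searcher.query_lang_doc)

print(help_text)
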
def not_mapped_error(repo_name):
    flash(_('%s repository is not mapped to db, perhaps'
            ' it was created or renamed from the filesystem.'
            ' Please run the application again'
            ' in order to rescan repositories') % repo_name, category='error')


def ip_range(ip_addr):
    from rhodecode.model.db import UserIpMap
    s, e = UserIpMap._get_ip_range(ip_addr)
    return '%s - %s' % (s, e)


def form(url, method='post', needs_csrf_token=True, **attrs):
    """Wrapper around webhelpers.tags.form to prevent CSRF attacks."""
    if method.lower() != 'get' and needs_csrf_token:
        raise Exception(
            'Forms to POST/PUT/DELETE endpoints should have (in general) a ' +
            'CSRF token. If the endpoint does not require such token you can ' +
            'explicitly set the parameter needs_csrf_token to false.')

    return wh_form(url, method=method, **attrs)


def secure_form(url, method="POST", multipart=False, **attrs):
    """Start a form tag that points the action to a url. This
    form tag will also include the hidden field containing
    the auth token.

    The url options should be given either as a string, or as a
    ``url()`` function. The method for the form defaults to POST.

    Options:

    ``multipart``
        If set to True, the enctype is set to "multipart/form-data".
    ``method``
        The method to use when submitting the form, usually either
        "GET" or "POST". If "PUT", "DELETE", or another verb is used, a
        hidden input with name _method is added to simulate the verb
        over POST.

    """
    from webhelpers.pylonslib.secure_form import insecure_form
    form = insecure_form(url, method, multipart, **attrs)
    token = csrf_input()
    return literal("%s\n%s" % (form, token))


def csrf_input():
    return literal(
        '<input type="hidden" id="{}" name="{}" value="{}">'.format(
            csrf_token_key, csrf_token_key, get_csrf_token()))

def dropdownmenu(name, selected, options, enable_filter=False, **attrs):
    select_html = select(name, selected, options, **attrs)
    select2 = """
    <script>
        $(document).ready(function() {
              $('#%s').select2({
                  containerCssClass: 'drop-menu',
                  dropdownCssClass: 'drop-menu-dropdown',
                  dropdownAutoWidth: true%s
              });
        });
    </script>
    """
    filter_option = """,
                  minimumResultsForSearch: -1
    """
    input_id = attrs.get('id') or name
    filter_enabled = "" if enable_filter else filter_option
    select_script = literal(select2 % (input_id, filter_enabled))

    return literal(select_html+select_script)

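The dropdownmenu helper relies on a small %-substitution trick: when filtering is disabled, an extra select2 option is spliced into the init script so the search box inside the dropdown is hidden. A minimal, runnable sketch of just that substitution (select2_script is a made-up name; the real helper also renders the <select> element via webhelpers' select()):

select2 = """
<script>
  $(document).ready(function() {
      $('#%s').select2({
          containerCssClass: 'drop-menu',
          dropdownCssClass: 'drop-menu-dropdown',
          dropdownAutoWidth: true%s
      });
  });
</script>
"""
filter_option = ",\n          minimumResultsForSearch: -1"

def select2_script(input_id, enable_filter=False):
    # with filtering disabled, minimumResultsForSearch: -1 hides select2's search box
    return select2 % (input_id, "" if enable_filter else filter_option)

print(select2_script('id_search_type'))
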
def get_visual_attr(tmpl_context_var, attr_name):
    """
    A safe way to get a variable from the `visual` attribute of the template
    context.

    :param tmpl_context_var: instance of tmpl_context, usually present as `c`
    :param attr_name: name of the attribute we fetch from the c.visual
    """
    visual = getattr(tmpl_context_var, 'visual', None)
    if not visual:
        return
    else:
        return getattr(visual, attr_name, None)


def get_last_path_part(file_node):
    if not file_node.path:
        return u''

    path = safe_unicode(file_node.path.split('/')[-1])
    return u'../' + path


def route_url(*args, **kwds):
    """
    Wrapper around pyramids `route_url` (fully qualified url) function.
    It is used to generate URLs from within pylons views or templates.
    This will be removed when the pyramid migration is finished.
    """
    req = get_current_request()
    return req.route_url(*args, **kwds)


def route_path(*args, **kwds):
    """
    Wrapper around pyramids `route_path` function. It is used to generate
    URLs from within pylons views or templates. This will be removed when
    the pyramid migration is finished.
    """
    req = get_current_request()
    return req.route_path(*args, **kwds)


def route_path_or_none(*args, **kwargs):
    try:
        return route_path(*args, **kwargs)
    except KeyError:
        return None


def static_url(*args, **kwds):
    """
    Wrapper around pyramids `static_url` function. It is used to generate
    URLs from within pylons views or templates. This will be removed when
    the pyramid migration is finished.
    """
    req = get_current_request()
    return req.static_url(*args, **kwds)


def resource_path(*args, **kwds):
    """
    Wrapper around pyramids `resource_path` function. It is used to generate
    URLs from within pylons views or templates. This will be removed when
    the pyramid migration is finished.
    """
    req = get_current_request()
    return req.resource_path(*args, **kwds)
@@ -1,57 +1,59 @@
# -*- coding: utf-8 -*-

# Copyright (C) 2012-2017 RhodeCode GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License, version 3
# (only), as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This program is dual-licensed. If you wish to learn more about the
# RhodeCode Enterprise Edition, including its added features, Support services,
# and proprietary license terms, please see https://rhodecode.com/licenses/

"""
Index schema for RhodeCode
"""

import importlib
import logging

log = logging.getLogger(__name__)

# leave defaults for backward compat
default_searcher = 'rhodecode.lib.index.whoosh'
default_location = '%(here)s/data/index'


class BaseSearch(object):
    query_lang_doc = ''

    def __init__(self):
        pass

    def cleanup(self):
        pass

    def search(self, query, document_type, search_user, repo_name=None,
               raise_on_exc=True):
        raise Exception('NotImplemented')


def searcher_from_config(config, prefix='search.'):
    _config = {}
    for key in config.keys():
        if key.startswith(prefix):
            _config[key[len(prefix):]] = config[key]

    if 'location' not in _config:
        _config['location'] = default_location
    imported = importlib.import_module(_config.get('module', default_searcher))
    searcher = imported.Search(config=_config)
    return searcher
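
searcher_from_config simply strips the `search.` prefix from matching config keys, falls back to the whoosh backend and a default index location, and imports that module's Search class. A standalone sketch of just the key handling, using hypothetical ini values:

config = {
    'search.module': 'rhodecode.lib.index.whoosh',
    'search.location': '/var/opt/rhodecode_data/index',
    'use_gravatar': 'true',   # unrelated key, ignored by the prefix filter
}

prefix = 'search.'
_config = {key[len(prefix):]: value
           for key, value in config.items() if key.startswith(prefix)}
_config.setdefault('location', '%(here)s/data/index')

print(_config)
# -> {'module': 'rhodecode.lib.index.whoosh',
#     'location': '/var/opt/rhodecode_data/index'}  (key order may vary)
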
@@ -1,280 +1,281 @@
# -*- coding: utf-8 -*-

# Copyright (C) 2012-2017 RhodeCode GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License, version 3
# (only), as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This program is dual-licensed. If you wish to learn more about the
# RhodeCode Enterprise Edition, including its added features, Support services,
# and proprietary license terms, please see https://rhodecode.com/licenses/

"""
Index schema for RhodeCode
"""

from __future__ import absolute_import
import logging
import os
import re

from pylons.i18n.translation import _

from whoosh import query as query_lib, sorting
from whoosh.highlight import HtmlFormatter, ContextFragmenter
from whoosh.index import create_in, open_dir, exists_in, EmptyIndexError
from whoosh.qparser import QueryParser, QueryParserError

import rhodecode.lib.helpers as h
from rhodecode.lib.index import BaseSearch

log = logging.getLogger(__name__)


try:
    # we first try to import from rhodecode tools, fallback to copies if
    # we're unable to
    from rhodecode_tools.lib.fts_index.whoosh_schema import (
        ANALYZER, FILE_INDEX_NAME, FILE_SCHEMA, COMMIT_INDEX_NAME,
        COMMIT_SCHEMA)
except ImportError:
    log.warning('rhodecode_tools schema not available, doing a fallback '
                'import from `rhodecode.lib.index.whoosh_fallback_schema`')
    from rhodecode.lib.index.whoosh_fallback_schema import (
        ANALYZER, FILE_INDEX_NAME, FILE_SCHEMA, COMMIT_INDEX_NAME,
        COMMIT_SCHEMA)


FORMATTER = HtmlFormatter('span', between='\n<span class="break">...</span>\n')
FRAGMENTER = ContextFragmenter(200)

log = logging.getLogger(__name__)


class Search(BaseSearch):
    # this also shows in UI
    query_lang_doc = 'http://whoosh.readthedocs.io/en/latest/querylang.html'
    name = 'whoosh'

    def __init__(self, config):
        super(Search, self).__init__()
        self.config = config
        if not os.path.isdir(self.config['location']):
            os.makedirs(self.config['location'])

        opener = create_in
        if exists_in(self.config['location'], indexname=FILE_INDEX_NAME):
            opener = open_dir
        file_index = opener(self.config['location'], schema=FILE_SCHEMA,
                            indexname=FILE_INDEX_NAME)

        opener = create_in
        if exists_in(self.config['location'], indexname=COMMIT_INDEX_NAME):
            opener = open_dir
        changeset_index = opener(self.config['location'], schema=COMMIT_SCHEMA,
                                 indexname=COMMIT_INDEX_NAME)

        self.commit_schema = COMMIT_SCHEMA
        self.commit_index = changeset_index
        self.file_schema = FILE_SCHEMA
        self.file_index = file_index
        self.searcher = None

    def cleanup(self):
        if self.searcher:
            self.searcher.close()

    def _extend_query(self, query):
        hashes = re.compile('([0-9a-f]{5,40})').findall(query)
        if hashes:
            hashes_or_query = ' OR '.join('commit_id:%s*' % h for h in hashes)
            query = u'(%s) OR %s' % (query, hashes_or_query)
        return query

    def search(self, query, document_type, search_user,
               repo_name=None, requested_page=1, page_limit=10, sort=None,
               raise_on_exc=True):

        original_query = query
        query = self._extend_query(query)

        log.debug(u'QUERY: %s on %s', query, document_type)
        result = {
            'results': [],
            'count': 0,
            'error': None,
            'runtime': 0
        }
        search_type, index_name, schema_defn = self._prepare_for_search(
            document_type)
        self._init_searcher(index_name)
        try:
            qp = QueryParser(search_type, schema=schema_defn)
            allowed_repos_filter = self._get_repo_filter(
                search_user, repo_name)
            try:
                query = qp.parse(unicode(query))
                log.debug('query: %s (%s)' % (query, repr(query)))

                reverse, sortedby = False, None
                if search_type == 'message':
                    if sort == 'oldfirst':
                        sortedby = 'date'
                        reverse = False
                    elif sort == 'newfirst':
                        sortedby = 'date'
                        reverse = True

                whoosh_results = self.searcher.search(
                    query, filter=allowed_repos_filter, limit=None,
                    sortedby=sortedby, reverse=reverse)

                # fixes for 32k limit that whoosh uses for highlight
                whoosh_results.fragmenter.charlimit = None
                res_ln = whoosh_results.scored_length()
                result['runtime'] = whoosh_results.runtime
                result['count'] = res_ln
                result['results'] = WhooshResultWrapper(
                    search_type, res_ln, whoosh_results)

            except QueryParserError:
                result['error'] = _('Invalid search query. Try quoting it.')
        except (EmptyIndexError, IOError, OSError):
            msg = _('There is no index to search in. '
                    'Please run whoosh indexer')
            log.exception(msg)
            result['error'] = msg
        except Exception:
            msg = _('An error occurred during this search operation')
            log.exception(msg)
            result['error'] = msg

        return result

    def statistics(self):
        stats = [
            {'key': _('Index Type'), 'value': 'Whoosh'},
            {'key': _('File Index'), 'value': str(self.file_index)},
            {'key': _('Indexed documents'),
             'value': self.file_index.doc_count()},
            {'key': _('Last update'),
             'value': h.time_to_datetime(self.file_index.last_modified())},
            {'key': _('Commit index'), 'value': str(self.commit_index)},
            {'key': _('Indexed documents'),
             'value': str(self.commit_index.doc_count())},
            {'key': _('Last update'),
             'value': h.time_to_datetime(self.commit_index.last_modified())}
        ]
        return stats

    def _get_repo_filter(self, auth_user, repo_name):

        allowed_to_search = [
            repo for repo, perm in
            auth_user.permissions['repositories'].items()
            if perm != 'repository.none']

        if repo_name:
            repo_filter = [query_lib.Term('repository', repo_name)]

        elif 'hg.admin' in auth_user.permissions.get('global', []):
            return None

        else:
            repo_filter = [query_lib.Term('repository', _rn)
                           for _rn in allowed_to_search]
            # in case we're not allowed to search anywhere, it's a trick
            # to tell whoosh we're filtering, on ALL results
            repo_filter = repo_filter or [query_lib.Term('repository', '')]

        return query_lib.Or(repo_filter)

    def _prepare_for_search(self, cur_type):
        search_type = {
            'content': 'content',
            'commit': 'message',
            'path': 'path',
            'repository': 'repository'
        }.get(cur_type, 'content')

        index_name = {
            'content': FILE_INDEX_NAME,
            'commit': COMMIT_INDEX_NAME,
            'path': FILE_INDEX_NAME
        }.get(cur_type, FILE_INDEX_NAME)

        schema_defn = {
            'content': self.file_schema,
            'commit': self.commit_schema,
            'path': self.file_schema
        }.get(cur_type, self.file_schema)

        log.debug('IDX: %s' % index_name)
        log.debug('SCHEMA: %s' % schema_defn)
        return search_type, index_name, schema_defn

    def _init_searcher(self, index_name):
        idx = open_dir(self.config['location'], indexname=index_name)
        self.searcher = idx.searcher()
        return self.searcher


class WhooshResultWrapper(object):
    def __init__(self, search_type, total_hits, results):
        self.search_type = search_type
        self.results = results
        self.total_hits = total_hits

    def __str__(self):
        return '<%s at %s>' % (self.__class__.__name__, len(self))

    def __repr__(self):
        return self.__str__()

    def __len__(self):
        return self.total_hits

    def __iter__(self):
        """
        Allows iteration over results and lazily generates content.

        *Requires* implementation of ``__getitem__`` method.
        """
        for hit in self.results:
            yield self.get_full_content(hit)

    def __getitem__(self, key):
        """
        Slicing of resultWrapper
        """
        i, j = key.start, key.stop
        for hit in self.results[i:j]:
            yield self.get_full_content(hit)

    def get_full_content(self, hit):
        # TODO: marcink: this feels like an overkill, there's a lot of data
        # inside hit object, and we don't need all
        res = dict(hit)

        f_path = ''  # noqa
        if self.search_type in ['content', 'path']:
            f_path = res['path'][len(res['repository']):]
            f_path = f_path.lstrip(os.sep)

        if self.search_type == 'content':
            res.update({'content_short_hl': hit.highlights('content'),
                        'f_path': f_path})
        elif self.search_type == 'path':
            res.update({'f_path': f_path})
        elif self.search_type == 'message':
            res.update({'message_hl': hit.highlights('message')})

        return res
1 ## -*- coding: utf-8 -*-
1 ## -*- coding: utf-8 -*-
2 <%inherit file="/base/base.mako"/>
2 <%inherit file="/base/base.mako"/>
3
3
4 <%def name="title()">
4 <%def name="title()">
5 %if c.repo_name:
5 %if c.repo_name:
6 ${_('Search inside repository %(repo_name)s') % {'repo_name': c.repo_name}}
6 ${_('Search inside repository %(repo_name)s') % {'repo_name': c.repo_name}}
7 %else:
7 %else:
8 ${_('Search inside all accessible repositories')}
8 ${_('Search inside all accessible repositories')}
9 %endif
9 %endif
10 %if c.rhodecode_name:
10 %if c.rhodecode_name:
11 &middot; ${h.branding(c.rhodecode_name)}
11 &middot; ${h.branding(c.rhodecode_name)}
12 %endif
12 %endif
13 </%def>
13 </%def>
14
14
15 <%def name="breadcrumbs_links()">
15 <%def name="breadcrumbs_links()">
16 %if c.repo_name:
16 %if c.repo_name:
17 ${_('Search inside repository %(repo_name)s') % {'repo_name': c.repo_name}}
17 ${_('Search inside repository %(repo_name)s') % {'repo_name': c.repo_name}}
18 %else:
18 %else:
19 ${_('Search inside all accessible repositories')}
19 ${_('Search inside all accessible repositories')}
20 %endif
20 %endif
21 %if c.cur_query:
21 %if c.cur_query:
22 &raquo;
22 &raquo;
23 ${c.cur_query}
23 ${c.cur_query}
24 %endif
24 %endif
25 </%def>
25 </%def>
26
26
27 <%def name="menu_bar_nav()">
27 <%def name="menu_bar_nav()">
28 %if c.repo_name:
28 %if c.repo_name:
29 ${self.menu_items(active='repositories')}
29 ${self.menu_items(active='repositories')}
30 %else:
30 %else:
31 ${self.menu_items(active='search')}
31 ${self.menu_items(active='search')}
32 %endif
32 %endif
33 </%def>
33 </%def>
34
34
35 <%def name="menu_bar_subnav()">
35 <%def name="menu_bar_subnav()">
36 %if c.repo_name:
36 %if c.repo_name:
37 ${self.repo_menu(active='options')}
37 ${self.repo_menu(active='options')}
38 %endif
38 %endif
39 </%def>
39 </%def>
40
40
41 <%def name="main()">
41 <%def name="main()">
42 <div class="box">
42 <div class="box">
43 %if c.repo_name:
43 %if c.repo_name:
44 <!-- box / title -->
44 <!-- box / title -->
45 <div class="title">
45 <div class="title">
46 ${self.repo_page_title(c.rhodecode_db_repo)}
46 ${self.repo_page_title(c.rhodecode_db_repo)}
47 </div>
47 </div>
48 ${h.form(h.url('search_repo_home',repo_name=c.repo_name),method='get')}
48 ${h.form(h.url('search_repo_home',repo_name=c.repo_name),method='get')}
49 %else:
49 %else:
50 <!-- box / title -->
50 <!-- box / title -->
51 <div class="title">
51 <div class="title">
52 ${self.breadcrumbs()}
52 ${self.breadcrumbs()}
53 <ul class="links">&nbsp;</ul>
53 <ul class="links">&nbsp;</ul>
54 </div>
54 </div>
55 <!-- end box / title -->
55 <!-- end box / title -->
56 ${h.form(h.url('search'),method='get')}
56 ${h.form(h.url('search'),method='get')}
57 %endif
57 %endif
58 <div class="form search-form">
58 <div class="form search-form">
59 <div class="fields">
59 <div class="fields">
60 <label for="q">${_('Search item')}:</label>
60 <label for="q">${_('Search item')}:</label>
61 ${h.text('q', c.cur_query)}
61 ${h.text('q', c.cur_query)}
62
62
63 ${h.select('type',c.search_type,[('content',_('File contents')), ('commit',_('Commit messages')), ('path',_('File names')),],id='id_search_type')}
63 ${h.select('type',c.search_type,[('content',_('File contents')), ('commit',_('Commit messages')), ('path',_('File names')),],id='id_search_type')}
64 <input type="submit" value="${_('Search')}" class="btn"/>
64 <input type="submit" value="${_('Search')}" class="btn"/>
65 <br/>
65 <br/>
66
66 <div class="search-feedback-items">
67 <div class="search-feedback-items">
67 % for error in c.errors:
68 % for error in c.errors:
68 <span class="error-message">
69 <span class="error-message">
69 % for k,v in error.asdict().items():
70 % for k,v in error.asdict().items():
70 ${k} - ${v}
71 ${k} - ${v}
71 % endfor
72 % endfor
72 </span>
73 </span>
73 % endfor
74 % endfor
75 <div class="field">
76 <p class="filterexample" style="position: inherit" onclick="$('#search-help').toggle()">${_('Example Queries')}</p>
77 <pre id="search-help" style="display: none">${h.tooltip(h.search_filter_help(c.searcher))}</pre>
78 </div>
79
74 <div class="field">${c.runtime}</div>
80 <div class="field">${c.runtime}</div>
75 </div>
81 </div>
76 </div>
82 </div>
77 </div>
83 </div>
84
78 ${h.end_form()}
85 ${h.end_form()}
79 <div class="search">
86 <div class="search">
80 % if c.search_type == 'content':
87 % if c.search_type == 'content':
81 <%include file='search_content.mako'/>
88 <%include file='search_content.mako'/>
82 % elif c.search_type == 'path':
89 % elif c.search_type == 'path':
83 <%include file='search_path.mako'/>
90 <%include file='search_path.mako'/>
84 % elif c.search_type == 'commit':
91 % elif c.search_type == 'commit':
85 <%include file='search_commit.mako'/>
92 <%include file='search_commit.mako'/>
86 % elif c.search_type == 'repository':
93 % elif c.search_type == 'repository':
87 <%include file='search_repository.mako'/>
94 <%include file='search_repository.mako'/>
88 % endif
95 % endif
89 </div>
96 </div>
90 </div>
97 </div>
91 <script>
98 <script>
92 $(document).ready(function(){
99 $(document).ready(function(){
93 $("#id_search_type").select2({
100 $("#id_search_type").select2({
94 'containerCssClass': "drop-menu",
101 'containerCssClass': "drop-menu",
95 'dropdownCssClass': "drop-menu-dropdown",
102 'dropdownCssClass': "drop-menu-dropdown",
96 'dropdownAutoWidth': true,
103 'dropdownAutoWidth': true,
97 'minimumResultsForSearch': -1
104 'minimumResultsForSearch': -1
98 });
105 });
99 })
106 })
100 </script>
107 </script>
101 </%def>
108 </%def>