##// END OF EJS Templates
search: added basic example query block.
marcink -
r1684:21d2623d default
parent child Browse files
Show More
@@ -1,111 +1,112 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2010-2017 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21 """
22 22 Search controller for RhodeCode
23 23 """
24 24
25 25 import logging
26 26 import urllib
27 27
28 28 from pylons import request, config, tmpl_context as c
29 29
30 30 from webhelpers.util import update_params
31 31
32 32 from rhodecode.lib.auth import LoginRequired, AuthUser
33 33 from rhodecode.lib.base import BaseRepoController, render
34 34 from rhodecode.lib.helpers import Page
35 35 from rhodecode.lib.utils2 import safe_str, safe_int
36 36 from rhodecode.lib.index import searcher_from_config
37 37 from rhodecode.model import validation_schema
38 38 from rhodecode.model.validation_schema.schemas import search_schema
39 39
40 40 log = logging.getLogger(__name__)
41 41
42 42
class SearchController(BaseRepoController):
    """Controller serving the full-text search page (global or per-repo)."""

    @LoginRequired()
    def index(self, repo_name=None):
        """
        Render the search page and, when a query is present, execute it.

        :param repo_name: optional repository name to restrict the search
            to; ``None`` means a global search.
        """
        searcher = searcher_from_config(config)
        formatted_results = []
        execution_time = ''

        schema = search_schema.SearchParamsSchema()

        search_params = {}
        errors = []
        try:
            # validate/deserialize the raw GET parameters; invalid input
            # is collected into `errors` and rendered back to the user
            search_params = schema.deserialize(
                dict(search_query=request.GET.get('q'),
                     search_type=request.GET.get('type'),
                     search_sort=request.GET.get('sort'),
                     page_limit=request.GET.get('page_limit'),
                     requested_page=request.GET.get('page'))
            )
        except validation_schema.Invalid as e:
            errors = e.children

        def url_generator(**kw):
            # pagination URL builder; closes over search_query/search_type
            # which are assigned below, before the paginator calls this
            q = urllib.quote(safe_str(search_query))
            return update_params(
                "?q=%s&type=%s" % (q, safe_str(search_type)), **kw)

        search_query = search_params.get('search_query')
        search_type = search_params.get('search_type')
        search_sort = search_params.get('search_sort')
        if search_params.get('search_query'):
            page_limit = search_params['page_limit']
            requested_page = search_params['requested_page']

            c.perm_user = AuthUser(user_id=c.rhodecode_user.user_id,
                                   ip_addr=self.ip_addr)

            try:
                search_result = searcher.search(
                    search_query, search_type, c.perm_user, repo_name,
                    requested_page, page_limit, search_sort)

                formatted_results = Page(
                    search_result['results'], page=requested_page,
                    item_count=search_result['count'],
                    items_per_page=page_limit, url=url_generator)
            finally:
                # always release searcher resources, even when search fails
                searcher.cleanup()

            if not search_result['error']:
                execution_time = '%s results (%.3f seconds)' % (
                    search_result['count'],
                    search_result['runtime'])
            elif not errors:
                # surface backend errors through the same machinery as
                # schema validation errors
                node = schema['search_query']
                errors = [
                    validation_schema.Invalid(node, search_result['error'])]

        c.sort = search_sort
        c.url_generator = url_generator
        c.errors = errors
        c.formatted_results = formatted_results
        c.runtime = execution_time
        c.cur_query = search_query
        c.search_type = search_type
        # expose the searcher so templates can show e.g. example queries
        c.searcher = searcher
        # Return a rendered template
        return render('/search/search.mako')
@@ -1,1987 +1,2005 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2010-2017 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21 """
22 22 Helper functions
23 23
24 24 Consists of functions to typically be used within templates, but also
25 25 available to Controllers. This module is available to both as 'h'.
26 26 """
27 27
28 28 import random
29 29 import hashlib
30 30 import StringIO
31 31 import urllib
32 32 import math
33 33 import logging
34 34 import re
35 35 import urlparse
36 36 import time
37 37 import string
38 38 import hashlib
39 39 import pygments
40 40 import itertools
41 41 import fnmatch
42 42
43 43 from datetime import datetime
44 44 from functools import partial
45 45 from pygments.formatters.html import HtmlFormatter
46 46 from pygments import highlight as code_highlight
47 47 from pygments.lexers import (
48 48 get_lexer_by_name, get_lexer_for_filename, get_lexer_for_mimetype)
49 49 from pylons import url as pylons_url
50 50 from pylons.i18n.translation import _, ungettext
51 51 from pyramid.threadlocal import get_current_request
52 52
53 53 from webhelpers.html import literal, HTML, escape
54 54 from webhelpers.html.tools import *
55 55 from webhelpers.html.builder import make_tag
56 56 from webhelpers.html.tags import auto_discovery_link, checkbox, css_classes, \
57 57 end_form, file, form as wh_form, hidden, image, javascript_link, link_to, \
58 58 link_to_if, link_to_unless, ol, required_legend, select, stylesheet_link, \
59 59 submit, text, password, textarea, title, ul, xml_declaration, radio
60 60 from webhelpers.html.tools import auto_link, button_to, highlight, \
61 61 js_obfuscate, mail_to, strip_links, strip_tags, tag_re
62 62 from webhelpers.pylonslib import Flash as _Flash
63 63 from webhelpers.text import chop_at, collapse, convert_accented_entities, \
64 64 convert_misc_entities, lchop, plural, rchop, remove_formatting, \
65 65 replace_whitespace, urlify, truncate, wrap_paragraphs
66 66 from webhelpers.date import time_ago_in_words
67 67 from webhelpers.paginate import Page as _Page
68 68 from webhelpers.html.tags import _set_input_attrs, _set_id_attr, \
69 69 convert_boolean_attrs, NotGiven, _make_safe_id_component
70 70 from webhelpers2.number import format_byte_size
71 71
72 72 from rhodecode.lib.action_parser import action_parser
73 73 from rhodecode.lib.ext_json import json
74 74 from rhodecode.lib.utils import repo_name_slug, get_custom_lexer
75 75 from rhodecode.lib.utils2 import str2bool, safe_unicode, safe_str, \
76 76 get_commit_safe, datetime_to_time, time_to_datetime, time_to_utcdatetime, \
77 77 AttributeDict, safe_int, md5, md5_safe
78 78 from rhodecode.lib.markup_renderer import MarkupRenderer, relative_links
79 79 from rhodecode.lib.vcs.exceptions import CommitDoesNotExistError
80 80 from rhodecode.lib.vcs.backends.base import BaseChangeset, EmptyCommit
81 81 from rhodecode.config.conf import DATE_FORMAT, DATETIME_FORMAT
82 82 from rhodecode.model.changeset_status import ChangesetStatusModel
83 83 from rhodecode.model.db import Permission, User, Repository
84 84 from rhodecode.model.repo_group import RepoGroupModel
85 85 from rhodecode.model.settings import IssueTrackerSettingsModel
86 86
87 87 log = logging.getLogger(__name__)
88 88
89 89
90 90 DEFAULT_USER = User.DEFAULT_USER
91 91 DEFAULT_USER_EMAIL = User.DEFAULT_USER_EMAIL
92 92
93 93
def url(*args, **kw):
    """Thin delegation to the pylons ``url`` helper."""
    return pylons_url(*args, **kw)
96 96
97 97
def pylons_url_current(*args, **kw):
    """
    This function overrides pylons.url.current() which returns the current
    path so that it will also work from a pyramid only context. This
    should be removed once port to pyramid is complete.
    """
    if args or kw:
        return pylons_url.current(*args, **kw)
    # no arguments: fall back to the pyramid request path
    return get_current_request().path

url.current = pylons_url_current
110 110
111 111
def url_replace(**qargs):
    """ Returns the current request url while replacing query string args """
    current_request = get_current_request()
    params = current_request.GET.mixed()
    params.update(qargs)
    return url('', **params)
119 119
120 120
def asset(path, ver=None, **kwargs):
    """
    Helper to generate a static asset file path for rhodecode assets

    eg. h.asset('images/image.png', ver='3923')

    :param path: path of asset
    :param ver: optional version query param to append as ?ver=
    """
    request = get_current_request()
    query = {}
    query.update(kwargs)
    if ver:
        # bugfix: previously `query = {'ver': ver}` silently discarded any
        # extra query params passed via **kwargs; merge instead of replacing
        query['ver'] = ver
    return request.static_path(
        'rhodecode:public/{}'.format(path), _query=query)
137 137
138 138
# translation table mapping HTML-special code points to their entities
default_html_escape_table = {
    ord(char): entity for char, entity in [
        ('&', u'&amp;'),
        ('<', u'&lt;'),
        ('>', u'&gt;'),
        ('"', u'&quot;'),
        ("'", u'&#39;'),
    ]}


def html_escape(text, html_escape_table=default_html_escape_table):
    """Return *text* with HTML special characters replaced by entities."""
    return text.translate(html_escape_table)
151 151
152 152
def chop_at_smart(s, sub, inclusive=False, suffix_if_chopped=None):
    """
    Truncate string ``s`` at the first occurrence of ``sub``.

    If ``inclusive`` is true, truncate just after ``sub`` rather than at it.
    """
    cut = s.find(sub)
    if cut < 0:
        # separator absent: nothing to chop
        return s

    if inclusive:
        cut += len(sub)

    head = s[:cut]
    tail = s[cut:].strip()

    # only append the suffix when something non-blank was actually removed
    if tail and suffix_if_chopped:
        head += suffix_if_chopped

    return head
174 174
175 175
def shorter(text, size=20):
    """Truncate ``text`` to at most ``size`` chars, ending in '...' if cut."""
    ellipsis = '...'
    if len(text) <= size:
        return text
    return text[:size - len(ellipsis)] + ellipsis
181 181
182 182
def _reset(name, value=None, id=NotGiven, type="reset", **attrs):
    """
    Build an HTML ``<input type="reset">`` button.

    :param name: input name attribute
    :param value: optional value/label for the button
    :param id: element id; defaults to one derived from ``name``
    :param type: input type, ``reset`` unless overridden
    """
    _set_input_attrs(attrs, type, name, value)
    _set_id_attr(attrs, id, name)
    convert_boolean_attrs(attrs, ["disabled"])
    return HTML.input(**attrs)

# public aliases
reset = _reset
safeid = _make_safe_id_component
194 194
195 195
def branding(name, length=40):
    """Return the branding ``name`` cut to ``length`` chars, no indicator."""
    return truncate(name, length, indicator="")
198 198
199 199
def FID(raw_id, path):
    """
    Build a unique, url-safe ID for a filenode from the shortened commit
    hash and the md5 of its path.

    :param raw_id: commit hash
    :param path: file path inside the repository
    """
    commit_part = short_id(raw_id)
    path_part = md5_safe(path)[:12]
    return 'c-%s-%s' % (commit_part, path_part)
210 210
211 211
class _GetError(object):
    """Fetch an error from form_errors and render it as a span-wrapped
    error message.

    :param field_name: field to fetch errors for
    :param form_errors: form errors dict
    """

    def __call__(self, field_name, form_errors):
        template = """<span class="error_msg">%s</span>"""
        if not form_errors or field_name not in form_errors:
            return None
        return literal(template % form_errors.get(field_name))

get_error = _GetError()
226 226
227 227
class _ToolTip(object):

    def __call__(self, tooltip_title, trim_at=50):
        """
        Escape ``tooltip_title`` so it is safe to embed as tooltip markup.

        :param tooltip_title: raw tooltip text
        :param trim_at: unused, kept for backward compatibility
        """
        escaped = escape(tooltip_title)
        return escaped.replace('<', '&lt;').replace('>', '&gt;')
tooltip = _ToolTip()
241 241
242 242
def files_breadcrumbs(repo_name, commit_id, file_path):
    # Build a '/'-joined breadcrumb trail for a file inside a repository;
    # every segment but the last links to its directory listing.
    if isinstance(file_path, str):
        file_path = safe_unicode(file_path)

    # TODO: johbo: Is this always a url like path, or is this operating
    # system dependent?
    path_segments = file_path.split('/')

    repo_name_html = escape(repo_name)
    if len(path_segments) == 1 and path_segments[0] == '':
        # repository root: breadcrumb is just the (unlinked) repo name
        url_segments = [repo_name_html]
    else:
        # first crumb links the repo name to the repo's file root
        url_segments = [
            link_to(
                repo_name_html,
                url('files_home',
                    repo_name=repo_name,
                    revision=commit_id,
                    f_path=''),
                class_='pjax-link')]

    last_cnt = len(path_segments) - 1
    for cnt, segment in enumerate(path_segments):
        if not segment:
            continue
        segment_html = escape(segment)

        if cnt != last_cnt:
            # intermediate segment: link to the cumulative path so far
            url_segments.append(
                link_to(
                    segment_html,
                    url('files_home',
                        repo_name=repo_name,
                        revision=commit_id,
                        f_path='/'.join(path_segments[:cnt + 1])),
                    class_='pjax-link'))
        else:
            # last segment (the file itself) stays plain text
            url_segments.append(segment_html)

    return literal('/'.join(url_segments))
283 283
284 284
class CodeHtmlFormatter(HtmlFormatter):
    """
    My code Html Formatter for source codes
    """

    def wrap(self, source, outfile):
        # standard pygments wrapping: div > pre > code lines
        return self._wrap_div(self._wrap_pre(self._wrap_code(source)))

    def _wrap_code(self, source):
        # give each emitted source line a div with a stable L<n> anchor id
        for cnt, it in enumerate(source):
            i, t = it
            t = '<div id="L%s">%s</div>' % (cnt + 1, t)
            yield i, t

    def _wrap_tablelinenos(self, inner):
        # Render the code into a buffer first so the line count is known,
        # then emit a 2-column table: line-number gutter | highlighted code.
        dummyoutfile = StringIO.StringIO()
        lncount = 0
        for t, line in inner:
            if t:
                lncount += 1
            dummyoutfile.write(line)

        fl = self.linenostart
        mw = len(str(lncount + fl - 1))  # width of the widest line number
        sp = self.linenospecial
        st = self.linenostep
        la = self.lineanchors
        aln = self.anchorlinenos
        nocls = self.noclasses
        if sp:
            # every sp-th number is tagged with the "special" css class
            lines = []

            for i in range(fl, fl + lncount):
                if i % st == 0:
                    if i % sp == 0:
                        if aln:
                            lines.append('<a href="#%s%d" class="special">%*d</a>' %
                                         (la, i, mw, i))
                        else:
                            lines.append('<span class="special">%*d</span>' % (mw, i))
                    else:
                        if aln:
                            lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
                        else:
                            lines.append('%*d' % (mw, i))
                else:
                    # numbers skipped by linenostep: blank slot keeps rows aligned
                    lines.append('')
            ls = '\n'.join(lines)
        else:
            lines = []
            for i in range(fl, fl + lncount):
                if i % st == 0:
                    if aln:
                        lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
                    else:
                        lines.append('%*d' % (mw, i))
                else:
                    lines.append('')
            ls = '\n'.join(lines)

        # in case you wonder about the seemingly redundant <div> here: since the
        # content in the other cell also is wrapped in a div, some browsers in
        # some configurations seem to mess up the formatting...
        if nocls:
            yield 0, ('<table class="%stable">' % self.cssclass +
                      '<tr><td><div class="linenodiv" '
                      'style="background-color: #f0f0f0; padding-right: 10px">'
                      '<pre style="line-height: 125%">' +
                      ls + '</pre></div></td><td id="hlcode" class="code">')
        else:
            yield 0, ('<table class="%stable">' % self.cssclass +
                      '<tr><td class="linenos"><div class="linenodiv"><pre>' +
                      ls + '</pre></div></td><td id="hlcode" class="code">')
        yield 0, dummyoutfile.getvalue()
        yield 0, '</td></tr></table>'
360 360
361 361
class SearchContentCodeHtmlFormatter(CodeHtmlFormatter):
    """Pygments formatter for search results: renders only the matching
    lines (with '...' placeholder rows for gaps) inside a table, optionally
    linking each line number to ``url``."""

    def __init__(self, **kw):
        # only show these line numbers if set
        self.only_lines = kw.pop('only_line_numbers', [])
        self.query_terms = kw.pop('query_terms', [])
        self.max_lines = kw.pop('max_lines', 5)
        self.line_context = kw.pop('line_context', 3)
        self.url = kw.pop('url', None)

        # bugfix: super() must be invoked with THIS class; the previous
        # `super(CodeHtmlFormatter, self)` skipped CodeHtmlFormatter in the
        # MRO and would silently bypass any __init__ added to it later
        super(SearchContentCodeHtmlFormatter, self).__init__(**kw)

    def _wrap_code(self, source):
        # plain <pre> per line; search results don't need anchor divs
        for cnt, it in enumerate(source):
            i, t = it
            t = '<pre>%s</pre>' % t
            yield i, t

    def _wrap_tablelinenos(self, inner):
        # emit only rows whose line number is in self.only_lines; a '...'
        # row is inserted wherever consecutive shown lines are not adjacent
        yield 0, '<table class="code-highlight %stable">' % self.cssclass

        last_shown_line_number = 0
        current_line_number = 1

        for t, line in inner:
            if not t:
                # non-source output (markup) passes straight through
                yield t, line
                continue

            if current_line_number in self.only_lines:
                if last_shown_line_number + 1 != current_line_number:
                    # gap between shown ranges: ellipsis row
                    yield 0, '<tr>'
                    yield 0, '<td class="line">...</td>'
                    yield 0, '<td id="hlcode" class="code"></td>'
                    yield 0, '</tr>'

                yield 0, '<tr>'
                if self.url:
                    yield 0, '<td class="line"><a href="%s#L%i">%i</a></td>' % (
                        self.url, current_line_number, current_line_number)
                else:
                    yield 0, '<td class="line"><a href="">%i</a></td>' % (
                        current_line_number)
                yield 0, '<td id="hlcode" class="code">' + line + '</td>'
                yield 0, '</tr>'

                last_shown_line_number = current_line_number

            current_line_number += 1

        yield 0, '</table>'
413 413
414 414
def extract_phrases(text_query):
    """
    Extracts phrases from search term string making sure phrases
    contained in double quotes are kept together - and discarding empty values
    or fully whitespace values eg.

    'some text "a phrase" more' => ['some', 'text', 'a phrase', 'more']

    """
    tokens = []
    current = ''
    inside_quotes = False

    for ch in text_query:
        if ch == '"':
            # a quote terminates the token built so far and toggles
            # phrase mode (works for both opening and closing quotes)
            tokens.append(current)
            current = ''
            inside_quotes = not inside_quotes
        elif ch == ' ' and not inside_quotes:
            # spaces split tokens only outside of a quoted phrase
            tokens.append(current)
            current = ''
        else:
            current += ch

    tokens.append(current)
    # discard empty / whitespace-only tokens
    return [token.strip() for token in tokens if token.strip()]
454 454
455 455
def get_matching_offsets(text, phrases):
    """
    Returns a list of string offsets in `text` that the list of `terms` match

    >>> get_matching_offsets('some text here', ['some', 'here'])
    [(0, 4), (10, 14)]

    """
    offsets = []
    for phrase in phrases:
        # bugfix: phrases are plain search terms, not regex patterns.
        # Without re.escape a term containing metacharacters (e.g. '.',
        # '(' or '*') would match the wrong text or raise re.error.
        for match in re.finditer(re.escape(phrase), text):
            offsets.append((match.start(), match.end()))

    return offsets
470 470
471 471
def normalize_text_for_matching(x):
    """
    Replaces all non alnum characters to spaces and lower cases the string,
    useful for comparing two text strings without punctuation
    """
    lowered = x.lower()
    # \w keeps letters, digits and underscore; everything else -> space
    return re.sub(r'[^\w]', ' ', lowered)
478 478
479 479
def get_matching_line_offsets(lines, terms):
    """ Return a set of `lines` indices (starting from 1) matching a
    text search query, along with `context` lines above/below matching lines

    :param lines: list of strings representing lines
    :param terms: search term string to match in lines eg. 'some text'
    :param context: number of lines above/below a matching line to add to result
    :param max_lines: cut off for lines of interest
    eg.

    text = '''
    words words words
    words words words
    some text some
    words words words
    words words words
    text here what
    '''
    get_matching_line_offsets(text, 'text', context=1)
    {3: [(5, 9)], 6: [(0, 4)]]

    """
    # normalize the query once; each line is normalized on the fly below
    normalized_phrases = [
        normalize_text_for_matching(phrase)
        for phrase in extract_phrases(terms)]

    matching_lines = {}
    for line_number, line in enumerate(lines, start=1):
        offsets = get_matching_offsets(
            normalize_text_for_matching(line), normalized_phrases)
        if offsets:
            matching_lines[line_number] = offsets

    return matching_lines
513 513
514 514
def hsv_to_rgb(h, s, v):
    """ Convert hsv color values to rgb """

    if s == 0.0:
        # achromatic (grey): all channels equal the value
        return v, v, v

    sector = int(h * 6.0)  # XXX assume int() truncates!
    frac = (h * 6.0) - sector
    p = v * (1.0 - s)
    q = v * (1.0 - s * frac)
    t = v * (1.0 - s * (1.0 - frac))

    # one tuple per 60-degree hue sector
    return [
        (v, t, p),
        (q, v, p),
        (p, v, t),
        (p, q, v),
        (t, p, v),
        (v, p, q),
    ][sector % 6]
538 538
539 539
def unique_color_generator(n=10000, saturation=0.10, lightness=0.95):
    """
    Generator for getting n of evenly distributed colors using
    hsv color and golden ratio. It always return same order of colors

    :param n: number of colors to generate
    :param saturation: saturation of returned colors
    :param lightness: lightness of returned colors
    :returns: RGB tuple
    """

    golden_ratio = 0.618033988749895
    hue = 0.22717784590367374  # fixed seed hue -> deterministic sequence

    for _ in xrange(n):
        # stepping the hue by the golden ratio (mod 1) spreads colors evenly
        hue = (hue + golden_ratio) % 1
        rgb = hsv_to_rgb(hue, saturation, lightness)
        yield map(lambda channel: str(int(channel * 256)), rgb)
560 560
561 561
def color_hasher(n=10000, saturation=0.10, lightness=0.95):
    """
    Returns a function which when called with an argument returns a unique
    color for that argument, eg.

    :param n: number of colors to generate
    :param saturation: saturation of returned colors
    :param lightness: lightness of returned colors
    :returns: css RGB string

    >>> color_hash = color_hasher()
    >>> color_hash('hello')
    'rgb(34, 12, 59)'
    >>> color_hash('hello')
    'rgb(34, 12, 59)'
    >>> color_hash('other')
    'rgb(90, 224, 159)'
    """

    assigned = {}
    palette = unique_color_generator(
        saturation=saturation, lightness=lightness)

    def get_color_string(thing):
        # memoized: the same input always yields the same color
        if thing not in assigned:
            assigned[thing] = palette.next()
        return "rgb(%s)" % (', '.join(assigned[thing]))

    return get_color_string
593 593
594 594
def get_lexer_safe(mimetype=None, filepath=None):
    """
    Tries to return a relevant pygments lexer using mimetype/filepath name,
    defaulting to plain text if none could be found
    """
    lexer = None
    try:
        if mimetype:
            lexer = get_lexer_for_mimetype(mimetype)
        if not lexer:
            lexer = get_lexer_for_filename(filepath)
    except pygments.util.ClassNotFound:
        # unknown mimetype/filename: fall through to the plain-text default
        pass

    return lexer or get_lexer_by_name('text')
613 613
614 614
def get_lexer_for_filenode(filenode):
    """Return a custom lexer for the node's extension, or its default one."""
    return get_custom_lexer(filenode.extension) or filenode.lexer
618 618
619 619
def pygmentize(filenode, **kwargs):
    """
    pygmentize function using pygments

    :param filenode: file node to highlight
    """
    lexer = get_lexer_for_filenode(filenode)
    formatter = CodeHtmlFormatter(**kwargs)
    return literal(code_highlight(filenode.content, lexer, formatter))
629 629
630 630
def is_following_repo(repo_name, user_id):
    """Check whether ``user_id`` is following repository ``repo_name``."""
    from rhodecode.model.scm import ScmModel
    return ScmModel().is_following_repo(repo_name, user_id)
634 634
635 635
class _Message(object):
    """A message returned by ``Flash.pop_messages()``.

    Converting the message to a string returns the message text. Instances
    also have the following attributes:

    * ``message``: the message text.
    * ``category``: the category specified when the message was created.
    """

    def __init__(self, category, message):
        self.category = category
        self.message = message

    def __str__(self):
        return self.message

    # py2 compatibility: unicode() uses the same representation as str()
    __unicode__ = __str__

    def __html__(self):
        # markup protocol: templates get the HTML-escaped message text
        return escape(safe_unicode(self.message))
657 657
658 658
class Flash(_Flash):
    """Session flash-message store bridging pylons and pyramid queues."""

    def pop_messages(self):
        """Return all accumulated messages and delete them from the session.

        The return value is a list of ``Message`` objects.
        """
        from pylons import session

        messages = []

        # Pop the 'old' pylons flash messages. They are tuples of the form
        # (category, message)
        for cat, msg in session.pop(self.session_key, []):
            messages.append(_Message(cat, msg))

        # Pop the 'new' pyramid flash messages for each category as list
        # of strings.
        for cat in self.categories:
            for msg in session.pop_flash(queue=cat):
                messages.append(_Message(cat, msg))
        # Map messages from the default queue to the 'notice' category.
        for msg in session.pop_flash():
            messages.append(_Message('notice', msg))

        # persist the (now emptied) session
        session.save()
        return messages

    def json_alerts(self):
        """Serialize pending flash messages to JSON for client-side alerts."""
        payloads = []
        messages = flash.pop_messages()
        if messages:
            for message in messages:
                subdata = {}
                if hasattr(message.message, 'rsplit'):
                    # messages may carry structured extra data as JSON
                    # appended after a |DELIM| marker
                    flash_data = message.message.rsplit('|DELIM|', 1)
                    org_message = flash_data[0]
                    if len(flash_data) > 1:
                        subdata = json.loads(flash_data[1])
                else:
                    org_message = message.message
                payloads.append({
                    'message': {
                        'message': u'{}'.format(org_message),
                        'level': message.category,
                        'force': True,
                        'subdata': subdata
                    }
                })
        return json.dumps(payloads)

flash = Flash()
711 711
712 712 #==============================================================================
713 713 # SCM FILTERS available via h.
714 714 #==============================================================================
715 715 from rhodecode.lib.vcs.utils import author_name, author_email
716 716 from rhodecode.lib.utils2 import credentials_filter, age as _age
717 717 from rhodecode.model.db import User, ChangesetStatus
718 718
age = _age


def capitalize(x):
    """Capitalize the first character of ``x`` (PEP8: def over lambda)."""
    return x.capitalize()

email = author_email


def short_id(x):
    """Shorten a commit hash to its first 12 characters."""
    return x[:12]


def hide_credentials(x):
    """Strip credentials (user:password@) from a url-like string."""
    return ''.join(credentials_filter(x))
724 724
725 725
def age_component(datetime_iso, value=None, time_is_local=False):
    """Render a ``<time>`` element that the client-side 'timeago' widget
    turns into a human-friendly relative age.

    :param datetime_iso: datetime (or ISO string) to display
    :param value: optional pre-formatted title; defaults to format_date()
    :param time_is_local: when True, stamp naive datetimes with the current
        local UTC offset instead of +00:00
    """
    title = value or format_date(datetime_iso)
    tzinfo = '+00:00'

    # detect if we have a timezone info, otherwise, add it
    if isinstance(datetime_iso, datetime) and not datetime_iso.tzinfo:
        if time_is_local:
            # derive the local offset from the now()/utcnow() difference
            tzinfo = time.strftime("+%H:%M",
                time.gmtime(
                    (datetime.now() - datetime.utcnow()).seconds + 1
                )
            )

    return literal(
        '<time class="timeago tooltip" '
        'title="{1}{2}" datetime="{0}{2}">{1}</time>'.format(
            datetime_iso, title, tzinfo))
743 743
744 744
def _shorten_commit_id(commit_id):
    """Trim a commit hash to the configured display length (default 12)."""
    from rhodecode import CONFIG
    length = safe_int(CONFIG.get('rhodecode_show_sha_length', 12))
    return commit_id[:length]
749 749
750 750
def show_id(commit):
    """
    Configurable function that shows ID
    by default it's r123:fffeeefffeee

    :param commit: commit instance
    """
    from rhodecode import CONFIG
    show_idx = str2bool(CONFIG.get('rhodecode_show_revision_number', True))

    raw_id = _shorten_commit_id(commit.raw_id)
    if not show_idx:
        return '%s' % (raw_id, )
    return 'r%s:%s' % (commit.idx, raw_id)
766 766
767 767
def format_date(date):
    """
    use a standardized formatting for dates used in RhodeCode

    :param date: date/datetime object
    :return: formatted date
    """
    if not date:
        return u""
    return safe_unicode(date.strftime("%a, %d %b %Y %H:%M:%S"))
781 781
782 782
class _RepoChecker(object):
    """Callable checking whether a repo(-like) object uses a given backend.

    Accepts a Repository instance (``alias`` attribute), a db row
    (``repo_type`` attribute) or a plain backend alias string.
    """

    def __init__(self, backend_alias):
        self._backend_alias = backend_alias

    def __call__(self, repository):
        if hasattr(repository, 'alias'):
            repo_type = repository.alias
        elif hasattr(repository, 'repo_type'):
            repo_type = repository.repo_type
        else:
            repo_type = repository
        return repo_type == self._backend_alias

is_git = _RepoChecker('git')
is_hg = _RepoChecker('hg')
is_svn = _RepoChecker('svn')
800 800
801 801
def get_repo_type_by_name(repo_name):
    """Look up a repository by name and return its backend type."""
    return Repository.get_by_repo_name(repo_name).repo_type
805 805
806 806
def is_svn_without_proxy(repository):
    """True when ``repository`` is svn and the svn http proxy is disabled."""
    if not is_svn(repository):
        return False
    from rhodecode.model.settings import VcsSettingsModel
    conf = VcsSettingsModel().get_ui_settings_as_config_obj()
    return not str2bool(conf.get('vcs_svn_proxy', 'http_requests_enabled'))
813 813
814 814
def discover_user(author):
    """
    Tries to discover RhodeCode User based on the autho string. Author string
    is typically `FirstName LastName <email@address.com>`
    """

    # if author is already an instance use it for extraction
    if isinstance(author, User):
        return author

    # Valid email in the attribute passed, see if they're in the system
    _email = author_email(author)
    if _email:
        by_email = User.get_by_email(_email, case_insensitive=True, cache=True)
        if by_email is not None:
            return by_email

    # Maybe it's a username, we try to extract it and fetch by username ?
    by_username = User.get_by_username(
        author_name(author), case_insensitive=True, cache=True)
    return by_username
839 839
840 840
def email_or_none(author):
    """Best-effort email extraction from a commit author string."""
    # extract email from the commit string
    _email = author_email(author)
    if _email:
        return _email

    # no direct email: see if it contains a username we can resolve
    user = User.get_by_username(
        author_name(author), case_insensitive=True, cache=True)
    if user is not None:
        return user.email

    # No valid email, not a valid user in the system, none!
    return None
858 858
859 859
def link_to_user(author, length=0, **kwargs):
    """
    Render a link to the profile page of the user matching `author`, or
    just the escaped display name when no matching user exists.

    :param author: author string or User instance
    :param length: optional maximum display length (0 means unlimited)
    """
    user = discover_user(author)
    # when resolved, hand the User straight to person() below and save one
    # intensive query
    if user:
        author = user

    display = person(author, 'username_or_name_or_email')
    if length:
        display = shorter(display, length)

    if not user:
        return escape(display)
    return link_to(
        escape(display),
        route_path('user_profile', username=user.username),
        **kwargs)
878 878
879 879
def person(author, show_attr="username_and_name"):
    """
    Return a display representation of an author: the `show_attr`
    attribute of a matching system user, or the parsed name / e-mail from
    the raw author string otherwise.
    """
    user = discover_user(author)
    if user:
        return getattr(user, show_attr)
    # unknown user -- fall back to the pieces of the author string itself
    parsed_name = author_name(author)
    parsed_email = email(author)
    return parsed_name or parsed_email
888 888
889 889
def author_string(email):
    """
    Build a ``First Last &lt;email&gt;`` display string (html-entity
    escaped brackets) for a known user, or return the plain e-mail when
    the user is unknown or has no name set. Falsy input yields None.
    """
    if not email:
        return None
    user = User.get_by_email(email, case_insensitive=True, cache=True)
    if user is not None and (user.firstname or user.lastname):
        return '%s %s &lt;%s&gt;' % (user.firstname, user.lastname, email)
    # unknown user, or user without any name parts -- plain address
    return email
902 902
903 903
def person_by_id(id_, show_attr="username_and_name"):
    """
    Resolve a user by numeric id and return its `show_attr` attribute.
    When `id_` is not numeric, or no user matches, `id_` is returned
    unchanged (converted to int when it was a digit string).
    """
    # only numeric values can be user ids
    if str(id_).isdigit() or isinstance(id_, int):
        id_ = int(id_)
        user = User.get(id_)
        if user is not None:
            return getattr(user, show_attr)
    return id_
915 915
916 916
def gravatar_with_user(author, show_disabled=False):
    """
    Render the ``gravatar_with_user`` partial from ``base/base.mako`` for
    the given author string.
    """
    from rhodecode.lib.utils import PartialRenderer
    renderer = PartialRenderer('base/base.mako')
    return renderer('gravatar_with_user', author, show_disabled=show_disabled)
921 921
922 922
def desc_stylize(value):
    """
    Convert ``[tag]`` style markers inside `value` into their html
    (metatag div) equivalents.

    :param value: text to transform; falsy input yields ''
    """
    if not value:
        return ''

    # (pattern, replacement) pairs applied in order -- keyword-specific
    # rules must run before the generic [word] rule at the end
    rules = [
        (r'\[see\ \=\>\ *([a-zA-Z0-9\/\=\?\&\ \:\/\.\-]*)\]',
         '<div class="metatag" tag="see">see =&gt; \\1 </div>'),
        (r'\[license\ \=\>\ *([a-zA-Z0-9\/\=\?\&\ \:\/\.\-]*)\]',
         '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/\\1">\\1</a></div>'),
        (r'\[(requires|recommends|conflicts|base)\ \=\>\ *([a-zA-Z0-9\-\/]*)\]',
         '<div class="metatag" tag="\\1">\\1 =&gt; <a href="/\\2">\\2</a></div>'),
        (r'\[(lang|language)\ \=\>\ *([a-zA-Z\-\/\#\+]*)\]',
         '<div class="metatag" tag="lang">\\2</div>'),
        (r'\[([a-z]+)\]',
         '<div class="metatag" tag="\\1">\\1</div>'),
    ]
    for pattern, replacement in rules:
        value = re.sub(pattern, replacement, value)

    return value
944 944
945 945
def escaped_stylize(value):
    """
    Like desc_stylize(), but html-escapes `value` first and therefore
    matches the escaped (``=&gt;``, ``&amp;``) forms of the markers.
    """
    if not value:
        return ''

    # Using default webhelper escape method, but has to force it as a
    # plain unicode instead of a markup tag to be used in regex expressions
    value = unicode(escape(safe_unicode(value)))

    # (pattern, replacement) pairs applied in order, mirroring the rules
    # of desc_stylize() but against the escaped marker forms
    rules = [
        (r'\[see\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]',
         '<div class="metatag" tag="see">see =&gt; \\1 </div>'),
        (r'\[license\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]',
         '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/\\1">\\1</a></div>'),
        (r'\[(requires|recommends|conflicts|base)\ \=\&gt;\ *([a-zA-Z0-9\-\/]*)\]',
         '<div class="metatag" tag="\\1">\\1 =&gt; <a href="/\\2">\\2</a></div>'),
        (r'\[(lang|language)\ \=\&gt;\ *([a-zA-Z\-\/\#\+]*)\]',
         '<div class="metatag" tag="lang">\\2</div>'),
        (r'\[([a-z]+)\]',
         '<div class="metatag" tag="\\1">\\1</div>'),
    ]
    for pattern, replacement in rules:
        value = re.sub(pattern, replacement, value)

    return value
969 969
970 970
def bool2icon(value):
    """
    Render the boolean interpretation of `value` as an ``<i>`` element
    whose css class encodes truthiness.

    :param value: any value; standard bool conversion applies
    """
    css_class = "icon-true" if value else "icon-false"
    return HTML.tag('i', class_=css_class)
983 983
984 984
985 985 #==============================================================================
986 986 # PERMS
987 987 #==============================================================================
988 988 from rhodecode.lib.auth import HasPermissionAny, HasPermissionAll, \
989 989 HasRepoPermissionAny, HasRepoPermissionAll, HasRepoGroupPermissionAll, \
990 990 HasRepoGroupPermissionAny, HasRepoPermissionAnyApi, get_csrf_token, \
991 991 csrf_token_key
992 992
993 993
994 994 #==============================================================================
995 995 # GRAVATAR URL
996 996 #==============================================================================
class InitialsGravatar(object):
    """
    Generator of svg "initials" avatars: a colored square carrying the
    user's two-letter initials (or a default silhouette), emitted as a
    base64 data-uri. Used when real gravatars are unavailable/disabled.
    """

    def __init__(self, email_address, first_name, last_name, size=30,
                 background=None, text_color='#fff'):
        # size: edge length (px) of the square svg
        self.size = size
        self.first_name = first_name
        self.last_name = last_name
        self.email_address = email_address
        # background defaults to a color derived deterministically from
        # the email address, so the same user always gets the same color
        self.background = background or self.str2color(email_address)
        self.text_color = text_color

    def get_color_bank(self):
        """
        returns a predefined list of colors that gravatars can use.
        Those are randomized distinct colors that guarantee readability and
        uniqueness.

        generated with: http://phrogz.net/css/distinct-colors.html
        """
        return [
            '#bf3030', '#a67f53', '#00ff00', '#5989b3', '#392040', '#d90000',
            '#402910', '#204020', '#79baf2', '#a700b3', '#bf6060', '#7f5320',
            '#008000', '#003059', '#ee00ff', '#ff0000', '#8c4b00', '#007300',
            '#005fb3', '#de73e6', '#ff4040', '#ffaa00', '#3df255', '#203140',
            '#47004d', '#591616', '#664400', '#59b365', '#0d2133', '#83008c',
            '#592d2d', '#bf9f60', '#73e682', '#1d3f73', '#73006b', '#402020',
            '#b2862d', '#397341', '#597db3', '#e600d6', '#a60000', '#736039',
            '#00b318', '#79aaf2', '#330d30', '#ff8080', '#403010', '#16591f',
            '#002459', '#8c4688', '#e50000', '#ffbf40', '#00732e', '#102340',
            '#bf60ac', '#8c4646', '#cc8800', '#00a642', '#1d3473', '#b32d98',
            '#660e00', '#ffd580', '#80ffb2', '#7391e6', '#733967', '#d97b6c',
            '#8c5e00', '#59b389', '#3967e6', '#590047', '#73281d', '#665200',
            '#00e67a', '#2d50b3', '#8c2377', '#734139', '#b2982d', '#16593a',
            '#001859', '#ff00aa', '#a65e53', '#ffcc00', '#0d3321', '#2d3959',
            '#731d56', '#401610', '#4c3d00', '#468c6c', '#002ca6', '#d936a3',
            '#d94c36', '#403920', '#36d9a3', '#0d1733', '#592d4a', '#993626',
            '#cca300', '#00734d', '#46598c', '#8c005e', '#7f1100', '#8c7000',
            '#00a66f', '#7382e6', '#b32d74', '#d9896c', '#ffe680', '#1d7362',
            '#364cd9', '#73003d', '#d93a00', '#998a4d', '#59b3a1', '#5965b3',
            '#e5007a', '#73341d', '#665f00', '#00b38f', '#0018b3', '#59163a',
            '#b2502d', '#bfb960', '#00ffcc', '#23318c', '#a6537f', '#734939',
            '#b2a700', '#104036', '#3d3df2', '#402031', '#e56739', '#736f39',
            '#79f2ea', '#000059', '#401029', '#4c1400', '#ffee00', '#005953',
            '#101040', '#990052', '#402820', '#403d10', '#00ffee', '#0000d9',
            '#ff80c4', '#a66953', '#eeff00', '#00ccbe', '#8080ff', '#e673a1',
            '#a62c00', '#474d00', '#1a3331', '#46468c', '#733950', '#662900',
            '#858c23', '#238c85', '#0f0073', '#b20047', '#d9986c', '#becc00',
            '#396f73', '#281d73', '#ff0066', '#ff6600', '#dee673', '#59adb3',
            '#6559b3', '#590024', '#b2622d', '#98b32d', '#36ced9', '#332d59',
            '#40001a', '#733f1d', '#526600', '#005359', '#242040', '#bf6079',
            '#735039', '#cef23d', '#007780', '#5630bf', '#66001b', '#b24700',
            '#acbf60', '#1d6273', '#25008c', '#731d34', '#a67453', '#50592d',
            '#00ccff', '#6600ff', '#ff0044', '#4c1f00', '#8a994d', '#79daf2',
            '#a173e6', '#d93662', '#402310', '#aaff00', '#2d98b3', '#8c40ff',
            '#592d39', '#ff8c40', '#354020', '#103640', '#1a0040', '#331a20',
            '#331400', '#334d00', '#1d5673', '#583973', '#7f0022', '#4c3626',
            '#88cc00', '#36a3d9', '#3d0073', '#d9364c', '#33241a', '#698c23',
            '#5995b3', '#300059', '#e57382', '#7f3300', '#366600', '#00aaff',
            '#3a1659', '#733941', '#663600', '#74b32d', '#003c59', '#7f53a6',
            '#73000f', '#ff8800', '#baf279', '#79caf2', '#291040', '#a6293a',
            '#b2742d', '#587339', '#0077b3', '#632699', '#400009', '#d9a66c',
            '#294010', '#2d4a59', '#aa00ff', '#4c131b', '#b25f00', '#5ce600',
            '#267399', '#a336d9', '#990014', '#664e33', '#86bf60', '#0088ff',
            '#7700b3', '#593a16', '#073300', '#1d4b73', '#ac60bf', '#e59539',
            '#4f8c46', '#368dd9', '#5c0073'
        ]

    def rgb_to_hex_color(self, rgb_tuple):
        """
        Converts an rgb_tuple passed to an hex color.

        :param rgb_tuple: tuple with 3 ints represents rgb color space
        """
        # NOTE: str.encode('hex') is a Python 2 codec
        return '#' + ("".join(map(chr, rgb_tuple)).encode('hex'))

    def email_to_int_list(self, email_str):
        """
        Get every byte of the hex digest value of email and turn it to integer.
        It's going to be always between 0-255
        """
        digest = md5_safe(email_str.lower())
        return [int(digest[i * 2:i * 2 + 2], 16) for i in range(16)]

    def pick_color_bank_index(self, email_str, color_bank):
        # first digest byte modulo bank size gives a stable index
        return self.email_to_int_list(email_str)[0] % len(color_bank)

    def str2color(self, email_str):
        """
        Tries to map in a stable algorithm an email to color

        :param email_str:
        """
        color_bank = self.get_color_bank()
        # pick position (module it's length so we always find it in the
        # bank even if it's smaller than 256 values
        pos = self.pick_color_bank_index(email_str, color_bank)
        return color_bank[pos]

    def normalize_email(self, email_address):
        """
        Coerce `email_address` into a plain-ascii, well-formed address,
        filling in a fake `user@localhost` style value when parts are
        missing, so downstream initial-extraction always has prefix@host.
        """
        import unicodedata
        # default host used to fill in the fake/missing email
        default_host = u'localhost'

        if not email_address:
            email_address = u'%s@%s' % (User.DEFAULT_USER, default_host)

        email_address = safe_unicode(email_address)

        if u'@' not in email_address:
            email_address = u'%s@%s' % (email_address, default_host)

        if email_address.endswith(u'@'):
            email_address = u'%s%s' % (email_address, default_host)

        # strip accents and drop any remaining non-ascii characters
        email_address = unicodedata.normalize('NFKD', email_address)\
            .encode('ascii', 'ignore')
        return email_address

    def get_initials(self):
        """
        Returns 2 letter initials calculated based on the input.
        The algorithm picks first given email address, and takes first letter
        of part before @, and then the first letter of server name. In case
        the part before @ is in a format of `somestring.somestring2` it replaces
        the server letter with first letter of somestring2

        In case function was initialized with both first and lastname, this
        overrides the extraction from email by first letter of the first and
        last name. We add special logic to that functionality, In case Full name
        is compound, like Guido Von Rossum, we use last part of the last name
        (Von Rossum) picking `R`.

        Function also normalizes the non-ascii characters to they ascii
        representation, eg Ą => A
        """
        import unicodedata
        # replace non-ascii to ascii
        first_name = unicodedata.normalize(
            'NFKD', safe_unicode(self.first_name)).encode('ascii', 'ignore')
        last_name = unicodedata.normalize(
            'NFKD', safe_unicode(self.last_name)).encode('ascii', 'ignore')

        # do NFKD encoding, and also make sure email has proper format
        email_address = self.normalize_email(self.email_address)

        # first push the email initials
        prefix, server = email_address.split('@', 1)

        # check if prefix is maybe a 'firstname.lastname' syntax
        _dot_split = prefix.rsplit('.', 1)
        if len(_dot_split) == 2:
            initials = [_dot_split[0][0], _dot_split[1][0]]
        else:
            initials = [prefix[0], server[0]]

        # then try to replace either firtname or lastname
        fn_letter = (first_name or " ")[0].strip()
        ln_letter = (last_name.split(' ', 1)[-1] or " ")[0].strip()

        if fn_letter:
            initials[0] = fn_letter

        if ln_letter:
            initials[1] = ln_letter

        return ''.join(initials).upper()

    def get_img_data_by_type(self, font_family, img_type):
        """
        Return raw svg markup for a special avatar type; currently only
        the anonymous/default-user silhouette is defined.
        """
        default_user = """
        <svg xmlns="http://www.w3.org/2000/svg"
        version="1.1" x="0px" y="0px" width="{size}" height="{size}"
        viewBox="-15 -10 439.165 429.164"

        xml:space="preserve"
        style="background:{background};" >

        <path d="M204.583,216.671c50.664,0,91.74-48.075,
        91.74-107.378c0-82.237-41.074-107.377-91.74-107.377
        c-50.668,0-91.74,25.14-91.74,107.377C112.844,
        168.596,153.916,216.671,
        204.583,216.671z" fill="{text_color}"/>
        <path d="M407.164,374.717L360.88,
        270.454c-2.117-4.771-5.836-8.728-10.465-11.138l-71.83-37.392
        c-1.584-0.823-3.502-0.663-4.926,0.415c-20.316,
        15.366-44.203,23.488-69.076,23.488c-24.877,
        0-48.762-8.122-69.078-23.488
        c-1.428-1.078-3.346-1.238-4.93-0.415L58.75,
        259.316c-4.631,2.41-8.346,6.365-10.465,11.138L2.001,374.717
        c-3.191,7.188-2.537,15.412,1.75,22.005c4.285,
        6.592,11.537,10.526,19.4,10.526h362.861c7.863,0,15.117-3.936,
        19.402-10.527 C409.699,390.129,
        410.355,381.902,407.164,374.717z" fill="{text_color}"/>
        </svg>""".format(
            size=self.size,
            background='#979797',  # @grey4
            text_color=self.text_color,
            font_family=font_family)

        return {
            "default_user": default_user
        }[img_type]

    def get_img_data(self, svg_type=None):
        """
        generates the svg metadata for image
        """

        font_family = ','.join([
            'proximanovaregular',
            'Proxima Nova Regular',
            'Proxima Nova',
            'Arial',
            'Lucida Grande',
            'sans-serif'
        ])
        if svg_type:
            return self.get_img_data_by_type(font_family, svg_type)

        initials = self.get_initials()
        img_data = """
        <svg xmlns="http://www.w3.org/2000/svg" pointer-events="none"
             width="{size}" height="{size}"
             style="width: 100%; height: 100%; background-color: {background}"
             viewBox="0 0 {size} {size}">
            <text text-anchor="middle" y="50%" x="50%" dy="0.35em"
                  pointer-events="auto" fill="{text_color}"
                  font-family="{font_family}"
                  style="font-weight: 400; font-size: {f_size}px;">{text}
            </text>
        </svg>""".format(
            size=self.size,
            f_size=self.size/1.85,  # scale the text inside the box nicely
            background=self.background,
            text_color=self.text_color,
            text=initials.upper(),
            font_family=font_family)

        return img_data

    def generate_svg(self, svg_type=None):
        """
        Return the avatar svg wrapped as a base64 data-uri, ready to drop
        into an <img src="...">.
        """
        # NOTE: str.encode('base64') is a Python 2 codec
        img_data = self.get_img_data(svg_type)
        return "data:image/svg+xml;base64,%s" % img_data.encode('base64')
1238 1238
1239 1239
def initials_gravatar(email_address, first_name, last_name, size=30):
    """
    Build a data-uri svg avatar with the user's initials; the default
    (anonymous) user email gets the dedicated silhouette image instead.
    """
    svg_type = (
        'default_user' if email_address == User.DEFAULT_USER_EMAIL else None)
    generator = InitialsGravatar(email_address, first_name, last_name, size)
    return generator.generate_svg(svg_type=svg_type)
1246 1246
1247 1247
def gravatar_url(email_address, size=30):
    """
    Return the avatar url for `email_address`, honoring the instance
    visual settings: a templated gravatar url when gravatars are enabled,
    otherwise a generated initials avatar. Empty/anonymous addresses
    always get the default initials avatar.
    """
    # doh, we need to re-import those to mock it later
    from pylons import tmpl_context as c

    use_gravatar = c.visual.use_gravatar
    url_template = c.visual.gravatar_url or User.DEFAULT_GRAVATAR_URL

    email_address = email_address or User.DEFAULT_USER_EMAIL
    if isinstance(email_address, unicode):
        # hashlib crashes on unicode items
        email_address = safe_str(email_address)

    # empty email or default user -> anonymous initials avatar
    if not email_address or email_address == User.DEFAULT_USER_EMAIL:
        return initials_gravatar(User.DEFAULT_USER_EMAIL, '', '', size=size)

    if not use_gravatar:
        return initials_gravatar(email_address, '', '', size=size)

    # TODO: Disuse pyramid thread locals. Think about another solution to
    # get the host and schema here.
    request = get_current_request()
    expanded = safe_str(url_template)
    expanded = expanded.replace('{email}', email_address)
    expanded = expanded.replace('{md5email}', md5_safe(email_address.lower()))
    expanded = expanded.replace('{netloc}', request.host)
    expanded = expanded.replace('{scheme}', request.scheme)
    expanded = expanded.replace('{size}', safe_str(size))
    return expanded
1277 1277
1278 1278
class Page(_Page):
    """
    Custom pager to match rendering style with paginator
    """

    def _get_pos(self, cur_page, max_page, items):
        """
        Compute the (leftmost, current, rightmost) page numbers of a
        window of `items` pages, centered on `cur_page` when possible and
        clamped to the [1, max_page] range near the borders.
        """
        # NOTE: integer division under Python 2
        edge = (items / 2) + 1
        if (cur_page <= edge):
            # near the left border -- extend the window to the right
            radius = max(items / 2, items - cur_page)
        elif (max_page - cur_page) < edge:
            # near the right border -- extend the window to the left
            radius = (items - 1) - (max_page - cur_page)
        else:
            radius = items / 2

        left = max(1, (cur_page - (radius)))
        right = min(max_page, cur_page + (radius))
        return left, cur_page, right

    def _range(self, regexp_match):
        """
        Return range of linked pages (e.g. '1 2 [3] 4 5 6 7 8').

        Arguments:

        regexp_match
            A "re" (regular expressions) match object containing the
            radius of linked pages around the current page in
            regexp_match.group(1) as a string

        This function is supposed to be called as a callable in
        re.sub.

        """
        radius = int(regexp_match.group(1))

        # Compute the first and last page number within the radius
        # e.g. '1 .. 5 6 [7] 8 9 .. 12'
        # -> leftmost_page = 5
        # -> rightmost_page = 9
        leftmost_page, _cur, rightmost_page = self._get_pos(self.page,
                                                            self.last_page,
                                                            (radius * 2) + 1)
        nav_items = []

        # Create a link to the first page (unless we are on the first page
        # or there would be no need to insert '..' spacers)
        if self.page != self.first_page and self.first_page < leftmost_page:
            nav_items.append(self._pagerlink(self.first_page, self.first_page))

        # Insert dots if there are pages between the first page
        # and the currently displayed page range
        if leftmost_page - self.first_page > 1:
            # Wrap in a SPAN tag if nolink_attr is set
            text = '..'
            if self.dotdot_attr:
                text = HTML.span(c=text, **self.dotdot_attr)
            nav_items.append(text)

        for thispage in xrange(leftmost_page, rightmost_page + 1):
            # Hilight the current page number and do not use a link
            if thispage == self.page:
                text = '%s' % (thispage,)
                # Wrap in a SPAN tag if nolink_attr is set
                if self.curpage_attr:
                    text = HTML.span(c=text, **self.curpage_attr)
                nav_items.append(text)
            # Otherwise create just a link to that page
            else:
                text = '%s' % (thispage,)
                nav_items.append(self._pagerlink(thispage, text))

        # Insert dots if there are pages between the displayed
        # page numbers and the end of the page range
        if self.last_page - rightmost_page > 1:
            text = '..'
            # Wrap in a SPAN tag if nolink_attr is set
            if self.dotdot_attr:
                text = HTML.span(c=text, **self.dotdot_attr)
            nav_items.append(text)

        # Create a link to the very last page (unless we are on the last
        # page or there would be no need to insert '..' spacers)
        if self.page != self.last_page and rightmost_page < self.last_page:
            nav_items.append(self._pagerlink(self.last_page, self.last_page))

        ## prerender links
        #_page_link = url.current()
        #nav_items.append(literal('<link rel="prerender" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
        #nav_items.append(literal('<link rel="prefetch" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
        return self.separator.join(nav_items)

    def pager(self, format='~2~', page_param='page', partial_param='partial',
              show_if_single_page=False, separator=' ', onclick=None,
              symbol_first='<<', symbol_last='>>',
              symbol_previous='<', symbol_next='>',
              link_attr={'class': 'pager_link', 'rel': 'prerender'},
              curpage_attr={'class': 'pager_curpage'},
              dotdot_attr={'class': 'pager_dotdot'}, **kwargs):
        """
        Render the pager html. `format` uses ~N~ as a placeholder for a
        page-link range of radius N, plus $-style variables ($page,
        $page_count, $link_previous, ...) substituted below.
        """
        # NOTE(review): the dict defaults above are mutable and shared
        # between calls; they are only read here, but must not be mutated.
        self.curpage_attr = curpage_attr
        self.separator = separator
        self.pager_kwargs = kwargs
        self.page_param = page_param
        self.partial_param = partial_param
        self.onclick = onclick
        self.link_attr = link_attr
        self.dotdot_attr = dotdot_attr

        # Don't show navigator if there is no more than one page
        if self.page_count == 0 or (self.page_count == 1 and not show_if_single_page):
            return ''

        from string import Template
        # Replace ~...~ in token format by range of pages
        result = re.sub(r'~(\d+)~', self._range, format)

        # Interpolate '%' variables
        result = Template(result).safe_substitute({
            'first_page': self.first_page,
            'last_page': self.last_page,
            'page': self.page,
            'page_count': self.page_count,
            'items_per_page': self.items_per_page,
            'first_item': self.first_item,
            'last_item': self.last_item,
            'item_count': self.item_count,
            'link_first': self.page > self.first_page and \
                self._pagerlink(self.first_page, symbol_first) or '',
            'link_last': self.page < self.last_page and \
                self._pagerlink(self.last_page, symbol_last) or '',
            'link_previous': self.previous_page and \
                self._pagerlink(self.previous_page, symbol_previous) \
                or HTML.span(symbol_previous, class_="pg-previous disabled"),
            'link_next': self.next_page and \
                self._pagerlink(self.next_page, symbol_next) \
                or HTML.span(symbol_next, class_="pg-next disabled")
        })

        return literal(result)
1418 1418
1419 1419
1420 1420 #==============================================================================
1421 1421 # REPO PAGER, PAGER FOR REPOSITORY
1422 1422 #==============================================================================
class RepoPage(Page):
    """
    Pager specialised for repository commit collections: pages are sliced
    from the *end* of the collection (newest entries first) and the items
    of each page are reversed before being exposed.
    """

    def __init__(self, collection, page=1, items_per_page=20,
                 item_count=None, url=None, **kwargs):

        """Create a "RepoPage" instance. special pager for paging
        repository
        """
        self._url_generator = url

        # Safe the kwargs class-wide so they can be used in the pager() method
        self.kwargs = kwargs

        # Save a reference to the collection
        self.original_collection = collection

        self.collection = collection

        # The self.page is the number of the current page.
        # The first page has the number 1!
        try:
            self.page = int(page)  # make it int() if we get it as a string
        except (ValueError, TypeError):
            self.page = 1

        self.items_per_page = items_per_page

        # Unless the user tells us how many items the collections has
        # we calculate that ourselves.
        if item_count is not None:
            self.item_count = item_count
        else:
            self.item_count = len(self.collection)

        # Compute the number of the first and last available page
        if self.item_count > 0:
            self.first_page = 1
            self.page_count = int(math.ceil(float(self.item_count) /
                                            self.items_per_page))
            self.last_page = self.first_page + self.page_count - 1

            # Make sure that the requested page number is the range of
            # valid pages
            if self.page > self.last_page:
                self.page = self.last_page
            elif self.page < self.first_page:
                self.page = self.first_page

            # Note: the number of items on this page can be less than
            # items_per_page if the last page is not full
            # indices count backwards from the end of the collection, so
            # page 1 covers the newest (last) items_per_page entries
            self.first_item = max(0, (self.item_count) - (self.page *
                                                          items_per_page))
            self.last_item = ((self.item_count - 1) - items_per_page *
                              (self.page - 1))

            self.items = list(self.collection[self.first_item:self.last_item + 1])

            # Links to previous and next page
            if self.page > self.first_page:
                self.previous_page = self.page - 1
            else:
                self.previous_page = None

            if self.page < self.last_page:
                self.next_page = self.page + 1
            else:
                self.next_page = None

        # No items available
        else:
            self.first_page = None
            self.page_count = 0
            self.last_page = None
            self.first_item = None
            self.last_item = None
            self.previous_page = None
            self.next_page = None
            self.items = []

        # This is a subclass of the 'list' type. Initialise the list now.
        # reversed() puts the newest item of the slice first.
        list.__init__(self, reversed(self.items))
1504 1504
1505 1505
def changed_tooltip(nodes):
    """
    Generate an html snippet listing changed nodes for the commit page,
    limited to the first 30 entries.

    :param nodes: LazyNodesGenerator
    """
    if not nodes:
        return ': ' + _('No Files')

    suffix = ''
    if len(nodes) > 30:
        suffix = '<br/>' + _(' and %s more') % (len(nodes) - 30)
    paths = [safe_unicode(node.path) for node in nodes[:30]]
    return literal(': <br/> ' + '<br/> '.join(paths) + suffix)
1522 1522
1523 1523
def breadcrumb_repo_link(repo):
    """
    Makes a breadcrumbs path link to repo

    ex::
        group >> subgroup >> repo

    :param repo: a Repository instance
    """
    crumbs = []
    # links for every parent group, outermost first
    for group in repo.groups_with_parents:
        crumbs.append(
            link_to(group.name,
                    url('repo_group_home', group_name=group.group_name)))
    # finally the repository itself
    crumbs.append(
        link_to(repo.just_name,
                url('summary_home', repo_name=repo.repo_name)))

    return literal(' &raquo; '.join(crumbs))
1542 1542
1543 1543
def format_byte_size_binary(file_size):
    """
    Format file/folder sizes using binary (1024-based) units.
    """
    return format_byte_size(file_size, binary=True)
1550 1550
1551 1551
def urlify_text(text_, safe=True):
    """
    Extract urls from `text_` and turn them into html anchor links.

    :param text_: text to process
    :param safe: when True, wrap the result in a `literal` markup object
    """
    url_pat = re.compile(r'''(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@#.&+]'''
                         '''|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)''')

    def _make_link(match_obj):
        matched_url = match_obj.groups()[0]
        return '<a href="%(url)s">%(url)s</a>' % ({'url': matched_url})

    linked = url_pat.sub(_make_link, text_)
    if not safe:
        return linked
    return literal(linked)
1569 1569
1570 1570
def urlify_commits(text_, repository):
    """
    Turn commit ids (12-40 hex chars, whitespace delimited) found in
    `text_` into links to the changeset page of `repository`.

    :param text_: text to process
    :param repository: repo name to build the URL with
    """
    from pylons import url  # doh, we need to re-import url to mock it later
    commit_pat = re.compile(r'(^|\s)([0-9a-fA-F]{12,40})($|\s)')

    def _make_link(match_obj):
        pref, commit_id, suf = match_obj.groups()
        tmpl = (
            '%(pref)s<a class="%(cls)s" href="%(url)s">'
            '%(commit_id)s</a>%(suf)s'
        )
        return tmpl % {
            'pref': pref,
            'cls': 'revision-link',
            'url': url('changeset_home', repo_name=repository,
                       revision=commit_id, qualified=True),
            'commit_id': commit_id,
            'suf': suf
        }

    return commit_pat.sub(_make_link, text_)
1602 1602
1603 1603
def _process_url_func(match_obj, repo_name, uid, entry,
                      return_raw_data=False, link_format='html'):
    """
    Turn one issue-tracker regex match into a rendered link (html / rst /
    markdown, per `link_format`) using the url template of `entry`, or
    into a raw {'id', 'url'} dict when `return_raw_data` is True.

    :param match_obj: regex match produced by the entry's pattern
    :param repo_name: full repository name the text belongs to
    :param uid: issue tracker entry uid (unused here; kept for callers)
    :param entry: issue tracker settings dict with 'url' and 'pref' keys
    :raises ValueError: on an unknown `link_format`
    """
    # keep a single leading space of the match, so substitution does not
    # glue the link to the preceding word
    pref = ''
    if match_obj.group().startswith(' '):
        pref = ' '

    issue_id = ''.join(match_obj.groups())

    if link_format == 'html':
        tmpl = (
            '%(pref)s<a class="%(cls)s" href="%(url)s">'
            '%(issue-prefix)s%(id-repr)s'
            '</a>')
    elif link_format == 'rst':
        tmpl = '`%(issue-prefix)s%(id-repr)s <%(url)s>`_'
    elif link_format == 'markdown':
        tmpl = '[%(issue-prefix)s%(id-repr)s](%(url)s)'
    else:
        raise ValueError('Bad link_format:{}'.format(link_format))

    (repo_name_cleaned,
     parent_group_name) = RepoGroupModel().\
        _get_group_name_and_parent(repo_name)

    # variables replacement
    named_vars = {
        'id': issue_id,
        'repo': repo_name,
        'repo_name': repo_name_cleaned,
        'group_name': parent_group_name
    }
    # named regex variables (may override the defaults above)
    named_vars.update(match_obj.groupdict())
    _url = string.Template(entry['url']).safe_substitute(**named_vars)

    data = {
        'pref': pref,
        'cls': 'issue-tracker-link',
        'url': _url,
        'id-repr': issue_id,
        'issue-prefix': entry['pref'],
        'serv': entry['url'],
    }
    if return_raw_data:
        return {
            'id': issue_id,
            'url': _url
        }
    return tmpl % data
1653 1653
1654 1654
def process_patterns(text_string, repo_name, link_format='html'):
    """
    Rewrite issue references inside *text_string* into links, based on the
    issue tracker patterns configured for *repo_name*.

    :param text_string: text to process
    :param repo_name: repository scope for settings lookup (may be '')
    :param link_format: one of html/rst/markdown
    :return: tuple ``(new_text, issues_data)`` where issues_data is a list
        of ``{'id': ..., 'url': ...}`` dicts for every matched issue
    :raises ValueError: for an unsupported *link_format*
    """
    allowed_formats = ['html', 'rst', 'markdown']
    if link_format not in allowed_formats:
        raise ValueError('Link format can be only one of:{} got {}'.format(
            allowed_formats, link_format))

    repo = None
    if repo_name:
        # Retrieving repo_name to avoid invalid repo_name to explode on
        # IssueTrackerSettingsModel but still passing invalid name further down
        repo = Repository.get_by_repo_name(repo_name, cache=True)

    settings_model = IssueTrackerSettingsModel(repo=repo)
    active_entries = settings_model.get_settings(cache=True)

    issues_data = []
    newtext = text_string

    for uid, entry in active_entries.items():
        # NOTE: use lazy %-args for logging instead of eager string
        # formatting, so the work is skipped when DEBUG is off
        log.debug('found issue tracker entry with uid %s', uid)

        if not (entry['pat'] and entry['url']):
            log.debug('skipping due to missing data')
            continue

        log.debug('issue tracker entry: uid: `%s` PAT:%s URL:%s PREFIX:%s',
                  uid, entry['pat'], entry['url'], entry['pref'])

        try:
            pattern = re.compile(r'%s' % entry['pat'])
        except re.error:
            log.exception(
                'issue tracker pattern: `%s` failed to compile',
                entry['pat'])
            continue

        data_func = partial(
            _process_url_func, repo_name=repo_name, entry=entry, uid=uid,
            return_raw_data=True)

        # collect raw issue data against the *original* text, while the
        # substitution below is applied cumulatively to newtext
        for match_obj in pattern.finditer(text_string):
            issues_data.append(data_func(match_obj))

        url_func = partial(
            _process_url_func, repo_name=repo_name, entry=entry, uid=uid,
            link_format=link_format)

        newtext = pattern.sub(url_func, newtext)
        log.debug('processed prefix:uid `%s`', uid)

    return newtext, issues_data
1706 1706
1707 1707
def urlify_commit_message(commit_text, repository=None):
    """
    Parses given text message and makes proper links.
    issues are linked to given issue-server, and rest is a commit link

    :param commit_text: commit message to urlify
    :param repository: repository name; enables commit-id linking when given
    """
    from pylons import url  # doh, we need to re-import url to mock it later

    def escaper(text):
        # parameter renamed from `string` to avoid shadowing the stdlib
        # `string` module used elsewhere in this file
        return text.replace('<', '&lt;').replace('>', '&gt;')

    newtext = escaper(commit_text)

    # extract http/https links and make them real urls
    newtext = urlify_text(newtext, safe=False)

    # urlify commits - extract commit ids and make link out of them, if we have
    # the scope of repository present.
    if repository:
        newtext = urlify_commits(newtext, repository)

    # process issue tracker patterns
    newtext, issues = process_patterns(newtext, repository or '')

    return literal(newtext)
1735 1735
1736 1736
def render_binary(repo_name, file_obj):
    """
    Choose how to render a binary file

    Returns a ``literal`` img tag for known image extensions, otherwise
    implicitly returns None (caller treats that as "no renderer").
    """
    filename = file_obj.name

    # images
    for ext in ['*.png', '*.jpg', '*.ico', '*.gif']:
        if fnmatch.fnmatch(filename, pat=ext):
            alt = filename
            src = url('files_raw_home', repo_name=repo_name,
                      revision=file_obj.commit.raw_id, f_path=file_obj.path)
            # NOTE(review): `alt` is a user-controlled filename inserted
            # unescaped into an attribute — confirm upstream escaping or
            # filenames containing quotes could break the markup
            return literal('<img class="rendered-binary" alt="{}" src="{}">'.format(alt, src))
1750 1750
1751 1751
def renderer_from_filename(filename, exclude=None):
    """
    Pick a renderer name for *filename*; only meaningful for text files.

    :param filename: file name to inspect
    :param exclude: renderer names to skip, forwarded to MarkupRenderer
    :return: renderer name (e.g. 'jupyter') or None for plain source view
    """
    # jupyter notebooks get their dedicated renderer
    if fnmatch.fnmatch(filename, pat='*.ipynb'):
        return 'jupyter'

    # otherwise fall back to generic markup detection
    markup = MarkupRenderer.renderer_from_filename(filename, exclude=exclude)
    if markup:
        return markup
    return None
1766 1766
1767 1767
def render(source, renderer='rst', mentions=False, relative_url=None,
           repo_name=None):
    """
    Render *source* with the requested renderer into a wrapped HTML block.

    :param source: raw text to render
    :param renderer: 'rst', 'markdown' or 'jupyter'
    :param mentions: enable @mention processing in rst/markdown
    :param relative_url: base url for rewriting relative links, if any
    :param repo_name: enables issue-tracker pattern processing when given
    :return: ``literal`` HTML block, or None meaning "show raw source"
    """

    def _fix_links(html_source):
        # rewrite relative links only when a base url was supplied
        if relative_url:
            return relative_links(html_source, relative_url)
        return html_source

    if renderer == 'rst':
        if repo_name:
            # process patterns on comments if we pass in repo name
            source, issues = process_patterns(
                source, repo_name, link_format='rst')
        rendered = MarkupRenderer.rst(source, mentions=mentions)
        return literal(
            '<div class="rst-block">%s</div>' % _fix_links(rendered))

    if renderer == 'markdown':
        if repo_name:
            # process patterns on comments if we pass in repo name
            source, issues = process_patterns(
                source, repo_name, link_format='markdown')
        rendered = MarkupRenderer.markdown(source, flavored=True,
                                           mentions=mentions)
        return literal(
            '<div class="markdown-block">%s</div>' % _fix_links(rendered))

    if renderer == 'jupyter':
        return literal(
            '<div class="ipynb">%s</div>' % _fix_links(
                MarkupRenderer.jupyter(source)))

    # None means just show the file-source
    return None
1805 1805
1806 1806
def commit_status(repo, commit_id):
    """Return the review status recorded for *commit_id* in *repo*."""
    model = ChangesetStatusModel()
    return model.get_status(repo, commit_id)
1809 1809
1810 1810
def commit_status_lbl(commit_status):
    """Translate a commit status code into its human readable label."""
    status_labels = dict(ChangesetStatus.STATUSES)
    return status_labels.get(commit_status)
1813 1813
1814 1814
def commit_time(repo_name, commit_id):
    """Return the date of *commit_id* inside repository *repo_name*."""
    repository = Repository.get_by_repo_name(repo_name)
    return repository.get_commit(commit_id=commit_id).date
1819 1819
1820 1820
def get_permission_name(key):
    """Return the human readable name for permission *key*, or None."""
    perm_names = dict(Permission.PERMS)
    return perm_names.get(key)
1823 1823
1824 1824
def journal_filter_help():
    """Return translated help text describing the journal filter syntax."""
    # implicit literal concatenation — the resulting string is identical
    # to the previous '+'-joined version, so translations still match
    return _(
        'Example filter terms:\n'
        ' repository:vcs\n'
        ' username:marcin\n'
        ' action:*push*\n'
        ' ip:127.0.0.1\n'
        ' date:20120101\n'
        ' date:[20120101100000 TO 20120102]\n'
        '\n'
        'Generate wildcards using \'*\' character:\n'
        ' "repository:vcs*" - search everything starting with \'vcs\'\n'
        ' "repository:*vcs*" - search for repository containing \'vcs\'\n'
        '\n'
        'Optional AND / OR operators in queries\n'
        ' "repository:vcs OR repository:test"\n'
        ' "username:test AND repository:test*"\n'
    )
1843 1843
1844 1844
def search_filter_help(searcher):
    """
    Return translated help text with example queries for *searcher*.

    :param searcher: search backend exposing `name` and `query_lang_doc`
    """
    # placeholder for backend-specific example terms (none defined yet)
    terms = ''
    help_tmpl = _(
        'Example filter terms for `{searcher}` search:\n'
        '{terms}\n'
        'Generate wildcards using \'*\' character:\n'
        ' "repo_name:vcs*" - search everything starting with \'vcs\'\n'
        ' "repo_name:*vcs*" - search for repository containing \'vcs\'\n'
        '\n'
        'Optional AND / OR operators in queries\n'
        ' "repo_name:vcs OR repo_name:test"\n'
        ' "owner:test AND repo_name:test*"\n'
        'More: {search_doc}'
    )
    return help_tmpl.format(searcher=searcher.name, terms=terms,
                            search_doc=searcher.query_lang_doc)
1861
1862
def not_mapped_error(repo_name):
    """Flash an error telling the user *repo_name* is not mapped in the db."""
    msg = _('%s repository is not mapped to db perhaps'
            ' it was created or renamed from the filesystem'
            ' please run the application again'
            ' in order to rescan repositories') % repo_name
    flash(msg, category='error')
1850 1868
1851 1869
def ip_range(ip_addr):
    """Return a human readable ``start - end`` range for *ip_addr*."""
    from rhodecode.model.db import UserIpMap
    start, end = UserIpMap._get_ip_range(ip_addr)
    return '%s - %s' % (start, end)
1856 1874
1857 1875
def form(url, method='post', needs_csrf_token=True, **attrs):
    """Wrapper around webhelpers.tags.form to prevent CSRF attacks."""
    # non-GET forms must carry a CSRF token unless explicitly opted out
    if method.lower() != 'get' and needs_csrf_token:
        raise Exception(
            'Forms to POST/PUT/DELETE endpoints should have (in general) a '
            'CSRF token. If the endpoint does not require such token you can '
            'explicitly set the parameter needs_csrf_token to false.')

    return wh_form(url, method=method, **attrs)
1867 1885
1868 1886
def secure_form(url, method="POST", multipart=False, **attrs):
    """Start a form tag pointing its action at *url*, including the hidden
    CSRF token field.

    The url options should be given either as a string, or as a
    ``url()`` function. The method for the form defaults to POST.

    Options:

    ``multipart``
        If set to True, the enctype is set to "multipart/form-data".
    ``method``
        The method to use when submitting the form, usually either
        "GET" or "POST". If "PUT", "DELETE", or another verb is used, a
        hidden input with name _method is added to simulate the verb
        over POST.

    """
    from webhelpers.pylonslib.secure_form import insecure_form
    base_form = insecure_form(url, method, multipart, **attrs)
    # append the hidden CSRF token input right after the form opening tag
    return literal("%s\n%s" % (base_form, csrf_input()))
1892 1910
def csrf_input():
    """Return a hidden ``<input>`` element carrying the current CSRF token."""
    tmpl = '<input type="hidden" id="{}" name="{}" value="{}">'
    return literal(
        tmpl.format(csrf_token_key, csrf_token_key, get_csrf_token()))
1897 1915
def dropdownmenu(name, selected, options, enable_filter=False, **attrs):
    """
    Render a ``<select>`` upgraded into a select2 drop-menu widget.

    :param name: form field name (also the default element id)
    :param selected: currently selected value
    :param options: option list passed through to webhelpers ``select``
    :param enable_filter: when True keep select2's built-in search box
    """
    select_html = select(name, selected, options, **attrs)
    # select2 initialization snippet; %s slots are (element id, extra opts)
    select2 = """
    <script>
            $(document).ready(function() {
                $('#%s').select2({
                    containerCssClass: 'drop-menu',
                    dropdownCssClass: 'drop-menu-dropdown',
                    dropdownAutoWidth: true%s
                });
            });
        </script>
    """
    # minimumResultsForSearch: -1 hides select2's search box; appended
    # only when filtering is *disabled*
    filter_option = """,
            minimumResultsForSearch: -1
    """
    input_id = attrs.get('id') or name
    filter_enabled = "" if enable_filter else filter_option
    select_script = literal(select2 % (input_id, filter_enabled))

    return literal(select_html+select_script)
1919 1937
1920 1938
def get_visual_attr(tmpl_context_var, attr_name):
    """
    A safe way to get a variable from visual variable of template context

    :param tmpl_context_var: instance of tmpl_context, usually present as `c`
    :param attr_name: name of the attribute we fetch from the c.visual
    """
    visual = getattr(tmpl_context_var, 'visual', None)
    if visual:
        return getattr(visual, attr_name, None)
    return None
1933 1951
1934 1952
def get_last_path_part(file_node):
    """Return ``../<basename>`` for *file_node*, or empty unicode if it
    has no path."""
    if not file_node.path:
        return u''

    basename = safe_unicode(file_node.path.split('/')[-1])
    return u'../' + basename
1941 1959
1942 1960
def route_url(*args, **kwds):
    """
    Wrapper around pyramid's `route_url` (fully qualified url) function.
    It is used to generate URLs from within pylons views or templates.
    This will be removed when the pyramid migration is finished.
    """
    return get_current_request().route_url(*args, **kwds)
1951 1969
1952 1970
def route_path(*args, **kwds):
    """
    Wrapper around pyramid's `route_path` function. It is used to generate
    URLs from within pylons views or templates. This will be removed when
    the pyramid migration is finished.
    """
    return get_current_request().route_path(*args, **kwds)
1961 1979
1962 1980
def route_path_or_none(*args, **kwargs):
    """Like :func:`route_path` but returns None for unknown route names."""
    try:
        path = route_path(*args, **kwargs)
    except KeyError:
        path = None
    return path
1968 1986
1969 1987
def static_url(*args, **kwds):
    """
    Wrapper around pyramid's `static_url` function. It is used to generate
    URLs from within pylons views or templates. This will be removed when
    pyramid migration is finished.
    """
    req = get_current_request()
    return req.static_url(*args, **kwds)
1978 1996
1979 1997
def resource_path(*args, **kwds):
    """
    Wrapper around pyramid's `resource_path` function. It is used to generate
    URLs from within pylons views or templates. This will be removed when
    pyramid migration is finished.
    """
    req = get_current_request()
    return req.resource_path(*args, **kwds)
@@ -1,57 +1,59 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2012-2017 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21 """
22 22 Index schema for RhodeCode
23 23 """
24 24
25 25 import importlib
26 26 import logging
27 27
28 28 log = logging.getLogger(__name__)
29 29
30 30 # leave defaults for backward compat
31 31 default_searcher = 'rhodecode.lib.index.whoosh'
32 32 default_location = '%(here)s/data/index'
33 33
34 34
class BaseSearch(object):
    """
    Base class for pluggable full text search backends.
    """
    # url of the backend's query-language documentation; shown in the UI
    # by subclasses that define one
    query_lang_doc = ''

    def __init__(self):
        pass

    def cleanup(self):
        """Release resources held by the searcher; no-op by default."""
        pass

    def search(self, query, document_type, search_user, repo_name=None,
               raise_on_exc=True):
        """Execute *query*; must be implemented by concrete backends."""
        # NotImplementedError replaces the former generic
        # Exception('NotImplemented'); it is still an Exception subclass,
        # so existing callers catching Exception keep working
        raise NotImplementedError('search() must be implemented in a subclass')
45 47
46 48
def searcher_from_config(config, prefix='search.'):
    """
    Instantiate a search backend from *config* entries starting with
    *prefix* (the prefix is stripped from the keys).

    :param config: mapping of application settings
    :param prefix: settings namespace for the searcher
    :return: instance of the configured module's ``Search`` class
    """
    _config = dict(
        (key[len(prefix):], config[key])
        for key in config.keys() if key.startswith(prefix))

    # leave defaults for backward compat
    _config.setdefault('location', default_location)
    module_name = _config.get('module', default_searcher)
    imported = importlib.import_module(module_name)
    return imported.Search(config=_config)
@@ -1,280 +1,281 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2012-2017 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21 """
22 22 Index schema for RhodeCode
23 23 """
24 24
25 25 from __future__ import absolute_import
26 26 import logging
27 27 import os
28 28 import re
29 29
30 30 from pylons.i18n.translation import _
31 31
32 32 from whoosh import query as query_lib, sorting
33 33 from whoosh.highlight import HtmlFormatter, ContextFragmenter
34 34 from whoosh.index import create_in, open_dir, exists_in, EmptyIndexError
35 35 from whoosh.qparser import QueryParser, QueryParserError
36 36
37 37 import rhodecode.lib.helpers as h
38 38 from rhodecode.lib.index import BaseSearch
39 39
40 40 log = logging.getLogger(__name__)
41 41
42 42
43 43 try:
44 44 # we first try to import from rhodecode tools, fallback to copies if
45 45 # we're unable to
46 46 from rhodecode_tools.lib.fts_index.whoosh_schema import (
47 47 ANALYZER, FILE_INDEX_NAME, FILE_SCHEMA, COMMIT_INDEX_NAME,
48 48 COMMIT_SCHEMA)
49 49 except ImportError:
50 50 log.warning('rhodecode_tools schema not available, doing a fallback '
51 51 'import from `rhodecode.lib.index.whoosh_fallback_schema`')
52 52 from rhodecode.lib.index.whoosh_fallback_schema import (
53 53 ANALYZER, FILE_INDEX_NAME, FILE_SCHEMA, COMMIT_INDEX_NAME,
54 54 COMMIT_SCHEMA)
55 55
56 56
57 57 FORMATTER = HtmlFormatter('span', between='\n<span class="break">...</span>\n')
58 58 FRAGMENTER = ContextFragmenter(200)
59 59
60 60 log = logging.getLogger(__name__)
61 61
62 62
class Search(BaseSearch):
    """
    Whoosh based implementation of the :class:`BaseSearch` interface.

    Maintains two separate whoosh indexes under ``config['location']``:
    one for file content/paths and one for commit messages.
    """
    # this also shows in UI
    query_lang_doc = 'http://whoosh.readthedocs.io/en/latest/querylang.html'
    name = 'whoosh'

    def __init__(self, config):
        super(Search, self).__init__()
        self.config = config
        # make sure the index location exists before opening/creating indexes
        if not os.path.isdir(self.config['location']):
            os.makedirs(self.config['location'])

        # open the file index if it already exists, otherwise create it
        opener = create_in
        if exists_in(self.config['location'], indexname=FILE_INDEX_NAME):
            opener = open_dir
        file_index = opener(self.config['location'], schema=FILE_SCHEMA,
                            indexname=FILE_INDEX_NAME)

        # same open-or-create dance for the commit (changeset) index
        opener = create_in
        if exists_in(self.config['location'], indexname=COMMIT_INDEX_NAME):
            opener = open_dir
        changeset_index = opener(self.config['location'], schema=COMMIT_SCHEMA,
                                 indexname=COMMIT_INDEX_NAME)

        self.commit_schema = COMMIT_SCHEMA
        self.commit_index = changeset_index
        self.file_schema = FILE_SCHEMA
        self.file_index = file_index
        # lazily created per search by _init_searcher()
        self.searcher = None

    def cleanup(self):
        # close the whoosh searcher opened by the last search() call, if any
        if self.searcher:
            self.searcher.close()

    def _extend_query(self, query):
        """
        Extend *query* so anything that looks like a commit hash also
        prefix-matches against the ``commit_id`` field.
        """
        hashes = re.compile('([0-9a-f]{5,40})').findall(query)
        if hashes:
            hashes_or_query = ' OR '.join('commit_id:%s*' % h for h in hashes)
            query = u'(%s) OR %s' % (query, hashes_or_query)
        return query

    def search(self, query, document_type, search_user,
               repo_name=None, requested_page=1, page_limit=10, sort=None,
               raise_on_exc=True):
        """
        Run *query* against the index selected by *document_type*.

        :return: dict with ``results`` (lazy :class:`WhooshResultWrapper`),
            ``count``, ``error`` and ``runtime`` keys; failures are reported
            via the ``error`` key rather than raised.

        NOTE(review): ``requested_page``, ``page_limit`` and
        ``raise_on_exc`` are accepted but unused here — presumably
        pagination happens in the caller; confirm before removing.
        """

        original_query = query
        query = self._extend_query(query)

        log.debug(u'QUERY: %s on %s', query, document_type)
        result = {
            'results': [],
            'count': 0,
            'error': None,
            'runtime': 0
        }
        search_type, index_name, schema_defn = self._prepare_for_search(
            document_type)
        self._init_searcher(index_name)
        try:
            qp = QueryParser(search_type, schema=schema_defn)
            allowed_repos_filter = self._get_repo_filter(
                search_user, repo_name)
            try:
                query = qp.parse(unicode(query))
                log.debug('query: %s (%s)' % (query, repr(query)))

                reverse, sortedby = False, None
                # only commit-message searches support date sorting
                if search_type == 'message':
                    if sort == 'oldfirst':
                        sortedby = 'date'
                        reverse = False
                    elif sort == 'newfirst':
                        sortedby = 'date'
                        reverse = True

                whoosh_results = self.searcher.search(
                    query, filter=allowed_repos_filter, limit=None,
                    sortedby=sortedby, reverse=reverse)

                # fixes for 32k limit that whoosh uses for highlight
                whoosh_results.fragmenter.charlimit = None
                res_ln = whoosh_results.scored_length()
                result['runtime'] = whoosh_results.runtime
                result['count'] = res_ln
                result['results'] = WhooshResultWrapper(
                    search_type, res_ln, whoosh_results)

            except QueryParserError:
                result['error'] = _('Invalid search query. Try quoting it.')
        except (EmptyIndexError, IOError, OSError):
            msg = _('There is no index to search in. '
                    'Please run whoosh indexer')
            log.exception(msg)
            result['error'] = msg
        except Exception:
            msg = _('An error occurred during this search operation')
            log.exception(msg)
            result['error'] = msg

        return result

    def statistics(self):
        """Return key/value stats about both indexes for the admin UI."""
        stats = [
            {'key': _('Index Type'), 'value': 'Whoosh'},
            {'key': _('File Index'), 'value': str(self.file_index)},
            {'key': _('Indexed documents'),
             'value': self.file_index.doc_count()},
            {'key': _('Last update'),
             'value': h.time_to_datetime(self.file_index.last_modified())},
            {'key': _('Commit index'), 'value': str(self.commit_index)},
            {'key': _('Indexed documents'),
             'value': str(self.commit_index.doc_count())},
            {'key': _('Last update'),
             'value': h.time_to_datetime(self.commit_index.last_modified())}
        ]
        return stats

    def _get_repo_filter(self, auth_user, repo_name):
        """
        Build a whoosh filter limiting results to repositories *auth_user*
        may read; returns None (no filter) for global admins.
        """

        allowed_to_search = [
            repo for repo, perm in
            auth_user.permissions['repositories'].items()
            if perm != 'repository.none']

        if repo_name:
            repo_filter = [query_lib.Term('repository', repo_name)]

        elif 'hg.admin' in auth_user.permissions.get('global', []):
            return None

        else:
            repo_filter = [query_lib.Term('repository', _rn)
                           for _rn in allowed_to_search]
            # in case we're not allowed to search anywhere, it's a trick
            # to tell whoosh we're filtering, on ALL results
            repo_filter = repo_filter or [query_lib.Term('repository', '')]

        return query_lib.Or(repo_filter)

    def _prepare_for_search(self, cur_type):
        """Map a document type to (whoosh field, index name, schema)."""
        search_type = {
            'content': 'content',
            'commit': 'message',
            'path': 'path',
            'repository': 'repository'
        }.get(cur_type, 'content')

        index_name = {
            'content': FILE_INDEX_NAME,
            'commit': COMMIT_INDEX_NAME,
            'path': FILE_INDEX_NAME
        }.get(cur_type, FILE_INDEX_NAME)

        schema_defn = {
            'content': self.file_schema,
            'commit': self.commit_schema,
            'path': self.file_schema
        }.get(cur_type, self.file_schema)

        log.debug('IDX: %s' % index_name)
        log.debug('SCHEMA: %s' % schema_defn)
        return search_type, index_name, schema_defn

    def _init_searcher(self, index_name):
        """Open *index_name* and store its searcher on the instance."""
        idx = open_dir(self.config['location'], indexname=index_name)
        self.searcher = idx.searcher()
        return self.searcher
228 229
229 230
class WhooshResultWrapper(object):
    """
    Lazy wrapper around whoosh results: iteration and slicing yield hit
    dicts enriched with highlights and repo-relative file paths.
    """

    def __init__(self, search_type, total_hits, results):
        self.search_type = search_type
        self.results = results
        self.total_hits = total_hits

    def __str__(self):
        return '<%s at %s>' % (self.__class__.__name__, len(self))

    def __repr__(self):
        return self.__str__()

    def __len__(self):
        return self.total_hits

    def __iter__(self):
        """
        Allows Iteration over results, lazily generating full content.

        *Requires* implementation of ``__getitem__`` method.
        """
        for hit in self.results:
            yield self.get_full_content(hit)

    def __getitem__(self, key):
        """
        Slicing support; yields enriched hits for ``results[start:stop]``.
        """
        start, stop = key.start, key.stop
        for hit in self.results[start:stop]:
            yield self.get_full_content(hit)

    def get_full_content(self, hit):
        # TODO: marcink: this feels like an overkill, there's a lot of data
        # inside hit object, and we don't need all
        res = dict(hit)

        f_path = ''  # noqa
        if self.search_type in ['content', 'path']:
            # strip the repository prefix to get a repo-relative path
            f_path = res['path'][len(res['repository']):].lstrip(os.sep)

        if self.search_type == 'content':
            res['content_short_hl'] = hit.highlights('content')
            res['f_path'] = f_path
        elif self.search_type == 'path':
            res['f_path'] = f_path
        elif self.search_type == 'message':
            res['message_hl'] = hit.highlights('message')

        return res
@@ -1,101 +1,108 b''
1 1 ## -*- coding: utf-8 -*-
2 2 <%inherit file="/base/base.mako"/>
3 3
<%def name="title()">
    ## page <title>: scoped to one repository, or global search
    %if c.repo_name:
    ${_('Search inside repository %(repo_name)s') % {'repo_name': c.repo_name}}
    %else:
    ${_('Search inside all accessible repositories')}
    %endif
    %if c.rhodecode_name:
    &middot; ${h.branding(c.rhodecode_name)}
    %endif
</%def>

<%def name="breadcrumbs_links()">
    ## breadcrumbs mirror the title, with the current query appended
    %if c.repo_name:
    ${_('Search inside repository %(repo_name)s') % {'repo_name': c.repo_name}}
    %else:
    ${_('Search inside all accessible repositories')}
    %endif
    %if c.cur_query:
    &raquo;
    ${c.cur_query}
    %endif
</%def>

<%def name="menu_bar_nav()">
    ## inside a repo highlight "repositories", otherwise the global "search"
    %if c.repo_name:
    ${self.menu_items(active='repositories')}
    %else:
    ${self.menu_items(active='search')}
    %endif
</%def>

<%def name="menu_bar_subnav()">
    ## the repo sub-menu only makes sense for repo-scoped search
    %if c.repo_name:
    ${self.repo_menu(active='options')}
    %endif
</%def>
40 40
<%def name="main()">
<div class="box">
    %if c.repo_name:
        <!-- box / title -->
        <div class="title">
            ${self.repo_page_title(c.rhodecode_db_repo)}
        </div>
        ## repo-scoped search posts back to the repository search endpoint
        ${h.form(h.url('search_repo_home',repo_name=c.repo_name),method='get')}
    %else:
        <!-- box / title -->
        <div class="title">
            ${self.breadcrumbs()}
            <ul class="links">&nbsp;</ul>
        </div>
        <!-- end box / title -->
        ${h.form(h.url('search'),method='get')}
    %endif
    <div class="form search-form">
        <div class="fields">
            <label for="q">${_('Search item')}:</label>
            ${h.text('q', c.cur_query)}

            ${h.select('type',c.search_type,[('content',_('File contents')), ('commit',_('Commit messages')), ('path',_('File names')),],id='id_search_type')}
            <input type="submit" value="${_('Search')}" class="btn"/>
            <br/>

            <div class="search-feedback-items">
            ## validation errors reported by the search schema
            % for error in c.errors:
                <span class="error-message">
                % for k,v in error.asdict().items():
                    ${k} - ${v}
                % endfor
                </span>
            % endfor
            ## collapsible example-query help, toggled by clicking the label
            <div class="field">
                <p class="filterexample" style="position: inherit" onclick="$('#search-help').toggle()">${_('Example Queries')}</p>
                <pre id="search-help" style="display: none">${h.tooltip(h.search_filter_help(c.searcher))}</pre>
            </div>

            <div class="field">${c.runtime}</div>
            </div>
        </div>
    </div>

    ${h.end_form()}
    <div class="search">
    ## dispatch to the result partial matching the selected search type
    % if c.search_type == 'content':
        <%include file='search_content.mako'/>
    % elif c.search_type == 'path':
        <%include file='search_path.mako'/>
    % elif c.search_type == 'commit':
        <%include file='search_commit.mako'/>
    % elif c.search_type == 'repository':
        <%include file='search_repository.mako'/>
    % endif
    </div>
</div>
<script>
    ## upgrade the plain search-type select into a select2 drop-menu
    $(document).ready(function(){
        $("#id_search_type").select2({
            'containerCssClass': "drop-menu",
            'dropdownCssClass': "drop-menu-dropdown",
            'dropdownAutoWidth': true,
            'minimumResultsForSearch': -1
        });
    })
</script>
</%def>
General Comments 0
You need to be logged in to leave comments. Login now