##// END OF EJS Templates
lexers: added small extensions table to extend syntax highlighting for file sources....
marcink -
r796:76d12e4e default
parent child Browse files
Show More
@@ -1,35 +1,39 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2013-2016 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21 """
22 22 Various config settings for RhodeCode
23 23 """
24 24 from rhodecode import EXTENSIONS
25 25
26 26 from rhodecode.lib.utils2 import __get_lem
27 27
28 28
29 29 # language map is also used by whoosh indexer, which for those specified
30 30 # extensions will index it's content
31 LANGUAGES_EXTENSIONS_MAP = __get_lem()
31 # custom extensions to lexers, format is 'ext': 'LexerClass'
32 extra = {
33 'vbs': 'VbNet'
34 }
35 LANGUAGES_EXTENSIONS_MAP = __get_lem(extra)
32 36
33 37 DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"
34 38
35 39 DATE_FORMAT = "%Y-%m-%d"
@@ -1,214 +1,214 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2011-2016 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21
22 22 """
Annotation library for usage in rhodecode, previously part of vcs
24 24 """
25 25
26 26 import StringIO
27 27
28 28 from pygments import highlight
29 29 from pygments.formatters import HtmlFormatter
30 30
31 31 from rhodecode.lib.vcs.exceptions import VCSError
32 32 from rhodecode.lib.vcs.nodes import FileNode
33 33
34 34
def annotate_highlight(
        filenode, annotate_from_commit_func=None,
        order=None, headers=None, **options):
    """
    Render ``filenode`` as an annotated html table with 3 columns: line
    numbers, commit information and the pygmentized line of code.

    :param filenode: FileNode object
    :param annotate_from_commit_func: function taking a commit and
        returning a single annotate cell; needs a break line at the end
    :param order: ordered sequence of ``ls`` (line numbers column),
        ``annotate`` (annotate column), ``code`` (code column); default is
        ``['ls', 'annotate', 'code']``
    :param headers: dictionary with headers (keys are whats in ``order``
        parameter)
    """
    # local import to avoid a circular dependency with helpers
    from rhodecode.lib.helpers import get_lexer_for_filenode

    options['linenos'] = True
    formatter = AnnotateHtmlFormatter(
        filenode=filenode, order=order, headers=headers,
        annotate_from_commit_func=annotate_from_commit_func, **options)
    return highlight(
        filenode.content, get_lexer_for_filenode(filenode), formatter)
59 59
60 60
class AnnotateHtmlFormatter(HtmlFormatter):
    # Pygments HtmlFormatter variant that renders an extra "annotate"
    # column (per-line commit information) next to the line numbers.

    def __init__(
            self, filenode, annotate_from_commit_func=None,
            order=None, **options):
        """
        If ``annotate_from_commit_func`` is passed, it should be a function
        which returns string from the given commit. For example, we may pass
        following function as ``annotate_from_commit_func``::

            def commit_to_anchor(commit):
                return '<a href="/commits/%s/">%s</a>\n' %\
                       (commit.id, commit.id)

        :param annotate_from_commit_func: see above
        :param order: (default: ``['ls', 'annotate', 'code']``); order of
            columns;
        :param options: standard pygment's HtmlFormatter options, there is
            extra option tough, ``headers``. For instance we can pass::

            formatter = AnnotateHtmlFormatter(filenode, headers={
                'ls': '#',
                'annotate': 'Annotate',
                'code': 'Code',
            })

        """
        super(AnnotateHtmlFormatter, self).__init__(**options)
        self.annotate_from_commit_func = annotate_from_commit_func
        self.order = order or ('ls', 'annotate', 'code')
        # NOTE(review): 'headers' is popped only *after* the base-class call,
        # so the base formatter also receives it inside **options; pygments
        # stores unrecognised options without complaining, so this is benign.
        headers = options.pop('headers', None)
        if headers and not (
                'ls' in headers and 'annotate' in headers and 'code' in headers):
            # NOTE(review): message is missing the word "contain"
            raise ValueError(
                "If headers option dict is specified it must "
                "all 'ls', 'annotate' and 'code' keys")
        self.headers = headers
        if isinstance(filenode, FileNode):
            self.filenode = filenode
        else:
            raise VCSError(
                "This formatter expect FileNode parameter, not %r" %
                type(filenode))

    def annotate_from_commit(self, commit):
        """
        Returns full html line for single commit per annotated line.
        """
        if self.annotate_from_commit_func:
            return self.annotate_from_commit_func(commit)
        else:
            # default cell: bare commit id plus the required line break
            return commit.id + '\n'

    def _wrap_tablelinenos(self, inner):
        # buffer the highlighted code while counting lines, so the
        # line-number and annotate columns can be sized to match
        dummyoutfile = StringIO.StringIO()
        lncount = 0
        for t, line in inner:
            if t:
                lncount += 1
            dummyoutfile.write(line)

        # short aliases for the relevant HtmlFormatter options
        fl = self.linenostart
        mw = len(str(lncount + fl - 1))  # width of the widest line number
        sp = self.linenospecial
        st = self.linenostep
        la = self.lineanchors
        aln = self.anchorlinenos
        if sp:
            # every ``sp``-th line number gets the "special" css class
            lines = []

            for i in range(fl, fl + lncount):
                if i % st == 0:
                    if i % sp == 0:
                        if aln:
                            lines.append('<a href="#%s-%d" class="special">'
                                         '%*d</a>' %
                                         (la, i, mw, i))
                        else:
                            lines.append('<span class="special">'
                                         '%*d</span>' % (mw, i))
                    else:
                        if aln:
                            lines.append('<a href="#%s-%d">'
                                         '%*d</a>' % (la, i, mw, i))
                        else:
                            lines.append('%*d' % (mw, i))
                else:
                    lines.append('')
            ls = '\n'.join(lines)
        else:
            lines = []
            for i in range(fl, fl + lncount):
                if i % st == 0:
                    if aln:
                        lines.append('<a href="#%s-%d">%*d</a>' \
                                     % (la, i, mw, i))
                    else:
                        lines.append('%*d' % (mw, i))
                else:
                    lines.append('')
            ls = '\n'.join(lines)

        # resolve the annotate cell for every line, caching the rendered
        # cell per commit id so each commit is only rendered once
        cached = {}
        annotate = []
        for el in self.filenode.annotate:
            commit_id = el[1]
            if commit_id in cached:
                result = cached[commit_id]
            else:
                # el[2] is a callable that lazily loads the commit object
                commit = el[2]()
                result = self.annotate_from_commit(commit)
                cached[commit_id] = result
            annotate.append(result)

        annotate = ''.join(annotate)

        # in case you wonder about the seemingly redundant <div> here:
        # since the content in the other cell also is wrapped in a div,
        # some browsers in some configurations seem to mess up the formatting.
        # NOTE(review): the triple-quoted string below is dead code (a no-op
        # expression statement) kept over from a previous implementation.
        '''
        yield 0, ('<table class="%stable">' % self.cssclass +
                  '<tr><td class="linenos"><div class="linenodiv"><pre>' +
                  ls + '</pre></div></td>' +
                  '<td class="code">')
        yield 0, dummyoutfile.getvalue()
        yield 0, '</td></tr></table>'

        '''
        headers_row = []
        if self.headers:
            headers_row = ['<tr class="annotate-header">']
            for key in self.order:
                td = ''.join(('<td>', self.headers[key], '</td>'))
                headers_row.append(td)
            headers_row.append('</tr>')

        # build the single body row with columns in the configured order
        body_row_start = ['<tr>']
        for key in self.order:
            if key == 'ls':
                body_row_start.append(
                    '<td class="linenos"><div class="linenodiv"><pre>' +
                    ls + '</pre></div></td>')
            elif key == 'annotate':
                body_row_start.append(
                    '<td class="annotate"><div class="annotatediv"><pre>' +
                    annotate + '</pre></div></td>')
            elif key == 'code':
                body_row_start.append('<td class="code">')
        yield 0, ('<table class="%stable">' % self.cssclass +
                  ''.join(headers_row) +
                  ''.join(body_row_start)
                  )
        yield 0, dummyoutfile.getvalue()
        yield 0, '</td></tr></table>'
@@ -1,1973 +1,1978 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2010-2016 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21 """
22 22 Helper functions
23 23
24 24 Consists of functions to typically be used within templates, but also
25 25 available to Controllers. This module is available to both as 'h'.
26 26 """
27 27
28 28 import random
29 29 import hashlib
30 30 import StringIO
31 31 import urllib
32 32 import math
33 33 import logging
34 34 import re
35 35 import urlparse
36 36 import time
37 37 import string
38 38 import hashlib
39 39 import pygments
40 40
41 41 from datetime import datetime
42 42 from functools import partial
43 43 from pygments.formatters.html import HtmlFormatter
44 44 from pygments import highlight as code_highlight
45 45 from pygments.lexers import (
46 46 get_lexer_by_name, get_lexer_for_filename, get_lexer_for_mimetype)
47 47 from pylons import url as pylons_url
48 48 from pylons.i18n.translation import _, ungettext
49 49 from pyramid.threadlocal import get_current_request
50 50
51 51 from webhelpers.html import literal, HTML, escape
52 52 from webhelpers.html.tools import *
53 53 from webhelpers.html.builder import make_tag
54 54 from webhelpers.html.tags import auto_discovery_link, checkbox, css_classes, \
55 55 end_form, file, form as wh_form, hidden, image, javascript_link, link_to, \
56 56 link_to_if, link_to_unless, ol, required_legend, select, stylesheet_link, \
57 57 submit, text, password, textarea, title, ul, xml_declaration, radio
58 58 from webhelpers.html.tools import auto_link, button_to, highlight, \
59 59 js_obfuscate, mail_to, strip_links, strip_tags, tag_re
60 60 from webhelpers.pylonslib import Flash as _Flash
61 61 from webhelpers.text import chop_at, collapse, convert_accented_entities, \
62 62 convert_misc_entities, lchop, plural, rchop, remove_formatting, \
63 63 replace_whitespace, urlify, truncate, wrap_paragraphs
64 64 from webhelpers.date import time_ago_in_words
65 65 from webhelpers.paginate import Page as _Page
66 66 from webhelpers.html.tags import _set_input_attrs, _set_id_attr, \
67 67 convert_boolean_attrs, NotGiven, _make_safe_id_component
68 68 from webhelpers2.number import format_byte_size
69 69
70 70 from rhodecode.lib.annotate import annotate_highlight
71 71 from rhodecode.lib.action_parser import action_parser
72 72 from rhodecode.lib.ext_json import json
73 73 from rhodecode.lib.utils import repo_name_slug, get_custom_lexer
74 74 from rhodecode.lib.utils2 import str2bool, safe_unicode, safe_str, \
75 75 get_commit_safe, datetime_to_time, time_to_datetime, time_to_utcdatetime, \
76 76 AttributeDict, safe_int, md5, md5_safe
77 77 from rhodecode.lib.markup_renderer import MarkupRenderer
78 78 from rhodecode.lib.vcs.exceptions import CommitDoesNotExistError
79 79 from rhodecode.lib.vcs.backends.base import BaseChangeset, EmptyCommit
80 80 from rhodecode.config.conf import DATE_FORMAT, DATETIME_FORMAT
81 81 from rhodecode.model.changeset_status import ChangesetStatusModel
82 82 from rhodecode.model.db import Permission, User, Repository
83 83 from rhodecode.model.repo_group import RepoGroupModel
84 84 from rhodecode.model.settings import IssueTrackerSettingsModel
85 85
# module level logger
log = logging.getLogger(__name__)


# aliases for the default (anonymous) user identity from the db model
DEFAULT_USER = User.DEFAULT_USER
DEFAULT_USER_EMAIL = User.DEFAULT_USER_EMAIL
91 91
92 92
def url(*args, **kw):
    """Generate an url; thin pass-through to ``pylons.url``."""
    return pylons_url(*args, **kw)
95 95
96 96
def pylons_url_current(*args, **kw):
    """
    This function overrides pylons.url.current() which returns the current
    path so that it will also work from a pyramid only context. This
    should be removed once port to pyramid is complete.
    """
    if args or kw:
        return pylons_url.current(*args, **kw)
    # no arguments: answer from the pyramid request instead
    return get_current_request().path

url.current = pylons_url_current
109 109
110 110
def asset(path, ver=None):
    """
    Helper to generate a static asset file path for rhodecode assets

    eg. h.asset('images/image.png', ver='3923')

    :param path: path of asset
    :param ver: optional version query param to append as ?ver=
    """
    query = {'ver': ver} if ver else {}
    return get_current_request().static_path(
        'rhodecode:public/{}'.format(path), _query=query)
126 126
127 127
def html_escape(text, html_escape_table=None):
    """Produce entities within text."""
    table = html_escape_table or {
        "&": "&amp;",
        '"': "&quot;",
        "'": "&apos;",
        ">": "&gt;",
        "<": "&lt;",
    }
    return "".join(table.get(char, char) for char in text)
139 139
140 140
def chop_at_smart(s, sub, inclusive=False, suffix_if_chopped=None):
    """
    Truncate string ``s`` at the first occurrence of ``sub``.

    If ``inclusive`` is true, truncate just after ``sub`` rather than at it.
    """
    pos = s.find(sub)
    if pos == -1:
        # nothing to chop
        return s

    if inclusive:
        pos += len(sub)

    chopped = s[:pos]
    remainder = s[pos:].strip()

    # append the suffix only when something non-whitespace was chopped off
    suffix = suffix_if_chopped or ''
    if remainder and suffix:
        chopped += suffix

    return chopped
162 162
163 163
def shorter(text, size=20):
    """Ellipsize ``text`` so the result is at most ``size`` characters."""
    postfix = '...'
    if len(text) <= size:
        return text
    return text[:size - len(postfix)] + postfix
169 169
170 170
def _reset(name, value=None, id=NotGiven, type="reset", **attrs):
    """
    Create an html reset-button input element.

    :param name: input name attribute
    :param value: button label/value
    :param id: element id; webhelpers ``NotGiven`` derives it from name
    :param type: input type, "reset" by default
    :param attrs: extra html attributes; ``disabled`` is converted to a
        proper boolean attribute
    """
    # the webhelpers private helpers mutate ``attrs`` in place, in order
    _set_input_attrs(attrs, type, name, value)
    _set_id_attr(attrs, id, name)
    convert_boolean_attrs(attrs, ["disabled"])
    return HTML.input(**attrs)

# public aliases exposed via the ``h`` helpers namespace
reset = _reset
safeid = _make_safe_id_component
182 182
183 183
def branding(name, length=40):
    """Return the branding ``name`` cut to ``length`` characters, without
    a truncation indicator."""
    return truncate(name, length, indicator="")
186 186
187 187
def FID(raw_id, path):
    """
    Create a unique, url-safe ID for a filenode, built from the shortened
    commit id and a hash of the file path.

    :param raw_id: full commit id
    :param path: file path within the repository
    """
    return 'c-{}-{}'.format(short_id(raw_id), md5_safe(path)[:12])
198 198
199 199
class _GetError(object):
    """Fetch an error for ``field_name`` from ``form_errors`` and render
    it as a span-wrapped error message.

    :param field_name: field to fetch errors for
    :param form_errors: form errors dict
    """

    def __call__(self, field_name, form_errors):
        # no errors at all, or none for this field -> render nothing
        if not form_errors or field_name not in form_errors:
            return None
        tmpl = """<span class="error_msg">%s</span>"""
        return literal(tmpl % form_errors.get(field_name))

get_error = _GetError()
214 214
215 215
class _ToolTip(object):
    """Callable that escapes arbitrary text for safe use as a tooltip
    title attribute."""

    def __call__(self, tooltip_title, trim_at=50):
        """
        Special function just to wrap our text into nice formatted
        autowrapped text

        :param tooltip_title:
        """
        # escape, then make sure angle brackets are entity-encoded too
        escaped = escape(tooltip_title)
        return escaped.replace('<', '&lt;').replace('>', '&gt;')

tooltip = _ToolTip()
229 229
230 230
def files_breadcrumbs(repo_name, commit_id, file_path):
    """
    Render a '/'-separated breadcrumb for ``file_path``: the repo name and
    every intermediate directory become pjax links into the files view,
    the final path segment is plain escaped text.
    """
    if isinstance(file_path, str):
        file_path = safe_unicode(file_path)

    # TODO: johbo: Is this always a url like path, or is this operating
    # system dependent?
    segments = file_path.split('/')

    def _segment_link(label, f_path):
        # link into the files view at the given path
        return link_to(
            label,
            url('files_home',
                repo_name=repo_name,
                revision=commit_id,
                f_path=f_path),
            class_='pjax-link')

    repo_name_html = escape(repo_name)
    if segments == ['']:
        # repository root with no file path: plain repo name, no link
        crumbs = [repo_name_html]
    else:
        crumbs = [_segment_link(repo_name_html, '')]

    last = len(segments) - 1
    for idx, segment in enumerate(segments):
        if not segment:
            continue
        segment_html = escape(segment)
        if idx == last:
            crumbs.append(segment_html)
        else:
            crumbs.append(
                _segment_link(segment_html, '/'.join(segments[:idx + 1])))

    return literal('/'.join(crumbs))
271 271
272 272
class CodeHtmlFormatter(HtmlFormatter):
    """
    My code Html Formatter for source codes
    """

    def wrap(self, source, outfile):
        # wrap the token stream as <div><pre>...</pre></div>
        return self._wrap_div(self._wrap_pre(self._wrap_code(source)))

    def _wrap_code(self, source):
        # give every rendered source line an anchor-able id (L1, L2, ...)
        for cnt, it in enumerate(source):
            i, t = it
            t = '<div id="L%s">%s</div>' % (cnt + 1, t)
            yield i, t

    def _wrap_tablelinenos(self, inner):
        # buffer the highlighted code while counting lines, so the
        # line-number column can be rendered with the right width
        dummyoutfile = StringIO.StringIO()
        lncount = 0
        for t, line in inner:
            if t:
                lncount += 1
            dummyoutfile.write(line)

        # short aliases for the relevant HtmlFormatter options
        fl = self.linenostart
        mw = len(str(lncount + fl - 1))  # width of the widest line number
        sp = self.linenospecial
        st = self.linenostep
        la = self.lineanchors
        aln = self.anchorlinenos
        nocls = self.noclasses
        if sp:
            # every ``sp``-th line number gets the "special" css class
            lines = []

            for i in range(fl, fl + lncount):
                if i % st == 0:
                    if i % sp == 0:
                        if aln:
                            lines.append('<a href="#%s%d" class="special">%*d</a>' %
                                         (la, i, mw, i))
                        else:
                            lines.append('<span class="special">%*d</span>' % (mw, i))
                    else:
                        if aln:
                            lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
                        else:
                            lines.append('%*d' % (mw, i))
                else:
                    lines.append('')
            ls = '\n'.join(lines)
        else:
            lines = []
            for i in range(fl, fl + lncount):
                if i % st == 0:
                    if aln:
                        lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
                    else:
                        lines.append('%*d' % (mw, i))
                else:
                    lines.append('')
            ls = '\n'.join(lines)

        # in case you wonder about the seemingly redundant <div> here: since the
        # content in the other cell also is wrapped in a div, some browsers in
        # some configurations seem to mess up the formatting...
        if nocls:
            yield 0, ('<table class="%stable">' % self.cssclass +
                      '<tr><td><div class="linenodiv" '
                      'style="background-color: #f0f0f0; padding-right: 10px">'
                      '<pre style="line-height: 125%">' +
                      ls + '</pre></div></td><td id="hlcode" class="code">')
        else:
            yield 0, ('<table class="%stable">' % self.cssclass +
                      '<tr><td class="linenos"><div class="linenodiv"><pre>' +
                      ls + '</pre></div></td><td id="hlcode" class="code">')
        yield 0, dummyoutfile.getvalue()
        yield 0, '</td></tr></table>'
348 348
349 349
class SearchContentCodeHtmlFormatter(CodeHtmlFormatter):
    """
    Formatter used for full-text search results: renders only the lines
    listed in ``only_line_numbers``, each one in its own table row with a
    line-number cell linking back to its location in the file view, and an
    ellipsis row between non-adjacent matches.
    """

    def __init__(self, **kw):
        # only show these line numbers if set
        self.only_lines = kw.pop('only_line_numbers', [])
        self.query_terms = kw.pop('query_terms', [])
        self.max_lines = kw.pop('max_lines', 5)
        self.line_context = kw.pop('line_context', 3)
        self.url = kw.pop('url', None)

        # FIX: was ``super(CodeHtmlFormatter, self)``, which skipped
        # CodeHtmlFormatter in the MRO; harmless only because that class
        # defines no __init__, but incorrect and fragile.
        super(SearchContentCodeHtmlFormatter, self).__init__(**kw)

    def _wrap_code(self, source):
        # search results render each line inside its own <pre>
        for cnt, it in enumerate(source):
            i, t = it
            t = '<pre>%s</pre>' % t
            yield i, t

    def _wrap_tablelinenos(self, inner):
        yield 0, '<table class="code-highlight %stable">' % self.cssclass

        last_shown_line_number = 0
        current_line_number = 1

        for t, line in inner:
            if not t:
                # pass through non-source tokens unchanged
                yield t, line
                continue

            if current_line_number in self.only_lines:
                # separate non-adjacent matches with an ellipsis row
                if last_shown_line_number + 1 != current_line_number:
                    yield 0, '<tr>'
                    yield 0, '<td class="line">...</td>'
                    yield 0, '<td id="hlcode" class="code"></td>'
                    yield 0, '</tr>'

                yield 0, '<tr>'
                if self.url:
                    yield 0, '<td class="line"><a href="%s#L%i">%i</a></td>' % (
                        self.url, current_line_number, current_line_number)
                else:
                    yield 0, '<td class="line"><a href="">%i</a></td>' % (
                        current_line_number)
                yield 0, '<td id="hlcode" class="code">' + line + '</td>'
                yield 0, '</tr>'

                last_shown_line_number = current_line_number

            current_line_number += 1

        yield 0, '</table>'
401 401
402 402
def extract_phrases(text_query):
    """
    Extracts phrases from search term string making sure phrases
    contained in double quotes are kept together - and discarding empty values
    or fully whitespace values eg.

    'some text "a phrase" more' => ['some', 'text', 'a phrase', 'more']

    """
    collected = []
    current = ''
    quoted = False

    for char in text_query:
        if char == '"':
            # a quote both closes the current chunk and toggles phrase mode
            collected.append(current)
            current = ''
            quoted = not quoted
        elif char == ' ' and not quoted:
            # whitespace outside quotes ends a term
            collected.append(current)
            current = ''
        else:
            # regular character (spaces inside quotes included)
            current += char

    collected.append(current)
    return [phrase.strip() for phrase in collected if phrase.strip()]
442 442
443 443
def get_matching_offsets(text, phrases):
    """
    Returns a list of string offsets in `text` that the list of `phrases` match

    >>> get_matching_offsets('some text here', ['some', 'here'])
    [(0, 4), (10, 14)]

    """
    offsets = []
    for phrase in phrases:
        # FIX: phrases are plain text, not regex patterns - escape them so
        # metacharacters (e.g. '(', '*', '.') can't raise re.error or
        # silently match the wrong spans
        for match in re.finditer(re.escape(phrase), text):
            offsets.append((match.start(), match.end()))

    return offsets
458 458
459 459
def normalize_text_for_matching(x):
    """
    Lower-cases the string and turns every non-word character into a
    space, useful for comparing two text strings without punctuation
    """
    # \W is the complement of \w, i.e. identical to [^\w]
    return re.sub(r'\W', ' ', x.lower())
466 466
467 467
def get_matching_line_offsets(lines, terms):
    """ Return a dict mapping `lines` indices (starting from 1) to the
    match offsets produced by a text search query

    :param lines: list of strings representing lines
    :param terms: search term string to match in lines eg. 'some text'
    eg.

        text = '''
        words words words
        words words words
        some text some
        words words words
        words words words
        text here what
        '''
        get_matching_line_offsets(text, 'text', context=1)
        {3: [(5, 9)], 6: [(0, 4)]]

    """
    # normalize the query phrases once, up front
    phrases = [normalize_text_for_matching(phrase)
               for phrase in extract_phrases(terms)]

    matching_lines = {}
    for line_index, line in enumerate(lines, start=1):
        offsets = get_matching_offsets(
            normalize_text_for_matching(line), phrases)
        if offsets:
            matching_lines[line_index] = offsets

    return matching_lines
501 501
502 502
def get_lexer_safe(mimetype=None, filepath=None):
    """
    Tries to return a relevant pygments lexer using mimetype/filepath name,
    defaulting to plain text if none could be found
    """
    lexer = None
    try:
        if mimetype:
            lexer = get_lexer_for_mimetype(mimetype)
        if not lexer:
            lexer = get_lexer_for_filename(filepath)
    except pygments.util.ClassNotFound:
        # fall through to the plain-text default below
        pass

    return lexer or get_lexer_by_name('text')
521 521
522 522
def get_lexer_for_filenode(filenode):
    """Pick the lexer for ``filenode``: a custom lexer registered for its
    extension wins, otherwise fall back to the node's own detection."""
    return get_custom_lexer(filenode.extension) or filenode.lexer
526
527
def pygmentize(filenode, **kwargs):
    """
    pygmentize function using pygments

    :param filenode:
    """
    lexer = get_lexer_for_filenode(filenode)
    formatter = CodeHtmlFormatter(**kwargs)
    return literal(code_highlight(filenode.content, lexer, formatter))
532 537
533 538
def pygmentize_annotation(repo_name, filenode, **kwargs):
    """
    pygmentize function for annotation

    :param repo_name: repository name, used to build changeset links
    :param filenode: FileNode to annotate and highlight
    :param kwargs: extra options forwarded to ``annotate_highlight``
    """

    # commit raw_id -> css color string, filled lazily from ``cgenerator``
    color_dict = {}

    def gen_color(n=10000):
        """generator for getting n of evenly distributed colors using
        hsv color and golden ratio. It always return same order of colors

        :returns: RGB tuple
        """

        def hsv_to_rgb(h, s, v):
            # standard hsv -> rgb conversion
            if s == 0.0:
                return v, v, v
            i = int(h * 6.0)  # XXX assume int() truncates!
            f = (h * 6.0) - i
            p = v * (1.0 - s)
            q = v * (1.0 - s * f)
            t = v * (1.0 - s * (1.0 - f))
            i = i % 6
            if i == 0:
                return v, t, p
            if i == 1:
                return q, v, p
            if i == 2:
                return p, v, t
            if i == 3:
                return p, q, v
            if i == 4:
                return t, p, v
            if i == 5:
                return v, p, q

        # stepping the hue by the golden ratio gives a deterministic,
        # evenly distributed and repeatable color sequence
        golden_ratio = 0.618033988749895
        h = 0.22717784590367374

        for _ in xrange(n):
            h += golden_ratio
            h %= 1
            HSV_tuple = [h, 0.95, 0.95]
            RGB_tuple = hsv_to_rgb(*HSV_tuple)
            yield map(lambda x: str(int(x * 256)), RGB_tuple)

    cgenerator = gen_color()

    def get_color_string(commit_id):
        # one stable color per commit id, assigned on first sight
        if commit_id in color_dict:
            col = color_dict[commit_id]
        else:
            col = color_dict[commit_id] = cgenerator.next()
        return "color: rgb(%s)! important;" % (', '.join(col))

    def url_func(repo_name):

        def _url_func(commit):
            # build the tooltip'd, colorized changeset link for one commit
            author = commit.author
            date = commit.date
            message = tooltip(commit.message)

            tooltip_html = ("<div style='font-size:0.8em'><b>Author:</b>"
                            " %s<br/><b>Date:</b> %s</b><br/><b>Message:"
                            "</b> %s<br/></div>")

            tooltip_html = tooltip_html % (author, date, message)
            lnk_format = '%5s:%s' % ('r%s' % commit.idx, commit.short_id)
            uri = link_to(
                lnk_format,
                url('changeset_home', repo_name=repo_name,
                    revision=commit.raw_id),
                style=get_color_string(commit.raw_id),
                class_='tooltip',
                title=tooltip_html
            )

            # annotate cells require a trailing break line
            uri += '\n'
            return uri
        return _url_func

    return literal(annotate_highlight(filenode, url_func(repo_name), **kwargs))
618 623
619 624
def is_following_repo(repo_name, user_id):
    """Check via ScmModel whether ``user_id`` follows ``repo_name``."""
    from rhodecode.model.scm import ScmModel
    return ScmModel().is_following_repo(repo_name, user_id)
623 628
624 629
625 630 class _Message(object):
626 631 """A message returned by ``Flash.pop_messages()``.
627 632
628 633 Converting the message to a string returns the message text. Instances
629 634 also have the following attributes:
630 635
631 636 * ``message``: the message text.
632 637 * ``category``: the category specified when the message was created.
633 638 """
634 639
635 640 def __init__(self, category, message):
636 641 self.category = category
637 642 self.message = message
638 643
639 644 def __str__(self):
640 645 return self.message
641 646
642 647 __unicode__ = __str__
643 648
644 649 def __html__(self):
645 650 return escape(safe_unicode(self.message))
646 651
647 652
class Flash(_Flash):

    def pop_messages(self):
        """Return all accumulated messages and delete them from the session.

        The return value is a list of ``Message`` objects.
        """
        from pylons import session

        messages = []

        # old-style pylons flash messages, stored as (category, message)
        for category, message in session.pop(self.session_key, []):
            messages.append(_Message(category, message))

        # new-style pyramid flash messages, one queue per category
        for category in self.categories:
            messages.extend(
                _Message(category, msg)
                for msg in session.pop_flash(queue=category))
        # messages from the default queue map to the 'notice' category
        messages.extend(
            _Message('notice', msg) for msg in session.pop_flash())

        session.save()
        return messages

flash = Flash()
677 682
678 683 #==============================================================================
679 684 # SCM FILTERS available via h.
680 685 #==============================================================================
681 686 from rhodecode.lib.vcs.utils import author_name, author_email
682 687 from rhodecode.lib.utils2 import credentials_filter, age as _age
683 688 from rhodecode.model.db import User, ChangesetStatus
684 689
# template helper aliases exposed via ``h.`` in mako templates
age = _age
capitalize = lambda x: x.capitalize()
email = author_email
short_id = lambda x: x[:12]  # abbreviated commit hash (first 12 hex chars)
hide_credentials = lambda x: ''.join(credentials_filter(x))
690 695
691 696
def age_component(datetime_iso, value=None, time_is_local=False):
    """
    Render an auto-updating ``<time class="timeago">`` html element.

    :param datetime_iso: datetime instance (or a pre-formatted ISO string)
        used for the ``datetime`` attribute
    :param value: optional pre-rendered title text; defaults to
        ``format_date(datetime_iso)``
    :param time_is_local: when True the naive datetime is assumed to be in
        server-local time and the local utc-offset is appended instead
    """
    title = value or format_date(datetime_iso)

    # detect if we have a timezone info, otherwise, add it.
    # NOTE(fix): initialize to '' so aware datetimes or plain-string input
    # don't hit a NameError below -- previously ``tzinfo`` was only bound
    # inside the isinstance() branch.
    tzinfo = ''
    if isinstance(datetime_iso, datetime) and not datetime_iso.tzinfo:
        tzinfo = '+00:00'

        if time_is_local:
            tzinfo = time.strftime("+%H:%M",
                time.gmtime(
                    (datetime.now() - datetime.utcnow()).seconds + 1
                )
            )

    return literal(
        '<time class="timeago tooltip" '
        'title="{1}" datetime="{0}{2}">{1}</time>'.format(
            datetime_iso, title, tzinfo))
710 715
711 716
def _shorten_commit_id(commit_id):
    """Trim a full commit hash down to the configured display length."""
    from rhodecode import CONFIG
    display_len = safe_int(CONFIG.get('rhodecode_show_sha_length', 12))
    return commit_id[:display_len]
716 721
717 722
def show_id(commit):
    """
    Configurable function that shows ID
    by default it's r123:fffeeefffeee

    :param commit: commit instance
    """
    from rhodecode import CONFIG

    raw_id = _shorten_commit_id(commit.raw_id)
    # the numeric revision prefix can be disabled via settings
    if str2bool(CONFIG.get('rhodecode_show_revision_number', True)):
        return 'r%s:%s' % (commit.idx, raw_id)
    return '%s' % (raw_id, )
733 738
734 739
def format_date(date):
    """
    use a standardized formatting for dates used in RhodeCode

    :param date: date/datetime object
    :return: formatted date
    """
    if not date:
        return u""
    return safe_unicode(date.strftime("%a, %d %b %Y %H:%M:%S"))
748 753
749 754
class _RepoChecker(object):
    """Callable predicate: does a repo(-like) object match a backend alias?"""

    def __init__(self, backend_alias):
        self._backend_alias = backend_alias

    def __call__(self, repository):
        # accept Repository db objects ('repo_type'), vcs backends ('alias')
        # or a plain alias string
        for attr in ('alias', 'repo_type'):
            if hasattr(repository, attr):
                return getattr(repository, attr) == self._backend_alias
        return repository == self._backend_alias

is_git = _RepoChecker('git')
is_hg = _RepoChecker('hg')
is_svn = _RepoChecker('svn')
767 772
768 773
def get_repo_type_by_name(repo_name):
    """Resolve the backend type ('hg'/'git'/'svn') for a repository name."""
    return Repository.get_by_repo_name(repo_name).repo_type
772 777
773 778
def is_svn_without_proxy(repository):
    """True when *repository* is subversion and the http proxy is disabled."""
    if not is_svn(repository):
        return False
    from rhodecode.model.settings import VcsSettingsModel
    conf = VcsSettingsModel().get_ui_settings_as_config_obj()
    return not str2bool(conf.get('vcs_svn_proxy', 'http_requests_enabled'))
780 785
781 786
def discover_user(author):
    """
    Tries to discover RhodeCode User based on the author string. Author
    string is typically `FirstName LastName <email@address.com>`
    """
    # an already resolved User instance is returned untouched
    if isinstance(author, User):
        return author

    # valid email in the attribute passed, see if they're in the system
    _email = author_email(author)
    if _email != '':
        user = User.get_by_email(_email, case_insensitive=True, cache=True)
        if user is not None:
            return user

    # maybe it's a username, we try to extract it and fetch by username
    user = User.get_by_username(
        author_name(author), case_insensitive=True, cache=True)
    if user is not None:
        return user

    return None
806 811
807 812
def email_or_none(author):
    """Extract the email for *author*, falling back to a username lookup."""
    # extract email from the commit string
    _email = author_email(author)
    if _email != '':
        return _email

    # no email present: maybe it contains a username with a known email
    user = User.get_by_username(
        author_name(author), case_insensitive=True, cache=True)
    if user is not None:
        return user.email

    # no valid email, not a valid user in the system, none!
    return None
825 830
826 831
def link_to_user(author, length=0, **kwargs):
    """Render a profile link for *author*, or escaped text if unknown."""
    user = discover_user(author)
    # user can be None, but if we have it already it means we can re-use it
    # in the person() function, so we save 1 intensive-query
    if user:
        author = user

    display_person = person(author, 'username_or_name_or_email')
    if length:
        display_person = shorter(display_person, length)

    if not user:
        return escape(display_person)
    return link_to(
        escape(display_person),
        url('user_profile', username=user.username),
        **kwargs)
845 850
846 851
def person(author, show_attr="username_and_name"):
    """Return *show_attr* of the matching User, else the raw name or email."""
    user = discover_user(author)
    if user:
        return getattr(user, show_attr)
    return author_name(author) or email(author)
855 860
856 861
def author_string(email):
    """Format 'First Last &lt;email&gt;' for a known user, else the bare email."""
    if not email:
        return None
    user = User.get_by_email(email, case_insensitive=True, cache=True)
    if user and (user.firstname or user.lastname):
        return '%s %s &lt;%s&gt;' % (user.firstname, user.lastname, email)
    return email
869 874
870 875
def person_by_id(id_, show_attr="username_and_name"):
    """Resolve a numeric user id to *show_attr*; pass the value through
    (int-coerced when numeric) if no matching user exists."""
    # maybe it's an ID?
    if str(id_).isdigit() or isinstance(id_, int):
        id_ = int(id_)
        user = User.get(id_)
        if user is not None:
            # attr to return from fetched user
            return getattr(user, show_attr)
    return id_
882 887
883 888
def gravatar_with_user(author, show_disabled=False):
    # renders the 'gravatar_with_user' mako def from base/base.html
    from rhodecode.lib.utils import PartialRenderer
    _render = PartialRenderer('base/base.html')
    return _render('gravatar_with_user', author, show_disabled=show_disabled)
888 893
889 894
def desc_stylize(value):
    """
    converts tags from value into html equivalent

    :param value: raw text possibly containing ``[tag]`` style metatags
    :return: text with metatags replaced by styled ``<div>`` elements
    """
    if not value:
        return ''

    # ordered (pattern, replacement) rules; order matters because the
    # generic ``[word]`` rule must run after the specialized ones
    rules = [
        # [see => some/url]
        (r'\[see\ \=\>\ *([a-zA-Z0-9\/\=\?\&\ \:\/\.\-]*)\]',
         '<div class="metatag" tag="see">see =&gt; \\1 </div>'),
        # [license => MIT]
        # NOTE(fix): the replacement previously used ``http:\/\/`` -- re.sub
        # keeps the backslashes verbatim in the template, producing a broken
        # href; a plain ``http://`` is correct.
        (r'\[license\ \=\>\ *([a-zA-Z0-9\/\=\?\&\ \:\/\.\-]*)\]',
         '<div class="metatag" tag="license"><a href="http://www.opensource.org/licenses/\\1">\\1</a></div>'),
        # [requires => repo] / [recommends => repo] / [conflicts => repo] / [base => repo]
        (r'\[(requires|recommends|conflicts|base)\ \=\>\ *([a-zA-Z0-9\-\/]*)\]',
         '<div class="metatag" tag="\\1">\\1 =&gt; <a href="/\\2">\\2</a></div>'),
        # [lang => python] / [language => python]
        (r'\[(lang|language)\ \=\>\ *([a-zA-Z\-\/\#\+]*)\]',
         '<div class="metatag" tag="lang">\\2</div>'),
        # plain [tag]
        (r'\[([a-z]+)\]',
         '<div class="metatag" tag="\\1">\\1</div>'),
    ]
    for pattern, replacement in rules:
        value = re.sub(pattern, replacement, value)

    return value
911 916
912 917
def escaped_stylize(value):
    """
    converts tags from value into html equivalent, but escaping its value first
    """
    if not value:
        return ''

    # Using default webhelper escape method, but has to force it as a
    # plain unicode instead of a markup tag to be used in regex expressions
    value = unicode(escape(safe_unicode(value)))

    value = re.sub(r'\[see\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]',
                   '<div class="metatag" tag="see">see =&gt; \\1 </div>', value)
    # NOTE(fix): the href previously used ``http:\/\/`` -- re.sub keeps the
    # backslashes verbatim in the replacement, yielding a broken link target
    value = re.sub(r'\[license\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]',
                   '<div class="metatag" tag="license"><a href="http://www.opensource.org/licenses/\\1">\\1</a></div>', value)
    value = re.sub(r'\[(requires|recommends|conflicts|base)\ \=\&gt;\ *([a-zA-Z0-9\-\/]*)\]',
                   '<div class="metatag" tag="\\1">\\1 =&gt; <a href="/\\2">\\2</a></div>', value)
    value = re.sub(r'\[(lang|language)\ \=\&gt;\ *([a-zA-Z\-\/\#\+]*)\]',
                   '<div class="metatag" tag="lang">\\2</div>', value)
    value = re.sub(r'\[([a-z]+)\]',
                   '<div class="metatag" tag="\\1">\\1</div>', value)

    return value
936 941
937 942
def bool2icon(value):
    """
    Returns boolean value of a given value, represented as html element with
    classes that will represent icons

    :param value: given value to convert to html node
    """
    # truthiness of ``value`` selects the icon class
    icon_class = "icon-true" if value else "icon-false"
    return HTML.tag('i', class_=icon_class)
950 955
951 956
952 957 #==============================================================================
953 958 # PERMS
954 959 #==============================================================================
955 960 from rhodecode.lib.auth import HasPermissionAny, HasPermissionAll, \
956 961 HasRepoPermissionAny, HasRepoPermissionAll, HasRepoGroupPermissionAll, \
957 962 HasRepoGroupPermissionAny, HasRepoPermissionAnyApi, get_csrf_token, \
958 963 csrf_token_key
959 964
960 965
961 966 #==============================================================================
962 967 # GRAVATAR URL
963 968 #==============================================================================
class InitialsGravatar(object):
    # Generates a deterministic, colored svg avatar with the user's
    # initials, used when gravatar is disabled or no email is known.

    def __init__(self, email_address, first_name, last_name, size=30,
                 background=None, text_color='#fff'):
        # size: square edge in px; background defaults to a color derived
        # deterministically from the email address
        self.size = size
        self.first_name = first_name
        self.last_name = last_name
        self.email_address = email_address
        self.background = background or self.str2color(email_address)
        self.text_color = text_color

    def get_color_bank(self):
        """
        returns a predefined list of colors that gravatars can use.
        Those are randomized distinct colors that guarantee readability and
        uniqueness.

        generated with: http://phrogz.net/css/distinct-colors.html
        """
        return [
            '#bf3030', '#a67f53', '#00ff00', '#5989b3', '#392040', '#d90000',
            '#402910', '#204020', '#79baf2', '#a700b3', '#bf6060', '#7f5320',
            '#008000', '#003059', '#ee00ff', '#ff0000', '#8c4b00', '#007300',
            '#005fb3', '#de73e6', '#ff4040', '#ffaa00', '#3df255', '#203140',
            '#47004d', '#591616', '#664400', '#59b365', '#0d2133', '#83008c',
            '#592d2d', '#bf9f60', '#73e682', '#1d3f73', '#73006b', '#402020',
            '#b2862d', '#397341', '#597db3', '#e600d6', '#a60000', '#736039',
            '#00b318', '#79aaf2', '#330d30', '#ff8080', '#403010', '#16591f',
            '#002459', '#8c4688', '#e50000', '#ffbf40', '#00732e', '#102340',
            '#bf60ac', '#8c4646', '#cc8800', '#00a642', '#1d3473', '#b32d98',
            '#660e00', '#ffd580', '#80ffb2', '#7391e6', '#733967', '#d97b6c',
            '#8c5e00', '#59b389', '#3967e6', '#590047', '#73281d', '#665200',
            '#00e67a', '#2d50b3', '#8c2377', '#734139', '#b2982d', '#16593a',
            '#001859', '#ff00aa', '#a65e53', '#ffcc00', '#0d3321', '#2d3959',
            '#731d56', '#401610', '#4c3d00', '#468c6c', '#002ca6', '#d936a3',
            '#d94c36', '#403920', '#36d9a3', '#0d1733', '#592d4a', '#993626',
            '#cca300', '#00734d', '#46598c', '#8c005e', '#7f1100', '#8c7000',
            '#00a66f', '#7382e6', '#b32d74', '#d9896c', '#ffe680', '#1d7362',
            '#364cd9', '#73003d', '#d93a00', '#998a4d', '#59b3a1', '#5965b3',
            '#e5007a', '#73341d', '#665f00', '#00b38f', '#0018b3', '#59163a',
            '#b2502d', '#bfb960', '#00ffcc', '#23318c', '#a6537f', '#734939',
            '#b2a700', '#104036', '#3d3df2', '#402031', '#e56739', '#736f39',
            '#79f2ea', '#000059', '#401029', '#4c1400', '#ffee00', '#005953',
            '#101040', '#990052', '#402820', '#403d10', '#00ffee', '#0000d9',
            '#ff80c4', '#a66953', '#eeff00', '#00ccbe', '#8080ff', '#e673a1',
            '#a62c00', '#474d00', '#1a3331', '#46468c', '#733950', '#662900',
            '#858c23', '#238c85', '#0f0073', '#b20047', '#d9986c', '#becc00',
            '#396f73', '#281d73', '#ff0066', '#ff6600', '#dee673', '#59adb3',
            '#6559b3', '#590024', '#b2622d', '#98b32d', '#36ced9', '#332d59',
            '#40001a', '#733f1d', '#526600', '#005359', '#242040', '#bf6079',
            '#735039', '#cef23d', '#007780', '#5630bf', '#66001b', '#b24700',
            '#acbf60', '#1d6273', '#25008c', '#731d34', '#a67453', '#50592d',
            '#00ccff', '#6600ff', '#ff0044', '#4c1f00', '#8a994d', '#79daf2',
            '#a173e6', '#d93662', '#402310', '#aaff00', '#2d98b3', '#8c40ff',
            '#592d39', '#ff8c40', '#354020', '#103640', '#1a0040', '#331a20',
            '#331400', '#334d00', '#1d5673', '#583973', '#7f0022', '#4c3626',
            '#88cc00', '#36a3d9', '#3d0073', '#d9364c', '#33241a', '#698c23',
            '#5995b3', '#300059', '#e57382', '#7f3300', '#366600', '#00aaff',
            '#3a1659', '#733941', '#663600', '#74b32d', '#003c59', '#7f53a6',
            '#73000f', '#ff8800', '#baf279', '#79caf2', '#291040', '#a6293a',
            '#b2742d', '#587339', '#0077b3', '#632699', '#400009', '#d9a66c',
            '#294010', '#2d4a59', '#aa00ff', '#4c131b', '#b25f00', '#5ce600',
            '#267399', '#a336d9', '#990014', '#664e33', '#86bf60', '#0088ff',
            '#7700b3', '#593a16', '#073300', '#1d4b73', '#ac60bf', '#e59539',
            '#4f8c46', '#368dd9', '#5c0073'
        ]

    def rgb_to_hex_color(self, rgb_tuple):
        """
        Converts an rgb_tuple passed to an hex color.

        :param rgb_tuple: tuple with 3 ints represents rgb color space
        """
        # py2-only: str.encode('hex') hex-encodes the raw byte string
        return '#' + ("".join(map(chr, rgb_tuple)).encode('hex'))

    def email_to_int_list(self, email_str):
        """
        Get every byte of the hex digest value of email and turn it to integer.
        It's going to be always between 0-255
        """
        digest = md5_safe(email_str.lower())
        return [int(digest[i * 2:i * 2 + 2], 16) for i in range(16)]

    def pick_color_bank_index(self, email_str, color_bank):
        # deterministic index: first md5 byte modulo the bank size
        return self.email_to_int_list(email_str)[0] % len(color_bank)

    def str2color(self, email_str):
        """
        Tries to map in a stable algorithm an email to color

        :param email_str:
        """
        color_bank = self.get_color_bank()
        # pick position (module it's length so we always find it in the
        # bank even if it's smaller than 256 values
        pos = self.pick_color_bank_index(email_str, color_bank)
        return color_bank[pos]

    def normalize_email(self, email_address):
        # fills in missing user/host parts and strips non-ascii characters
        import unicodedata
        # default host used to fill in the fake/missing email
        default_host = u'localhost'

        if not email_address:
            email_address = u'%s@%s' % (User.DEFAULT_USER, default_host)

        email_address = safe_unicode(email_address)

        if u'@' not in email_address:
            email_address = u'%s@%s' % (email_address, default_host)

        if email_address.endswith(u'@'):
            email_address = u'%s%s' % (email_address, default_host)

        email_address = unicodedata.normalize('NFKD', email_address)\
            .encode('ascii', 'ignore')
        return email_address

    def get_initials(self):
        """
        Returns 2 letter initials calculated based on the input.
        The algorithm picks first given email address, and takes first letter
        of part before @, and then the first letter of server name. In case
        the part before @ is in a format of `somestring.somestring2` it replaces
        the server letter with first letter of somestring2

        In case function was initialized with both first and lastname, this
        overrides the extraction from email by first letter of the first and
        last name. We add special logic to that functionality, In case Full name
        is compound, like Guido Von Rossum, we use last part of the last name
        (Von Rossum) picking `R`.

        Function also normalizes the non-ascii characters to they ascii
        representation, eg Ą => A
        """
        import unicodedata
        # replace non-ascii to ascii
        first_name = unicodedata.normalize(
            'NFKD', safe_unicode(self.first_name)).encode('ascii', 'ignore')
        last_name = unicodedata.normalize(
            'NFKD', safe_unicode(self.last_name)).encode('ascii', 'ignore')

        # do NFKD encoding, and also make sure email has proper format
        email_address = self.normalize_email(self.email_address)

        # first push the email initials
        prefix, server = email_address.split('@', 1)

        # check if prefix is maybe a 'firstname.lastname' syntax
        _dot_split = prefix.rsplit('.', 1)
        if len(_dot_split) == 2:
            initials = [_dot_split[0][0], _dot_split[1][0]]
        else:
            initials = [prefix[0], server[0]]

        # then try to replace either firtname or lastname
        fn_letter = (first_name or " ")[0].strip()
        ln_letter = (last_name.split(' ', 1)[-1] or " ")[0].strip()

        if fn_letter:
            initials[0] = fn_letter

        if ln_letter:
            initials[1] = ln_letter

        return ''.join(initials).upper()

    def get_img_data_by_type(self, font_family, img_type):
        # pre-baked svg body for special avatar types; currently only the
        # anonymous/default-user silhouette is supported
        default_user = """
        <svg xmlns="http://www.w3.org/2000/svg"
        version="1.1" x="0px" y="0px" width="{size}" height="{size}"
        viewBox="-15 -10 439.165 429.164"

        xml:space="preserve"
        style="background:{background};" >

        <path d="M204.583,216.671c50.664,0,91.74-48.075,
        91.74-107.378c0-82.237-41.074-107.377-91.74-107.377
        c-50.668,0-91.74,25.14-91.74,107.377C112.844,
        168.596,153.916,216.671,
        204.583,216.671z" fill="{text_color}"/>
        <path d="M407.164,374.717L360.88,
        270.454c-2.117-4.771-5.836-8.728-10.465-11.138l-71.83-37.392
        c-1.584-0.823-3.502-0.663-4.926,0.415c-20.316,
        15.366-44.203,23.488-69.076,23.488c-24.877,
        0-48.762-8.122-69.078-23.488
        c-1.428-1.078-3.346-1.238-4.93-0.415L58.75,
        259.316c-4.631,2.41-8.346,6.365-10.465,11.138L2.001,374.717
        c-3.191,7.188-2.537,15.412,1.75,22.005c4.285,
        6.592,11.537,10.526,19.4,10.526h362.861c7.863,0,15.117-3.936,
        19.402-10.527 C409.699,390.129,
        410.355,381.902,407.164,374.717z" fill="{text_color}"/>
        </svg>""".format(
            size=self.size,
            background='#979797',  # @grey4
            text_color=self.text_color,
            font_family=font_family)

        return {
            "default_user": default_user
        }[img_type]

    def get_img_data(self, svg_type=None):
        """
        generates the svg metadata for image
        """

        font_family = ','.join([
            'proximanovaregular',
            'Proxima Nova Regular',
            'Proxima Nova',
            'Arial',
            'Lucida Grande',
            'sans-serif'
        ])
        if svg_type:
            return self.get_img_data_by_type(font_family, svg_type)

        initials = self.get_initials()
        img_data = """
        <svg xmlns="http://www.w3.org/2000/svg" pointer-events="none"
             width="{size}" height="{size}"
             style="width: 100%; height: 100%; background-color: {background}"
             viewBox="0 0 {size} {size}">
            <text text-anchor="middle" y="50%" x="50%" dy="0.35em"
                  pointer-events="auto" fill="{text_color}"
                  font-family="{font_family}"
                  style="font-weight: 400; font-size: {f_size}px;">{text}
            </text>
        </svg>""".format(
            size=self.size,
            f_size=self.size/1.85,  # scale the text inside the box nicely
            background=self.background,
            text_color=self.text_color,
            text=initials.upper(),
            font_family=font_family)

        return img_data

    def generate_svg(self, svg_type=None):
        # returns a base64 data-uri ready for use as an <img src=...>
        # NOTE: str.encode('base64') is py2-only
        img_data = self.get_img_data(svg_type)
        return "data:image/svg+xml;base64,%s" % img_data.encode('base64')
1205 1210
1206 1211
def initials_gravatar(email_address, first_name, last_name, size=30):
    """Build a data-uri svg avatar with the user's initials."""
    # the anonymous default user gets the generic silhouette instead
    if email_address == User.DEFAULT_USER_EMAIL:
        svg_type = 'default_user'
    else:
        svg_type = None
    gravatar = InitialsGravatar(email_address, first_name, last_name, size)
    return gravatar.generate_svg(svg_type=svg_type)
1213 1218
1214 1219
def gravatar_url(email_address, size=30):
    # Return a gravatar url for email_address, or an initials-svg data-uri
    # when gravatar is disabled or the address is empty/the default user.
    # doh, we need to re-import those to mock it later
    from pylons import tmpl_context as c

    _use_gravatar = c.visual.use_gravatar
    _gravatar_url = c.visual.gravatar_url or User.DEFAULT_GRAVATAR_URL

    email_address = email_address or User.DEFAULT_USER_EMAIL
    if isinstance(email_address, unicode):
        # hashlib crashes on unicode items
        email_address = safe_str(email_address)

    # empty email or default user
    if not email_address or email_address == User.DEFAULT_USER_EMAIL:
        return initials_gravatar(User.DEFAULT_USER_EMAIL, '', '', size=size)

    if _use_gravatar:
        # TODO: Disuse pyramid thread locals. Think about another solution to
        # get the host and schema here.
        request = get_current_request()
        tmpl = safe_str(_gravatar_url)
        # interpolate the supported url template placeholders
        tmpl = tmpl.replace('{email}', email_address)\
            .replace('{md5email}', md5_safe(email_address.lower())) \
            .replace('{netloc}', request.host)\
            .replace('{scheme}', request.scheme)\
            .replace('{size}', safe_str(size))
        return tmpl
    else:
        return initials_gravatar(email_address, '', '', size=size)
1244 1249
1245 1250
class Page(_Page):
    """
    Custom pager to match rendering style with paginator
    """

    def _get_pos(self, cur_page, max_page, items):
        # compute (leftmost, current, rightmost) page numbers for a window
        # of ``items`` pages centred on ``cur_page``; relies on py2 integer
        # division of ints
        edge = (items / 2) + 1
        if (cur_page <= edge):
            radius = max(items / 2, items - cur_page)
        elif (max_page - cur_page) < edge:
            radius = (items - 1) - (max_page - cur_page)
        else:
            radius = items / 2

        left = max(1, (cur_page - (radius)))
        right = min(max_page, cur_page + (radius))
        return left, cur_page, right

    def _range(self, regexp_match):
        """
        Return range of linked pages (e.g. '1 2 [3] 4 5 6 7 8').

        Arguments:

        regexp_match
            A "re" (regular expressions) match object containing the
            radius of linked pages around the current page in
            regexp_match.group(1) as a string

        This function is supposed to be called as a callable in
        re.sub.

        """
        radius = int(regexp_match.group(1))

        # Compute the first and last page number within the radius
        # e.g. '1 .. 5 6 [7] 8 9 .. 12'
        # -> leftmost_page = 5
        # -> rightmost_page = 9
        leftmost_page, _cur, rightmost_page = self._get_pos(self.page,
                                                            self.last_page,
                                                            (radius * 2) + 1)
        nav_items = []

        # Create a link to the first page (unless we are on the first page
        # or there would be no need to insert '..' spacers)
        if self.page != self.first_page and self.first_page < leftmost_page:
            nav_items.append(self._pagerlink(self.first_page, self.first_page))

        # Insert dots if there are pages between the first page
        # and the currently displayed page range
        if leftmost_page - self.first_page > 1:
            # Wrap in a SPAN tag if nolink_attr is set
            text = '..'
            if self.dotdot_attr:
                text = HTML.span(c=text, **self.dotdot_attr)
            nav_items.append(text)

        for thispage in xrange(leftmost_page, rightmost_page + 1):
            # Hilight the current page number and do not use a link
            if thispage == self.page:
                text = '%s' % (thispage,)
                # Wrap in a SPAN tag if nolink_attr is set
                if self.curpage_attr:
                    text = HTML.span(c=text, **self.curpage_attr)
                nav_items.append(text)
            # Otherwise create just a link to that page
            else:
                text = '%s' % (thispage,)
                nav_items.append(self._pagerlink(thispage, text))

        # Insert dots if there are pages between the displayed
        # page numbers and the end of the page range
        if self.last_page - rightmost_page > 1:
            text = '..'
            # Wrap in a SPAN tag if nolink_attr is set
            if self.dotdot_attr:
                text = HTML.span(c=text, **self.dotdot_attr)
            nav_items.append(text)

        # Create a link to the very last page (unless we are on the last
        # page or there would be no need to insert '..' spacers)
        if self.page != self.last_page and rightmost_page < self.last_page:
            nav_items.append(self._pagerlink(self.last_page, self.last_page))

        ## prerender links
        #_page_link = url.current()
        #nav_items.append(literal('<link rel="prerender" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
        #nav_items.append(literal('<link rel="prefetch" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
        return self.separator.join(nav_items)

    def pager(self, format='~2~', page_param='page', partial_param='partial',
              show_if_single_page=False, separator=' ', onclick=None,
              symbol_first='<<', symbol_last='>>',
              symbol_previous='<', symbol_next='>',
              link_attr={'class': 'pager_link', 'rel': 'prerender'},
              curpage_attr={'class': 'pager_curpage'},
              dotdot_attr={'class': 'pager_dotdot'}, **kwargs):
        # Render the pager html; ``format`` supports a '~radius~' token
        # (expanded by _range) plus $-style template variables below.
        self.curpage_attr = curpage_attr
        self.separator = separator
        self.pager_kwargs = kwargs
        self.page_param = page_param
        self.partial_param = partial_param
        self.onclick = onclick
        self.link_attr = link_attr
        self.dotdot_attr = dotdot_attr

        # Don't show navigator if there is no more than one page
        if self.page_count == 0 or (self.page_count == 1 and not show_if_single_page):
            return ''

        from string import Template
        # Replace ~...~ in token format by range of pages
        result = re.sub(r'~(\d+)~', self._range, format)

        # Interpolate '%' variables
        result = Template(result).safe_substitute({
            'first_page': self.first_page,
            'last_page': self.last_page,
            'page': self.page,
            'page_count': self.page_count,
            'items_per_page': self.items_per_page,
            'first_item': self.first_item,
            'last_item': self.last_item,
            'item_count': self.item_count,
            'link_first': self.page > self.first_page and \
                    self._pagerlink(self.first_page, symbol_first) or '',
            'link_last': self.page < self.last_page and \
                    self._pagerlink(self.last_page, symbol_last) or '',
            'link_previous': self.previous_page and \
                    self._pagerlink(self.previous_page, symbol_previous) \
                    or HTML.span(symbol_previous, class_="pg-previous disabled"),
            'link_next': self.next_page and \
                    self._pagerlink(self.next_page, symbol_next) \
                    or HTML.span(symbol_next, class_="pg-next disabled")
        })

        return literal(result)
1385 1390
1386 1391
1387 1392 #==============================================================================
1388 1393 # REPO PAGER, PAGER FOR REPOSITORY
1389 1394 #==============================================================================
class RepoPage(Page):
    # pager that slices the collection from the end (commits are shown
    # newest-first) -- note the reversed() at the bottom of __init__

    def __init__(self, collection, page=1, items_per_page=20,
                 item_count=None, url=None, **kwargs):

        """Create a "RepoPage" instance. special pager for paging
        repository
        """
        self._url_generator = url

        # Safe the kwargs class-wide so they can be used in the pager() method
        self.kwargs = kwargs

        # Save a reference to the collection
        self.original_collection = collection

        self.collection = collection

        # The self.page is the number of the current page.
        # The first page has the number 1!
        try:
            self.page = int(page)  # make it int() if we get it as a string
        except (ValueError, TypeError):
            self.page = 1

        self.items_per_page = items_per_page

        # Unless the user tells us how many items the collections has
        # we calculate that ourselves.
        if item_count is not None:
            self.item_count = item_count
        else:
            self.item_count = len(self.collection)

        # Compute the number of the first and last available page
        if self.item_count > 0:
            self.first_page = 1
            self.page_count = int(math.ceil(float(self.item_count) /
                                            self.items_per_page))
            self.last_page = self.first_page + self.page_count - 1

            # Make sure that the requested page number is the range of
            # valid pages
            if self.page > self.last_page:
                self.page = self.last_page
            elif self.page < self.first_page:
                self.page = self.first_page

            # Note: the number of items on this page can be less than
            # items_per_page if the last page is not full
            # (indices count from the end of the collection here)
            self.first_item = max(0, (self.item_count) - (self.page *
                                                          items_per_page))
            self.last_item = ((self.item_count - 1) - items_per_page *
                              (self.page - 1))

            self.items = list(self.collection[self.first_item:self.last_item + 1])

            # Links to previous and next page
            if self.page > self.first_page:
                self.previous_page = self.page - 1
            else:
                self.previous_page = None

            if self.page < self.last_page:
                self.next_page = self.page + 1
            else:
                self.next_page = None

        # No items available
        else:
            self.first_page = None
            self.page_count = 0
            self.last_page = None
            self.first_item = None
            self.last_item = None
            self.previous_page = None
            self.next_page = None
            self.items = []

        # This is a subclass of the 'list' type. Initialise the list now.
        list.__init__(self, reversed(self.items))
1471 1476
1472 1477
def changed_tooltip(nodes):
    """
    Generates a html string for changed nodes in commit page.
    It limits the output to 30 entries

    :param nodes: LazyNodesGenerator
    """
    if not nodes:
        return ': ' + _('No Files')

    suffix = ''
    if len(nodes) > 30:
        suffix = '<br/>' + _(' and %s more') % (len(nodes) - 30)
    listed = '<br/> '.join(safe_unicode(node.path) for node in nodes[:30])
    return literal(': <br/> ' + listed + suffix)
1489 1494
1490 1495
def breadcrumb_repo_link(repo):
    """
    Makes a breadcrumbs path link to repo

    ex::
        group >> subgroup >> repo

    :param repo: a Repository instance
    """
    crumbs = []
    for group in repo.groups_with_parents:
        crumbs.append(
            link_to(group.name,
                    url('repo_group_home', group_name=group.group_name)))
    crumbs.append(
        link_to(repo.just_name,
                url('summary_home', repo_name=repo.repo_name)))

    return literal(' &raquo; '.join(crumbs))
1509 1514
1510 1515
def format_byte_size_binary(file_size):
    """
    Format a file/folder size using binary (1024-based) units.
    """
    return format_byte_size(file_size, binary=True)
1517 1522
1518 1523
def fancy_file_stats(stats):
    """
    Displays a fancy two colored bar for number of added/deleted
    lines of code on file

    :param stats: two element list of added/deleted lines of code
    """
    from rhodecode.lib.diffs import NEW_FILENODE, DEL_FILENODE, \
        MOD_FILENODE, RENAMED_FILENODE, CHMOD_FILENODE, BIN_FILENODE

    def cgen(l_type, a_v, d_v):
        # pick the CSS rounded-corner classes for one side of the bar;
        # which corners get rounded depends on whether the other side
        # ('a' added vs 'd' deleted) is present at all
        mapping = {'tr': 'top-right-rounded-corner-mid',
                   'tl': 'top-left-rounded-corner-mid',
                   'br': 'bottom-right-rounded-corner-mid',
                   'bl': 'bottom-left-rounded-corner-mid'}
        map_getter = lambda x: mapping[x]

        if l_type == 'a' and d_v:
            #case when added and deleted are present
            return ' '.join(map(map_getter, ['tl', 'bl']))

        if l_type == 'a' and not d_v:
            return ' '.join(map(map_getter, ['tr', 'br', 'tl', 'bl']))

        if l_type == 'd' and a_v:
            return ' '.join(map(map_getter, ['tr', 'br']))

        if l_type == 'd' and not a_v:
            return ' '.join(map(map_getter, ['tr', 'br', 'tl', 'bl']))

    a, d = stats['added'], stats['deleted']
    width = 100  # total pixel width of the rendered bar

    if stats['binary']: # binary operations like chmod/rename etc
        lbl = []
        bin_op = 0 # undefined

        # prefix with bin for binary files
        if BIN_FILENODE in stats['ops']:
            lbl += ['bin']

        if NEW_FILENODE in stats['ops']:
            lbl += [_('new file')]
            bin_op = NEW_FILENODE
        elif MOD_FILENODE in stats['ops']:
            lbl += [_('mod')]
            bin_op = MOD_FILENODE
        elif DEL_FILENODE in stats['ops']:
            lbl += [_('del')]
            bin_op = DEL_FILENODE
        elif RENAMED_FILENODE in stats['ops']:
            lbl += [_('rename')]
            bin_op = RENAMED_FILENODE

        # chmod can go with other operations, so we add a + to lbl if needed
        if CHMOD_FILENODE in stats['ops']:
            lbl += [_('chmod')]
            if bin_op == 0:
                bin_op = CHMOD_FILENODE

        lbl = '+'.join(lbl)
        b_a = '<div class="bin bin%s %s" style="width:100%%">%s</div>' \
            % (bin_op, cgen('a', a_v='', d_v=0), lbl)
        b_d = '<div class="bin bin1" style="width:0%%"></div>'
        return literal('<div style="width:%spx">%s%s</div>' % (width, b_a, b_d))

    t = stats['added'] + stats['deleted']
    unit = float(width) / (t or 1)

    # needs > 9% of width to be visible or 0 to be hidden
    a_p = max(9, unit * a) if a > 0 else 0
    d_p = max(9, unit * d) if d > 0 else 0
    p_sum = a_p + d_p

    if p_sum > width:
        #adjust the percentage to be == 100% since we adjusted to 9
        if a_p > d_p:
            a_p = a_p - (p_sum - width)
        else:
            d_p = d_p - (p_sum - width)

    a_v = a if a > 0 else ''
    d_v = d if d > 0 else ''

    d_a = '<div class="added %s" style="width:%s%%">%s</div>' % (
        cgen('a', a_v, d_v), a_p, a_v
    )
    d_d = '<div class="deleted %s" style="width:%s%%">%s</div>' % (
        cgen('d', a_v, d_v), d_p, d_v
    )
    return literal('<div style="width:%spx">%s%s</div>' % (width, d_a, d_d))
1610 1615
1611 1616
def urlify_text(text_, safe=True):
    """
    Find http/https URLs in the given text and wrap each in an anchor tag.

    :param text_: text to scan for URLs
    :param safe: when True, mark the result as an HTML literal
    """

    url_pat = re.compile(r'''(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@#.&+]'''
                         '''|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)''')

    def _make_link(match_obj):
        matched_url = match_obj.groups()[0]
        return '<a href="%(url)s">%(url)s</a>' % ({'url': matched_url})

    linked = url_pat.sub(_make_link, text_)
    return literal(linked) if safe else linked
1629 1634
1630 1635
def urlify_commits(text_, repository):
    """
    Turn bare commit hashes (12-40 hex chars) found in the text into
    changeset links.

    :param text_: text to scan for commit hashes
    :param repository: repo name to build the URL with
    """
    from pylons import url  # doh, we need to re-import url to mock it later
    commit_pat = re.compile(r'(^|\s)([0-9a-fA-F]{12,40})($|\s)')

    def _make_link(match_obj):
        lead, commit_id, trail = match_obj.groups()

        tmpl = (
            '%(pref)s<a class="%(cls)s" href="%(url)s">'
            '%(commit_id)s</a>%(suf)s'
        )
        return tmpl % {
            'pref': lead,
            'cls': 'revision-link',
            'url': url('changeset_home', repo_name=repository,
                       revision=commit_id, qualified=True),
            'commit_id': commit_id,
            'suf': trail,
        }

    return commit_pat.sub(_make_link, text_)
1662 1667
1663 1668
def _process_url_func(match_obj, repo_name, uid, entry,
                      return_raw_data=False):
    """
    Expand one issue-tracker regex match into an HTML link, or — when
    *return_raw_data* is set — into a raw ``{'id', 'url'}`` dict.

    :param match_obj: regex match object for a single issue reference
    :param repo_name: repository name the matched text belongs to
    :param uid: issue tracker entry uid (unused here; kept for signature
        compatibility with the ``partial`` callers)
    :param entry: issue tracker settings dict; reads ``entry['url']``
        (a ``string.Template`` pattern) and ``entry['pref']``
    :param return_raw_data: return the raw data dict instead of HTML
    """
    # preserve a single leading space of the match so substitution does
    # not glue the link to the preceding word
    pref = ''
    if match_obj.group().startswith(' '):
        pref = ' '

    issue_id = ''.join(match_obj.groups())
    tmpl = (
        '%(pref)s<a class="%(cls)s" href="%(url)s">'
        '%(issue-prefix)s%(id-repr)s'
        '</a>')

    (repo_name_cleaned,
     parent_group_name) = RepoGroupModel().\
        _get_group_name_and_parent(repo_name)

    # variables replacement
    named_vars = {
        'id': issue_id,
        'repo': repo_name,
        'repo_name': repo_name_cleaned,
        'group_name': parent_group_name
    }
    # named regex variables
    named_vars.update(match_obj.groupdict())
    # safe_substitute leaves unknown ${...} placeholders untouched
    _url = string.Template(entry['url']).safe_substitute(**named_vars)

    data = {
        'pref': pref,
        'cls': 'issue-tracker-link',
        'url': _url,
        'id-repr': issue_id,
        'issue-prefix': entry['pref'],
        'serv': entry['url'],
    }
    if return_raw_data:
        return {
            'id': issue_id,
            'url': _url
        }
    return tmpl % data
1705 1710
1706 1711
def process_patterns(text_string, repo_name, config=None):
    """
    Run every active issue-tracker pattern over *text_string* and replace
    matches with HTML links.

    :param text_string: text to scan for issue references
    :param repo_name: repository the text belongs to (may be invalid;
        see the note below)
    :param config: unused here — presumably kept for call compatibility;
        TODO confirm with callers
    :return: tuple of ``(processed_text, issues_data)`` where
        ``issues_data`` is a list of raw ``{'id', 'url'}`` dicts
    """
    repo = None
    if repo_name:
        # Retrieving repo_name to avoid invalid repo_name to explode on
        # IssueTrackerSettingsModel but still passing invalid name further down
        repo = Repository.get_by_repo_name(repo_name, cache=True)

    settings_model = IssueTrackerSettingsModel(repo=repo)
    active_entries = settings_model.get_settings(cache=True)

    issues_data = []
    newtext = text_string
    for uid, entry in active_entries.items():
        log.debug('found issue tracker entry with uid %s' % (uid,))

        if not (entry['pat'] and entry['url']):
            log.debug('skipping due to missing data')
            continue

        log.debug('issue tracker entry: uid: `%s` PAT:%s URL:%s PREFIX:%s'
                  % (uid, entry['pat'], entry['url'], entry['pref']))

        try:
            pattern = re.compile(r'%s' % entry['pat'])
        except re.error:
            # a broken user-supplied pattern must not break rendering
            log.exception(
                'issue tracker pattern: `%s` failed to compile',
                entry['pat'])
            continue

        data_func = partial(
            _process_url_func, repo_name=repo_name, entry=entry, uid=uid,
            return_raw_data=True)

        # collect raw issue data from the ORIGINAL text, while the link
        # substitution below runs over the progressively rewritten text
        for match_obj in pattern.finditer(text_string):
            issues_data.append(data_func(match_obj))

        url_func = partial(
            _process_url_func, repo_name=repo_name, entry=entry, uid=uid)

        newtext = pattern.sub(url_func, newtext)
        log.debug('processed prefix:uid `%s`' % (uid,))

    return newtext, issues_data
1751 1756
1752 1757
def urlify_commit_message(commit_text, repository=None):
    """
    Parse a commit message and turn plain URLs, commit ids and issue
    references into proper HTML links.

    :param commit_text: commit message to process
    :param repository: optional repo name used for commit/issue links
    """
    from pylons import url  # doh, we need to re-import url to mock it later

    # neutralize raw HTML first so nothing from the message is rendered
    processed = commit_text.replace('<', '&lt;').replace('>', '&gt;')

    # extract http/https links and make them real urls
    processed = urlify_text(processed, safe=False)

    # urlify commits - extract commit ids and make link out of them, if we
    # have the scope of repository present.
    if repository:
        processed = urlify_commits(processed, repository)

    # process issue tracker patterns
    processed, issues = process_patterns(processed, repository or '')

    return literal(processed)
1780 1785
1781 1786
def rst(source, mentions=False):
    """Render *source* as reStructuredText wrapped in a styled div."""
    rendered = MarkupRenderer.rst(source, mentions=mentions)
    return literal('<div class="rst-block">%s</div>' % rendered)
1785 1790
1786 1791
def markdown(source, mentions=False):
    """Render *source* as flavored Markdown wrapped in a styled div."""
    rendered = MarkupRenderer.markdown(source, flavored=True,
                                       mentions=mentions)
    return literal('<div class="markdown-block">%s</div>' % rendered)
1791 1796
def renderer_from_filename(filename, exclude=None):
    """Delegate to ``MarkupRenderer.renderer_from_filename`` to pick a
    renderer for *filename*, skipping names listed in *exclude*."""
    return MarkupRenderer.renderer_from_filename(filename, exclude=exclude)
1794 1799
1795 1800
def render(source, renderer='rst', mentions=False):
    """
    Render *source* with the named renderer ('rst' or 'markdown').

    Unknown renderer names yield ``None``, matching historical behaviour.
    """
    renderers = {'rst': rst, 'markdown': markdown}
    render_func = renderers.get(renderer)
    if render_func is not None:
        return render_func(source, mentions=mentions)
1801 1806
1802 1807
def commit_status(repo, commit_id):
    """Return the changeset status of *commit_id* in *repo* via
    ``ChangesetStatusModel``."""
    return ChangesetStatusModel().get_status(repo, commit_id)
1805 1810
1806 1811
def commit_status_lbl(commit_status):
    """Map a commit status code to its label from
    ``ChangesetStatus.STATUSES`` (``None`` for unknown codes)."""
    return dict(ChangesetStatus.STATUSES).get(commit_status)
1809 1814
1810 1815
def commit_time(repo_name, commit_id):
    """Return the ``date`` of *commit_id* within repository *repo_name*."""
    repo = Repository.get_by_repo_name(repo_name)
    commit = repo.get_commit(commit_id=commit_id)
    return commit.date
1815 1820
1816 1821
def get_permission_name(key):
    """Return the display name for permission *key* from
    ``Permission.PERMS`` (``None`` for unknown keys)."""
    return dict(Permission.PERMS).get(key)
1819 1824
1820 1825
def journal_filter_help():
    """Return the translated help text describing journal filter syntax."""
    return _(
        'Example filter terms:\n'
        ' repository:vcs\n'
        ' username:marcin\n'
        ' action:*push*\n'
        ' ip:127.0.0.1\n'
        ' date:20120101\n'
        ' date:[20120101100000 TO 20120102]\n'
        '\n'
        'Generate wildcards using \'*\' character:\n'
        ' "repository:vcs*" - search everything starting with \'vcs\'\n'
        ' "repository:*vcs*" - search for repository containing \'vcs\'\n'
        '\n'
        'Optional AND / OR operators in queries\n'
        ' "repository:vcs OR repository:test"\n'
        ' "username:test AND repository:test*"\n'
    )
1839 1844
1840 1845
def not_mapped_error(repo_name):
    """Flash an error telling the user *repo_name* exists on the
    filesystem but is not mapped in the database."""
    flash(_('%s repository is not mapped to db perhaps'
            ' it was created or renamed from the filesystem'
            ' please run the application again'
            ' in order to rescan repositories') % repo_name, category='error')
1846 1851
1847 1852
def ip_range(ip_addr):
    """Return a human readable ``'<start> - <end>'`` range for *ip_addr*."""
    from rhodecode.model.db import UserIpMap
    start_ip, end_ip = UserIpMap._get_ip_range(ip_addr)
    return '%s - %s' % (start_ip, end_ip)
1852 1857
1853 1858
def form(url, method='post', needs_csrf_token=True, **attrs):
    """Wrapper around webhelpers.tags.form to prevent CSRF attacks."""
    # only plain GET forms may skip the CSRF token; anything mutating
    # must either carry a token (use secure_form) or opt out explicitly
    if needs_csrf_token and method.lower() != 'get':
        raise Exception(
            'Forms to POST/PUT/DELETE endpoints should have (in general) a '
            'CSRF token. If the endpoint does not require such token you can '
            'explicitly set the parameter needs_csrf_token to false.')

    return wh_form(url, method=method, **attrs)
1863 1868
1864 1869
def secure_form(url, method="POST", multipart=False, **attrs):
    """Start a form tag that points the action to an url. This
    form tag will also include the hidden field containing
    the auth token.

    The url options should be given either as a string, or as a
    ``url()`` function. The method for the form defaults to POST.

    Options:

    ``multipart``
        If set to True, the enctype is set to "multipart/form-data".
    ``method``
        The method to use when submitting the form, usually either
        "GET" or "POST". If "PUT", "DELETE", or another verb is used, a
        hidden input with name _method is added to simulate the verb
        over POST.

    """
    from webhelpers.pylonslib.secure_form import insecure_form
    # insecure_form renders the bare form tag; we append the CSRF token
    # field ourselves to make it safe
    form = insecure_form(url, method, multipart, **attrs)
    token = csrf_input()
    return literal("%s\n%s" % (form, token))
1888 1893
def csrf_input():
    """Return a hidden ``<input>`` carrying the current CSRF token,
    for embedding into forms (see ``secure_form``)."""
    return literal(
        '<input type="hidden" id="{}" name="{}" value="{}">'.format(
            csrf_token_key, csrf_token_key, get_csrf_token()))
1893 1898
def dropdownmenu(name, selected, options, enable_filter=False, **attrs):
    """Render a ``<select>`` plus the select2 activation script.

    :param name: form field name (also the element id unless ``attrs``
        provides one)
    :param selected: currently selected option value
    :param options: options passed through to ``select``
    :param enable_filter: when True, keep select2's search box; when
        False, ``minimumResultsForSearch: -1`` hides it
    """
    select_html = select(name, selected, options, **attrs)
    select2 = """
        <script>
            $(document).ready(function() {
                $('#%s').select2({
                    containerCssClass: 'drop-menu',
                    dropdownCssClass: 'drop-menu-dropdown',
                    dropdownAutoWidth: true%s
                });
            });
        </script>
    """
    filter_option = """,
            minimumResultsForSearch: -1
    """
    input_id = attrs.get('id') or name
    # NOTE: empty string when the filter IS enabled — the extra select2
    # option disables the search box, so it is added only when filtering
    # is turned off
    filter_enabled = "" if enable_filter else filter_option
    select_script = literal(select2 % (input_id, filter_enabled))

    return literal(select_html+select_script)
1915 1920
1916 1921
def get_visual_attr(tmpl_context_var, attr_name):
    """
    A safe way to get a variable from visual variable of template context

    :param tmpl_context_var: instance of tmpl_context, usually present as `c`
    :param attr_name: name of the attribute we fetch from the c.visual
    """
    visual = getattr(tmpl_context_var, 'visual', None)
    if visual:
        return getattr(visual, attr_name, None)
1929 1934
1930 1935
def get_last_path_part(file_node):
    """Return ``u'../<basename>'`` for a file node, or ``u''`` when the
    node has no path."""
    if not file_node.path:
        return u''

    basename = safe_unicode(file_node.path.split('/')[-1])
    return u'../' + basename
1937 1942
1938 1943
def route_path(*args, **kwds):
    """
    Wrapper around pyramid's `route_path` function. It is used to generate
    URLs from within pylons views or templates. This will be removed when
    the pyramid migration is finished.
    """
    req = get_current_request()
    return req.route_path(*args, **kwds)
1947 1952
1948 1953
def route_path_or_none(*args, **kwargs):
    """Like :func:`route_path`, but returns ``None`` instead of raising
    ``KeyError`` for unknown routes."""
    try:
        return route_path(*args, **kwargs)
    except KeyError:
        return None
1954 1959
1955 1960
def static_url(*args, **kwds):
    """
    Wrapper around pyramid's `static_url` function. It is used to generate
    URLs from within pylons views or templates. This will be removed when
    the pyramid migration is finished.
    """
    req = get_current_request()
    return req.static_url(*args, **kwds)
1964 1969
1965 1970
def resource_path(*args, **kwds):
    """
    Wrapper around pyramid's `resource_path` function. It is used to generate
    URLs from within pylons views or templates. This will be removed when
    the pyramid migration is finished.
    """
    req = get_current_request()
    return req.resource_path(*args, **kwds)
@@ -1,886 +1,895 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2011-2016 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21
22 22 """
23 23 Some simple helper functions
24 24 """
25 25
26 26
27 27 import collections
28 28 import datetime
29 29 import dateutil.relativedelta
30 30 import hashlib
31 31 import logging
32 32 import re
33 33 import sys
34 34 import time
35 35 import threading
36 36 import urllib
37 37 import urlobject
38 38 import uuid
39 39
40 40 import pygments.lexers
41 41 import sqlalchemy
42 42 import sqlalchemy.engine.url
43 43 import webob
44 44 import routes.util
45 45
46 46 import rhodecode
47 47
48 48
def md5(s):
    """Return the hex digest of the MD5 hash of *s*."""
    digest = hashlib.md5(s)
    return digest.hexdigest()
51 51
52 52
def md5_safe(s):
    """Return the MD5 hex digest of *s* after coercing it to a byte
    string via :func:`safe_str`."""
    return md5(safe_str(s))
55 55
56 56
57 def __get_lem():
57 def __get_lem(extra_mapping=None):
58 58 """
59 59 Get language extension map based on what's inside pygments lexers
60 60 """
61 61 d = collections.defaultdict(lambda: [])
62 62
63 63 def __clean(s):
64 64 s = s.lstrip('*')
65 65 s = s.lstrip('.')
66 66
67 67 if s.find('[') != -1:
68 68 exts = []
69 69 start, stop = s.find('['), s.find(']')
70 70
71 71 for suffix in s[start + 1:stop]:
72 72 exts.append(s[:s.find('[')] + suffix)
73 73 return [e.lower() for e in exts]
74 74 else:
75 75 return [s.lower()]
76 76
77 77 for lx, t in sorted(pygments.lexers.LEXERS.items()):
78 78 m = map(__clean, t[-2])
79 79 if m:
80 80 m = reduce(lambda x, y: x + y, m)
81 81 for ext in m:
82 82 desc = lx.replace('Lexer', '')
83 83 d[ext].append(desc)
84 84
85 return dict(d)
85 data = dict(d)
86
87 extra_mapping = extra_mapping or {}
88 if extra_mapping:
89 for k, v in extra_mapping.items():
90 if k not in data:
91 # register new mapping2lexer
92 data[k] = [v]
93
94 return data
86 95
87 96
def str2bool(_str):
    """
    returs True/False value from given string, it tries to translate the
    string into boolean

    ``None`` maps to False; booleans (and their 0/1 equality-equivalents)
    pass through unchanged; anything else is stringified and matched
    against common truthy spellings.

    :param _str: string value to translate into boolean
    :rtype: boolean
    :returns: boolean from given string
    """
    if _str is None:
        return False
    if _str in (True, False):
        return _str
    normalized = str(_str).strip().lower()
    return normalized in ('t', 'true', 'y', 'yes', 'on', '1')
103 112
104 113
def aslist(obj, sep=None, strip=True):
    """
    Returns given string separated by sep as list

    :param obj: value to coerce into a list
    :param sep: separator used when splitting strings
    :param strip: strip whitespace from each split element
    """
    if isinstance(obj, (basestring,)):
        elements = obj.split(sep)
        return [v.strip() for v in elements] if strip else elements
    if isinstance(obj, (list, tuple)):
        return obj
    if obj is None:
        return []
    return [obj]
124 133
125 134
def convert_line_endings(line, mode):
    """
    Converts a given line "line end" accordingly to given mode

    Available modes are::
        0 - Unix
        1 - Mac
        2 - DOS

    :param line: given line to convert
    :param mode: mode to convert to
    :rtype: str
    :return: converted line according to mode
    """
    if mode == 0:
        # normalize every ending to a bare LF
        return line.replace('\r\n', '\n').replace('\r', '\n')
    if mode == 1:
        # normalize every ending to a bare CR
        return line.replace('\r\n', '\r').replace('\n', '\r')
    if mode == 2:
        # turn any lone CR or lone LF into CRLF
        return re.sub('\r(?!\n)|(?<!\r)\n', '\r\n', line)
    return line
149 158
150 159
def detect_mode(line, default):
    """
    Detects line break for given line, if line break couldn't be found
    given default value is returned

    :param line: str line
    :param default: default
    :rtype: int
    :return: value of line end on of 0 - Unix, 1 - Mac, 2 - DOS
    """
    # order matters: '\r\n' also ends with '\n', so test DOS first
    if line.endswith('\r\n'):
        return 2
    if line.endswith('\n'):
        return 0
    if line.endswith('\r'):
        return 1
    return default
169 178
170 179
def safe_int(val, default=None):
    """
    Returns int() of val if val is not convertable to int use default
    instead

    :param val: value to convert
    :param default: fallback returned when conversion fails
    """
    try:
        return int(val)
    except (ValueError, TypeError):
        return default
186 195
187 196
def safe_unicode(str_, from_encoding=None):
    """
    safe unicode function. Does few trick to turn str_ into unicode

    In case of UnicodeDecode error, we try to return it with encoding detected
    by chardet library if it fails fallback to unicode with errors replaced

    :param str_: string to decode
    :rtype: unicode
    :returns: unicode object
    """
    if isinstance(str_, unicode):
        return str_

    if not from_encoding:
        # configured encodings to try, e.g. 'utf8,latin1'
        DEFAULT_ENCODINGS = aslist(rhodecode.CONFIG.get('default_encoding',
            'utf8'), sep=',')
        from_encoding = DEFAULT_ENCODINGS

    if not isinstance(from_encoding, (list, tuple)):
        from_encoding = [from_encoding]

    # first try the implicit (ascii) decode
    try:
        return unicode(str_)
    except UnicodeDecodeError:
        pass

    # then each configured encoding in order
    for enc in from_encoding:
        try:
            return unicode(str_, enc)
        except UnicodeDecodeError:
            pass

    # finally ask chardet to guess; the bare Exception raised when no
    # encoding is detected is caught below to reach the 'replace' fallback
    try:
        import chardet
        encoding = chardet.detect(str_)['encoding']
        if encoding is None:
            raise Exception()
        return str_.decode(encoding)
    except (ImportError, UnicodeDecodeError, Exception):
        return unicode(str_, from_encoding[0], 'replace')
229 238
230 239
def safe_str(unicode_, to_encoding=None):
    """
    safe str function. Does few trick to turn unicode_ into string

    In case of UnicodeEncodeError, we try to return it with encoding detected
    by chardet library if it fails fallback to string with errors replaced

    :param unicode_: unicode to encode
    :param to_encoding: encoding (or list of encodings) to try first;
        defaults to the configured ``default_encoding`` list
    :rtype: str
    :returns: str object
    """

    # if it's not basestr cast to str
    if not isinstance(unicode_, basestring):
        return str(unicode_)

    if isinstance(unicode_, str):
        return unicode_

    if not to_encoding:
        DEFAULT_ENCODINGS = aslist(rhodecode.CONFIG.get('default_encoding',
            'utf8'), sep=',')
        to_encoding = DEFAULT_ENCODINGS

    if not isinstance(to_encoding, (list, tuple)):
        to_encoding = [to_encoding]

    # try each configured encoding in order
    for enc in to_encoding:
        try:
            return unicode_.encode(enc)
        except UnicodeEncodeError:
            pass

    # ask chardet to guess an encoding.  NOTE: the previous code did
    # `raise UnicodeEncodeError()` when detection failed, but that
    # constructor requires 5 arguments, so it actually raised TypeError
    # and skipped the fallback below; falling through is the intent.
    try:
        import chardet
        encoding = chardet.detect(unicode_)['encoding']
        if encoding is not None:
            return unicode_.encode(encoding)
    except (ImportError, UnicodeEncodeError):
        pass

    # last resort: force-encode with the first preferred encoding,
    # replacing anything that cannot be represented
    return unicode_.encode(to_encoding[0], 'replace')
273 282
274 283
def remove_suffix(s, suffix):
    """
    Return *s* with *suffix* stripped from its end, if present.

    An empty suffix leaves *s* unchanged; the previous ``s[:-len(suffix)]``
    computed ``s[:0]`` for an empty suffix and wrongly returned ''.
    """
    if suffix and s.endswith(suffix):
        return s[:-len(suffix)]
    return s
279 288
280 289
def remove_prefix(s, prefix):
    """Return *s* with *prefix* stripped from its start, if present."""
    return s[len(prefix):] if s.startswith(prefix) else s
285 294
286 295
def find_calling_context(ignore_modules=None):
    """
    Look through the calling stack and return the frame which called
    this function and is part of core module ( ie. rhodecode.* )

    :param ignore_modules: list of modules to ignore eg. ['rhodecode.lib']
    """

    ignore_modules = ignore_modules or []

    # start two frames up: skip this function and its direct caller
    f = sys._getframe(2)
    while f.f_back is not None:
        name = f.f_globals.get('__name__')
        # match any module sharing our top-level package name
        if name and name.startswith(__name__.split('.')[0]):
            if name not in ignore_modules:
                return f
        f = f.f_back
    return None
305 314
306 315
def engine_from_config(configuration, prefix='sqlalchemy.', **kwargs):
    """Custom engine_from_config functions.

    Builds the SQLAlchemy engine and, when ``configuration['debug']`` is
    set, attaches cursor-execute listeners that log each query (colored
    yellow) together with the rhodecode frame that issued it.
    """
    log = logging.getLogger('sqlalchemy.engine')
    engine = sqlalchemy.engine_from_config(configuration, prefix, **kwargs)

    def color_sql(sql):
        color_seq = '\033[1;33m' # This is yellow: code 33
        normal = '\x1b[0m'
        return ''.join([color_seq, sql, normal])

    if configuration['debug']:
        # attach events only for debug configuration

        def before_cursor_execute(conn, cursor, statement,
                                  parameters, context, executemany):
            # stash the start time on the connection for this query
            setattr(conn, 'query_start_time', time.time())
            log.info(color_sql(">>>>> STARTING QUERY >>>>>"))
            calling_context = find_calling_context(ignore_modules=[
                'rhodecode.lib.caching_query',
                'rhodecode.model.settings',
            ])
            if calling_context:
                log.info(color_sql('call context %s:%s' % (
                    calling_context.f_code.co_filename,
                    calling_context.f_lineno,
                )))

        def after_cursor_execute(conn, cursor, statement,
                                 parameters, context, executemany):
            delattr(conn, 'query_start_time')

        sqlalchemy.event.listen(engine, "before_cursor_execute",
                                before_cursor_execute)
        sqlalchemy.event.listen(engine, "after_cursor_execute",
                                after_cursor_execute)

    return engine
344 353
345 354
def get_encryption_key(config):
    """
    Return the secret used for encrypted config values, falling back to
    the beaker session secret when no dedicated one is configured.
    """
    # the fallback key must exist either way (KeyError otherwise)
    fallback = config['beaker.session.secret']
    dedicated = config.get('rhodecode.encrypted_values.secret')
    return dedicated or fallback
350 359
351 360
def age(prevdate, now=None, show_short_version=False, show_suffix=True,
        short_format=False):
    """
    Turns a datetime into an age string.
    If show_short_version is True, this generates a shorter string with
    an approximate age; ex. '1 day ago', rather than '1 day and 23 hours ago'.

    * IMPORTANT*
    Code of this function is written in special way so it's easier to
    backport it to javascript. If you mean to update it, please also update
    `jquery.timeago-extension.js` file

    :param prevdate: datetime object
    :param now: get current time, if not define we use
        `datetime.datetime.now()`
    :param show_short_version: if it should approximate the date and
        return a shorter string
    :param show_suffix: append 'ago' / prepend 'in' around the result
    :param short_format: show short format, eg 2D instead of 2 days
    :rtype: unicode
    :returns: unicode words describing age
    """
    from pylons.i18n.translation import _, ungettext

    def _get_relative_delta(now, prevdate):
        # calendar-aware difference split into named components
        base = dateutil.relativedelta.relativedelta(now, prevdate)
        return {
            'year': base.years,
            'month': base.months,
            'day': base.days,
            'hour': base.hours,
            'minute': base.minutes,
            'second': base.seconds,
        }

    def _is_leap_year(year):
        return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)

    def get_month(prevdate):
        return prevdate.month

    def get_year(prevdate):
        return prevdate.year

    now = now or datetime.datetime.now()
    order = ['year', 'month', 'day', 'hour', 'minute', 'second']
    deltas = {}
    future = False

    # future dates are handled by swapping operands and remembering
    # the direction via the `future` flag
    if prevdate > now:
        now_old = now
        now = prevdate
        prevdate = now_old
        future = True
    if future:
        prevdate = prevdate.replace(microsecond=0)
    # Get date parts deltas
    for part in order:
        rel_delta = _get_relative_delta(now, prevdate)
        deltas[part] = rel_delta[part]

    # Fix negative offsets (there is 1 second between 10:59:59 and 11:00:00,
    # not 1 hour, -59 minutes and -59 seconds)
    offsets = [[5, 60], [4, 60], [3, 24]]
    for element in offsets: # seconds, minutes, hours
        num = element[0]
        length = element[1]

        part = order[num]
        carry_part = order[num - 1]

        if deltas[part] < 0:
            deltas[part] += length
            deltas[carry_part] -= 1

    # Same thing for days except that the increment depends on the (variable)
    # number of days in the month
    month_lengths = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    if deltas['day'] < 0:
        if get_month(prevdate) == 2 and _is_leap_year(get_year(prevdate)):
            deltas['day'] += 29
        else:
            deltas['day'] += month_lengths[get_month(prevdate) - 1]

        deltas['month'] -= 1

    if deltas['month'] < 0:
        deltas['month'] += 12
        deltas['year'] -= 1

    # Format the result
    if short_format:
        fmt_funcs = {
            'year': lambda d: u'%dy' % d,
            'month': lambda d: u'%dm' % d,
            'day': lambda d: u'%dd' % d,
            'hour': lambda d: u'%dh' % d,
            'minute': lambda d: u'%dmin' % d,
            'second': lambda d: u'%dsec' % d,
        }
    else:
        fmt_funcs = {
            'year': lambda d: ungettext(u'%d year', '%d years', d) % d,
            'month': lambda d: ungettext(u'%d month', '%d months', d) % d,
            'day': lambda d: ungettext(u'%d day', '%d days', d) % d,
            'hour': lambda d: ungettext(u'%d hour', '%d hours', d) % d,
            'minute': lambda d: ungettext(u'%d minute', '%d minutes', d) % d,
            'second': lambda d: ungettext(u'%d second', '%d seconds', d) % d,
        }

    # walk from the largest unit down; the first non-zero unit (plus its
    # immediate sub-unit, unless approximating) forms the result
    i = 0
    for part in order:
        value = deltas[part]
        if value != 0:

            if i < 5:
                sub_part = order[i + 1]
                sub_value = deltas[sub_part]
            else:
                sub_value = 0

            if sub_value == 0 or show_short_version:
                _val = fmt_funcs[part](value)
                if future:
                    if show_suffix:
                        return _(u'in %s') % _val
                    else:
                        return _val

                else:
                    if show_suffix:
                        return _(u'%s ago') % _val
                    else:
                        return _val

            val = fmt_funcs[part](value)
            val_detail = fmt_funcs[sub_part](sub_value)

            if short_format:
                datetime_tmpl = u'%s, %s'
                if show_suffix:
                    datetime_tmpl = _(u'%s, %s ago')
                    if future:
                        datetime_tmpl = _(u'in %s, %s')
            else:
                datetime_tmpl = _(u'%s and %s')
                if show_suffix:
                    datetime_tmpl = _(u'%s and %s ago')
                    if future:
                        datetime_tmpl = _(u'in %s and %s')

            return datetime_tmpl % (val, val_detail)
        i += 1
    return _(u'just now')
506 515
507 516
def uri_filter(uri):
    """
    Removes user:password from given url string

    :param uri:
    :rtype: list
    :returns: filtered list of strings [proto, host, port] with empty
        elements removed
    """
    if not uri:
        return ''

    proto = ''

    # split off a known protocol prefix, if present
    for pat in ('https://', 'http://'):
        if uri.startswith(pat):
            uri = uri[len(pat):]
            proto = pat
            break

    # remove passwords and username: drop everything up to and including
    # the '@'; find() returns -1 when absent, so [0:] leaves uri unchanged
    uri = uri[uri.find('@') + 1:]

    # split the host from the port (anything after the first ':')
    port_sep = uri.find(':')
    if port_sep == -1:
        host, port = uri, None
    else:
        host, port = uri[:port_sep], uri[port_sep + 1:]

    return filter(None, [proto, host, port])
538 547
539 548
def credentials_filter(uri):
    """
    Returns a url with removed credentials

    :param uri:
    """
    parts = uri_filter(uri)
    # re-attach the ':' separator to the port element, if one was found
    if len(parts) > 2 and parts[2]:
        parts[2] = ':' + parts[2]
    return ''.join(parts)
553 562
554 563
def get_clone_url(uri_tmpl, qualifed_home_url, repo_name, repo_id, **override):
    # Renders a clone-url template such as '{scheme}://{user}@{netloc}/{repo}'
    # by substituting values derived from the instance home url and the repo.
    parsed_url = urlobject.URLObject(qualifed_home_url)
    decoded_path = safe_unicode(urllib.unquote(parsed_url.path.rstrip('/')))
    args = {
        'scheme': parsed_url.scheme,
        'user': '',
        # path if we use proxy-prefix
        'netloc': parsed_url.netloc+decoded_path,
        'prefix': decoded_path,
        'repo': repo_name,
        'repoid': str(repo_id)
    }
    # caller-supplied values (e.g. an explicit username) win over defaults
    args.update(override)
    args['user'] = urllib.quote(safe_str(args['user']))

    # plain string substitution of each '{key}' placeholder
    for k, v in args.items():
        uri_tmpl = uri_tmpl.replace('{%s}' % k, v)

    # remove leading @ sign if it's present. Case of empty user
    url_obj = urlobject.URLObject(uri_tmpl)
    url = url_obj.with_netloc(url_obj.netloc.lstrip('@'))

    return safe_unicode(url)
578 587
579 588
def get_commit_safe(repo, commit_id=None, commit_idx=None, pre_load=None):
    """
    Safe version of get_commit if this commit doesn't exists for a
    repository it returns a Dummy one instead

    :param repo: repository instance
    :param commit_id: commit id as str
    :param commit_idx: numeric commit index
    :param pre_load: optional list of commit attributes to load
    """
    # TODO(skreft): remove these circular imports
    from rhodecode.lib.vcs.backends.base import BaseRepository, EmptyCommit
    from rhodecode.lib.vcs.exceptions import RepositoryError
    if not isinstance(repo, BaseRepository):
        # %-format the offending type into the message; the original passed
        # it as a second Exception() argument, which left '%s' unformatted
        raise Exception(
            'You must pass a Repository object as first argument, '
            'got %s' % type(repo))

    try:
        commit = repo.get_commit(
            commit_id=commit_id, commit_idx=commit_idx, pre_load=pre_load)
    except (RepositoryError, LookupError):
        # missing/unknown commit resolves to a harmless dummy commit
        commit = EmptyCommit()
    return commit
602 611
603 612
def datetime_to_time(dt):
    """Convert a datetime to a unix timestamp; falsy input yields None."""
    if not dt:
        return None
    return time.mktime(dt.timetuple())
607 616
608 617
def time_to_datetime(tm):
    """Convert a unix timestamp (number or numeric string) to a local
    datetime; returns None for falsy or unparsable input."""
    if not tm:
        return None
    if isinstance(tm, basestring):
        try:
            tm = float(tm)
        except ValueError:
            return None
    return datetime.datetime.fromtimestamp(tm)
617 626
618 627
def time_to_utcdatetime(tm):
    """Convert a unix timestamp (number or numeric string) to a UTC
    datetime; returns None for falsy or unparsable input."""
    if not tm:
        return None
    if isinstance(tm, basestring):
        try:
            tm = float(tm)
        except ValueError:
            return None
    return datetime.datetime.utcfromtimestamp(tm)
627 636
628 637
MENTIONS_REGEX = re.compile(
    # ^@ or @ without any special chars in front
    r'(?:^@|[^a-zA-Z0-9\-\_\.]@)'
    # main body starts with letter, then can be . - _
    r'([a-zA-Z0-9]{1}[a-zA-Z0-9\-\_\.]+)',
    re.VERBOSE | re.MULTILINE)


def extract_mentioned_users(s):
    """
    Returns unique usernames from given string s that have @mention

    :param s: string to get mentions
    :returns: usernames sorted case-insensitively
    """
    # findall() yields the captured username group; a set de-duplicates
    usrs = set(MENTIONS_REGEX.findall(s))
    # sorted() accepts any iterable - no intermediate list() needed
    return sorted(usrs, key=lambda k: k.lower())
648 657
649 658
class AttributeDict(dict):
    """Dict subclass exposing keys as attributes; missing keys read as None."""

    def __getattr__(self, attr):
        return dict.get(self, attr, None)

    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__
655 664
656 665
def fix_PATH(os_=None):
    """
    Get current active python path, and append it to PATH variable to fix
    issues of subprocess calls and different python versions

    :param os_: optional os-module stand-in (used for testing)
    """
    if os_ is None:
        import os
    else:
        os = os_

    cur_path = os.path.split(sys.executable)[0]
    # .get() instead of [] so a missing PATH variable doesn't raise KeyError
    current_path = os.environ.get('PATH', '')
    if not current_path.startswith(cur_path):
        # avoid a dangling ':' (meaning "current directory") when PATH is empty
        os.environ['PATH'] = (
            '%s:%s' % (cur_path, current_path) if current_path else cur_path)
670 679
671 680
def obfuscate_url_pw(engine):
    # Returns the given SQLAlchemy engine url as unicode with any password
    # replaced by 'XXXXX'. Best-effort: on any parse failure the raw input
    # (or '') is returned unchanged - this never raises.
    _url = engine or ''
    try:
        _url = sqlalchemy.engine.url.make_url(engine)
        if _url.password:
            _url.password = 'XXXXX'
    except Exception:
        pass
    return unicode(_url)
681 690
682 691
def get_server_url(environ):
    # Reconstructs the externally visible base url (scheme://host[:port] +
    # mount point) from a WSGI environ dict.
    req = webob.Request(environ)
    return req.host_url + req.script_name
686 695
687 696
def unique_id(hexlen=32):
    """Return a random, URL-safe unique id of at most ``hexlen`` characters."""
    # alphabet deliberately skips visually ambiguous characters (0/O, 1/I/l)
    safe_alphabet = "23456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghjklmnpqrstuvwxyz"
    return suuid(truncate_to=hexlen, alphabet=safe_alphabet)
691 700
692 701
def suuid(url=None, truncate_to=22, alphabet=None):
    """
    Generate and return a short URL safe UUID.

    If the url parameter is provided, set the namespace to the provided
    URL and generate a UUID.

    :param url: url to get the uuid for
    :param truncate_to: truncate the basic 22 UUID to shorter version
    :param alphabet: optional custom alphabet used to encode the id

    The IDs won't be universally unique any longer, but the probability of
    a collision will still be very low.
    """
    # Define our alphabet.
    _ALPHABET = alphabet or "23456789ABCDEFGHJKLMNPQRSTUVWXYZ"

    # If no URL is given, generate a random UUID.
    if url is None:
        unique_id = uuid.uuid4().int
    else:
        # uuid3 is deterministic for a given namespace + URL
        unique_id = uuid.uuid3(uuid.NAMESPACE_URL, url).int

    alphabet_length = len(_ALPHABET)
    output = []
    while unique_id > 0:
        # divmod keeps everything in exact integer arithmetic; the previous
        # int(unique_id / alphabet_length) goes through lossy float division
        # under true-division semantics and corrupts 128-bit values
        unique_id, digit = divmod(unique_id, alphabet_length)
        output.append(_ALPHABET[digit])
    return "".join(output)[:truncate_to]
722 731
723 732
def get_current_rhodecode_user():
    """
    Gets rhodecode user from threadlocal tmpl_context variable if it's
    defined, else returns None.
    """
    from pylons import tmpl_context as c
    # tmpl_context is request-local; outside of a request there is no
    # 'rhodecode_user' attribute set on it
    if hasattr(c, 'rhodecode_user'):
        return c.rhodecode_user

    return None
734 743
735 744
def action_logger_generic(action, namespace=''):
    """
    A generic logger for actions useful to the system overview, tries to find
    an acting user for the context of the call otherwise reports unknown user

    :param action: logging message eg 'comment 5 deleted'
    :param type: string

    :param namespace: namespace of the logging message eg. 'repo.comments'
    :param type: string

    """
    logger_name = 'rhodecode.actions'
    if namespace:
        logger_name = '%s.%s' % (logger_name, namespace)

    log = logging.getLogger(logger_name)

    # prefer a real acting user; fall back to a warning with a placeholder
    user = get_current_rhodecode_user()
    if user:
        logfunc = log.info
    else:
        user = '<unknown user>'
        logfunc = log.warning

    logfunc('Logging action by {}: {}'.format(user, action))
766 775
767 776
def escape_split(text, sep=',', maxsplit=-1):
    r"""
    Split ``text`` on ``sep`` while honouring backslash-escaped separators,
    e.g. arg='foo\, bar'.

    It should be noted that the way bash et. al. do command line parsing,
    those single quotes are required.
    """
    escaped_sep = r'\%s' % sep

    # no escaped separator present - a plain split is enough
    if escaped_sep not in text:
        return text.split(sep, maxsplit)

    head, _mid, tail = text.partition(escaped_sep)
    # a regular split is fine before the first escaped separator
    pieces = head.split(sep, maxsplit)
    pending = pieces.pop()  # the last piece continues past the escape

    # recurse because there may be more escaped separators
    remainder = escape_split(tail, sep, maxsplit)

    # glue the escaped separator back in: the first element of the recursion
    # result is the continuation of the pending piece
    pending += sep + remainder[0]

    return pieces + [pending] + remainder[1:]
793 802
794 803
class OptionalAttr(object):
    """
    Special Optional Option that defines other attribute. Example::

        def test(apiuser, userid=Optional(OAttr('apiuser')):
            user = Optional.extract(userid)
            # calls

    """

    def __init__(self, attr_name):
        self.attr_name = attr_name

    def __repr__(self):
        return '<OptionalAttr:%s>' % self.attr_name

    def __call__(self):
        # calling the marker is a no-op; it stands for itself
        return self


# alias
OAttr = OptionalAttr


class Optional(object):
    """
    Defines an optional parameter::

        param = param.getval() if isinstance(param, Optional) else param
        param = param() if isinstance(param, Optional) else param

    is equivalent of::

        param = Optional.extract(param)

    """

    def __init__(self, type_):
        self.type_ = type_

    def __repr__(self):
        return '<Optional:%s>' % repr(self.type_)

    def __call__(self):
        return self.getval()

    def getval(self):
        """
        returns value from this Optional instance
        """
        # an OAttr marker resolves to the *name* of the attribute it wraps
        if isinstance(self.type_, OAttr):
            return self.type_.attr_name
        return self.type_

    @classmethod
    def extract(cls, val):
        """
        Extracts value from Optional() instance

        :param val:
        :return: original value if it's not Optional instance else
            value of instance
        """
        return val.getval() if isinstance(val, cls) else val
862 871
863 872
def get_routes_generator_for_server_url(server_url):
    # Builds a routes URLGenerator bound to a synthetic WSGI environ that
    # mirrors the given absolute server url (scheme, host, port, prefix),
    # so urls can be generated outside of a live request.
    parsed_url = urlobject.URLObject(server_url)
    netloc = safe_str(parsed_url.netloc)
    script_name = safe_str(parsed_url.path)

    if ':' in netloc:
        server_name, server_port = netloc.split(':')
    else:
        server_name = netloc
        # default port is implied by the scheme
        server_port = (parsed_url.scheme == 'https' and '443' or '80')

    environ = {
        'REQUEST_METHOD': 'GET',
        'PATH_INFO': '/',
        'SERVER_NAME': server_name,
        'SERVER_PORT': server_port,
        'SCRIPT_NAME': script_name,
    }
    if parsed_url.scheme == 'https':
        environ['HTTPS'] = 'on'
        environ['wsgi.url_scheme'] = 'https'

    return routes.util.URLGenerator(rhodecode.CONFIG['routes.map'], environ)
@@ -1,740 +1,756 b''
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2014-2016 RhodeCode GmbH
4 4 #
5 5 # This program is free software: you can redistribute it and/or modify
6 6 # it under the terms of the GNU Affero General Public License, version 3
7 7 # (only), as published by the Free Software Foundation.
8 8 #
9 9 # This program is distributed in the hope that it will be useful,
10 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 12 # GNU General Public License for more details.
13 13 #
14 14 # You should have received a copy of the GNU Affero General Public License
15 15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 #
17 17 # This program is dual-licensed. If you wish to learn more about the
18 18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 20
21 21 """
22 22 Module holding everything related to vcs nodes, with vcs2 architecture.
23 23 """
24 24
25 25
26 26 import stat
27 27
28 28 from zope.cachedescriptors.property import Lazy as LazyProperty
29 29
30 from rhodecode.config.conf import LANGUAGES_EXTENSIONS_MAP
30 31 from rhodecode.lib.utils import safe_unicode, safe_str
31 32 from rhodecode.lib.utils2 import md5
32 33 from rhodecode.lib.vcs import path as vcspath
33 34 from rhodecode.lib.vcs.backends.base import EmptyCommit, FILEMODE_DEFAULT
34 35 from rhodecode.lib.vcs.conf.mtypes import get_mimetypes_db
35 36 from rhodecode.lib.vcs.exceptions import NodeError, RemovedFileNodeError
36 37
37 38 LARGEFILE_PREFIX = '.hglf'
38 39
39 40
class NodeKind:
    # enumeration of vcs node kinds used throughout this module
    SUBMODULE = -1
    DIR = 1
    FILE = 2
    LARGEFILE = 3
45 46
46 47
class NodeState:
    # possible change-states of a node relative to a commit
    ADDED = u'added'
    CHANGED = u'changed'
    NOT_CHANGED = u'not changed'
    REMOVED = u'removed'
53 54
class NodeGeneratorBase(object):
    """
    Base class for removed added and changed filenodes, it's a lazy generator
    class that will create filenodes only on iteration or call

    The len method doesn't need to create filenodes at all
    """

    def __init__(self, current_paths, cs):
        self.cs = cs
        self.current_paths = current_paths

    def __call__(self):
        return list(self)

    def __getslice__(self, i, j):
        for path in self.current_paths[i:j]:
            yield self.cs.get_node(path)

    def __len__(self):
        return len(self.current_paths)

    def __iter__(self):
        for path in self.current_paths:
            yield self.cs.get_node(path)
79 80
80 81
class AddedFileNodesGenerator(NodeGeneratorBase):
    """
    Class holding added files for current commit
    """
    # fully inherits the lazy iteration behaviour of NodeGeneratorBase
85 86
86 87
class ChangedFileNodesGenerator(NodeGeneratorBase):
    """
    Class holding changed files for current commit
    """
    # fully inherits the lazy iteration behaviour of NodeGeneratorBase
91 92
92 93
class RemovedFileNodesGenerator(NodeGeneratorBase):
    """
    Class holding removed files for current commit
    """
    def __iter__(self):
        # removed paths no longer exist in the commit, so yield dummy
        # RemovedFileNode placeholders instead of asking the commit for them
        for p in self.current_paths:
            yield RemovedFileNode(path=p)

    def __getslice__(self, i, j):
        # py2-only slicing hook; mirrors __iter__ for a sub-range
        for p in self.current_paths[i:j]:
            yield RemovedFileNode(path=p)
104 105
105 106
class Node(object):
    """
    Simplest class representing file or directory on repository. SCM backends
    should use ``FileNode`` and ``DirNode`` subclasses rather than ``Node``
    directly.

    Node's ``path`` cannot start with slash as we operate on *relative* paths
    only. Moreover, every single node is identified by the ``path`` attribute,
    so it cannot end with slash, too. Otherwise, path could lead to mistakes.
    """

    # commit this node is bound to; None for standalone nodes
    commit = None

    def __init__(self, path, kind):
        self._validate_path(path)  # can throw exception if path is invalid
        self.path = safe_str(path.rstrip('/'))  # we store paths as str
        if path == '' and kind != NodeKind.DIR:
            raise NodeError("Only DirNode and its subclasses may be "
                            "initialized with empty path")
        self.kind = kind

        if self.is_root() and not self.is_dir():
            raise NodeError("Root node cannot be FILE kind")

    def _validate_path(self, path):
        if path.startswith('/'):
            raise NodeError(
                "Cannot initialize Node objects with slash at "
                "the beginning as only relative paths are supported. "
                "Got %s" % (path,))

    @LazyProperty
    def parent(self):
        # parent node; resolved via the commit when bound, otherwise a
        # standalone DirNode. None for the root node.
        parent_path = self.get_parent_path()
        if parent_path:
            if self.commit:
                return self.commit.get_node(parent_path)
            return DirNode(parent_path)
        return None

    @LazyProperty
    def unicode_path(self):
        return safe_unicode(self.path)

    @LazyProperty
    def dir_path(self):
        """
        Returns name of the directory from full path of this vcs node. Empty
        string is returned if there's no directory in the path
        """
        _parts = self.path.rstrip('/').rsplit('/', 1)
        if len(_parts) == 2:
            return safe_unicode(_parts[0])
        return u''

    @LazyProperty
    def name(self):
        """
        Returns name of the node so if its path
        then only last part is returned.
        """
        return safe_unicode(self.path.rstrip('/').split('/')[-1])

    @property
    def kind(self):
        return self._kind

    @kind.setter
    def kind(self, kind):
        # kind is write-once; changing it after construction is an error
        if hasattr(self, '_kind'):
            raise NodeError("Cannot change node's kind")
        else:
            self._kind = kind
            # Post setter check (path's trailing slash)
            if self.path.endswith('/'):
                raise NodeError("Node's path cannot end with slash")

    def __cmp__(self, other):
        """
        Comparator using name of the node, needed for quick list sorting.
        """
        kind_cmp = cmp(self.kind, other.kind)
        if kind_cmp:
            return kind_cmp
        return cmp(self.name, other.name)

    def __eq__(self, other):
        for attr in ['name', 'path', 'kind']:
            if getattr(self, attr) != getattr(other, attr):
                return False
        if self.is_file():
            if self.content != other.content:
                return False
        else:
            # For DirNode's check without entering each dir
            self_nodes_paths = list(sorted(n.path for n in self.nodes))
            # bugfix: compare against *other*'s children - previously both
            # sides iterated self.nodes, making two different directories
            # with equal name/path/kind always compare equal
            other_nodes_paths = list(sorted(n.path for n in other.nodes))
            if self_nodes_paths != other_nodes_paths:
                return False
        return True

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return '<%s %r>' % (self.__class__.__name__, self.path)

    def __str__(self):
        return self.__repr__()

    def __unicode__(self):
        return self.name

    def get_parent_path(self):
        """
        Returns node's parent path or empty string if node is root.
        """
        if self.is_root():
            return ''
        return vcspath.dirname(self.path.rstrip('/')) + '/'

    def is_file(self):
        """
        Returns ``True`` if node's kind is ``NodeKind.FILE``, ``False``
        otherwise.
        """
        return self.kind == NodeKind.FILE

    def is_dir(self):
        """
        Returns ``True`` if node's kind is ``NodeKind.DIR``, ``False``
        otherwise.
        """
        return self.kind == NodeKind.DIR

    def is_root(self):
        """
        Returns ``True`` if node is a root node and ``False`` otherwise.
        """
        return self.kind == NodeKind.DIR and self.path == ''

    def is_submodule(self):
        """
        Returns ``True`` if node's kind is ``NodeKind.SUBMODULE``, ``False``
        otherwise.
        """
        return self.kind == NodeKind.SUBMODULE

    def is_largefile(self):
        """
        Returns ``True`` if node's kind is ``NodeKind.LARGEFILE``, ``False``
        otherwise
        """
        return self.kind == NodeKind.LARGEFILE

    def is_link(self):
        # only answerable when bound to a commit; standalone nodes are
        # never considered links
        if self.commit:
            return self.commit.is_link(self.path)
        return False

    @LazyProperty
    def added(self):
        return self.state is NodeState.ADDED

    @LazyProperty
    def changed(self):
        return self.state is NodeState.CHANGED

    @LazyProperty
    def not_changed(self):
        return self.state is NodeState.NOT_CHANGED

    @LazyProperty
    def removed(self):
        return self.state is NodeState.REMOVED
281 282
282 283
class FileNode(Node):
    """
    Class representing file nodes.

    :attribute: path: path to the node, relative to repository's root
    :attribute: content: if given arbitrary sets content of the file
    :attribute: commit: if given, first time content is accessed, callback
    :attribute: mode: stat mode for a node. Default is `FILEMODE_DEFAULT`.
    """

    def __init__(self, path, content=None, commit=None, mode=None):
        """
        Only one of ``content`` and ``commit`` may be given. Passing both
        would raise ``NodeError`` exception.

        :param path: relative path to the node
        :param content: content may be passed to constructor
        :param commit: if given, will use it to lazily fetch content
        :param mode: ST_MODE (i.e. 0100644)
        """
        if content and commit:
            raise NodeError("Cannot use both content and commit")
        super(FileNode, self).__init__(path, kind=NodeKind.FILE)
        self.commit = commit
        self._content = content
        self._mode = mode or FILEMODE_DEFAULT

    @LazyProperty
    def mode(self):
        """
        Returns lazily mode of the FileNode. If `commit` is not set, would
        use value given at initialization or `FILEMODE_DEFAULT` (default).
        """
        if self.commit:
            mode = self.commit.get_file_mode(self.path)
        else:
            mode = self._mode
        return mode

    @LazyProperty
    def raw_bytes(self):
        """
        Returns lazily the raw bytes of the FileNode.
        """
        if self.commit:
            if self._content is None:
                # cache fetched content for subsequent accesses
                self._content = self.commit.get_file_content(self.path)
            content = self._content
        else:
            content = self._content
        return content

    @LazyProperty
    def md5(self):
        """
        Returns md5 of the file node.
        """
        return md5(self.raw_bytes)

    @LazyProperty
    def content(self):
        """
        Returns lazily content of the FileNode. If possible, would try to
        decode content from UTF-8.
        """
        content = self.raw_bytes

        if self.is_binary:
            # binary payloads are returned as-is; decoding would corrupt them
            return content
        return safe_unicode(content)

    @LazyProperty
    def size(self):
        if self.commit:
            return self.commit.get_file_size(self.path)
        raise NodeError(
            "Cannot retrieve size of the file without related "
            "commit attribute")

    @LazyProperty
    def message(self):
        if self.commit:
            return self.last_commit.message
        raise NodeError(
            "Cannot retrieve message of the file without related "
            "commit attribute")

    @LazyProperty
    def last_commit(self):
        if self.commit:
            pre_load = ["author", "date", "message"]
            return self.commit.get_file_commit(self.path, pre_load=pre_load)
        raise NodeError(
            "Cannot retrieve last commit of the file without "
            "related commit attribute")

    def get_mimetype(self):
        """
        Mimetype is calculated based on the file's content. If ``_mimetype``
        attribute is available, it will be returned (backends which store
        mimetypes or can easily recognize them, should set this private
        attribute to indicate that type should *NOT* be calculated).
        """

        if hasattr(self, '_mimetype'):
            if (isinstance(self._mimetype, (tuple, list,)) and
                    len(self._mimetype) == 2):
                return self._mimetype
            else:
                raise NodeError('given _mimetype attribute must be an 2 '
                                'element list or tuple')

        db = get_mimetypes_db()
        mtype, encoding = db.guess_type(self.name)

        if mtype is None:
            # mimetypes db knew nothing - fall back to binary sniffing
            if self.is_binary:
                mtype = 'application/octet-stream'
                encoding = None
            else:
                mtype = 'text/plain'
                encoding = None

            # try with pygments
            try:
                from pygments.lexers import get_lexer_for_filename
                mt = get_lexer_for_filename(self.name).mimetypes
            except Exception:
                mt = None

            if mt:
                mtype = mt[0]

        return mtype, encoding

    @LazyProperty
    def mimetype(self):
        """
        Wrapper around full mimetype info. It returns only type of fetched
        mimetype without the encoding part. use get_mimetype function to fetch
        full set of (type,encoding)
        """
        return self.get_mimetype()[0]

    @LazyProperty
    def mimetype_main(self):
        # the 'text' part of e.g. 'text/plain'
        return self.mimetype.split('/')[0]

    @LazyProperty
    def lexer(self):
        """
        Returns pygment's lexer class. Would try to guess lexer taking file's
        content, name and mimetype.
        """
        from pygments import lexers

        lexer = None
        try:
            lexer = lexers.guess_lexer_for_filename(
                self.name, self.content, stripnl=False)
        except lexers.ClassNotFound:
            lexer = None

        # try our EXTENSION_MAP
        if not lexer:
            try:
                # NOTE(review): assumes map values are sequences of lexer
                # names and [0] picks the first - verify against __get_lem
                lexer_class = LANGUAGES_EXTENSIONS_MAP.get(self.extension)
                if lexer_class:
                    lexer = lexers.get_lexer_by_name(lexer_class[0])
            except lexers.ClassNotFound:
                lexer = None

        if not lexer:
            # ultimate fallback: render as plain text
            lexer = lexers.TextLexer(stripnl=False)

        return lexer

    @LazyProperty
    def lexer_alias(self):
        """
        Returns first alias of the lexer guessed for this file.
        """
        return self.lexer.aliases[0]

    @LazyProperty
    def history(self):
        """
        Returns a list of commit for this file in which the file was changed
        """
        if self.commit is None:
            raise NodeError('Unable to get commit for this FileNode')
        return self.commit.get_file_history(self.path)

    @LazyProperty
    def annotate(self):
        """
        Returns a list of three element tuples with lineno, commit and line
        """
        if self.commit is None:
            raise NodeError('Unable to get commit for this FileNode')
        pre_load = ["author", "date", "message"]
        return self.commit.get_file_annotate(self.path, pre_load=pre_load)

    @LazyProperty
    def state(self):
        if not self.commit:
            raise NodeError(
                "Cannot check state of the node if it's not "
                "linked with commit")
        elif self.path in (node.path for node in self.commit.added):
            return NodeState.ADDED
        elif self.path in (node.path for node in self.commit.changed):
            return NodeState.CHANGED
        else:
            return NodeState.NOT_CHANGED

    @LazyProperty
    def is_binary(self):
        """
        Returns True if file has binary content.
        """
        # NOTE: returns the raw truthy/falsy expression result, which may be
        # '' / None rather than a strict bool for empty files
        _bin = self.raw_bytes and '\0' in self.raw_bytes
        return _bin

    @LazyProperty
    def extension(self):
        """Returns filenode extension"""
        return self.name.split('.')[-1]

    @property
    def is_executable(self):
        """
        Returns ``True`` if file has executable flag turned on.
        """
        return bool(self.mode & stat.S_IXUSR)

    def get_largefile_node(self):
        """
        Try to return a Mercurial FileNode from this node. It does internal
        checks inside largefile store, if that file exist there it will
        create special instance of LargeFileNode which can get content from
        LF store.
        """
        # returns None implicitly when this is not a largefile pointer
        if self.commit and self.path.startswith(LARGEFILE_PREFIX):
            largefile_path = self.path.split(LARGEFILE_PREFIX)[-1].lstrip('/')
            return self.commit.get_largefile_node(largefile_path)

    def lines(self, count_empty=False):
        # Returns a two-tuple of line counts; (0, 0) for binary files.
        # NOTE(review): the two branches return differently shaped data -
        # with count_empty=True the second element is the *non-empty* line
        # count, otherwise both elements are the newline count. Verify
        # before relying on the second element.
        all_lines, empty_lines = 0, 0

        if not self.is_binary:
            content = self.content
            if count_empty:
                all_lines = 0
                empty_lines = 0
                for line in content.splitlines(True):
                    if line == '\n':
                        empty_lines += 1
                    all_lines += 1

                return all_lines, all_lines - empty_lines
            else:
                # fast method
                empty_lines = all_lines = content.count('\n')
                if all_lines == 0 and content:
                    # one-line without a newline
                    empty_lines = all_lines = 1

        return all_lines, empty_lines

    def __repr__(self):
        return '<%s %r @ %s>' % (self.__class__.__name__, self.path,
                                 getattr(self.commit, 'short_id', ''))
541 557
542 558
class RemovedFileNode(FileNode):
    """
    Dummy FileNode class - trying to access any public attribute except path,
    name, kind or state (or methods/attributes checking those two) would raise
    RemovedFileNodeError.
    """
    # attributes which remain safe to read on a removed file
    ALLOWED_ATTRIBUTES = [
        'name', 'path', 'state', 'is_root', 'is_file', 'is_dir', 'kind',
        'added', 'changed', 'not_changed', 'removed'
    ]

    def __init__(self, path):
        """
        :param path: relative path to the node
        """
        super(RemovedFileNode, self).__init__(path=path)

    def __getattribute__(self, attr):
        # private ('_'-prefixed) attributes are always allowed so internal
        # machinery (including this check itself) keeps working
        if attr.startswith('_') or attr in RemovedFileNode.ALLOWED_ATTRIBUTES:
            return super(RemovedFileNode, self).__getattribute__(attr)
        raise RemovedFileNodeError(
            "Cannot access attribute %s on RemovedFileNode" % attr)

    @LazyProperty
    def state(self):
        # a RemovedFileNode is, by definition, always in REMOVED state
        return NodeState.REMOVED
569 585
570 586
class DirNode(Node):
    """
    DirNode stores a list of files and directories within this node.
    Nodes may be used standalone, but within a repository context they
    lazily fetch data from the same repository's commit.
    """

    def __init__(self, path, nodes=(), commit=None):
        """
        Only one of ``nodes`` and ``commit`` may be given. Passing both
        would raise ``NodeError`` exception.

        :param path: relative path to the node
        :param nodes: content may be passed to constructor
        :param commit: if given, will use it to lazily fetch content
        """
        if nodes and commit:
            raise NodeError("Cannot use both nodes and commit")
        super(DirNode, self).__init__(path, NodeKind.DIR)
        self.commit = commit
        self._nodes = nodes

    @LazyProperty
    def content(self):
        # directories have no content; reading it is a caller error
        raise NodeError(
            "%s represents a dir and has no `content` attribute" % self)

    @LazyProperty
    def nodes(self):
        """
        Sorted child nodes of this directory. When a commit is bound the
        children are fetched lazily from it, otherwise the nodes given to
        the constructor are used. As a side effect this populates
        ``_nodes_dict`` (path -> node), which ``get_node`` relies on.
        """
        if self.commit:
            nodes = self.commit.get_nodes(self.path)
        else:
            nodes = self._nodes
        self._nodes_dict = dict((node.path, node) for node in nodes)
        return sorted(nodes)

    @LazyProperty
    def files(self):
        # only the file children, sorted
        return sorted((node for node in self.nodes if node.is_file()))

    @LazyProperty
    def dirs(self):
        # only the directory children, sorted
        return sorted((node for node in self.nodes if node.is_dir()))

    def __iter__(self):
        for node in self.nodes:
            yield node

    def get_node(self, path):
        """
        Returns the node located at ``path``, relative to this ``DirNode``.

        Direct children are looked up in ``_nodes_dict``; deeper paths
        (e.g. 'docs/api/index.rst') are resolved by recursively fetching
        each intermediate node, which requires this node to be bound to a
        commit - this would work::

            docs = root.get_node('docs')
            docs.get_node('api').get_node('index.rst')

        :param: path - relative to the current node

        :raises NodeError: if ``path`` is empty, the node does not exist,
            or a deeper path is requested while no commit is bound.

        .. note::
            To access lazily (as in example above) node have to be
            initialized with related commit object - without it node is out
            of context and may know nothing about anything else than
            nearest (located at same level) nodes.
        """
        try:
            path = path.rstrip('/')
            if path == '':
                raise NodeError("Cannot retrieve node without path")
            self.nodes  # access nodes first in order to set _nodes_dict
            paths = path.split('/')
            if len(paths) == 1:
                # direct child: qualify with our own path unless we are root
                if not self.is_root():
                    path = '/'.join((self.path, paths[0]))
                else:
                    path = paths[0]
                return self._nodes_dict[path]
            elif len(paths) > 1:
                if self.commit is None:
                    raise NodeError(
                        "Cannot access deeper nodes without commit")
                else:
                    # resolve the first segment, then recurse with the rest
                    path1, path2 = paths[0], '/'.join(paths[1:])
                    return self.get_node(path1).get_node(path2)
            else:
                raise KeyError
        except KeyError:
            raise NodeError("Node does not exist at %s" % path)

    @LazyProperty
    def state(self):
        # node state is meaningful only for files
        raise NodeError("Cannot access state of DirNode")

    @LazyProperty
    def size(self):
        # total size of every file anywhere below this directory
        size = 0
        for root, dirs, files in self.commit.walk(self.path):
            for f in files:
                size += f.size

        return size

    def __repr__(self):
        return '<%s %r @ %s>' % (self.__class__.__name__, self.path,
                                 getattr(self.commit, 'short_id', ''))
677 693
678 694
class RootNode(DirNode):
    """Root ``DirNode`` of a repository, anchored at the empty path."""

    def __init__(self, nodes=(), commit=None):
        super(RootNode, self).__init__(path='', nodes=nodes, commit=commit)

    def __repr__(self):
        return '<%s>' % type(self).__name__
689 705
690 706
class SubModuleNode(Node):
    """
    represents a SubModule of Git or SubRepo of Mercurial
    """
    # submodules are never rendered as binary and report no size of their own
    is_binary = False
    size = 0

    def __init__(self, name, url=None, commit=None, alias=None):
        """
        :param name: path of the submodule inside the owning repository
        :param url: submodule source URL; when falsy it is derived via
            ``_extract_submodule_url`` (currently just the path)
        :param commit: commit id the submodule points at (stringified)
        :param alias: backend alias, presumably 'git'/'hg'/'svn' — TODO confirm

        NOTE(review): attributes are assigned directly and ``Node.__init__``
        is never called — confirm skipping base-class path validation is
        intentional.
        """
        self.path = name
        self.kind = NodeKind.SUBMODULE
        self.alias = alias

        # we have to use EmptyCommit here since this can point to svn/git/hg
        # submodules we cannot get from repository
        self.commit = EmptyCommit(str(commit), alias=alias)
        self.url = url or self._extract_submodule_url()

    def __repr__(self):
        return '<%s %r @ %s>' % (self.__class__.__name__, self.path,
                                 getattr(self.commit, 'short_id', ''))

    def _extract_submodule_url(self):
        # TODO: find a way to parse gits submodule file and extract the
        # linking URL
        return self.path

    @LazyProperty
    def name(self):
        """
        Display name of the submodule: the last segment of its path joined
        with the short commit id, e.g. ``u'subrepo @ abc123'``.
        """
        org = safe_unicode(self.path.rstrip('/').split('/')[-1])
        return u'%s @ %s' % (org, self.commit.short_id)
725 741
726 742
class LargeFileNode(FileNode):
    """``FileNode`` whose ``path`` is an absolute filesystem location."""

    def _validate_path(self, path):
        """
        we override check since the LargeFileNode path is system absolute
        """

    def raw_bytes(self):
        """Raw file content: read from disk when a commit is bound,
        otherwise fall back to the in-memory ``_content``."""
        if self.commit:
            with open(self.path, 'rb') as f:
                return f.read()
        return self._content
@@ -1,70 +1,71 b''
1 1
2 2 <div id="codeblock" class="codeblock">
3 3 <div class="codeblock-header">
4 4 <div class="stats">
5 5 <span> <strong>${c.file}</strong></span>
6 6 <span> | ${c.file.lines()[0]} ${ungettext('line', 'lines', c.file.lines()[0])}</span>
7 7 <span> | ${h.format_byte_size_binary(c.file.size)}</span>
8 <span class="item last"> | ${c.file.mimetype}</span>
8 <span> | ${c.file.mimetype} </span>
9 <span class="item last"> | ${h.get_lexer_for_filenode(c.file).__class__.__name__}</span>
9 10 </div>
10 11 <div class="buttons">
11 12 <a id="file_history_overview" href="#">
12 13 ${_('History')}
13 14 </a>
14 15 <a id="file_history_overview_full" style="display: none" href="${h.url('changelog_file_home',repo_name=c.repo_name, revision=c.commit.raw_id, f_path=c.f_path)}">
15 16 ${_('Show Full History')}
16 17 </a> |
17 18 %if c.annotate:
18 19 ${h.link_to(_('Source'), h.url('files_home', repo_name=c.repo_name,revision=c.commit.raw_id,f_path=c.f_path))}
19 20 %else:
20 21 ${h.link_to(_('Annotation'), h.url('files_annotate_home',repo_name=c.repo_name,revision=c.commit.raw_id,f_path=c.f_path))}
21 22 %endif
22 23 | ${h.link_to(_('Raw'), h.url('files_raw_home',repo_name=c.repo_name,revision=c.commit.raw_id,f_path=c.f_path))}
23 24 | <a href="${h.url('files_rawfile_home',repo_name=c.repo_name,revision=c.commit.raw_id,f_path=c.f_path)}">
24 25 ${_('Download')}
25 26 </a>
26 27
27 28 %if h.HasRepoPermissionAny('repository.write','repository.admin')(c.repo_name):
28 29 |
29 30 %if c.on_branch_head and c.branch_or_raw_id and not c.file.is_binary:
30 31 <a href="${h.url('files_edit_home',repo_name=c.repo_name,revision=c.branch_or_raw_id,f_path=c.f_path, anchor='edit')}">
31 32 ${_('Edit on Branch:%s') % c.branch_or_raw_id}
32 33 </a>
33 34 | <a class="btn-danger btn-link" href="${h.url('files_delete_home',repo_name=c.repo_name,revision=c.branch_or_raw_id,f_path=c.f_path, anchor='edit')}">${_('Delete')}
34 35 </a>
35 36 %elif c.on_branch_head and c.branch_or_raw_id and c.file.is_binary:
36 37 ${h.link_to(_('Edit'), '#', class_="btn btn-link disabled tooltip", title=_('Editing binary files not allowed'))}
37 38 | ${h.link_to(_('Delete'), h.url('files_delete_home',repo_name=c.repo_name,revision=c.branch_or_raw_id,f_path=c.f_path, anchor='edit'),class_="btn-danger btn-link")}
38 39 %else:
39 40 ${h.link_to(_('Edit'), '#', class_="btn btn-link disabled tooltip", title=_('Editing files allowed only when on branch head commit'))}
40 41 | ${h.link_to(_('Delete'), '#', class_="btn btn-danger btn-link disabled tooltip", title=_('Deleting files allowed only when on branch head commit'))}
41 42 %endif
42 43 %endif
43 44 </div>
44 45 </div>
45 46 <div id="file_history_container"></div>
46 47 <div class="code-body">
47 48 %if c.file.is_binary:
48 49 <div>
49 50 ${_('Binary file (%s)') % c.file.mimetype}
50 51 </div>
51 52 %else:
52 53 % if c.file.size < c.cut_off_limit:
53 54 %if c.annotate:
54 55 ${h.pygmentize_annotation(c.repo_name,c.file,linenos=True,anchorlinenos=True,lineanchors='L',cssclass="code-highlight")}
55 56 %elif c.renderer:
56 57 ${h.render(c.file.content, renderer=c.renderer)}
57 58 %else:
58 59 ${h.pygmentize(c.file,linenos=True,anchorlinenos=True,lineanchors='L',cssclass="code-highlight")}
59 60 %endif
60 61 %else:
61 62 ${_('File is too big to display')} ${h.link_to(_('Show as raw'),
62 63 h.url('files_raw_home',repo_name=c.repo_name,revision=c.commit.raw_id,f_path=c.f_path))}
63 64 %endif
64 65 %endif
65 66 </div>
66 67 </div>
67 68
68 69 <script>
69 70 var source_page = true;
70 71 </script>
General Comments 0
You need to be logged in to leave comments. Login now