lexers: added small extensions table to extend syntax highlighting for file sources...
marcink
r796:76d12e4e default
@@ -1,35 +1,39 b''
1 # -*- coding: utf-8 -*-
1 # -*- coding: utf-8 -*-
2
2
3 # Copyright (C) 2013-2016 RhodeCode GmbH
3 # Copyright (C) 2013-2016 RhodeCode GmbH
4 #
4 #
5 # This program is free software: you can redistribute it and/or modify
5 # This program is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU Affero General Public License, version 3
6 # it under the terms of the GNU Affero General Public License, version 3
7 # (only), as published by the Free Software Foundation.
7 # (only), as published by the Free Software Foundation.
8 #
8 #
9 # This program is distributed in the hope that it will be useful,
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU General Public License for more details.
12 # GNU General Public License for more details.
13 #
13 #
14 # You should have received a copy of the GNU Affero General Public License
14 # You should have received a copy of the GNU Affero General Public License
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 #
16 #
17 # This program is dual-licensed. If you wish to learn more about the
17 # This program is dual-licensed. If you wish to learn more about the
18 # RhodeCode Enterprise Edition, including its added features, Support services,
18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 # and proprietary license terms, please see https://rhodecode.com/licenses/
19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20
20
21 """
21 """
22 Various config settings for RhodeCode
22 Various config settings for RhodeCode
23 """
23 """
24 from rhodecode import EXTENSIONS
24 from rhodecode import EXTENSIONS
25
25
26 from rhodecode.lib.utils2 import __get_lem
26 from rhodecode.lib.utils2 import __get_lem
27
27
28
28
29 # language map is also used by whoosh indexer, which for those specified
29 # language map is also used by whoosh indexer, which for those specified
30 # extensions will index its content
30 # extensions will index its content
31 LANGUAGES_EXTENSIONS_MAP = __get_lem()
31 # custom extensions to lexers, format is 'ext': 'LexerClass'
32 extra = {
33 'vbs': 'VbNet'
34 }
35 LANGUAGES_EXTENSIONS_MAP = __get_lem(extra)
32
36
33 DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"
37 DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"
34
38
35 DATE_FORMAT = "%Y-%m-%d"
39 DATE_FORMAT = "%Y-%m-%d"
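
The hunk above adds an ``extra`` table in the ``'ext': 'LexerClass'`` format so that, for example, ``.vbs`` sources get highlighted with the VB.NET lexer. As a rough illustration only — the helper name and the module-level table below are assumptions, not RhodeCode's actual ``get_custom_lexer`` implementation — such a table could be resolved to a Pygments lexer like this:

from pygments import lexers
from pygments.util import ClassNotFound

# illustrative stand-in for the 'extra' table passed to __get_lem() above
EXTRA_LEXERS = {'vbs': 'VbNet'}

def lookup_custom_lexer(extension):
    # return a Pygments lexer instance for an overridden extension, else None
    lexer_name = EXTRA_LEXERS.get(extension)
    if not lexer_name:
        return None
    try:
        return lexers.get_lexer_by_name(lexer_name)
    except ClassNotFound:
        return None

# usage: fall back to the default extension mapping when there is no override
lexer = lookup_custom_lexer('vbs')  # VB.NET lexer instance for .vbs sources
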
@@ -1,214 +1,214 b''
1 # -*- coding: utf-8 -*-
1 # -*- coding: utf-8 -*-
2
2
3 # Copyright (C) 2011-2016 RhodeCode GmbH
3 # Copyright (C) 2011-2016 RhodeCode GmbH
4 #
4 #
5 # This program is free software: you can redistribute it and/or modify
5 # This program is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU Affero General Public License, version 3
6 # it under the terms of the GNU Affero General Public License, version 3
7 # (only), as published by the Free Software Foundation.
7 # (only), as published by the Free Software Foundation.
8 #
8 #
9 # This program is distributed in the hope that it will be useful,
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU General Public License for more details.
12 # GNU General Public License for more details.
13 #
13 #
14 # You should have received a copy of the GNU Affero General Public License
14 # You should have received a copy of the GNU Affero General Public License
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 #
16 #
17 # This program is dual-licensed. If you wish to learn more about the
17 # This program is dual-licensed. If you wish to learn more about the
18 # RhodeCode Enterprise Edition, including its added features, Support services,
18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 # and proprietary license terms, please see https://rhodecode.com/licenses/
19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20
20
21
21
22 """
22 """
23 Annotation library for usage in rhodecode, previously part of vcs
23 Annotation library for usage in rhodecode, previously part of vcs
24 """
24 """
25
25
26 import StringIO
26 import StringIO
27
27
28 from pygments import highlight
28 from pygments import highlight
29 from pygments.formatters import HtmlFormatter
29 from pygments.formatters import HtmlFormatter
30
30
31 from rhodecode.lib.vcs.exceptions import VCSError
31 from rhodecode.lib.vcs.exceptions import VCSError
32 from rhodecode.lib.vcs.nodes import FileNode
32 from rhodecode.lib.vcs.nodes import FileNode
33
33
34
34
35 def annotate_highlight(
35 def annotate_highlight(
36 filenode, annotate_from_commit_func=None,
36 filenode, annotate_from_commit_func=None,
37 order=None, headers=None, **options):
37 order=None, headers=None, **options):
38 """
38 """
39 Returns html portion containing annotated table with 3 columns: line
39 Returns html portion containing annotated table with 3 columns: line
40 numbers, commit information and pygmentized line of code.
40 numbers, commit information and pygmentized line of code.
41
41
42 :param filenode: FileNode object
42 :param filenode: FileNode object
43 :param annotate_from_commit_func: function taking commit and
43 :param annotate_from_commit_func: function taking commit and
44 returning a single annotate cell; it needs a line break at the end
44 returning a single annotate cell; it needs a line break at the end
45 :param order: ordered sequence of ``ls`` (line numbers column),
45 :param order: ordered sequence of ``ls`` (line numbers column),
46 ``annotate`` (annotate column), ``code`` (code column); Default is
46 ``annotate`` (annotate column), ``code`` (code column); Default is
47 ``['ls', 'annotate', 'code']``
47 ``['ls', 'annotate', 'code']``
48 :param headers: dictionary with headers (keys are what is in the ``order``
48 :param headers: dictionary with headers (keys are what is in the ``order``
49 parameter)
49 parameter)
50 """
50 """
51 from rhodecode.lib.utils import get_custom_lexer
51 from rhodecode.lib.helpers import get_lexer_for_filenode
52 options['linenos'] = True
52 options['linenos'] = True
53 formatter = AnnotateHtmlFormatter(
53 formatter = AnnotateHtmlFormatter(
54 filenode=filenode, order=order, headers=headers,
54 filenode=filenode, order=order, headers=headers,
55 annotate_from_commit_func=annotate_from_commit_func, **options)
55 annotate_from_commit_func=annotate_from_commit_func, **options)
56 lexer = get_custom_lexer(filenode.extension) or filenode.lexer
56 lexer = get_lexer_for_filenode(filenode)
57 highlighted = highlight(filenode.content, lexer, formatter)
57 highlighted = highlight(filenode.content, lexer, formatter)
58 return highlighted
58 return highlighted
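
Read together with the docstring above, a call to ``annotate_highlight`` might look like the hedged sketch below; ``filenode`` is assumed to be a FileNode obtained from an existing repository backend, and the commit formatter mirrors the example given further down in ``AnnotateHtmlFormatter.__init__``.

def commit_to_anchor(commit):
    # each annotate cell has to end with a newline, as noted above
    return '<a href="/commits/%s/">%s</a>\n' % (commit.id, commit.id)

html = annotate_highlight(
    filenode,  # assumed: a FileNode from an existing commit/repository object
    annotate_from_commit_func=commit_to_anchor,
    headers={'ls': '#', 'annotate': 'Annotate', 'code': 'Code'},
)
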
59
59
60
60
61 class AnnotateHtmlFormatter(HtmlFormatter):
61 class AnnotateHtmlFormatter(HtmlFormatter):
62
62
63 def __init__(
63 def __init__(
64 self, filenode, annotate_from_commit_func=None,
64 self, filenode, annotate_from_commit_func=None,
65 order=None, **options):
65 order=None, **options):
66 """
66 """
67 If ``annotate_from_commit_func`` is passed, it should be a function
67 If ``annotate_from_commit_func`` is passed, it should be a function
68 which returns string from the given commit. For example, we may pass
68 which returns string from the given commit. For example, we may pass
69 following function as ``annotate_from_commit_func``::
69 following function as ``annotate_from_commit_func``::
70
70
71 def commit_to_anchor(commit):
71 def commit_to_anchor(commit):
72 return '<a href="/commits/%s/">%s</a>\n' %\
72 return '<a href="/commits/%s/">%s</a>\n' %\
73 (commit.id, commit.id)
73 (commit.id, commit.id)
74
74
75 :param annotate_from_commit_func: see above
75 :param annotate_from_commit_func: see above
76 :param order: (default: ``['ls', 'annotate', 'code']``); order of
76 :param order: (default: ``['ls', 'annotate', 'code']``); order of
77 columns;
77 columns;
78 :param options: standard Pygments HtmlFormatter options; there is one
78 :param options: standard Pygments HtmlFormatter options; there is one
79 extra option though, ``headers``. For instance we can pass::
79 extra option though, ``headers``. For instance we can pass::
80
80
81 formatter = AnnotateHtmlFormatter(filenode, headers={
81 formatter = AnnotateHtmlFormatter(filenode, headers={
82 'ls': '#',
82 'ls': '#',
83 'annotate': 'Annotate',
83 'annotate': 'Annotate',
84 'code': 'Code',
84 'code': 'Code',
85 })
85 })
86
86
87 """
87 """
88 super(AnnotateHtmlFormatter, self).__init__(**options)
88 super(AnnotateHtmlFormatter, self).__init__(**options)
89 self.annotate_from_commit_func = annotate_from_commit_func
89 self.annotate_from_commit_func = annotate_from_commit_func
90 self.order = order or ('ls', 'annotate', 'code')
90 self.order = order or ('ls', 'annotate', 'code')
91 headers = options.pop('headers', None)
91 headers = options.pop('headers', None)
92 if headers and not (
92 if headers and not (
93 'ls' in headers and 'annotate' in headers and 'code' in headers):
93 'ls' in headers and 'annotate' in headers and 'code' in headers):
94 raise ValueError(
94 raise ValueError(
95 "If headers option dict is specified it must contain "
95 "If headers option dict is specified it must contain "
96 "all 'ls', 'annotate' and 'code' keys")
96 "all 'ls', 'annotate' and 'code' keys")
97 self.headers = headers
97 self.headers = headers
98 if isinstance(filenode, FileNode):
98 if isinstance(filenode, FileNode):
99 self.filenode = filenode
99 self.filenode = filenode
100 else:
100 else:
101 raise VCSError(
101 raise VCSError(
102 "This formatter expects a FileNode parameter, not %r" %
102 "This formatter expects a FileNode parameter, not %r" %
103 type(filenode))
103 type(filenode))
104
104
105 def annotate_from_commit(self, commit):
105 def annotate_from_commit(self, commit):
106 """
106 """
107 Returns full html line for single commit per annotated line.
107 Returns full html line for single commit per annotated line.
108 """
108 """
109 if self.annotate_from_commit_func:
109 if self.annotate_from_commit_func:
110 return self.annotate_from_commit_func(commit)
110 return self.annotate_from_commit_func(commit)
111 else:
111 else:
112 return commit.id + '\n'
112 return commit.id + '\n'
113
113
114 def _wrap_tablelinenos(self, inner):
114 def _wrap_tablelinenos(self, inner):
115 dummyoutfile = StringIO.StringIO()
115 dummyoutfile = StringIO.StringIO()
116 lncount = 0
116 lncount = 0
117 for t, line in inner:
117 for t, line in inner:
118 if t:
118 if t:
119 lncount += 1
119 lncount += 1
120 dummyoutfile.write(line)
120 dummyoutfile.write(line)
121
121
122 fl = self.linenostart
122 fl = self.linenostart
123 mw = len(str(lncount + fl - 1))
123 mw = len(str(lncount + fl - 1))
124 sp = self.linenospecial
124 sp = self.linenospecial
125 st = self.linenostep
125 st = self.linenostep
126 la = self.lineanchors
126 la = self.lineanchors
127 aln = self.anchorlinenos
127 aln = self.anchorlinenos
128 if sp:
128 if sp:
129 lines = []
129 lines = []
130
130
131 for i in range(fl, fl + lncount):
131 for i in range(fl, fl + lncount):
132 if i % st == 0:
132 if i % st == 0:
133 if i % sp == 0:
133 if i % sp == 0:
134 if aln:
134 if aln:
135 lines.append('<a href="#%s-%d" class="special">'
135 lines.append('<a href="#%s-%d" class="special">'
136 '%*d</a>' %
136 '%*d</a>' %
137 (la, i, mw, i))
137 (la, i, mw, i))
138 else:
138 else:
139 lines.append('<span class="special">'
139 lines.append('<span class="special">'
140 '%*d</span>' % (mw, i))
140 '%*d</span>' % (mw, i))
141 else:
141 else:
142 if aln:
142 if aln:
143 lines.append('<a href="#%s-%d">'
143 lines.append('<a href="#%s-%d">'
144 '%*d</a>' % (la, i, mw, i))
144 '%*d</a>' % (la, i, mw, i))
145 else:
145 else:
146 lines.append('%*d' % (mw, i))
146 lines.append('%*d' % (mw, i))
147 else:
147 else:
148 lines.append('')
148 lines.append('')
149 ls = '\n'.join(lines)
149 ls = '\n'.join(lines)
150 else:
150 else:
151 lines = []
151 lines = []
152 for i in range(fl, fl + lncount):
152 for i in range(fl, fl + lncount):
153 if i % st == 0:
153 if i % st == 0:
154 if aln:
154 if aln:
155 lines.append('<a href="#%s-%d">%*d</a>' \
155 lines.append('<a href="#%s-%d">%*d</a>' \
156 % (la, i, mw, i))
156 % (la, i, mw, i))
157 else:
157 else:
158 lines.append('%*d' % (mw, i))
158 lines.append('%*d' % (mw, i))
159 else:
159 else:
160 lines.append('')
160 lines.append('')
161 ls = '\n'.join(lines)
161 ls = '\n'.join(lines)
162
162
163 cached = {}
163 cached = {}
164 annotate = []
164 annotate = []
165 for el in self.filenode.annotate:
165 for el in self.filenode.annotate:
166 commit_id = el[1]
166 commit_id = el[1]
167 if commit_id in cached:
167 if commit_id in cached:
168 result = cached[commit_id]
168 result = cached[commit_id]
169 else:
169 else:
170 commit = el[2]()
170 commit = el[2]()
171 result = self.annotate_from_commit(commit)
171 result = self.annotate_from_commit(commit)
172 cached[commit_id] = result
172 cached[commit_id] = result
173 annotate.append(result)
173 annotate.append(result)
174
174
175 annotate = ''.join(annotate)
175 annotate = ''.join(annotate)
176
176
177 # in case you wonder about the seemingly redundant <div> here:
177 # in case you wonder about the seemingly redundant <div> here:
178 # since the content in the other cell also is wrapped in a div,
178 # since the content in the other cell also is wrapped in a div,
179 # some browsers in some configurations seem to mess up the formatting.
179 # some browsers in some configurations seem to mess up the formatting.
180 '''
180 '''
181 yield 0, ('<table class="%stable">' % self.cssclass +
181 yield 0, ('<table class="%stable">' % self.cssclass +
182 '<tr><td class="linenos"><div class="linenodiv"><pre>' +
182 '<tr><td class="linenos"><div class="linenodiv"><pre>' +
183 ls + '</pre></div></td>' +
183 ls + '</pre></div></td>' +
184 '<td class="code">')
184 '<td class="code">')
185 yield 0, dummyoutfile.getvalue()
185 yield 0, dummyoutfile.getvalue()
186 yield 0, '</td></tr></table>'
186 yield 0, '</td></tr></table>'
187
187
188 '''
188 '''
189 headers_row = []
189 headers_row = []
190 if self.headers:
190 if self.headers:
191 headers_row = ['<tr class="annotate-header">']
191 headers_row = ['<tr class="annotate-header">']
192 for key in self.order:
192 for key in self.order:
193 td = ''.join(('<td>', self.headers[key], '</td>'))
193 td = ''.join(('<td>', self.headers[key], '</td>'))
194 headers_row.append(td)
194 headers_row.append(td)
195 headers_row.append('</tr>')
195 headers_row.append('</tr>')
196
196
197 body_row_start = ['<tr>']
197 body_row_start = ['<tr>']
198 for key in self.order:
198 for key in self.order:
199 if key == 'ls':
199 if key == 'ls':
200 body_row_start.append(
200 body_row_start.append(
201 '<td class="linenos"><div class="linenodiv"><pre>' +
201 '<td class="linenos"><div class="linenodiv"><pre>' +
202 ls + '</pre></div></td>')
202 ls + '</pre></div></td>')
203 elif key == 'annotate':
203 elif key == 'annotate':
204 body_row_start.append(
204 body_row_start.append(
205 '<td class="annotate"><div class="annotatediv"><pre>' +
205 '<td class="annotate"><div class="annotatediv"><pre>' +
206 annotate + '</pre></div></td>')
206 annotate + '</pre></div></td>')
207 elif key == 'code':
207 elif key == 'code':
208 body_row_start.append('<td class="code">')
208 body_row_start.append('<td class="code">')
209 yield 0, ('<table class="%stable">' % self.cssclass +
209 yield 0, ('<table class="%stable">' % self.cssclass +
210 ''.join(headers_row) +
210 ''.join(headers_row) +
211 ''.join(body_row_start)
211 ''.join(body_row_start)
212 )
212 )
213 yield 0, dummyoutfile.getvalue()
213 yield 0, dummyoutfile.getvalue()
214 yield 0, '</td></tr></table>'
214 yield 0, '</td></tr></table>'
@@ -1,1973 +1,1978 b''
1 # -*- coding: utf-8 -*-
1 # -*- coding: utf-8 -*-
2
2
3 # Copyright (C) 2010-2016 RhodeCode GmbH
3 # Copyright (C) 2010-2016 RhodeCode GmbH
4 #
4 #
5 # This program is free software: you can redistribute it and/or modify
5 # This program is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU Affero General Public License, version 3
6 # it under the terms of the GNU Affero General Public License, version 3
7 # (only), as published by the Free Software Foundation.
7 # (only), as published by the Free Software Foundation.
8 #
8 #
9 # This program is distributed in the hope that it will be useful,
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU General Public License for more details.
12 # GNU General Public License for more details.
13 #
13 #
14 # You should have received a copy of the GNU Affero General Public License
14 # You should have received a copy of the GNU Affero General Public License
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 #
16 #
17 # This program is dual-licensed. If you wish to learn more about the
17 # This program is dual-licensed. If you wish to learn more about the
18 # RhodeCode Enterprise Edition, including its added features, Support services,
18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 # and proprietary license terms, please see https://rhodecode.com/licenses/
19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20
20
21 """
21 """
22 Helper functions
22 Helper functions
23
23
24 Consists of functions typically used within templates, but also
24 Consists of functions typically used within templates, but also
25 available to Controllers. This module is available to both as 'h'.
25 available to Controllers. This module is available to both as 'h'.
26 """
26 """
27
27
28 import random
28 import random
29 import hashlib
29 import hashlib
30 import StringIO
30 import StringIO
31 import urllib
31 import urllib
32 import math
32 import math
33 import logging
33 import logging
34 import re
34 import re
35 import urlparse
35 import urlparse
36 import time
36 import time
37 import string
37 import string
38 import hashlib
38 import hashlib
39 import pygments
39 import pygments
40
40
41 from datetime import datetime
41 from datetime import datetime
42 from functools import partial
42 from functools import partial
43 from pygments.formatters.html import HtmlFormatter
43 from pygments.formatters.html import HtmlFormatter
44 from pygments import highlight as code_highlight
44 from pygments import highlight as code_highlight
45 from pygments.lexers import (
45 from pygments.lexers import (
46 get_lexer_by_name, get_lexer_for_filename, get_lexer_for_mimetype)
46 get_lexer_by_name, get_lexer_for_filename, get_lexer_for_mimetype)
47 from pylons import url as pylons_url
47 from pylons import url as pylons_url
48 from pylons.i18n.translation import _, ungettext
48 from pylons.i18n.translation import _, ungettext
49 from pyramid.threadlocal import get_current_request
49 from pyramid.threadlocal import get_current_request
50
50
51 from webhelpers.html import literal, HTML, escape
51 from webhelpers.html import literal, HTML, escape
52 from webhelpers.html.tools import *
52 from webhelpers.html.tools import *
53 from webhelpers.html.builder import make_tag
53 from webhelpers.html.builder import make_tag
54 from webhelpers.html.tags import auto_discovery_link, checkbox, css_classes, \
54 from webhelpers.html.tags import auto_discovery_link, checkbox, css_classes, \
55 end_form, file, form as wh_form, hidden, image, javascript_link, link_to, \
55 end_form, file, form as wh_form, hidden, image, javascript_link, link_to, \
56 link_to_if, link_to_unless, ol, required_legend, select, stylesheet_link, \
56 link_to_if, link_to_unless, ol, required_legend, select, stylesheet_link, \
57 submit, text, password, textarea, title, ul, xml_declaration, radio
57 submit, text, password, textarea, title, ul, xml_declaration, radio
58 from webhelpers.html.tools import auto_link, button_to, highlight, \
58 from webhelpers.html.tools import auto_link, button_to, highlight, \
59 js_obfuscate, mail_to, strip_links, strip_tags, tag_re
59 js_obfuscate, mail_to, strip_links, strip_tags, tag_re
60 from webhelpers.pylonslib import Flash as _Flash
60 from webhelpers.pylonslib import Flash as _Flash
61 from webhelpers.text import chop_at, collapse, convert_accented_entities, \
61 from webhelpers.text import chop_at, collapse, convert_accented_entities, \
62 convert_misc_entities, lchop, plural, rchop, remove_formatting, \
62 convert_misc_entities, lchop, plural, rchop, remove_formatting, \
63 replace_whitespace, urlify, truncate, wrap_paragraphs
63 replace_whitespace, urlify, truncate, wrap_paragraphs
64 from webhelpers.date import time_ago_in_words
64 from webhelpers.date import time_ago_in_words
65 from webhelpers.paginate import Page as _Page
65 from webhelpers.paginate import Page as _Page
66 from webhelpers.html.tags import _set_input_attrs, _set_id_attr, \
66 from webhelpers.html.tags import _set_input_attrs, _set_id_attr, \
67 convert_boolean_attrs, NotGiven, _make_safe_id_component
67 convert_boolean_attrs, NotGiven, _make_safe_id_component
68 from webhelpers2.number import format_byte_size
68 from webhelpers2.number import format_byte_size
69
69
70 from rhodecode.lib.annotate import annotate_highlight
70 from rhodecode.lib.annotate import annotate_highlight
71 from rhodecode.lib.action_parser import action_parser
71 from rhodecode.lib.action_parser import action_parser
72 from rhodecode.lib.ext_json import json
72 from rhodecode.lib.ext_json import json
73 from rhodecode.lib.utils import repo_name_slug, get_custom_lexer
73 from rhodecode.lib.utils import repo_name_slug, get_custom_lexer
74 from rhodecode.lib.utils2 import str2bool, safe_unicode, safe_str, \
74 from rhodecode.lib.utils2 import str2bool, safe_unicode, safe_str, \
75 get_commit_safe, datetime_to_time, time_to_datetime, time_to_utcdatetime, \
75 get_commit_safe, datetime_to_time, time_to_datetime, time_to_utcdatetime, \
76 AttributeDict, safe_int, md5, md5_safe
76 AttributeDict, safe_int, md5, md5_safe
77 from rhodecode.lib.markup_renderer import MarkupRenderer
77 from rhodecode.lib.markup_renderer import MarkupRenderer
78 from rhodecode.lib.vcs.exceptions import CommitDoesNotExistError
78 from rhodecode.lib.vcs.exceptions import CommitDoesNotExistError
79 from rhodecode.lib.vcs.backends.base import BaseChangeset, EmptyCommit
79 from rhodecode.lib.vcs.backends.base import BaseChangeset, EmptyCommit
80 from rhodecode.config.conf import DATE_FORMAT, DATETIME_FORMAT
80 from rhodecode.config.conf import DATE_FORMAT, DATETIME_FORMAT
81 from rhodecode.model.changeset_status import ChangesetStatusModel
81 from rhodecode.model.changeset_status import ChangesetStatusModel
82 from rhodecode.model.db import Permission, User, Repository
82 from rhodecode.model.db import Permission, User, Repository
83 from rhodecode.model.repo_group import RepoGroupModel
83 from rhodecode.model.repo_group import RepoGroupModel
84 from rhodecode.model.settings import IssueTrackerSettingsModel
84 from rhodecode.model.settings import IssueTrackerSettingsModel
85
85
86 log = logging.getLogger(__name__)
86 log = logging.getLogger(__name__)
87
87
88
88
89 DEFAULT_USER = User.DEFAULT_USER
89 DEFAULT_USER = User.DEFAULT_USER
90 DEFAULT_USER_EMAIL = User.DEFAULT_USER_EMAIL
90 DEFAULT_USER_EMAIL = User.DEFAULT_USER_EMAIL
91
91
92
92
93 def url(*args, **kw):
93 def url(*args, **kw):
94 return pylons_url(*args, **kw)
94 return pylons_url(*args, **kw)
95
95
96
96
97 def pylons_url_current(*args, **kw):
97 def pylons_url_current(*args, **kw):
98 """
98 """
99 This function overrides pylons.url.current() which returns the current
99 This function overrides pylons.url.current() which returns the current
100 path so that it will also work from a pyramid only context. This
100 path so that it will also work from a pyramid only context. This
101 should be removed once port to pyramid is complete.
101 should be removed once port to pyramid is complete.
102 """
102 """
103 if not args and not kw:
103 if not args and not kw:
104 request = get_current_request()
104 request = get_current_request()
105 return request.path
105 return request.path
106 return pylons_url.current(*args, **kw)
106 return pylons_url.current(*args, **kw)
107
107
108 url.current = pylons_url_current
108 url.current = pylons_url_current
109
109
110
110
111 def asset(path, ver=None):
111 def asset(path, ver=None):
112 """
112 """
113 Helper to generate a static asset file path for rhodecode assets
113 Helper to generate a static asset file path for rhodecode assets
114
114
115 eg. h.asset('images/image.png', ver='3923')
115 eg. h.asset('images/image.png', ver='3923')
116
116
117 :param path: path of asset
117 :param path: path of asset
118 :param ver: optional version query param to append as ?ver=
118 :param ver: optional version query param to append as ?ver=
119 """
119 """
120 request = get_current_request()
120 request = get_current_request()
121 query = {}
121 query = {}
122 if ver:
122 if ver:
123 query = {'ver': ver}
123 query = {'ver': ver}
124 return request.static_path(
124 return request.static_path(
125 'rhodecode:public/{}'.format(path), _query=query)
125 'rhodecode:public/{}'.format(path), _query=query)
126
126
127
127
128 def html_escape(text, html_escape_table=None):
128 def html_escape(text, html_escape_table=None):
129 """Produce entities within text."""
129 """Produce entities within text."""
130 if not html_escape_table:
130 if not html_escape_table:
131 html_escape_table = {
131 html_escape_table = {
132 "&": "&amp;",
132 "&": "&amp;",
133 '"': "&quot;",
133 '"': "&quot;",
134 "'": "&apos;",
134 "'": "&apos;",
135 ">": "&gt;",
135 ">": "&gt;",
136 "<": "&lt;",
136 "<": "&lt;",
137 }
137 }
138 return "".join(html_escape_table.get(c, c) for c in text)
138 return "".join(html_escape_table.get(c, c) for c in text)
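
For example, with the default table the markup-significant characters are replaced by entities, and a custom ``html_escape_table`` dict can be passed to override that behaviour:

html_escape('<b>&</b>')  # -> '&lt;b&gt;&amp;&lt;/b&gt;'
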
139
139
140
140
141 def chop_at_smart(s, sub, inclusive=False, suffix_if_chopped=None):
141 def chop_at_smart(s, sub, inclusive=False, suffix_if_chopped=None):
142 """
142 """
143 Truncate string ``s`` at the first occurrence of ``sub``.
143 Truncate string ``s`` at the first occurrence of ``sub``.
144
144
145 If ``inclusive`` is true, truncate just after ``sub`` rather than at it.
145 If ``inclusive`` is true, truncate just after ``sub`` rather than at it.
146 """
146 """
147 suffix_if_chopped = suffix_if_chopped or ''
147 suffix_if_chopped = suffix_if_chopped or ''
148 pos = s.find(sub)
148 pos = s.find(sub)
149 if pos == -1:
149 if pos == -1:
150 return s
150 return s
151
151
152 if inclusive:
152 if inclusive:
153 pos += len(sub)
153 pos += len(sub)
154
154
155 chopped = s[:pos]
155 chopped = s[:pos]
156 left = s[pos:].strip()
156 left = s[pos:].strip()
157
157
158 if left and suffix_if_chopped:
158 if left and suffix_if_chopped:
159 chopped += suffix_if_chopped
159 chopped += suffix_if_chopped
160
160
161 return chopped
161 return chopped
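
A few illustrative calls, following directly from the code above:

chop_at_smart('head: body text', ':')                           # -> 'head'
chop_at_smart('head: body text', ':', inclusive=True)           # -> 'head:'
chop_at_smart('head: body text', ':', suffix_if_chopped='...')  # -> 'head...'
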
162
162
163
163
164 def shorter(text, size=20):
164 def shorter(text, size=20):
165 postfix = '...'
165 postfix = '...'
166 if len(text) > size:
166 if len(text) > size:
167 return text[:size - len(postfix)] + postfix
167 return text[:size - len(postfix)] + postfix
168 return text
168 return text
169
169
170
170
171 def _reset(name, value=None, id=NotGiven, type="reset", **attrs):
171 def _reset(name, value=None, id=NotGiven, type="reset", **attrs):
172 """
172 """
173 Reset button
173 Reset button
174 """
174 """
175 _set_input_attrs(attrs, type, name, value)
175 _set_input_attrs(attrs, type, name, value)
176 _set_id_attr(attrs, id, name)
176 _set_id_attr(attrs, id, name)
177 convert_boolean_attrs(attrs, ["disabled"])
177 convert_boolean_attrs(attrs, ["disabled"])
178 return HTML.input(**attrs)
178 return HTML.input(**attrs)
179
179
180 reset = _reset
180 reset = _reset
181 safeid = _make_safe_id_component
181 safeid = _make_safe_id_component
182
182
183
183
184 def branding(name, length=40):
184 def branding(name, length=40):
185 return truncate(name, length, indicator="")
185 return truncate(name, length, indicator="")
186
186
187
187
188 def FID(raw_id, path):
188 def FID(raw_id, path):
189 """
189 """
190 Creates a unique ID for a filenode based on a hash of its path and commit;
190 Creates a unique ID for a filenode based on a hash of its path and commit;
191 it is safe to use in URLs
191 it is safe to use in URLs
192
192
193 :param raw_id:
193 :param raw_id:
194 :param path:
194 :param path:
195 """
195 """
196
196
197 return 'c-%s-%s' % (short_id(raw_id), md5_safe(path)[:12])
197 return 'c-%s-%s' % (short_id(raw_id), md5_safe(path)[:12])
198
198
199
199
200 class _GetError(object):
200 class _GetError(object):
201 """Get error from form_errors, and represent it as span wrapped error
201 """Get error from form_errors, and represent it as span wrapped error
202 message
202 message
203
203
204 :param field_name: field to fetch errors for
204 :param field_name: field to fetch errors for
205 :param form_errors: form errors dict
205 :param form_errors: form errors dict
206 """
206 """
207
207
208 def __call__(self, field_name, form_errors):
208 def __call__(self, field_name, form_errors):
209 tmpl = """<span class="error_msg">%s</span>"""
209 tmpl = """<span class="error_msg">%s</span>"""
210 if form_errors and field_name in form_errors:
210 if form_errors and field_name in form_errors:
211 return literal(tmpl % form_errors.get(field_name))
211 return literal(tmpl % form_errors.get(field_name))
212
212
213 get_error = _GetError()
213 get_error = _GetError()
214
214
215
215
216 class _ToolTip(object):
216 class _ToolTip(object):
217
217
218 def __call__(self, tooltip_title, trim_at=50):
218 def __call__(self, tooltip_title, trim_at=50):
219 """
219 """
220 Special function just to wrap our text into nice formatted
220 Special function just to wrap our text into nice formatted
221 autowrapped text
221 autowrapped text
222
222
223 :param tooltip_title:
223 :param tooltip_title:
224 """
224 """
225 tooltip_title = escape(tooltip_title)
225 tooltip_title = escape(tooltip_title)
226 tooltip_title = tooltip_title.replace('<', '&lt;').replace('>', '&gt;')
226 tooltip_title = tooltip_title.replace('<', '&lt;').replace('>', '&gt;')
227 return tooltip_title
227 return tooltip_title
228 tooltip = _ToolTip()
228 tooltip = _ToolTip()
229
229
230
230
231 def files_breadcrumbs(repo_name, commit_id, file_path):
231 def files_breadcrumbs(repo_name, commit_id, file_path):
232 if isinstance(file_path, str):
232 if isinstance(file_path, str):
233 file_path = safe_unicode(file_path)
233 file_path = safe_unicode(file_path)
234
234
235 # TODO: johbo: Is this always a url like path, or is this operating
235 # TODO: johbo: Is this always a url like path, or is this operating
236 # system dependent?
236 # system dependent?
237 path_segments = file_path.split('/')
237 path_segments = file_path.split('/')
238
238
239 repo_name_html = escape(repo_name)
239 repo_name_html = escape(repo_name)
240 if len(path_segments) == 1 and path_segments[0] == '':
240 if len(path_segments) == 1 and path_segments[0] == '':
241 url_segments = [repo_name_html]
241 url_segments = [repo_name_html]
242 else:
242 else:
243 url_segments = [
243 url_segments = [
244 link_to(
244 link_to(
245 repo_name_html,
245 repo_name_html,
246 url('files_home',
246 url('files_home',
247 repo_name=repo_name,
247 repo_name=repo_name,
248 revision=commit_id,
248 revision=commit_id,
249 f_path=''),
249 f_path=''),
250 class_='pjax-link')]
250 class_='pjax-link')]
251
251
252 last_cnt = len(path_segments) - 1
252 last_cnt = len(path_segments) - 1
253 for cnt, segment in enumerate(path_segments):
253 for cnt, segment in enumerate(path_segments):
254 if not segment:
254 if not segment:
255 continue
255 continue
256 segment_html = escape(segment)
256 segment_html = escape(segment)
257
257
258 if cnt != last_cnt:
258 if cnt != last_cnt:
259 url_segments.append(
259 url_segments.append(
260 link_to(
260 link_to(
261 segment_html,
261 segment_html,
262 url('files_home',
262 url('files_home',
263 repo_name=repo_name,
263 repo_name=repo_name,
264 revision=commit_id,
264 revision=commit_id,
265 f_path='/'.join(path_segments[:cnt + 1])),
265 f_path='/'.join(path_segments[:cnt + 1])),
266 class_='pjax-link'))
266 class_='pjax-link'))
267 else:
267 else:
268 url_segments.append(segment_html)
268 url_segments.append(segment_html)
269
269
270 return literal('/'.join(url_segments))
270 return literal('/'.join(url_segments))
271
271
272
272
273 class CodeHtmlFormatter(HtmlFormatter):
273 class CodeHtmlFormatter(HtmlFormatter):
274 """
274 """
275 My code Html Formatter for source codes
275 My code Html Formatter for source codes
276 """
276 """
277
277
278 def wrap(self, source, outfile):
278 def wrap(self, source, outfile):
279 return self._wrap_div(self._wrap_pre(self._wrap_code(source)))
279 return self._wrap_div(self._wrap_pre(self._wrap_code(source)))
280
280
281 def _wrap_code(self, source):
281 def _wrap_code(self, source):
282 for cnt, it in enumerate(source):
282 for cnt, it in enumerate(source):
283 i, t = it
283 i, t = it
284 t = '<div id="L%s">%s</div>' % (cnt + 1, t)
284 t = '<div id="L%s">%s</div>' % (cnt + 1, t)
285 yield i, t
285 yield i, t
286
286
287 def _wrap_tablelinenos(self, inner):
287 def _wrap_tablelinenos(self, inner):
288 dummyoutfile = StringIO.StringIO()
288 dummyoutfile = StringIO.StringIO()
289 lncount = 0
289 lncount = 0
290 for t, line in inner:
290 for t, line in inner:
291 if t:
291 if t:
292 lncount += 1
292 lncount += 1
293 dummyoutfile.write(line)
293 dummyoutfile.write(line)
294
294
295 fl = self.linenostart
295 fl = self.linenostart
296 mw = len(str(lncount + fl - 1))
296 mw = len(str(lncount + fl - 1))
297 sp = self.linenospecial
297 sp = self.linenospecial
298 st = self.linenostep
298 st = self.linenostep
299 la = self.lineanchors
299 la = self.lineanchors
300 aln = self.anchorlinenos
300 aln = self.anchorlinenos
301 nocls = self.noclasses
301 nocls = self.noclasses
302 if sp:
302 if sp:
303 lines = []
303 lines = []
304
304
305 for i in range(fl, fl + lncount):
305 for i in range(fl, fl + lncount):
306 if i % st == 0:
306 if i % st == 0:
307 if i % sp == 0:
307 if i % sp == 0:
308 if aln:
308 if aln:
309 lines.append('<a href="#%s%d" class="special">%*d</a>' %
309 lines.append('<a href="#%s%d" class="special">%*d</a>' %
310 (la, i, mw, i))
310 (la, i, mw, i))
311 else:
311 else:
312 lines.append('<span class="special">%*d</span>' % (mw, i))
312 lines.append('<span class="special">%*d</span>' % (mw, i))
313 else:
313 else:
314 if aln:
314 if aln:
315 lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
315 lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
316 else:
316 else:
317 lines.append('%*d' % (mw, i))
317 lines.append('%*d' % (mw, i))
318 else:
318 else:
319 lines.append('')
319 lines.append('')
320 ls = '\n'.join(lines)
320 ls = '\n'.join(lines)
321 else:
321 else:
322 lines = []
322 lines = []
323 for i in range(fl, fl + lncount):
323 for i in range(fl, fl + lncount):
324 if i % st == 0:
324 if i % st == 0:
325 if aln:
325 if aln:
326 lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
326 lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
327 else:
327 else:
328 lines.append('%*d' % (mw, i))
328 lines.append('%*d' % (mw, i))
329 else:
329 else:
330 lines.append('')
330 lines.append('')
331 ls = '\n'.join(lines)
331 ls = '\n'.join(lines)
332
332
333 # in case you wonder about the seemingly redundant <div> here: since the
333 # in case you wonder about the seemingly redundant <div> here: since the
334 # content in the other cell also is wrapped in a div, some browsers in
334 # content in the other cell also is wrapped in a div, some browsers in
335 # some configurations seem to mess up the formatting...
335 # some configurations seem to mess up the formatting...
336 if nocls:
336 if nocls:
337 yield 0, ('<table class="%stable">' % self.cssclass +
337 yield 0, ('<table class="%stable">' % self.cssclass +
338 '<tr><td><div class="linenodiv" '
338 '<tr><td><div class="linenodiv" '
339 'style="background-color: #f0f0f0; padding-right: 10px">'
339 'style="background-color: #f0f0f0; padding-right: 10px">'
340 '<pre style="line-height: 125%">' +
340 '<pre style="line-height: 125%">' +
341 ls + '</pre></div></td><td id="hlcode" class="code">')
341 ls + '</pre></div></td><td id="hlcode" class="code">')
342 else:
342 else:
343 yield 0, ('<table class="%stable">' % self.cssclass +
343 yield 0, ('<table class="%stable">' % self.cssclass +
344 '<tr><td class="linenos"><div class="linenodiv"><pre>' +
344 '<tr><td class="linenos"><div class="linenodiv"><pre>' +
345 ls + '</pre></div></td><td id="hlcode" class="code">')
345 ls + '</pre></div></td><td id="hlcode" class="code">')
346 yield 0, dummyoutfile.getvalue()
346 yield 0, dummyoutfile.getvalue()
347 yield 0, '</td></tr></table>'
347 yield 0, '</td></tr></table>'
348
348
349
349
350 class SearchContentCodeHtmlFormatter(CodeHtmlFormatter):
350 class SearchContentCodeHtmlFormatter(CodeHtmlFormatter):
351 def __init__(self, **kw):
351 def __init__(self, **kw):
352 # only show these line numbers if set
352 # only show these line numbers if set
353 self.only_lines = kw.pop('only_line_numbers', [])
353 self.only_lines = kw.pop('only_line_numbers', [])
354 self.query_terms = kw.pop('query_terms', [])
354 self.query_terms = kw.pop('query_terms', [])
355 self.max_lines = kw.pop('max_lines', 5)
355 self.max_lines = kw.pop('max_lines', 5)
356 self.line_context = kw.pop('line_context', 3)
356 self.line_context = kw.pop('line_context', 3)
357 self.url = kw.pop('url', None)
357 self.url = kw.pop('url', None)
358
358
359 super(CodeHtmlFormatter, self).__init__(**kw)
359 super(CodeHtmlFormatter, self).__init__(**kw)
360
360
361 def _wrap_code(self, source):
361 def _wrap_code(self, source):
362 for cnt, it in enumerate(source):
362 for cnt, it in enumerate(source):
363 i, t = it
363 i, t = it
364 t = '<pre>%s</pre>' % t
364 t = '<pre>%s</pre>' % t
365 yield i, t
365 yield i, t
366
366
367 def _wrap_tablelinenos(self, inner):
367 def _wrap_tablelinenos(self, inner):
368 yield 0, '<table class="code-highlight %stable">' % self.cssclass
368 yield 0, '<table class="code-highlight %stable">' % self.cssclass
369
369
370 last_shown_line_number = 0
370 last_shown_line_number = 0
371 current_line_number = 1
371 current_line_number = 1
372
372
373 for t, line in inner:
373 for t, line in inner:
374 if not t:
374 if not t:
375 yield t, line
375 yield t, line
376 continue
376 continue
377
377
378 if current_line_number in self.only_lines:
378 if current_line_number in self.only_lines:
379 if last_shown_line_number + 1 != current_line_number:
379 if last_shown_line_number + 1 != current_line_number:
380 yield 0, '<tr>'
380 yield 0, '<tr>'
381 yield 0, '<td class="line">...</td>'
381 yield 0, '<td class="line">...</td>'
382 yield 0, '<td id="hlcode" class="code"></td>'
382 yield 0, '<td id="hlcode" class="code"></td>'
383 yield 0, '</tr>'
383 yield 0, '</tr>'
384
384
385 yield 0, '<tr>'
385 yield 0, '<tr>'
386 if self.url:
386 if self.url:
387 yield 0, '<td class="line"><a href="%s#L%i">%i</a></td>' % (
387 yield 0, '<td class="line"><a href="%s#L%i">%i</a></td>' % (
388 self.url, current_line_number, current_line_number)
388 self.url, current_line_number, current_line_number)
389 else:
389 else:
390 yield 0, '<td class="line"><a href="">%i</a></td>' % (
390 yield 0, '<td class="line"><a href="">%i</a></td>' % (
391 current_line_number)
391 current_line_number)
392 yield 0, '<td id="hlcode" class="code">' + line + '</td>'
392 yield 0, '<td id="hlcode" class="code">' + line + '</td>'
393 yield 0, '</tr>'
393 yield 0, '</tr>'
394
394
395 last_shown_line_number = current_line_number
395 last_shown_line_number = current_line_number
396
396
397 current_line_number += 1
397 current_line_number += 1
398
398
399
399
400 yield 0, '</table>'
400 yield 0, '</table>'
401
401
402
402
403 def extract_phrases(text_query):
403 def extract_phrases(text_query):
404 """
404 """
405 Extracts phrases from search term string making sure phrases
405 Extracts phrases from search term string making sure phrases
406 contained in double quotes are kept together - and discarding empty values
406 contained in double quotes are kept together - and discarding empty values
407 or fully whitespace values eg.
407 or fully whitespace values eg.
408
408
409 'some text "a phrase" more' => ['some', 'text', 'a phrase', 'more']
409 'some text "a phrase" more' => ['some', 'text', 'a phrase', 'more']
410
410
411 """
411 """
412
412
413 in_phrase = False
413 in_phrase = False
414 buf = ''
414 buf = ''
415 phrases = []
415 phrases = []
416 for char in text_query:
416 for char in text_query:
417 if in_phrase:
417 if in_phrase:
418 if char == '"': # end phrase
418 if char == '"': # end phrase
419 phrases.append(buf)
419 phrases.append(buf)
420 buf = ''
420 buf = ''
421 in_phrase = False
421 in_phrase = False
422 continue
422 continue
423 else:
423 else:
424 buf += char
424 buf += char
425 continue
425 continue
426 else:
426 else:
427 if char == '"': # start phrase
427 if char == '"': # start phrase
428 in_phrase = True
428 in_phrase = True
429 phrases.append(buf)
429 phrases.append(buf)
430 buf = ''
430 buf = ''
431 continue
431 continue
432 elif char == ' ':
432 elif char == ' ':
433 phrases.append(buf)
433 phrases.append(buf)
434 buf = ''
434 buf = ''
435 continue
435 continue
436 else:
436 else:
437 buf += char
437 buf += char
438
438
439 phrases.append(buf)
439 phrases.append(buf)
440 phrases = [phrase.strip() for phrase in phrases if phrase.strip()]
440 phrases = [phrase.strip() for phrase in phrases if phrase.strip()]
441 return phrases
441 return phrases
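
One more worked example of the quoting and whitespace handling described in the docstring above:

extract_phrases('grep "exact phrase"  extra ')  # -> ['grep', 'exact phrase', 'extra']
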
442
442
443
443
444 def get_matching_offsets(text, phrases):
444 def get_matching_offsets(text, phrases):
445 """
445 """
446 Returns a list of string offsets in `text` that the list of `terms` match
446 Returns a list of string offsets in `text` that the list of `terms` match
447
447
448 >>> get_matching_offsets('some text here', ['some', 'here'])
448 >>> get_matching_offsets('some text here', ['some', 'here'])
449 [(0, 4), (10, 14)]
449 [(0, 4), (10, 14)]
450
450
451 """
451 """
452 offsets = []
452 offsets = []
453 for phrase in phrases:
453 for phrase in phrases:
454 for match in re.finditer(phrase, text):
454 for match in re.finditer(phrase, text):
455 offsets.append((match.start(), match.end()))
455 offsets.append((match.start(), match.end()))
456
456
457 return offsets
457 return offsets
458
458
459
459
460 def normalize_text_for_matching(x):
460 def normalize_text_for_matching(x):
461 """
461 """
462 Replaces all non alnum characters to spaces and lower cases the string,
462 Replaces all non alnum characters to spaces and lower cases the string,
463 useful for comparing two text strings without punctuation
463 useful for comparing two text strings without punctuation
464 """
464 """
465 return re.sub(r'[^\w]', ' ', x.lower())
465 return re.sub(r'[^\w]', ' ', x.lower())
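
For example, punctuation is flattened to spaces before matching:

normalize_text_for_matching('Foo-Bar!')  # -> 'foo bar '
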
466
466
467
467
468 def get_matching_line_offsets(lines, terms):
468 def get_matching_line_offsets(lines, terms):
469 """ Return a dict of `lines` indices (starting from 1) matching a
469 """ Return a dict of `lines` indices (starting from 1) matching a
470 text search query, along with `context` lines above/below matching lines
470 text search query, along with `context` lines above/below matching lines
471
471
472 :param lines: list of strings representing lines
472 :param lines: list of strings representing lines
473 :param terms: search term string to match in lines eg. 'some text'
473 :param terms: search term string to match in lines eg. 'some text'
474 :param context: number of lines above/below a matching line to add to result
474 :param context: number of lines above/below a matching line to add to result
475 :param max_lines: cut off for lines of interest
475 :param max_lines: cut off for lines of interest
476 eg.
476 eg.
477
477
478 text = '''
478 text = '''
479 words words words
479 words words words
480 words words words
480 words words words
481 some text some
481 some text some
482 words words words
482 words words words
483 words words words
483 words words words
484 text here what
484 text here what
485 '''
485 '''
486 get_matching_line_offsets(text, 'text', context=1)
486 get_matching_line_offsets(text, 'text', context=1)
487 {3: [(5, 9)], 6: [(0, 4)]}
487 {3: [(5, 9)], 6: [(0, 4)]}
488
488
489 """
489 """
490 matching_lines = {}
490 matching_lines = {}
491 phrases = [normalize_text_for_matching(phrase)
491 phrases = [normalize_text_for_matching(phrase)
492 for phrase in extract_phrases(terms)]
492 for phrase in extract_phrases(terms)]
493
493
494 for line_index, line in enumerate(lines, start=1):
494 for line_index, line in enumerate(lines, start=1):
495 match_offsets = get_matching_offsets(
495 match_offsets = get_matching_offsets(
496 normalize_text_for_matching(line), phrases)
496 normalize_text_for_matching(line), phrases)
497 if match_offsets:
497 if match_offsets:
498 matching_lines[line_index] = match_offsets
498 matching_lines[line_index] = match_offsets
499
499
500 return matching_lines
500 return matching_lines
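
A hedged usage sketch with made-up sample lines, matching the actual ``(lines, terms)`` signature above:

sample_lines = [
    'words words words',
    'some text some',
    'words words words',
    'text here what',
]
get_matching_line_offsets(sample_lines, 'text')
# -> {2: [(5, 9)], 4: [(0, 4)]}
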
501
501
502
502
503 def get_lexer_safe(mimetype=None, filepath=None):
503 def get_lexer_safe(mimetype=None, filepath=None):
504 """
504 """
505 Tries to return a relevant pygments lexer using mimetype/filepath name,
505 Tries to return a relevant pygments lexer using mimetype/filepath name,
506 defaulting to plain text if none could be found
506 defaulting to plain text if none could be found
507 """
507 """
508 lexer = None
508 lexer = None
509 try:
509 try:
510 if mimetype:
510 if mimetype:
511 lexer = get_lexer_for_mimetype(mimetype)
511 lexer = get_lexer_for_mimetype(mimetype)
512 if not lexer:
512 if not lexer:
513 lexer = get_lexer_for_filename(filepath)
513 lexer = get_lexer_for_filename(filepath)
514 except pygments.util.ClassNotFound:
514 except pygments.util.ClassNotFound:
515 pass
515 pass
516
516
517 if not lexer:
517 if not lexer:
518 lexer = get_lexer_by_name('text')
518 lexer = get_lexer_by_name('text')
519
519
520 return lexer
520 return lexer
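
For example, assuming a standard Pygments install, the lookup order above plays out as:

get_lexer_safe(filepath='setup.py')        # PythonLexer, resolved via filename
get_lexer_safe(mimetype='text/x-python')   # PythonLexer, resolved via mimetype
get_lexer_safe(filepath='unknown.zzz')     # falls back to the plain 'text' lexer
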
521
521
522
522
523 def get_lexer_for_filenode(filenode):
524 lexer = get_custom_lexer(filenode.extension) or filenode.lexer
525 return lexer
526
527
523 def pygmentize(filenode, **kwargs):
528 def pygmentize(filenode, **kwargs):
524 """
529 """
525 pygmentize function using pygments
530 pygmentize function using pygments
526
531
527 :param filenode:
532 :param filenode:
528 """
533 """
529 lexer = get_custom_lexer(filenode.extension) or filenode.lexer
534 lexer = get_lexer_for_filenode(filenode)
530 return literal(code_highlight(filenode.content, lexer,
535 return literal(code_highlight(filenode.content, lexer,
531 CodeHtmlFormatter(**kwargs)))
536 CodeHtmlFormatter(**kwargs)))
532
537
533
538
534 def pygmentize_annotation(repo_name, filenode, **kwargs):
539 def pygmentize_annotation(repo_name, filenode, **kwargs):
535 """
540 """
536 pygmentize function for annotation
541 pygmentize function for annotation
537
542
538 :param filenode:
543 :param filenode:
539 """
544 """
540
545
541 color_dict = {}
546 color_dict = {}
542
547
543 def gen_color(n=10000):
548 def gen_color(n=10000):
544 """generator for getting n of evenly distributed colors using
549 """generator for getting n of evenly distributed colors using
545 hsv color and golden ratio. It always returns the same order of colors
550 hsv color and golden ratio. It always returns the same order of colors
546
551
547 :returns: RGB tuple
552 :returns: RGB tuple
548 """
553 """
549
554
550 def hsv_to_rgb(h, s, v):
555 def hsv_to_rgb(h, s, v):
551 if s == 0.0:
556 if s == 0.0:
552 return v, v, v
557 return v, v, v
553 i = int(h * 6.0) # XXX assume int() truncates!
558 i = int(h * 6.0) # XXX assume int() truncates!
554 f = (h * 6.0) - i
559 f = (h * 6.0) - i
555 p = v * (1.0 - s)
560 p = v * (1.0 - s)
556 q = v * (1.0 - s * f)
561 q = v * (1.0 - s * f)
557 t = v * (1.0 - s * (1.0 - f))
562 t = v * (1.0 - s * (1.0 - f))
558 i = i % 6
563 i = i % 6
559 if i == 0:
564 if i == 0:
560 return v, t, p
565 return v, t, p
561 if i == 1:
566 if i == 1:
562 return q, v, p
567 return q, v, p
563 if i == 2:
568 if i == 2:
564 return p, v, t
569 return p, v, t
565 if i == 3:
570 if i == 3:
566 return p, q, v
571 return p, q, v
567 if i == 4:
572 if i == 4:
568 return t, p, v
573 return t, p, v
569 if i == 5:
574 if i == 5:
570 return v, p, q
575 return v, p, q
571
576
572 golden_ratio = 0.618033988749895
577 golden_ratio = 0.618033988749895
573 h = 0.22717784590367374
578 h = 0.22717784590367374
574
579
575 for _ in xrange(n):
580 for _ in xrange(n):
576 h += golden_ratio
581 h += golden_ratio
577 h %= 1
582 h %= 1
578 HSV_tuple = [h, 0.95, 0.95]
583 HSV_tuple = [h, 0.95, 0.95]
579 RGB_tuple = hsv_to_rgb(*HSV_tuple)
584 RGB_tuple = hsv_to_rgb(*HSV_tuple)
580 yield map(lambda x: str(int(x * 256)), RGB_tuple)
585 yield map(lambda x: str(int(x * 256)), RGB_tuple)
581
586
582 cgenerator = gen_color()
587 cgenerator = gen_color()
583
588
584 def get_color_string(commit_id):
589 def get_color_string(commit_id):
585 if commit_id in color_dict:
590 if commit_id in color_dict:
586 col = color_dict[commit_id]
591 col = color_dict[commit_id]
587 else:
592 else:
588 col = color_dict[commit_id] = cgenerator.next()
593 col = color_dict[commit_id] = cgenerator.next()
589 return "color: rgb(%s)! important;" % (', '.join(col))
594 return "color: rgb(%s)! important;" % (', '.join(col))
590
595
591 def url_func(repo_name):
596 def url_func(repo_name):
592
597
593 def _url_func(commit):
598 def _url_func(commit):
594 author = commit.author
599 author = commit.author
595 date = commit.date
600 date = commit.date
596 message = tooltip(commit.message)
601 message = tooltip(commit.message)
597
602
598 tooltip_html = ("<div style='font-size:0.8em'><b>Author:</b>"
603 tooltip_html = ("<div style='font-size:0.8em'><b>Author:</b>"
599 " %s<br/><b>Date:</b> %s</b><br/><b>Message:"
604 " %s<br/><b>Date:</b> %s</b><br/><b>Message:"
600 "</b> %s<br/></div>")
605 "</b> %s<br/></div>")
601
606
602 tooltip_html = tooltip_html % (author, date, message)
607 tooltip_html = tooltip_html % (author, date, message)
603 lnk_format = '%5s:%s' % ('r%s' % commit.idx, commit.short_id)
608 lnk_format = '%5s:%s' % ('r%s' % commit.idx, commit.short_id)
604 uri = link_to(
609 uri = link_to(
605 lnk_format,
610 lnk_format,
606 url('changeset_home', repo_name=repo_name,
611 url('changeset_home', repo_name=repo_name,
607 revision=commit.raw_id),
612 revision=commit.raw_id),
608 style=get_color_string(commit.raw_id),
613 style=get_color_string(commit.raw_id),
609 class_='tooltip',
614 class_='tooltip',
610 title=tooltip_html
615 title=tooltip_html
611 )
616 )
612
617
613 uri += '\n'
618 uri += '\n'
614 return uri
619 return uri
615 return _url_func
620 return _url_func
616
621
617 return literal(annotate_highlight(filenode, url_func(repo_name), **kwargs))
622 return literal(annotate_highlight(filenode, url_func(repo_name), **kwargs))
618
623
619
624
620 def is_following_repo(repo_name, user_id):
625 def is_following_repo(repo_name, user_id):
621 from rhodecode.model.scm import ScmModel
626 from rhodecode.model.scm import ScmModel
622 return ScmModel().is_following_repo(repo_name, user_id)
627 return ScmModel().is_following_repo(repo_name, user_id)
623
628
624
629
625 class _Message(object):
630 class _Message(object):
626 """A message returned by ``Flash.pop_messages()``.
631 """A message returned by ``Flash.pop_messages()``.
627
632
628 Converting the message to a string returns the message text. Instances
633 Converting the message to a string returns the message text. Instances
629 also have the following attributes:
634 also have the following attributes:
630
635
631 * ``message``: the message text.
636 * ``message``: the message text.
632 * ``category``: the category specified when the message was created.
637 * ``category``: the category specified when the message was created.
633 """
638 """
634
639
635 def __init__(self, category, message):
640 def __init__(self, category, message):
636 self.category = category
641 self.category = category
637 self.message = message
642 self.message = message
638
643
639 def __str__(self):
644 def __str__(self):
640 return self.message
645 return self.message
641
646
642 __unicode__ = __str__
647 __unicode__ = __str__
643
648
644 def __html__(self):
649 def __html__(self):
645 return escape(safe_unicode(self.message))
650 return escape(safe_unicode(self.message))
646
651
647
652
648 class Flash(_Flash):
653 class Flash(_Flash):
649
654
650 def pop_messages(self):
655 def pop_messages(self):
651 """Return all accumulated messages and delete them from the session.
656 """Return all accumulated messages and delete them from the session.
652
657
653 The return value is a list of ``Message`` objects.
658 The return value is a list of ``Message`` objects.
654 """
659 """
655 from pylons import session
660 from pylons import session
656
661
657 messages = []
662 messages = []
658
663
659 # Pop the 'old' pylons flash messages. They are tuples of the form
664 # Pop the 'old' pylons flash messages. They are tuples of the form
660 # (category, message)
665 # (category, message)
661 for cat, msg in session.pop(self.session_key, []):
666 for cat, msg in session.pop(self.session_key, []):
662 messages.append(_Message(cat, msg))
667 messages.append(_Message(cat, msg))
663
668
664 # Pop the 'new' pyramid flash messages for each category as list
669 # Pop the 'new' pyramid flash messages for each category as list
665 # of strings.
670 # of strings.
666 for cat in self.categories:
671 for cat in self.categories:
667 for msg in session.pop_flash(queue=cat):
672 for msg in session.pop_flash(queue=cat):
668 messages.append(_Message(cat, msg))
673 messages.append(_Message(cat, msg))
669 # Map messages from the default queue to the 'notice' category.
674 # Map messages from the default queue to the 'notice' category.
670 for msg in session.pop_flash():
675 for msg in session.pop_flash():
671 messages.append(_Message('notice', msg))
676 messages.append(_Message('notice', msg))
672
677
673 session.save()
678 session.save()
674 return messages
679 return messages
675
680
676 flash = Flash()
681 flash = Flash()
677
682
678 #==============================================================================
683 #==============================================================================
679 # SCM FILTERS available via h.
684 # SCM FILTERS available via h.
680 #==============================================================================
685 #==============================================================================
681 from rhodecode.lib.vcs.utils import author_name, author_email
686 from rhodecode.lib.vcs.utils import author_name, author_email
682 from rhodecode.lib.utils2 import credentials_filter, age as _age
687 from rhodecode.lib.utils2 import credentials_filter, age as _age
683 from rhodecode.model.db import User, ChangesetStatus
688 from rhodecode.model.db import User, ChangesetStatus
684
689
685 age = _age
690 age = _age
686 capitalize = lambda x: x.capitalize()
691 capitalize = lambda x: x.capitalize()
687 email = author_email
692 email = author_email
688 short_id = lambda x: x[:12]
693 short_id = lambda x: x[:12]
689 hide_credentials = lambda x: ''.join(credentials_filter(x))
694 hide_credentials = lambda x: ''.join(credentials_filter(x))


def age_component(datetime_iso, value=None, time_is_local=False):
    title = value or format_date(datetime_iso)

    # detect if we have timezone info, otherwise add it
    tzinfo = ''  # empty default so tz-aware datetimes don't hit an undefined name below
    if isinstance(datetime_iso, datetime) and not datetime_iso.tzinfo:
        tzinfo = '+00:00'

        if time_is_local:
            tzinfo = time.strftime("+%H:%M",
                time.gmtime(
                    (datetime.now() - datetime.utcnow()).seconds + 1
                    )
                )

    return literal(
        '<time class="timeago tooltip" '
        'title="{1}" datetime="{0}{2}">{1}</time>'.format(
            datetime_iso, title, tzinfo))
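
# Illustrative sketch (not part of the original module): for a naive datetime
# the helper above falls back to a '+00:00' offset, so a call such as
#   age_component(datetime(2016, 1, 1, 12, 0))
# would render roughly:
#   <time class="timeago tooltip" title="Fri, 01 Jan 2016 12:00:00"
#         datetime="2016-01-01 12:00:00+00:00">Fri, 01 Jan 2016 12:00:00</time>
# The exact title text depends on format_date() and any `value` override.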


def _shorten_commit_id(commit_id):
    from rhodecode import CONFIG
    def_len = safe_int(CONFIG.get('rhodecode_show_sha_length', 12))
    return commit_id[:def_len]


def show_id(commit):
    """
    Configurable function that shows the commit ID,
    by default as r123:fffeeefffeee

    :param commit: commit instance
    """
    from rhodecode import CONFIG
    show_idx = str2bool(CONFIG.get('rhodecode_show_revision_number', True))

    raw_id = _shorten_commit_id(commit.raw_id)
    if show_idx:
        return 'r%s:%s' % (commit.idx, raw_id)
    else:
        return '%s' % (raw_id, )
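
# Illustrative sketch (assumed config values, not part of the original module):
# with rhodecode_show_revision_number=True and rhodecode_show_sha_length=12, a
# commit with idx=123 and raw_id starting with 'fffeeefffeee' is shown as
#   'r123:fffeeefffeee'
# and with the revision number disabled it is shown as just 'fffeeefffeee'.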


def format_date(date):
    """
    use a standardized formatting for dates used in RhodeCode

    :param date: date/datetime object
    :return: formatted date
    """

    if date:
        _fmt = "%a, %d %b %Y %H:%M:%S"
        return safe_unicode(date.strftime(_fmt))

    return u""


class _RepoChecker(object):

    def __init__(self, backend_alias):
        self._backend_alias = backend_alias

    def __call__(self, repository):
        if hasattr(repository, 'alias'):
            _type = repository.alias
        elif hasattr(repository, 'repo_type'):
            _type = repository.repo_type
        else:
            _type = repository
        return _type == self._backend_alias

is_git = _RepoChecker('git')
is_hg = _RepoChecker('hg')
is_svn = _RepoChecker('svn')


def get_repo_type_by_name(repo_name):
    repo = Repository.get_by_repo_name(repo_name)
    return repo.repo_type


def is_svn_without_proxy(repository):
    if is_svn(repository):
        from rhodecode.model.settings import VcsSettingsModel
        conf = VcsSettingsModel().get_ui_settings_as_config_obj()
        return not str2bool(conf.get('vcs_svn_proxy', 'http_requests_enabled'))
    return False


def discover_user(author):
    """
    Tries to discover a RhodeCode User based on the author string. The author
    string is typically `FirstName LastName <email@address.com>`
    """

    # if author is already an instance use it for extraction
    if isinstance(author, User):
        return author

    # Valid email in the attribute passed, see if they're in the system
    _email = author_email(author)
    if _email != '':
        user = User.get_by_email(_email, case_insensitive=True, cache=True)
        if user is not None:
            return user

    # Maybe it's a username? We try to extract it and fetch it by username.
    _author = author_name(author)
    user = User.get_by_username(_author, case_insensitive=True, cache=True)
    if user is not None:
        return user

    return None
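
# Illustrative resolution order (hedged sketch, not part of the original
# module) for an author string like 'Marcin Kuzminski <marcin@rhodecode.com>':
#   1. a User instance passed in is returned unchanged,
#   2. otherwise the email part is matched case-insensitively against users,
#   3. otherwise the name part is tried as a username,
#   4. and None is returned when nothing matches.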


def email_or_none(author):
    # extract email from the commit string
    _email = author_email(author)

    # If we have an email, use it, otherwise
    # see if it contains a username we can get an email from
    if _email != '':
        return _email
    else:
        user = User.get_by_username(
            author_name(author), case_insensitive=True, cache=True)

        if user is not None:
            return user.email

    # No valid email, not a valid user in the system, none!
    return None


def link_to_user(author, length=0, **kwargs):
    user = discover_user(author)
    # user can be None, but if we have it already it means we can re-use it
    # in the person() function, so we save one expensive query
    if user:
        author = user

    display_person = person(author, 'username_or_name_or_email')
    if length:
        display_person = shorter(display_person, length)

    if user:
        return link_to(
            escape(display_person),
            url('user_profile', username=user.username),
            **kwargs)
    else:
        return escape(display_person)


def person(author, show_attr="username_and_name"):
    user = discover_user(author)
    if user:
        return getattr(user, show_attr)
    else:
        _author = author_name(author)
        _email = email(author)
        return _author or _email


def author_string(email):
    if email:
        user = User.get_by_email(email, case_insensitive=True, cache=True)
        if user:
            if user.firstname or user.lastname:
                return '%s %s &lt;%s&gt;' % (user.firstname, user.lastname, email)
            else:
                return email
        else:
            return email
    else:
        return None


def person_by_id(id_, show_attr="username_and_name"):
    # attr to return from fetched user
    person_getter = lambda usr: getattr(usr, show_attr)

    # maybe it's an ID?
    if str(id_).isdigit() or isinstance(id_, int):
        id_ = int(id_)
        user = User.get(id_)
        if user is not None:
            return person_getter(user)
    return id_


def gravatar_with_user(author, show_disabled=False):
    from rhodecode.lib.utils import PartialRenderer
    _render = PartialRenderer('base/base.html')
    return _render('gravatar_with_user', author, show_disabled=show_disabled)


def desc_stylize(value):
    """
    Converts tags in the given value into their html equivalents.

    :param value:
    """
    if not value:
        return ''

    value = re.sub(r'\[see\ \=\>\ *([a-zA-Z0-9\/\=\?\&\ \:\/\.\-]*)\]',
                   '<div class="metatag" tag="see">see =&gt; \\1 </div>', value)
    value = re.sub(r'\[license\ \=\>\ *([a-zA-Z0-9\/\=\?\&\ \:\/\.\-]*)\]',
                   '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/\\1">\\1</a></div>', value)
    value = re.sub(r'\[(requires|recommends|conflicts|base)\ \=\>\ *([a-zA-Z0-9\-\/]*)\]',
                   '<div class="metatag" tag="\\1">\\1 =&gt; <a href="/\\2">\\2</a></div>', value)
    value = re.sub(r'\[(lang|language)\ \=\>\ *([a-zA-Z\-\/\#\+]*)\]',
                   '<div class="metatag" tag="lang">\\2</div>', value)
    value = re.sub(r'\[([a-z]+)\]',
                   '<div class="metatag" tag="\\1">\\1</div>', value)

    return value
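
# Illustrative conversions (hedged sketch, not part of the original module);
# the exact markup is produced by the substitutions above:
#   '[requires => bootstrap]' -> '<div class="metatag" tag="requires">requires
#                                 =&gt; <a href="/bootstrap">bootstrap</a></div>'
#   '[lang => python]'        -> '<div class="metatag" tag="lang">python</div>'
#   '[stable]'                -> '<div class="metatag" tag="stable">stable</div>'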


def escaped_stylize(value):
    """
    Converts tags in the given value into their html equivalents, but escapes
    the value first.
    """
    if not value:
        return ''

    # Use the default webhelpers escape method, but force the result into
    # plain unicode instead of a markup tag so it can be used in the regex
    # expressions below
    value = unicode(escape(safe_unicode(value)))

    value = re.sub(r'\[see\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]',
                   '<div class="metatag" tag="see">see =&gt; \\1 </div>', value)
    value = re.sub(r'\[license\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]',
                   '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/\\1">\\1</a></div>', value)
    value = re.sub(r'\[(requires|recommends|conflicts|base)\ \=\&gt;\ *([a-zA-Z0-9\-\/]*)\]',
                   '<div class="metatag" tag="\\1">\\1 =&gt; <a href="/\\2">\\2</a></div>', value)
    value = re.sub(r'\[(lang|language)\ \=\&gt;\ *([a-zA-Z\-\/\#\+]*)\]',
                   '<div class="metatag" tag="lang">\\2</div>', value)
    value = re.sub(r'\[([a-z]+)\]',
                   '<div class="metatag" tag="\\1">\\1</div>', value)

    return value


def bool2icon(value):
    """
    Returns the boolean of the given value, represented as an html element
    with classes that will render the matching icon.

    :param value: given value to convert to an html node
    """

    if value:  # does bool conversion
        return HTML.tag('i', class_="icon-true")
    else:  # not true as bool
        return HTML.tag('i', class_="icon-false")


#==============================================================================
# PERMS
#==============================================================================
from rhodecode.lib.auth import HasPermissionAny, HasPermissionAll, \
    HasRepoPermissionAny, HasRepoPermissionAll, HasRepoGroupPermissionAll, \
    HasRepoGroupPermissionAny, HasRepoPermissionAnyApi, get_csrf_token, \
    csrf_token_key


#==============================================================================
# GRAVATAR URL
#==============================================================================
class InitialsGravatar(object):
    def __init__(self, email_address, first_name, last_name, size=30,
                 background=None, text_color='#fff'):
        self.size = size
        self.first_name = first_name
        self.last_name = last_name
        self.email_address = email_address
        self.background = background or self.str2color(email_address)
        self.text_color = text_color

    def get_color_bank(self):
        """
        returns a predefined list of colors that gravatars can use.
        Those are randomized distinct colors that guarantee readability and
        uniqueness.

        generated with: http://phrogz.net/css/distinct-colors.html
        """
        return [
            '#bf3030', '#a67f53', '#00ff00', '#5989b3', '#392040', '#d90000',
            '#402910', '#204020', '#79baf2', '#a700b3', '#bf6060', '#7f5320',
            '#008000', '#003059', '#ee00ff', '#ff0000', '#8c4b00', '#007300',
            '#005fb3', '#de73e6', '#ff4040', '#ffaa00', '#3df255', '#203140',
            '#47004d', '#591616', '#664400', '#59b365', '#0d2133', '#83008c',
            '#592d2d', '#bf9f60', '#73e682', '#1d3f73', '#73006b', '#402020',
            '#b2862d', '#397341', '#597db3', '#e600d6', '#a60000', '#736039',
            '#00b318', '#79aaf2', '#330d30', '#ff8080', '#403010', '#16591f',
            '#002459', '#8c4688', '#e50000', '#ffbf40', '#00732e', '#102340',
            '#bf60ac', '#8c4646', '#cc8800', '#00a642', '#1d3473', '#b32d98',
            '#660e00', '#ffd580', '#80ffb2', '#7391e6', '#733967', '#d97b6c',
            '#8c5e00', '#59b389', '#3967e6', '#590047', '#73281d', '#665200',
            '#00e67a', '#2d50b3', '#8c2377', '#734139', '#b2982d', '#16593a',
            '#001859', '#ff00aa', '#a65e53', '#ffcc00', '#0d3321', '#2d3959',
            '#731d56', '#401610', '#4c3d00', '#468c6c', '#002ca6', '#d936a3',
            '#d94c36', '#403920', '#36d9a3', '#0d1733', '#592d4a', '#993626',
            '#cca300', '#00734d', '#46598c', '#8c005e', '#7f1100', '#8c7000',
            '#00a66f', '#7382e6', '#b32d74', '#d9896c', '#ffe680', '#1d7362',
            '#364cd9', '#73003d', '#d93a00', '#998a4d', '#59b3a1', '#5965b3',
            '#e5007a', '#73341d', '#665f00', '#00b38f', '#0018b3', '#59163a',
            '#b2502d', '#bfb960', '#00ffcc', '#23318c', '#a6537f', '#734939',
            '#b2a700', '#104036', '#3d3df2', '#402031', '#e56739', '#736f39',
            '#79f2ea', '#000059', '#401029', '#4c1400', '#ffee00', '#005953',
            '#101040', '#990052', '#402820', '#403d10', '#00ffee', '#0000d9',
            '#ff80c4', '#a66953', '#eeff00', '#00ccbe', '#8080ff', '#e673a1',
            '#a62c00', '#474d00', '#1a3331', '#46468c', '#733950', '#662900',
            '#858c23', '#238c85', '#0f0073', '#b20047', '#d9986c', '#becc00',
            '#396f73', '#281d73', '#ff0066', '#ff6600', '#dee673', '#59adb3',
            '#6559b3', '#590024', '#b2622d', '#98b32d', '#36ced9', '#332d59',
            '#40001a', '#733f1d', '#526600', '#005359', '#242040', '#bf6079',
            '#735039', '#cef23d', '#007780', '#5630bf', '#66001b', '#b24700',
            '#acbf60', '#1d6273', '#25008c', '#731d34', '#a67453', '#50592d',
            '#00ccff', '#6600ff', '#ff0044', '#4c1f00', '#8a994d', '#79daf2',
            '#a173e6', '#d93662', '#402310', '#aaff00', '#2d98b3', '#8c40ff',
            '#592d39', '#ff8c40', '#354020', '#103640', '#1a0040', '#331a20',
            '#331400', '#334d00', '#1d5673', '#583973', '#7f0022', '#4c3626',
            '#88cc00', '#36a3d9', '#3d0073', '#d9364c', '#33241a', '#698c23',
            '#5995b3', '#300059', '#e57382', '#7f3300', '#366600', '#00aaff',
            '#3a1659', '#733941', '#663600', '#74b32d', '#003c59', '#7f53a6',
            '#73000f', '#ff8800', '#baf279', '#79caf2', '#291040', '#a6293a',
            '#b2742d', '#587339', '#0077b3', '#632699', '#400009', '#d9a66c',
            '#294010', '#2d4a59', '#aa00ff', '#4c131b', '#b25f00', '#5ce600',
            '#267399', '#a336d9', '#990014', '#664e33', '#86bf60', '#0088ff',
            '#7700b3', '#593a16', '#073300', '#1d4b73', '#ac60bf', '#e59539',
            '#4f8c46', '#368dd9', '#5c0073'
        ]

    def rgb_to_hex_color(self, rgb_tuple):
        """
        Converts a given rgb_tuple to a hex color.

        :param rgb_tuple: tuple with 3 ints representing the rgb color space
        """
        return '#' + ("".join(map(chr, rgb_tuple)).encode('hex'))

    def email_to_int_list(self, email_str):
        """
        Get every byte of the hex digest value of email and turn it to integer.
        It's always going to be between 0-255
        """
        digest = md5_safe(email_str.lower())
        return [int(digest[i * 2:i * 2 + 2], 16) for i in range(16)]

    def pick_color_bank_index(self, email_str, color_bank):
        return self.email_to_int_list(email_str)[0] % len(color_bank)

    def str2color(self, email_str):
        """
        Tries to map an email to a color using a stable algorithm.

        :param email_str:
        """
        color_bank = self.get_color_bank()
        # pick position (modulo its length so we always find it in the
        # bank even if it's smaller than 256 values)
        pos = self.pick_color_bank_index(email_str, color_bank)
        return color_bank[pos]
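
    # Illustrative sketch (not part of the original module): the mapping is
    # stable because it only depends on the md5 of the lowercased email, e.g.
    #   InitialsGravatar('user@example.com', '', '').str2color('user@example.com')
    # always picks the same entry of the color bank above, while different
    # emails tend to spread across the bank via the modulo of its length.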

    def normalize_email(self, email_address):
        import unicodedata
        # default host used to fill in the fake/missing email
        default_host = u'localhost'

        if not email_address:
            email_address = u'%s@%s' % (User.DEFAULT_USER, default_host)

        email_address = safe_unicode(email_address)

        if u'@' not in email_address:
            email_address = u'%s@%s' % (email_address, default_host)

        if email_address.endswith(u'@'):
            email_address = u'%s%s' % (email_address, default_host)

        email_address = unicodedata.normalize('NFKD', email_address)\
            .encode('ascii', 'ignore')
        return email_address
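
    # Illustrative behaviour (hedged sketch, not part of the original module):
    #   ''            -> '<User.DEFAULT_USER>@localhost'
    #   'john'        -> 'john@localhost'
    #   'john@'       -> 'john@localhost'
    #   u'józef@host' -> 'jozef@host' after the NFKD/ascii normalization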

    def get_initials(self):
        """
        Returns 2 letter initials calculated based on the input.
        The algorithm picks the first given email address, and takes the first
        letter of the part before the @, and then the first letter of the
        server name. In case the part before the @ is in a format of
        `somestring.somestring2` it replaces the server letter with the first
        letter of somestring2.

        In case the function was initialized with both first and last name,
        this overrides the extraction from email by the first letter of the
        first and last name. We add special logic to that functionality: in
        case the full name is compound, like Guido Von Rossum, we use the last
        part of the last name (Von Rossum), picking `R`.

        The function also normalizes non-ascii characters to their ascii
        representation, eg Ą => A
        """
        import unicodedata
        # replace non-ascii to ascii
        first_name = unicodedata.normalize(
            'NFKD', safe_unicode(self.first_name)).encode('ascii', 'ignore')
        last_name = unicodedata.normalize(
            'NFKD', safe_unicode(self.last_name)).encode('ascii', 'ignore')

        # do NFKD encoding, and also make sure email has proper format
        email_address = self.normalize_email(self.email_address)

        # first push the email initials
        prefix, server = email_address.split('@', 1)

        # check if prefix is maybe a 'firstname.lastname' syntax
        _dot_split = prefix.rsplit('.', 1)
        if len(_dot_split) == 2:
            initials = [_dot_split[0][0], _dot_split[1][0]]
        else:
            initials = [prefix[0], server[0]]

        # then try to replace either firstname or lastname
        fn_letter = (first_name or " ")[0].strip()
        ln_letter = (last_name.split(' ', 1)[-1] or " ")[0].strip()

        if fn_letter:
            initials[0] = fn_letter

        if ln_letter:
            initials[1] = ln_letter

        return ''.join(initials).upper()
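
    # Illustrative results (hedged sketch, not part of the original module):
    #   email 'john.doe@example.com', no names       -> 'JD'
    #   email 'admin@example.com', no names          -> 'AE' (prefix + server)
    #   names 'Marcin' / 'Kuzminski'                 -> 'MK'
    #   names 'Guido' / 'Von Rossum' (compound last) -> 'GR'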

    def get_img_data_by_type(self, font_family, img_type):
        default_user = """
        <svg xmlns="http://www.w3.org/2000/svg"
        version="1.1" x="0px" y="0px" width="{size}" height="{size}"
        viewBox="-15 -10 439.165 429.164"

        xml:space="preserve"
        style="background:{background};" >

        <path d="M204.583,216.671c50.664,0,91.74-48.075,
        91.74-107.378c0-82.237-41.074-107.377-91.74-107.377
        c-50.668,0-91.74,25.14-91.74,107.377C112.844,
        168.596,153.916,216.671,
        204.583,216.671z" fill="{text_color}"/>
        <path d="M407.164,374.717L360.88,
        270.454c-2.117-4.771-5.836-8.728-10.465-11.138l-71.83-37.392
        c-1.584-0.823-3.502-0.663-4.926,0.415c-20.316,
        15.366-44.203,23.488-69.076,23.488c-24.877,
        0-48.762-8.122-69.078-23.488
        c-1.428-1.078-3.346-1.238-4.93-0.415L58.75,
        259.316c-4.631,2.41-8.346,6.365-10.465,11.138L2.001,374.717
        c-3.191,7.188-2.537,15.412,1.75,22.005c4.285,
        6.592,11.537,10.526,19.4,10.526h362.861c7.863,0,15.117-3.936,
        19.402-10.527 C409.699,390.129,
        410.355,381.902,407.164,374.717z" fill="{text_color}"/>
        </svg>""".format(
            size=self.size,
            background='#979797',  # @grey4
            text_color=self.text_color,
            font_family=font_family)

        return {
            "default_user": default_user
        }[img_type]

    def get_img_data(self, svg_type=None):
        """
        generates the svg metadata for image
        """

        font_family = ','.join([
            'proximanovaregular',
            'Proxima Nova Regular',
            'Proxima Nova',
            'Arial',
            'Lucida Grande',
            'sans-serif'
        ])
        if svg_type:
            return self.get_img_data_by_type(font_family, svg_type)

        initials = self.get_initials()
        img_data = """
        <svg xmlns="http://www.w3.org/2000/svg" pointer-events="none"
             width="{size}" height="{size}"
             style="width: 100%; height: 100%; background-color: {background}"
             viewBox="0 0 {size} {size}">
            <text text-anchor="middle" y="50%" x="50%" dy="0.35em"
                pointer-events="auto" fill="{text_color}"
                font-family="{font_family}"
                style="font-weight: 400; font-size: {f_size}px;">{text}
            </text>
        </svg>""".format(
            size=self.size,
            f_size=self.size/1.85,  # scale the text inside the box nicely
            background=self.background,
            text_color=self.text_color,
            text=initials.upper(),
            font_family=font_family)

        return img_data

    def generate_svg(self, svg_type=None):
        img_data = self.get_img_data(svg_type)
        return "data:image/svg+xml;base64,%s" % img_data.encode('base64')


def initials_gravatar(email_address, first_name, last_name, size=30):
    svg_type = None
    if email_address == User.DEFAULT_USER_EMAIL:
        svg_type = 'default_user'
    klass = InitialsGravatar(email_address, first_name, last_name, size)
    return klass.generate_svg(svg_type=svg_type)


def gravatar_url(email_address, size=30):
    # doh, we need to re-import those to mock it later
    from pylons import tmpl_context as c

    _use_gravatar = c.visual.use_gravatar
    _gravatar_url = c.visual.gravatar_url or User.DEFAULT_GRAVATAR_URL

    email_address = email_address or User.DEFAULT_USER_EMAIL
    if isinstance(email_address, unicode):
        # hashlib crashes on unicode items
        email_address = safe_str(email_address)

    # empty email or default user
    if not email_address or email_address == User.DEFAULT_USER_EMAIL:
        return initials_gravatar(User.DEFAULT_USER_EMAIL, '', '', size=size)

    if _use_gravatar:
        # TODO: Disuse pyramid thread locals. Think about another solution to
        # get the host and schema here.
        request = get_current_request()
        tmpl = safe_str(_gravatar_url)
        tmpl = tmpl.replace('{email}', email_address)\
            .replace('{md5email}', md5_safe(email_address.lower())) \
            .replace('{netloc}', request.host)\
            .replace('{scheme}', request.scheme)\
            .replace('{size}', safe_str(size))
        return tmpl
    else:
        return initials_gravatar(email_address, '', '', size=size)
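
# Illustrative sketch (not part of the original module): assuming a gravatar
# template such as 'https://secure.gravatar.com/avatar/{md5email}?s={size}',
# the placeholder substitution above yields roughly
#   https://secure.gravatar.com/avatar/<md5 of lowercased email>?s=30
# while {email}, {netloc} and {scheme} allow self-hosted avatar services.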


class Page(_Page):
    """
    Custom pager to match rendering style with paginator
    """

    def _get_pos(self, cur_page, max_page, items):
        edge = (items / 2) + 1
        if (cur_page <= edge):
            radius = max(items / 2, items - cur_page)
        elif (max_page - cur_page) < edge:
            radius = (items - 1) - (max_page - cur_page)
        else:
            radius = items / 2

        left = max(1, (cur_page - (radius)))
        right = min(max_page, cur_page + (radius))
        return left, cur_page, right
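
    # Worked example (hedged sketch, not part of the original module): for
    # cur_page=7, max_page=12 and items=5 (radius 2), edge is 3, so the final
    # branch gives radius=2 and the window becomes (5, 7, 9), i.e. the
    # '1 .. 5 6 [7] 8 9 .. 12' layout used in _range() below.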

    def _range(self, regexp_match):
        """
        Return range of linked pages (e.g. '1 2 [3] 4 5 6 7 8').

        Arguments:

        regexp_match
            A "re" (regular expressions) match object containing the
            radius of linked pages around the current page in
            regexp_match.group(1) as a string

        This function is supposed to be called as a callable in
        re.sub.

        """
        radius = int(regexp_match.group(1))

        # Compute the first and last page number within the radius
        # e.g. '1 .. 5 6 [7] 8 9 .. 12'
        #  -> leftmost_page  = 5
        #  -> rightmost_page = 9
        leftmost_page, _cur, rightmost_page = self._get_pos(self.page,
                                                            self.last_page,
                                                            (radius * 2) + 1)
        nav_items = []

        # Create a link to the first page (unless we are on the first page
        # or there would be no need to insert '..' spacers)
        if self.page != self.first_page and self.first_page < leftmost_page:
            nav_items.append(self._pagerlink(self.first_page, self.first_page))

        # Insert dots if there are pages between the first page
        # and the currently displayed page range
        if leftmost_page - self.first_page > 1:
            # Wrap in a SPAN tag if nolink_attr is set
            text = '..'
            if self.dotdot_attr:
                text = HTML.span(c=text, **self.dotdot_attr)
            nav_items.append(text)

        for thispage in xrange(leftmost_page, rightmost_page + 1):
            # Highlight the current page number and do not use a link
            if thispage == self.page:
                text = '%s' % (thispage,)
                # Wrap in a SPAN tag if nolink_attr is set
                if self.curpage_attr:
                    text = HTML.span(c=text, **self.curpage_attr)
                nav_items.append(text)
            # Otherwise create just a link to that page
            else:
                text = '%s' % (thispage,)
                nav_items.append(self._pagerlink(thispage, text))

        # Insert dots if there are pages between the displayed
        # page numbers and the end of the page range
        if self.last_page - rightmost_page > 1:
            text = '..'
            # Wrap in a SPAN tag if nolink_attr is set
            if self.dotdot_attr:
                text = HTML.span(c=text, **self.dotdot_attr)
            nav_items.append(text)

        # Create a link to the very last page (unless we are on the last
        # page or there would be no need to insert '..' spacers)
        if self.page != self.last_page and rightmost_page < self.last_page:
            nav_items.append(self._pagerlink(self.last_page, self.last_page))

        ## prerender links
        #_page_link = url.current()
        #nav_items.append(literal('<link rel="prerender" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
        #nav_items.append(literal('<link rel="prefetch" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
        return self.separator.join(nav_items)

    def pager(self, format='~2~', page_param='page', partial_param='partial',
              show_if_single_page=False, separator=' ', onclick=None,
              symbol_first='<<', symbol_last='>>',
              symbol_previous='<', symbol_next='>',
              link_attr={'class': 'pager_link', 'rel': 'prerender'},
              curpage_attr={'class': 'pager_curpage'},
              dotdot_attr={'class': 'pager_dotdot'}, **kwargs):

        self.curpage_attr = curpage_attr
        self.separator = separator
        self.pager_kwargs = kwargs
        self.page_param = page_param
        self.partial_param = partial_param
        self.onclick = onclick
        self.link_attr = link_attr
        self.dotdot_attr = dotdot_attr

        # Don't show navigator if there is no more than one page
        if self.page_count == 0 or (self.page_count == 1 and not show_if_single_page):
            return ''

        from string import Template
        # Replace ~...~ in token format by range of pages
        result = re.sub(r'~(\d+)~', self._range, format)

        # Interpolate '%' variables
        result = Template(result).safe_substitute({
            'first_page': self.first_page,
            'last_page': self.last_page,
            'page': self.page,
            'page_count': self.page_count,
            'items_per_page': self.items_per_page,
            'first_item': self.first_item,
            'last_item': self.last_item,
            'item_count': self.item_count,
            'link_first': self.page > self.first_page and \
                self._pagerlink(self.first_page, symbol_first) or '',
            'link_last': self.page < self.last_page and \
                self._pagerlink(self.last_page, symbol_last) or '',
            'link_previous': self.previous_page and \
                self._pagerlink(self.previous_page, symbol_previous) \
                or HTML.span(symbol_previous, class_="pg-previous disabled"),
            'link_next': self.next_page and \
                self._pagerlink(self.next_page, symbol_next) \
                or HTML.span(symbol_next, class_="pg-next disabled")
        })

        return literal(result)


#==============================================================================
# REPO PAGER, PAGER FOR REPOSITORY
#==============================================================================
class RepoPage(Page):

    def __init__(self, collection, page=1, items_per_page=20,
                 item_count=None, url=None, **kwargs):

        """Create a "RepoPage" instance. Special pager for paging a
        repository.
        """
        self._url_generator = url

        # Save the kwargs class-wide so they can be used in the pager() method
        self.kwargs = kwargs

        # Save a reference to the collection
        self.original_collection = collection

        self.collection = collection

        # The self.page is the number of the current page.
        # The first page has the number 1!
        try:
            self.page = int(page)  # make it int() if we get it as a string
        except (ValueError, TypeError):
            self.page = 1

        self.items_per_page = items_per_page

        # Unless the user tells us how many items the collection has
        # we calculate that ourselves.
        if item_count is not None:
            self.item_count = item_count
        else:
            self.item_count = len(self.collection)

        # Compute the number of the first and last available page
        if self.item_count > 0:
            self.first_page = 1
            self.page_count = int(math.ceil(float(self.item_count) /
                                            self.items_per_page))
            self.last_page = self.first_page + self.page_count - 1

            # Make sure that the requested page number is in the range of
            # valid pages
            if self.page > self.last_page:
                self.page = self.last_page
            elif self.page < self.first_page:
                self.page = self.first_page

            # Note: the number of items on this page can be less than
            # items_per_page if the last page is not full
            self.first_item = max(0, (self.item_count) - (self.page *
                                                          items_per_page))
            self.last_item = ((self.item_count - 1) - items_per_page *
                              (self.page - 1))

            self.items = list(self.collection[self.first_item:self.last_item + 1])

            # Links to previous and next page
            if self.page > self.first_page:
                self.previous_page = self.page - 1
            else:
                self.previous_page = None

            if self.page < self.last_page:
                self.next_page = self.page + 1
            else:
                self.next_page = None

        # No items available
        else:
            self.first_page = None
            self.page_count = 0
            self.last_page = None
            self.first_item = None
            self.last_item = None
            self.previous_page = None
            self.next_page = None
            self.items = []

        # This is a subclass of the 'list' type. Initialise the list now.
        list.__init__(self, reversed(self.items))
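
        # Worked example (hedged sketch, not part of the original module):
        # with item_count=45 and items_per_page=20, page_count=3; page 1 takes
        # collection[25:45] (20 items) and page 3 takes collection[0:5]
        # (5 items), and each slice is reversed before being exposed as the
        # page's items.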
1471
1476
1472
1477
1473 def changed_tooltip(nodes):
1478 def changed_tooltip(nodes):
1474 """
1479 """
1475 Generates a html string for changed nodes in commit page.
1480 Generates a html string for changed nodes in commit page.
1476 It limits the output to 30 entries
1481 It limits the output to 30 entries
1477
1482
1478 :param nodes: LazyNodesGenerator
1483 :param nodes: LazyNodesGenerator
1479 """
1484 """
1480 if nodes:
1485 if nodes:
1481 pref = ': <br/> '
1486 pref = ': <br/> '
1482 suf = ''
1487 suf = ''
1483 if len(nodes) > 30:
1488 if len(nodes) > 30:
1484 suf = '<br/>' + _(' and %s more') % (len(nodes) - 30)
1489 suf = '<br/>' + _(' and %s more') % (len(nodes) - 30)
1485 return literal(pref + '<br/> '.join([safe_unicode(x.path)
1490 return literal(pref + '<br/> '.join([safe_unicode(x.path)
1486 for x in nodes[:30]]) + suf)
1491 for x in nodes[:30]]) + suf)
1487 else:
1492 else:
1488 return ': ' + _('No Files')
1493 return ': ' + _('No Files')


def breadcrumb_repo_link(repo):
    """
    Makes a breadcrumbs path link to repo

    ex::
        group >> subgroup >> repo

    :param repo: a Repository instance
    """

    path = [
        link_to(group.name, url('repo_group_home', group_name=group.group_name))
        for group in repo.groups_with_parents
    ] + [
        link_to(repo.just_name, url('summary_home', repo_name=repo.repo_name))
    ]

    return literal(' &raquo; '.join(path))
1509
1514
1510
1515
1511 def format_byte_size_binary(file_size):
1516 def format_byte_size_binary(file_size):
1512 """
1517 """
1513 Formats file/folder sizes using binary (IEC) units.
1518 Formats file/folder sizes using binary (IEC) units.
1514 """
1519 """
1515 formatted_size = format_byte_size(file_size, binary=True)
1520 formatted_size = format_byte_size(file_size, binary=True)
1516 return formatted_size
1521 return formatted_size
1517
1522
1518
1523
1519 def fancy_file_stats(stats):
1524 def fancy_file_stats(stats):
1520 """
1525 """
1521 Displays a fancy two-colored bar for the number of added/deleted
1526 Displays a fancy two-colored bar for the number of added/deleted
1522 lines of code in a file
1527 lines of code in a file
1523
1528
1524 :param stats: two element list of added/deleted lines of code
1529 :param stats: two element list of added/deleted lines of code
1525 """
1530 """
1526 from rhodecode.lib.diffs import NEW_FILENODE, DEL_FILENODE, \
1531 from rhodecode.lib.diffs import NEW_FILENODE, DEL_FILENODE, \
1527 MOD_FILENODE, RENAMED_FILENODE, CHMOD_FILENODE, BIN_FILENODE
1532 MOD_FILENODE, RENAMED_FILENODE, CHMOD_FILENODE, BIN_FILENODE
1528
1533
1529 def cgen(l_type, a_v, d_v):
1534 def cgen(l_type, a_v, d_v):
1530 mapping = {'tr': 'top-right-rounded-corner-mid',
1535 mapping = {'tr': 'top-right-rounded-corner-mid',
1531 'tl': 'top-left-rounded-corner-mid',
1536 'tl': 'top-left-rounded-corner-mid',
1532 'br': 'bottom-right-rounded-corner-mid',
1537 'br': 'bottom-right-rounded-corner-mid',
1533 'bl': 'bottom-left-rounded-corner-mid'}
1538 'bl': 'bottom-left-rounded-corner-mid'}
1534 map_getter = lambda x: mapping[x]
1539 map_getter = lambda x: mapping[x]
1535
1540
1536 if l_type == 'a' and d_v:
1541 if l_type == 'a' and d_v:
1537 #case when added and deleted are present
1542 #case when added and deleted are present
1538 return ' '.join(map(map_getter, ['tl', 'bl']))
1543 return ' '.join(map(map_getter, ['tl', 'bl']))
1539
1544
1540 if l_type == 'a' and not d_v:
1545 if l_type == 'a' and not d_v:
1541 return ' '.join(map(map_getter, ['tr', 'br', 'tl', 'bl']))
1546 return ' '.join(map(map_getter, ['tr', 'br', 'tl', 'bl']))
1542
1547
1543 if l_type == 'd' and a_v:
1548 if l_type == 'd' and a_v:
1544 return ' '.join(map(map_getter, ['tr', 'br']))
1549 return ' '.join(map(map_getter, ['tr', 'br']))
1545
1550
1546 if l_type == 'd' and not a_v:
1551 if l_type == 'd' and not a_v:
1547 return ' '.join(map(map_getter, ['tr', 'br', 'tl', 'bl']))
1552 return ' '.join(map(map_getter, ['tr', 'br', 'tl', 'bl']))
1548
1553
1549 a, d = stats['added'], stats['deleted']
1554 a, d = stats['added'], stats['deleted']
1550 width = 100
1555 width = 100
1551
1556
1552 if stats['binary']: # binary operations like chmod/rename etc
1557 if stats['binary']: # binary operations like chmod/rename etc
1553 lbl = []
1558 lbl = []
1554 bin_op = 0 # undefined
1559 bin_op = 0 # undefined
1555
1560
1556 # prefix with bin for binary files
1561 # prefix with bin for binary files
1557 if BIN_FILENODE in stats['ops']:
1562 if BIN_FILENODE in stats['ops']:
1558 lbl += ['bin']
1563 lbl += ['bin']
1559
1564
1560 if NEW_FILENODE in stats['ops']:
1565 if NEW_FILENODE in stats['ops']:
1561 lbl += [_('new file')]
1566 lbl += [_('new file')]
1562 bin_op = NEW_FILENODE
1567 bin_op = NEW_FILENODE
1563 elif MOD_FILENODE in stats['ops']:
1568 elif MOD_FILENODE in stats['ops']:
1564 lbl += [_('mod')]
1569 lbl += [_('mod')]
1565 bin_op = MOD_FILENODE
1570 bin_op = MOD_FILENODE
1566 elif DEL_FILENODE in stats['ops']:
1571 elif DEL_FILENODE in stats['ops']:
1567 lbl += [_('del')]
1572 lbl += [_('del')]
1568 bin_op = DEL_FILENODE
1573 bin_op = DEL_FILENODE
1569 elif RENAMED_FILENODE in stats['ops']:
1574 elif RENAMED_FILENODE in stats['ops']:
1570 lbl += [_('rename')]
1575 lbl += [_('rename')]
1571 bin_op = RENAMED_FILENODE
1576 bin_op = RENAMED_FILENODE
1572
1577
1573 # chmod can go with other operations, so we add a + to lbl if needed
1578 # chmod can go with other operations, so we add a + to lbl if needed
1574 if CHMOD_FILENODE in stats['ops']:
1579 if CHMOD_FILENODE in stats['ops']:
1575 lbl += [_('chmod')]
1580 lbl += [_('chmod')]
1576 if bin_op == 0:
1581 if bin_op == 0:
1577 bin_op = CHMOD_FILENODE
1582 bin_op = CHMOD_FILENODE
1578
1583
1579 lbl = '+'.join(lbl)
1584 lbl = '+'.join(lbl)
1580 b_a = '<div class="bin bin%s %s" style="width:100%%">%s</div>' \
1585 b_a = '<div class="bin bin%s %s" style="width:100%%">%s</div>' \
1581 % (bin_op, cgen('a', a_v='', d_v=0), lbl)
1586 % (bin_op, cgen('a', a_v='', d_v=0), lbl)
1582 b_d = '<div class="bin bin1" style="width:0%%"></div>'
1587 b_d = '<div class="bin bin1" style="width:0%%"></div>'
1583 return literal('<div style="width:%spx">%s%s</div>' % (width, b_a, b_d))
1588 return literal('<div style="width:%spx">%s%s</div>' % (width, b_a, b_d))
1584
1589
1585 t = stats['added'] + stats['deleted']
1590 t = stats['added'] + stats['deleted']
1586 unit = float(width) / (t or 1)
1591 unit = float(width) / (t or 1)
1587
1592
1588 # needs > 9% of width to be visible or 0 to be hidden
1593 # needs > 9% of width to be visible or 0 to be hidden
1589 a_p = max(9, unit * a) if a > 0 else 0
1594 a_p = max(9, unit * a) if a > 0 else 0
1590 d_p = max(9, unit * d) if d > 0 else 0
1595 d_p = max(9, unit * d) if d > 0 else 0
1591 p_sum = a_p + d_p
1596 p_sum = a_p + d_p
1592
1597
1593 if p_sum > width:
1598 if p_sum > width:
1594 #adjust the percentage to be == 100% since we adjusted to 9
1599 #adjust the percentage to be == 100% since we adjusted to 9
1595 if a_p > d_p:
1600 if a_p > d_p:
1596 a_p = a_p - (p_sum - width)
1601 a_p = a_p - (p_sum - width)
1597 else:
1602 else:
1598 d_p = d_p - (p_sum - width)
1603 d_p = d_p - (p_sum - width)
1599
1604
1600 a_v = a if a > 0 else ''
1605 a_v = a if a > 0 else ''
1601 d_v = d if d > 0 else ''
1606 d_v = d if d > 0 else ''
1602
1607
1603 d_a = '<div class="added %s" style="width:%s%%">%s</div>' % (
1608 d_a = '<div class="added %s" style="width:%s%%">%s</div>' % (
1604 cgen('a', a_v, d_v), a_p, a_v
1609 cgen('a', a_v, d_v), a_p, a_v
1605 )
1610 )
1606 d_d = '<div class="deleted %s" style="width:%s%%">%s</div>' % (
1611 d_d = '<div class="deleted %s" style="width:%s%%">%s</div>' % (
1607 cgen('d', a_v, d_v), d_p, d_v
1612 cgen('d', a_v, d_v), d_p, d_v
1608 )
1613 )
1609 return literal('<div style="width:%spx">%s%s</div>' % (width, d_a, d_d))
1614 return literal('<div style="width:%spx">%s%s</div>' % (width, d_a, d_d))
1610
1615
1611
1616
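The percentage math at the end of fancy_file_stats() clamps small values to 9% so both colours stay visible, then shaves the overflow off the larger side. A standalone sketch of that arithmetic with illustrative numbers (not RhodeCode code)::

    width = 100
    added, deleted = 3, 97
    unit = float(width) / ((added + deleted) or 1)

    a_p = max(9, unit * added) if added > 0 else 0      # clamp to a 9% minimum
    d_p = max(9, unit * deleted) if deleted > 0 else 0
    if a_p + d_p > width:                               # clamping can overflow 100%
        if a_p > d_p:
            a_p -= (a_p + d_p) - width
        else:
            d_p -= (a_p + d_p) - width
    print('%s %s' % (a_p, d_p))   # 9 91.0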
1612 def urlify_text(text_, safe=True):
1617 def urlify_text(text_, safe=True):
1613 """
1618 """
1614 Extract urls from text and make html links out of them
1619 Extract urls from text and make html links out of them
1615
1620
1616 :param text_:
1621 :param text_:
1617 """
1622 """
1618
1623
1619 url_pat = re.compile(r'''(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@#.&+]'''
1624 url_pat = re.compile(r'''(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@#.&+]'''
1620 '''|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)''')
1625 '''|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)''')
1621
1626
1622 def url_func(match_obj):
1627 def url_func(match_obj):
1623 url_full = match_obj.groups()[0]
1628 url_full = match_obj.groups()[0]
1624 return '<a href="%(url)s">%(url)s</a>' % ({'url': url_full})
1629 return '<a href="%(url)s">%(url)s</a>' % ({'url': url_full})
1625 _newtext = url_pat.sub(url_func, text_)
1630 _newtext = url_pat.sub(url_func, text_)
1626 if safe:
1631 if safe:
1627 return literal(_newtext)
1632 return literal(_newtext)
1628 return _newtext
1633 return _newtext
1629
1634
1630
1635
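The pattern above only consumes URL-safe characters, so surrounding prose is left untouched. A self-contained example using the same regex (the URL is made up)::

    import re

    url_pat = re.compile(
        r'(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@#.&+]|[!*\(\),]'
        r'|(?:%[0-9a-fA-F][0-9a-fA-F]))+)')

    text = 'docs live at https://example.com/docs now'
    print(url_pat.sub(lambda m: '<a href="%s">%s</a>' % (m.group(1), m.group(1)), text))
    # docs live at <a href="https://example.com/docs">https://example.com/docs</a> now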
1631 def urlify_commits(text_, repository):
1636 def urlify_commits(text_, repository):
1632 """
1637 """
1633 Extract commit ids from text and make links from them
1638 Extract commit ids from text and make links from them
1634
1639
1635 :param text_:
1640 :param text_:
1636 :param repository: repo name to build the URL with
1641 :param repository: repo name to build the URL with
1637 """
1642 """
1638 from pylons import url # doh, we need to re-import url to mock it later
1643 from pylons import url # doh, we need to re-import url to mock it later
1639 URL_PAT = re.compile(r'(^|\s)([0-9a-fA-F]{12,40})($|\s)')
1644 URL_PAT = re.compile(r'(^|\s)([0-9a-fA-F]{12,40})($|\s)')
1640
1645
1641 def url_func(match_obj):
1646 def url_func(match_obj):
1642 commit_id = match_obj.groups()[1]
1647 commit_id = match_obj.groups()[1]
1643 pref = match_obj.groups()[0]
1648 pref = match_obj.groups()[0]
1644 suf = match_obj.groups()[2]
1649 suf = match_obj.groups()[2]
1645
1650
1646 tmpl = (
1651 tmpl = (
1647 '%(pref)s<a class="%(cls)s" href="%(url)s">'
1652 '%(pref)s<a class="%(cls)s" href="%(url)s">'
1648 '%(commit_id)s</a>%(suf)s'
1653 '%(commit_id)s</a>%(suf)s'
1649 )
1654 )
1650 return tmpl % {
1655 return tmpl % {
1651 'pref': pref,
1656 'pref': pref,
1652 'cls': 'revision-link',
1657 'cls': 'revision-link',
1653 'url': url('changeset_home', repo_name=repository,
1658 'url': url('changeset_home', repo_name=repository,
1654 revision=commit_id, qualified=True),
1659 revision=commit_id, qualified=True),
1655 'commit_id': commit_id,
1660 'commit_id': commit_id,
1656 'suf': suf
1661 'suf': suf
1657 }
1662 }
1658
1663
1659 newtext = URL_PAT.sub(url_func, text_)
1664 newtext = URL_PAT.sub(url_func, text_)
1660
1665
1661 return newtext
1666 return newtext
1662
1667
1663
1668
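URL_PAT only links bare 12-40 character hex strings standing alone between whitespace, so shorter words or ids glued to other text are ignored. For example (hash and message are made up)::

    import re

    URL_PAT = re.compile(r'(^|\s)([0-9a-fA-F]{12,40})($|\s)')

    msg = 'regression introduced in deadbeefcafe1234 earlier'
    match = URL_PAT.search(msg)
    print(match.group(2))   # deadbeefcafe1234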
1664 def _process_url_func(match_obj, repo_name, uid, entry,
1669 def _process_url_func(match_obj, repo_name, uid, entry,
1665 return_raw_data=False):
1670 return_raw_data=False):
1666 pref = ''
1671 pref = ''
1667 if match_obj.group().startswith(' '):
1672 if match_obj.group().startswith(' '):
1668 pref = ' '
1673 pref = ' '
1669
1674
1670 issue_id = ''.join(match_obj.groups())
1675 issue_id = ''.join(match_obj.groups())
1671 tmpl = (
1676 tmpl = (
1672 '%(pref)s<a class="%(cls)s" href="%(url)s">'
1677 '%(pref)s<a class="%(cls)s" href="%(url)s">'
1673 '%(issue-prefix)s%(id-repr)s'
1678 '%(issue-prefix)s%(id-repr)s'
1674 '</a>')
1679 '</a>')
1675
1680
1676 (repo_name_cleaned,
1681 (repo_name_cleaned,
1677 parent_group_name) = RepoGroupModel().\
1682 parent_group_name) = RepoGroupModel().\
1678 _get_group_name_and_parent(repo_name)
1683 _get_group_name_and_parent(repo_name)
1679
1684
1680 # variables replacement
1685 # variables replacement
1681 named_vars = {
1686 named_vars = {
1682 'id': issue_id,
1687 'id': issue_id,
1683 'repo': repo_name,
1688 'repo': repo_name,
1684 'repo_name': repo_name_cleaned,
1689 'repo_name': repo_name_cleaned,
1685 'group_name': parent_group_name
1690 'group_name': parent_group_name
1686 }
1691 }
1687 # named regex variables
1692 # named regex variables
1688 named_vars.update(match_obj.groupdict())
1693 named_vars.update(match_obj.groupdict())
1689 _url = string.Template(entry['url']).safe_substitute(**named_vars)
1694 _url = string.Template(entry['url']).safe_substitute(**named_vars)
1690
1695
1691 data = {
1696 data = {
1692 'pref': pref,
1697 'pref': pref,
1693 'cls': 'issue-tracker-link',
1698 'cls': 'issue-tracker-link',
1694 'url': _url,
1699 'url': _url,
1695 'id-repr': issue_id,
1700 'id-repr': issue_id,
1696 'issue-prefix': entry['pref'],
1701 'issue-prefix': entry['pref'],
1697 'serv': entry['url'],
1702 'serv': entry['url'],
1698 }
1703 }
1699 if return_raw_data:
1704 if return_raw_data:
1700 return {
1705 return {
1701 'id': issue_id,
1706 'id': issue_id,
1702 'url': _url
1707 'url': _url
1703 }
1708 }
1704 return tmpl % data
1709 return tmpl % data
1705
1710
1706
1711
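The target URL is built with string.Template.safe_substitute, so unknown ${...} placeholders are left in place instead of raising. A standalone sketch (tracker URL and values are hypothetical)::

    import string

    named_vars = {
        'id': '42',
        'repo': 'mygroup/myrepo',    # hypothetical repository
        'repo_name': 'myrepo',
        'group_name': 'mygroup',
    }
    url_tmpl = 'https://tracker.example.com/${repo_name}/issues/${id}'
    print(string.Template(url_tmpl).safe_substitute(**named_vars))
    # https://tracker.example.com/myrepo/issues/42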
1707 def process_patterns(text_string, repo_name, config=None):
1712 def process_patterns(text_string, repo_name, config=None):
1708 repo = None
1713 repo = None
1709 if repo_name:
1714 if repo_name:
1710 # Retrieve the repo object so that an invalid repo_name does not explode inside
1715 # Retrieve the repo object so that an invalid repo_name does not explode inside
1711 # IssueTrackerSettingsModel, while still passing the raw name further down
1716 # IssueTrackerSettingsModel, while still passing the raw name further down
1712 repo = Repository.get_by_repo_name(repo_name, cache=True)
1717 repo = Repository.get_by_repo_name(repo_name, cache=True)
1713
1718
1714 settings_model = IssueTrackerSettingsModel(repo=repo)
1719 settings_model = IssueTrackerSettingsModel(repo=repo)
1715 active_entries = settings_model.get_settings(cache=True)
1720 active_entries = settings_model.get_settings(cache=True)
1716
1721
1717 issues_data = []
1722 issues_data = []
1718 newtext = text_string
1723 newtext = text_string
1719 for uid, entry in active_entries.items():
1724 for uid, entry in active_entries.items():
1720 log.debug('found issue tracker entry with uid %s' % (uid,))
1725 log.debug('found issue tracker entry with uid %s' % (uid,))
1721
1726
1722 if not (entry['pat'] and entry['url']):
1727 if not (entry['pat'] and entry['url']):
1723 log.debug('skipping due to missing data')
1728 log.debug('skipping due to missing data')
1724 continue
1729 continue
1725
1730
1726 log.debug('issue tracker entry: uid: `%s` PAT:%s URL:%s PREFIX:%s'
1731 log.debug('issue tracker entry: uid: `%s` PAT:%s URL:%s PREFIX:%s'
1727 % (uid, entry['pat'], entry['url'], entry['pref']))
1732 % (uid, entry['pat'], entry['url'], entry['pref']))
1728
1733
1729 try:
1734 try:
1730 pattern = re.compile(r'%s' % entry['pat'])
1735 pattern = re.compile(r'%s' % entry['pat'])
1731 except re.error:
1736 except re.error:
1732 log.exception(
1737 log.exception(
1733 'issue tracker pattern: `%s` failed to compile',
1738 'issue tracker pattern: `%s` failed to compile',
1734 entry['pat'])
1739 entry['pat'])
1735 continue
1740 continue
1736
1741
1737 data_func = partial(
1742 data_func = partial(
1738 _process_url_func, repo_name=repo_name, entry=entry, uid=uid,
1743 _process_url_func, repo_name=repo_name, entry=entry, uid=uid,
1739 return_raw_data=True)
1744 return_raw_data=True)
1740
1745
1741 for match_obj in pattern.finditer(text_string):
1746 for match_obj in pattern.finditer(text_string):
1742 issues_data.append(data_func(match_obj))
1747 issues_data.append(data_func(match_obj))
1743
1748
1744 url_func = partial(
1749 url_func = partial(
1745 _process_url_func, repo_name=repo_name, entry=entry, uid=uid)
1750 _process_url_func, repo_name=repo_name, entry=entry, uid=uid)
1746
1751
1747 newtext = pattern.sub(url_func, newtext)
1752 newtext = pattern.sub(url_func, newtext)
1748 log.debug('processed prefix:uid `%s`' % (uid,))
1753 log.debug('processed prefix:uid `%s`' % (uid,))
1749
1754
1750 return newtext, issues_data
1755 return newtext, issues_data
1751
1756
1752
1757
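Each active entry is compiled once and applied twice: finditer() collects the raw issue data, sub() rewrites the text. A condensed standalone sketch of that loop for a single hypothetical entry::

    import re

    entry = {'pat': r'#(?P<issue_id>\d+)',                      # hypothetical pattern
             'url': 'https://tracker.example.com/${issue_id}',  # hypothetical URL
             'pref': '#'}
    text = 'fixes #123 and #456'

    pattern = re.compile(entry['pat'])
    issues_data = [m.group('issue_id') for m in pattern.finditer(text)]
    newtext = pattern.sub(lambda m: '<a href="#">#%s</a>' % m.group('issue_id'), text)
    print(issues_data)   # ['123', '456']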
1753 def urlify_commit_message(commit_text, repository=None):
1758 def urlify_commit_message(commit_text, repository=None):
1754 """
1759 """
1755 Parses the given text message and makes proper links.
1760 Parses the given text message and makes proper links.
1756 Issues are linked to the configured issue tracker, and the rest become commit links
1761 Issues are linked to the configured issue tracker, and the rest become commit links
1757
1762
1758 :param commit_text:
1763 :param commit_text:
1759 :param repository:
1764 :param repository:
1760 """
1765 """
1761 from pylons import url # doh, we need to re-import url to mock it later
1766 from pylons import url # doh, we need to re-import url to mock it later
1762
1767
1763 def escaper(string):
1768 def escaper(string):
1764 return string.replace('<', '&lt;').replace('>', '&gt;')
1769 return string.replace('<', '&lt;').replace('>', '&gt;')
1765
1770
1766 newtext = escaper(commit_text)
1771 newtext = escaper(commit_text)
1767
1772
1768 # extract http/https links and make them real urls
1773 # extract http/https links and make them real urls
1769 newtext = urlify_text(newtext, safe=False)
1774 newtext = urlify_text(newtext, safe=False)
1770
1775
1771 # urlify commits - extract commit ids and make link out of them, if we have
1776 # urlify commits - extract commit ids and make link out of them, if we have
1772 # the scope of repository present.
1777 # the scope of repository present.
1773 if repository:
1778 if repository:
1774 newtext = urlify_commits(newtext, repository)
1779 newtext = urlify_commits(newtext, repository)
1775
1780
1776 # process issue tracker patterns
1781 # process issue tracker patterns
1777 newtext, issues = process_patterns(newtext, repository or '')
1782 newtext, issues = process_patterns(newtext, repository or '')
1778
1783
1779 return literal(newtext)
1784 return literal(newtext)
1780
1785
1781
1786
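The order matters: raw HTML is neutralised first, and only then are URL, commit and issue links layered on top. The escaping step on its own::

    def escaper(s):
        # replace angle brackets before any markup is injected
        return s.replace('<', '&lt;').replace('>', '&gt;')

    print(escaper('merge <feature> branch'))   # merge &lt;feature&gt; branch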
1782 def rst(source, mentions=False):
1787 def rst(source, mentions=False):
1783 return literal('<div class="rst-block">%s</div>' %
1788 return literal('<div class="rst-block">%s</div>' %
1784 MarkupRenderer.rst(source, mentions=mentions))
1789 MarkupRenderer.rst(source, mentions=mentions))
1785
1790
1786
1791
1787 def markdown(source, mentions=False):
1792 def markdown(source, mentions=False):
1788 return literal('<div class="markdown-block">%s</div>' %
1793 return literal('<div class="markdown-block">%s</div>' %
1789 MarkupRenderer.markdown(source, flavored=True,
1794 MarkupRenderer.markdown(source, flavored=True,
1790 mentions=mentions))
1795 mentions=mentions))
1791
1796
1792 def renderer_from_filename(filename, exclude=None):
1797 def renderer_from_filename(filename, exclude=None):
1793 return MarkupRenderer.renderer_from_filename(filename, exclude=exclude)
1798 return MarkupRenderer.renderer_from_filename(filename, exclude=exclude)
1794
1799
1795
1800
1796 def render(source, renderer='rst', mentions=False):
1801 def render(source, renderer='rst', mentions=False):
1797 if renderer == 'rst':
1802 if renderer == 'rst':
1798 return rst(source, mentions=mentions)
1803 return rst(source, mentions=mentions)
1799 if renderer == 'markdown':
1804 if renderer == 'markdown':
1800 return markdown(source, mentions=mentions)
1805 return markdown(source, mentions=mentions)
1801
1806
1802
1807
1803 def commit_status(repo, commit_id):
1808 def commit_status(repo, commit_id):
1804 return ChangesetStatusModel().get_status(repo, commit_id)
1809 return ChangesetStatusModel().get_status(repo, commit_id)
1805
1810
1806
1811
1807 def commit_status_lbl(commit_status):
1812 def commit_status_lbl(commit_status):
1808 return dict(ChangesetStatus.STATUSES).get(commit_status)
1813 return dict(ChangesetStatus.STATUSES).get(commit_status)
1809
1814
1810
1815
1811 def commit_time(repo_name, commit_id):
1816 def commit_time(repo_name, commit_id):
1812 repo = Repository.get_by_repo_name(repo_name)
1817 repo = Repository.get_by_repo_name(repo_name)
1813 commit = repo.get_commit(commit_id=commit_id)
1818 commit = repo.get_commit(commit_id=commit_id)
1814 return commit.date
1819 return commit.date
1815
1820
1816
1821
1817 def get_permission_name(key):
1822 def get_permission_name(key):
1818 return dict(Permission.PERMS).get(key)
1823 return dict(Permission.PERMS).get(key)
1819
1824
1820
1825
1821 def journal_filter_help():
1826 def journal_filter_help():
1822 return _(
1827 return _(
1823 'Example filter terms:\n' +
1828 'Example filter terms:\n' +
1824 ' repository:vcs\n' +
1829 ' repository:vcs\n' +
1825 ' username:marcin\n' +
1830 ' username:marcin\n' +
1826 ' action:*push*\n' +
1831 ' action:*push*\n' +
1827 ' ip:127.0.0.1\n' +
1832 ' ip:127.0.0.1\n' +
1828 ' date:20120101\n' +
1833 ' date:20120101\n' +
1829 ' date:[20120101100000 TO 20120102]\n' +
1834 ' date:[20120101100000 TO 20120102]\n' +
1830 '\n' +
1835 '\n' +
1831 'Generate wildcards using \'*\' character:\n' +
1836 'Generate wildcards using \'*\' character:\n' +
1832 ' "repository:vcs*" - search everything starting with \'vcs\'\n' +
1837 ' "repository:vcs*" - search everything starting with \'vcs\'\n' +
1833 ' "repository:*vcs*" - search for repository containing \'vcs\'\n' +
1838 ' "repository:*vcs*" - search for repository containing \'vcs\'\n' +
1834 '\n' +
1839 '\n' +
1835 'Optional AND / OR operators in queries\n' +
1840 'Optional AND / OR operators in queries\n' +
1836 ' "repository:vcs OR repository:test"\n' +
1841 ' "repository:vcs OR repository:test"\n' +
1837 ' "username:test AND repository:test*"\n'
1842 ' "username:test AND repository:test*"\n'
1838 )
1843 )
1839
1844
1840
1845
1841 def not_mapped_error(repo_name):
1846 def not_mapped_error(repo_name):
1842 flash(_('%s repository is not mapped to db perhaps'
1847 flash(_('%s repository is not mapped to db perhaps'
1843 ' it was created or renamed from the filesystem'
1848 ' it was created or renamed from the filesystem'
1844 ' please run the application again'
1849 ' please run the application again'
1845 ' in order to rescan repositories') % repo_name, category='error')
1850 ' in order to rescan repositories') % repo_name, category='error')
1846
1851
1847
1852
1848 def ip_range(ip_addr):
1853 def ip_range(ip_addr):
1849 from rhodecode.model.db import UserIpMap
1854 from rhodecode.model.db import UserIpMap
1850 s, e = UserIpMap._get_ip_range(ip_addr)
1855 s, e = UserIpMap._get_ip_range(ip_addr)
1851 return '%s - %s' % (s, e)
1856 return '%s - %s' % (s, e)
1852
1857
1853
1858
1854 def form(url, method='post', needs_csrf_token=True, **attrs):
1859 def form(url, method='post', needs_csrf_token=True, **attrs):
1855 """Wrapper around webhelpers.tags.form to prevent CSRF attacks."""
1860 """Wrapper around webhelpers.tags.form to prevent CSRF attacks."""
1856 if method.lower() != 'get' and needs_csrf_token:
1861 if method.lower() != 'get' and needs_csrf_token:
1857 raise Exception(
1862 raise Exception(
1858 'Forms to POST/PUT/DELETE endpoints should have (in general) a ' +
1863 'Forms to POST/PUT/DELETE endpoints should have (in general) a ' +
1859 'CSRF token. If the endpoint does not require such token you can ' +
1864 'CSRF token. If the endpoint does not require such token you can ' +
1860 'explicitly set the parameter needs_csrf_token to false.')
1865 'explicitly set the parameter needs_csrf_token to false.')
1861
1866
1862 return wh_form(url, method=method, **attrs)
1867 return wh_form(url, method=method, **attrs)
1863
1868
1864
1869
1865 def secure_form(url, method="POST", multipart=False, **attrs):
1870 def secure_form(url, method="POST", multipart=False, **attrs):
1866 """Start a form tag that points the action to an url. This
1871 """Start a form tag that points the action to an url. This
1867 form tag will also include the hidden field containing
1872 form tag will also include the hidden field containing
1868 the auth token.
1873 the auth token.
1869
1874
1870 The url options should be given either as a string, or as a
1875 The url options should be given either as a string, or as a
1871 ``url()`` function. The method for the form defaults to POST.
1876 ``url()`` function. The method for the form defaults to POST.
1872
1877
1873 Options:
1878 Options:
1874
1879
1875 ``multipart``
1880 ``multipart``
1876 If set to True, the enctype is set to "multipart/form-data".
1881 If set to True, the enctype is set to "multipart/form-data".
1877 ``method``
1882 ``method``
1878 The method to use when submitting the form, usually either
1883 The method to use when submitting the form, usually either
1879 "GET" or "POST". If "PUT", "DELETE", or another verb is used, a
1884 "GET" or "POST". If "PUT", "DELETE", or another verb is used, a
1880 hidden input with name _method is added to simulate the verb
1885 hidden input with name _method is added to simulate the verb
1881 over POST.
1886 over POST.
1882
1887
1883 """
1888 """
1884 from webhelpers.pylonslib.secure_form import insecure_form
1889 from webhelpers.pylonslib.secure_form import insecure_form
1885 form = insecure_form(url, method, multipart, **attrs)
1890 form = insecure_form(url, method, multipart, **attrs)
1886 token = csrf_input()
1891 token = csrf_input()
1887 return literal("%s\n%s" % (form, token))
1892 return literal("%s\n%s" % (form, token))
1888
1893
1889 def csrf_input():
1894 def csrf_input():
1890 return literal(
1895 return literal(
1891 '<input type="hidden" id="{}" name="{}" value="{}">'.format(
1896 '<input type="hidden" id="{}" name="{}" value="{}">'.format(
1892 csrf_token_key, csrf_token_key, get_csrf_token()))
1897 csrf_token_key, csrf_token_key, get_csrf_token()))
1893
1898
1894 def dropdownmenu(name, selected, options, enable_filter=False, **attrs):
1899 def dropdownmenu(name, selected, options, enable_filter=False, **attrs):
1895 select_html = select(name, selected, options, **attrs)
1900 select_html = select(name, selected, options, **attrs)
1896 select2 = """
1901 select2 = """
1897 <script>
1902 <script>
1898 $(document).ready(function() {
1903 $(document).ready(function() {
1899 $('#%s').select2({
1904 $('#%s').select2({
1900 containerCssClass: 'drop-menu',
1905 containerCssClass: 'drop-menu',
1901 dropdownCssClass: 'drop-menu-dropdown',
1906 dropdownCssClass: 'drop-menu-dropdown',
1902 dropdownAutoWidth: true%s
1907 dropdownAutoWidth: true%s
1903 });
1908 });
1904 });
1909 });
1905 </script>
1910 </script>
1906 """
1911 """
1907 filter_option = """,
1912 filter_option = """,
1908 minimumResultsForSearch: -1
1913 minimumResultsForSearch: -1
1909 """
1914 """
1910 input_id = attrs.get('id') or name
1915 input_id = attrs.get('id') or name
1911 filter_enabled = "" if enable_filter else filter_option
1916 filter_enabled = "" if enable_filter else filter_option
1912 select_script = literal(select2 % (input_id, filter_enabled))
1917 select_script = literal(select2 % (input_id, filter_enabled))
1913
1918
1914 return literal(select_html+select_script)
1919 return literal(select_html+select_script)
1915
1920
1916
1921
1917 def get_visual_attr(tmpl_context_var, attr_name):
1922 def get_visual_attr(tmpl_context_var, attr_name):
1918 """
1923 """
1919 A safe way to get a variable from visual variable of template context
1924 A safe way to get a variable from visual variable of template context
1920
1925
1921 :param tmpl_context_var: instance of tmpl_context, usually present as `c`
1926 :param tmpl_context_var: instance of tmpl_context, usually present as `c`
1922 :param attr_name: name of the attribute we fetch from the c.visual
1927 :param attr_name: name of the attribute we fetch from the c.visual
1923 """
1928 """
1924 visual = getattr(tmpl_context_var, 'visual', None)
1929 visual = getattr(tmpl_context_var, 'visual', None)
1925 if not visual:
1930 if not visual:
1926 return
1931 return
1927 else:
1932 else:
1928 return getattr(visual, attr_name, None)
1933 return getattr(visual, attr_name, None)
1929
1934
1930
1935
1931 def get_last_path_part(file_node):
1936 def get_last_path_part(file_node):
1932 if not file_node.path:
1937 if not file_node.path:
1933 return u''
1938 return u''
1934
1939
1935 path = safe_unicode(file_node.path.split('/')[-1])
1940 path = safe_unicode(file_node.path.split('/')[-1])
1936 return u'../' + path
1941 return u'../' + path
1937
1942
1938
1943
1939 def route_path(*args, **kwds):
1944 def route_path(*args, **kwds):
1940 """
1945 """
1941 Wrapper around pyramids `route_path` function. It is used to generate
1946 Wrapper around pyramids `route_path` function. It is used to generate
1942 URLs from within pylons views or templates. This will be removed when
1947 URLs from within pylons views or templates. This will be removed when
1943 the pyramid migration is finished.
1948 the pyramid migration is finished.
1944 """
1949 """
1945 req = get_current_request()
1950 req = get_current_request()
1946 return req.route_path(*args, **kwds)
1951 return req.route_path(*args, **kwds)
1947
1952
1948
1953
1949 def route_path_or_none(*args, **kwargs):
1954 def route_path_or_none(*args, **kwargs):
1950 try:
1955 try:
1951 return route_path(*args, **kwargs)
1956 return route_path(*args, **kwargs)
1952 except KeyError:
1957 except KeyError:
1953 return None
1958 return None
1954
1959
1955
1960
1956 def static_url(*args, **kwds):
1961 def static_url(*args, **kwds):
1957 """
1962 """
1958 Wrapper around pyramids `static_url` function. It is used to generate
1963 Wrapper around pyramids `static_url` function. It is used to generate
1959 URLs from within pylons views or templates. This will be removed when
1964 URLs from within pylons views or templates. This will be removed when
1960 the pyramid migration is finished.
1965 the pyramid migration is finished.
1961 """
1966 """
1962 req = get_current_request()
1967 req = get_current_request()
1963 return req.static_url(*args, **kwds)
1968 return req.static_url(*args, **kwds)
1964
1969
1965
1970
1966 def resource_path(*args, **kwds):
1971 def resource_path(*args, **kwds):
1967 """
1972 """
1968 Wrapper around pyramids `resource_path` function. It is used to generate
1973 Wrapper around pyramids `resource_path` function. It is used to generate
1969 URLs from within pylons views or templates. This will be removed when
1974 URLs from within pylons views or templates. This will be removed when
1970 the pyramid migration is finished.
1975 the pyramid migration is finished.
1971 """
1976 """
1972 req = get_current_request()
1977 req = get_current_request()
1973 return req.resource_path(*args, **kwds)
1978 return req.resource_path(*args, **kwds)
@@ -1,886 +1,895 b''
1 # -*- coding: utf-8 -*-
1 # -*- coding: utf-8 -*-
2
2
3 # Copyright (C) 2011-2016 RhodeCode GmbH
3 # Copyright (C) 2011-2016 RhodeCode GmbH
4 #
4 #
5 # This program is free software: you can redistribute it and/or modify
5 # This program is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU Affero General Public License, version 3
6 # it under the terms of the GNU Affero General Public License, version 3
7 # (only), as published by the Free Software Foundation.
7 # (only), as published by the Free Software Foundation.
8 #
8 #
9 # This program is distributed in the hope that it will be useful,
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU General Public License for more details.
12 # GNU General Public License for more details.
13 #
13 #
14 # You should have received a copy of the GNU Affero General Public License
14 # You should have received a copy of the GNU Affero General Public License
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 #
16 #
17 # This program is dual-licensed. If you wish to learn more about the
17 # This program is dual-licensed. If you wish to learn more about the
18 # RhodeCode Enterprise Edition, including its added features, Support services,
18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 # and proprietary license terms, please see https://rhodecode.com/licenses/
19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20
20
21
21
22 """
22 """
23 Some simple helper functions
23 Some simple helper functions
24 """
24 """
25
25
26
26
27 import collections
27 import collections
28 import datetime
28 import datetime
29 import dateutil.relativedelta
29 import dateutil.relativedelta
30 import hashlib
30 import hashlib
31 import logging
31 import logging
32 import re
32 import re
33 import sys
33 import sys
34 import time
34 import time
35 import threading
35 import threading
36 import urllib
36 import urllib
37 import urlobject
37 import urlobject
38 import uuid
38 import uuid
39
39
40 import pygments.lexers
40 import pygments.lexers
41 import sqlalchemy
41 import sqlalchemy
42 import sqlalchemy.engine.url
42 import sqlalchemy.engine.url
43 import webob
43 import webob
44 import routes.util
44 import routes.util
45
45
46 import rhodecode
46 import rhodecode
47
47
48
48
49 def md5(s):
49 def md5(s):
50 return hashlib.md5(s).hexdigest()
50 return hashlib.md5(s).hexdigest()
51
51
52
52
53 def md5_safe(s):
53 def md5_safe(s):
54 return md5(safe_str(s))
54 return md5(safe_str(s))
55
55
56
56
57 def __get_lem():
57 def __get_lem(extra_mapping=None):
58 """
58 """
59 Get language extension map based on what's inside pygments lexers
59 Get language extension map based on what's inside pygments lexers
60 """
60 """
61 d = collections.defaultdict(lambda: [])
61 d = collections.defaultdict(lambda: [])
62
62
63 def __clean(s):
63 def __clean(s):
64 s = s.lstrip('*')
64 s = s.lstrip('*')
65 s = s.lstrip('.')
65 s = s.lstrip('.')
66
66
67 if s.find('[') != -1:
67 if s.find('[') != -1:
68 exts = []
68 exts = []
69 start, stop = s.find('['), s.find(']')
69 start, stop = s.find('['), s.find(']')
70
70
71 for suffix in s[start + 1:stop]:
71 for suffix in s[start + 1:stop]:
72 exts.append(s[:s.find('[')] + suffix)
72 exts.append(s[:s.find('[')] + suffix)
73 return [e.lower() for e in exts]
73 return [e.lower() for e in exts]
74 else:
74 else:
75 return [s.lower()]
75 return [s.lower()]
76
76
77 for lx, t in sorted(pygments.lexers.LEXERS.items()):
77 for lx, t in sorted(pygments.lexers.LEXERS.items()):
78 m = map(__clean, t[-2])
78 m = map(__clean, t[-2])
79 if m:
79 if m:
80 m = reduce(lambda x, y: x + y, m)
80 m = reduce(lambda x, y: x + y, m)
81 for ext in m:
81 for ext in m:
82 desc = lx.replace('Lexer', '')
82 desc = lx.replace('Lexer', '')
83 d[ext].append(desc)
83 d[ext].append(desc)
84
84
85 return dict(d)
85 data = dict(d)
86
87 extra_mapping = extra_mapping or {}
88 if extra_mapping:
89 for k, v in extra_mapping.items():
90 if k not in data:
91 # register a new extension-to-lexer mapping
92 data[k] = [v]
93
94 return data
86
95
87
96
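extra_mapping only fills gaps: an extension already claimed by a pygments lexer is left untouched. A simplified sketch of the merge step (the pre-built map and the 'myext' entry are purely illustrative)::

    data = {'py': ['Python'], 'vb': ['VbNet']}   # pretend the pygments scan built this

    extra_mapping = {'myext': 'Python', 'py': 'Cython'}
    for k, v in extra_mapping.items():
        if k not in data:          # only brand-new extensions are registered
            data[k] = [v]

    print(data['myext'])   # ['Python']
    print(data['py'])      # ['Python'] - the existing pygments mapping wins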
88 def str2bool(_str):
97 def str2bool(_str):
89 """
98 """
90 returns a True/False value from the given string; it tries to translate the
99 returns a True/False value from the given string; it tries to translate the
91 string into a boolean
100 string into a boolean
92
101
93 :param _str: string value to translate into boolean
102 :param _str: string value to translate into boolean
94 :rtype: boolean
103 :rtype: boolean
95 :returns: boolean from given string
104 :returns: boolean from given string
96 """
105 """
97 if _str is None:
106 if _str is None:
98 return False
107 return False
99 if _str in (True, False):
108 if _str in (True, False):
100 return _str
109 return _str
101 _str = str(_str).strip().lower()
110 _str = str(_str).strip().lower()
102 return _str in ('t', 'true', 'y', 'yes', 'on', '1')
111 return _str in ('t', 'true', 'y', 'yes', 'on', '1')
103
112
104
113
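Only a small whitelist of lower-cased strings counts as true; anything else, including None and unrecognised words, is False. Example usage (assuming this module is importable as rhodecode.lib.utils2)::

    from rhodecode.lib.utils2 import str2bool

    for value in ('Yes', 'on', '1', 'nope', '', None, True):
        print('%r -> %s' % (value, str2bool(value)))
    # 'Yes' -> True, 'on' -> True, '1' -> True, 'nope' -> False,
    # '' -> False, None -> False, True -> True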
105 def aslist(obj, sep=None, strip=True):
114 def aslist(obj, sep=None, strip=True):
106 """
115 """
107 Returns given string separated by sep as list
116 Returns given string separated by sep as list
108
117
109 :param obj:
118 :param obj:
110 :param sep:
119 :param sep:
111 :param strip:
120 :param strip:
112 """
121 """
113 if isinstance(obj, (basestring)):
122 if isinstance(obj, (basestring,)):
114 lst = obj.split(sep)
123 lst = obj.split(sep)
115 if strip:
124 if strip:
116 lst = [v.strip() for v in lst]
125 lst = [v.strip() for v in lst]
117 return lst
126 return lst
118 elif isinstance(obj, (list, tuple)):
127 elif isinstance(obj, (list, tuple)):
119 return obj
128 return obj
120 elif obj is None:
129 elif obj is None:
121 return []
130 return []
122 else:
131 else:
123 return [obj]
132 return [obj]
124
133
125
134
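Example usage covering the four input shapes handled above (assuming the module is importable as rhodecode.lib.utils2)::

    from rhodecode.lib.utils2 import aslist

    print(aslist('a, b ,c', sep=','))   # ['a', 'b', 'c'] - split and stripped
    print(aslist(['a', 'b']))           # ['a', 'b'] - lists/tuples pass through
    print(aslist(None))                 # []
    print(aslist(42))                   # [42] - anything else is wrapped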
126 def convert_line_endings(line, mode):
135 def convert_line_endings(line, mode):
127 """
136 """
128 Converts a given line "line end" accordingly to given mode
137 Converts a given line "line end" accordingly to given mode
129
138
130 Available modes are::
139 Available modes are::
131 0 - Unix
140 0 - Unix
132 1 - Mac
141 1 - Mac
133 2 - DOS
142 2 - DOS
134
143
135 :param line: given line to convert
144 :param line: given line to convert
136 :param mode: mode to convert to
145 :param mode: mode to convert to
137 :rtype: str
146 :rtype: str
138 :return: converted line according to mode
147 :return: converted line according to mode
139 """
148 """
140 if mode == 0:
149 if mode == 0:
141 line = line.replace('\r\n', '\n')
150 line = line.replace('\r\n', '\n')
142 line = line.replace('\r', '\n')
151 line = line.replace('\r', '\n')
143 elif mode == 1:
152 elif mode == 1:
144 line = line.replace('\r\n', '\r')
153 line = line.replace('\r\n', '\r')
145 line = line.replace('\n', '\r')
154 line = line.replace('\n', '\r')
146 elif mode == 2:
155 elif mode == 2:
147 line = re.sub('\r(?!\n)|(?<!\r)\n', '\r\n', line)
156 line = re.sub('\r(?!\n)|(?<!\r)\n', '\r\n', line)
148 return line
157 return line
149
158
150
159
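A short usage example for the three modes (assuming the module is importable as rhodecode.lib.utils2)::

    from rhodecode.lib.utils2 import convert_line_endings

    mixed = 'one\r\ntwo\rthree\n'
    print(repr(convert_line_endings(mixed, 0)))   # 'one\ntwo\nthree\n'       (Unix)
    print(repr(convert_line_endings(mixed, 1)))   # 'one\rtwo\rthree\r'       (Mac)
    print(repr(convert_line_endings(mixed, 2)))   # 'one\r\ntwo\r\nthree\r\n' (DOS)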
151 def detect_mode(line, default):
160 def detect_mode(line, default):
152 """
161 """
153 Detects line break for given line, if line break couldn't be found
162 Detects line break for given line, if line break couldn't be found
154 given default value is returned
163 given default value is returned
155
164
156 :param line: str line
165 :param line: str line
157 :param default: default
166 :param default: default
158 :rtype: int
167 :rtype: int
159 :return: value of line end, one of 0 - Unix, 1 - Mac, 2 - DOS
168 :return: value of line end, one of 0 - Unix, 1 - Mac, 2 - DOS
160 """
169 """
161 if line.endswith('\r\n'):
170 if line.endswith('\r\n'):
162 return 2
171 return 2
163 elif line.endswith('\n'):
172 elif line.endswith('\n'):
164 return 0
173 return 0
165 elif line.endswith('\r'):
174 elif line.endswith('\r'):
166 return 1
175 return 1
167 else:
176 else:
168 return default
177 return default
169
178
170
179
171 def safe_int(val, default=None):
180 def safe_int(val, default=None):
172 """
181 """
173 Returns int() of val; if val is not convertible to int, default is used
182 Returns int() of val; if val is not convertible to int, default is used
174 instead
183 instead
175
184
176 :param val:
185 :param val:
177 :param default:
186 :param default:
178 """
187 """
179
188
180 try:
189 try:
181 val = int(val)
190 val = int(val)
182 except (ValueError, TypeError):
191 except (ValueError, TypeError):
183 val = default
192 val = default
184
193
185 return val
194 return val
186
195
187
196
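Example usage (assuming the module is importable as rhodecode.lib.utils2)::

    from rhodecode.lib.utils2 import safe_int

    print(safe_int('42'))              # 42
    print(safe_int('42.7'))            # None - int('42.7') raises ValueError
    print(safe_int(None, default=0))   # 0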
188 def safe_unicode(str_, from_encoding=None):
197 def safe_unicode(str_, from_encoding=None):
189 """
198 """
190 safe unicode function. Does a few tricks to turn str_ into unicode
199 safe unicode function. Does a few tricks to turn str_ into unicode
191
200
192 In case of a UnicodeDecodeError, we try to return it with the encoding detected
201 In case of a UnicodeDecodeError, we try to return it with the encoding detected
193 by the chardet library; if that fails, fall back to unicode with errors replaced
202 by the chardet library; if that fails, fall back to unicode with errors replaced
194
203
195 :param str_: string to decode
204 :param str_: string to decode
196 :rtype: unicode
205 :rtype: unicode
197 :returns: unicode object
206 :returns: unicode object
198 """
207 """
199 if isinstance(str_, unicode):
208 if isinstance(str_, unicode):
200 return str_
209 return str_
201
210
202 if not from_encoding:
211 if not from_encoding:
203 DEFAULT_ENCODINGS = aslist(rhodecode.CONFIG.get('default_encoding',
212 DEFAULT_ENCODINGS = aslist(rhodecode.CONFIG.get('default_encoding',
204 'utf8'), sep=',')
213 'utf8'), sep=',')
205 from_encoding = DEFAULT_ENCODINGS
214 from_encoding = DEFAULT_ENCODINGS
206
215
207 if not isinstance(from_encoding, (list, tuple)):
216 if not isinstance(from_encoding, (list, tuple)):
208 from_encoding = [from_encoding]
217 from_encoding = [from_encoding]
209
218
210 try:
219 try:
211 return unicode(str_)
220 return unicode(str_)
212 except UnicodeDecodeError:
221 except UnicodeDecodeError:
213 pass
222 pass
214
223
215 for enc in from_encoding:
224 for enc in from_encoding:
216 try:
225 try:
217 return unicode(str_, enc)
226 return unicode(str_, enc)
218 except UnicodeDecodeError:
227 except UnicodeDecodeError:
219 pass
228 pass
220
229
221 try:
230 try:
222 import chardet
231 import chardet
223 encoding = chardet.detect(str_)['encoding']
232 encoding = chardet.detect(str_)['encoding']
224 if encoding is None:
233 if encoding is None:
225 raise Exception()
234 raise Exception()
226 return str_.decode(encoding)
235 return str_.decode(encoding)
227 except (ImportError, UnicodeDecodeError, Exception):
236 except (ImportError, UnicodeDecodeError, Exception):
228 return unicode(str_, from_encoding[0], 'replace')
237 return unicode(str_, from_encoding[0], 'replace')
229
238
230
239
231 def safe_str(unicode_, to_encoding=None):
240 def safe_str(unicode_, to_encoding=None):
232 """
241 """
233 safe str function. Does a few tricks to turn unicode_ into a string
242 safe str function. Does a few tricks to turn unicode_ into a string
234
243
235 In case of a UnicodeEncodeError, we try to return it with the encoding detected
244 In case of a UnicodeEncodeError, we try to return it with the encoding detected
236 by the chardet library; if that fails, fall back to a string with errors replaced
245 by the chardet library; if that fails, fall back to a string with errors replaced
237
246
238 :param unicode_: unicode to encode
247 :param unicode_: unicode to encode
239 :rtype: str
248 :rtype: str
240 :returns: str object
249 :returns: str object
241 """
250 """
242
251
243 # if it's not basestr cast to str
252 # if it's not basestr cast to str
244 if not isinstance(unicode_, basestring):
253 if not isinstance(unicode_, basestring):
245 return str(unicode_)
254 return str(unicode_)
246
255
247 if isinstance(unicode_, str):
256 if isinstance(unicode_, str):
248 return unicode_
257 return unicode_
249
258
250 if not to_encoding:
259 if not to_encoding:
251 DEFAULT_ENCODINGS = aslist(rhodecode.CONFIG.get('default_encoding',
260 DEFAULT_ENCODINGS = aslist(rhodecode.CONFIG.get('default_encoding',
252 'utf8'), sep=',')
261 'utf8'), sep=',')
253 to_encoding = DEFAULT_ENCODINGS
262 to_encoding = DEFAULT_ENCODINGS
254
263
255 if not isinstance(to_encoding, (list, tuple)):
264 if not isinstance(to_encoding, (list, tuple)):
256 to_encoding = [to_encoding]
265 to_encoding = [to_encoding]
257
266
258 for enc in to_encoding:
267 for enc in to_encoding:
259 try:
268 try:
260 return unicode_.encode(enc)
269 return unicode_.encode(enc)
261 except UnicodeEncodeError:
270 except UnicodeEncodeError:
262 pass
271 pass
263
272
264 try:
273 try:
265 import chardet
274 import chardet
266 encoding = chardet.detect(unicode_)['encoding']
275 encoding = chardet.detect(unicode_)['encoding']
267 if encoding is None:
276 if encoding is None:
268 raise UnicodeEncodeError()
277 raise UnicodeEncodeError()
269
278
270 return unicode_.encode(encoding)
279 return unicode_.encode(encoding)
271 except (ImportError, UnicodeEncodeError):
280 except (ImportError, UnicodeEncodeError):
272 return unicode_.encode(to_encoding[0], 'replace')
281 return unicode_.encode(to_encoding[0], 'replace')
273
282
274
283
275 def remove_suffix(s, suffix):
284 def remove_suffix(s, suffix):
276 if s.endswith(suffix):
285 if s.endswith(suffix):
277 s = s[:-1 * len(suffix)]
286 s = s[:-1 * len(suffix)]
278 return s
287 return s
279
288
280
289
281 def remove_prefix(s, prefix):
290 def remove_prefix(s, prefix):
282 if s.startswith(prefix):
291 if s.startswith(prefix):
283 s = s[len(prefix):]
292 s = s[len(prefix):]
284 return s
293 return s
285
294
286
295
287 def find_calling_context(ignore_modules=None):
296 def find_calling_context(ignore_modules=None):
288 """
297 """
289 Look through the calling stack and return the frame which called
298 Look through the calling stack and return the frame which called
290 this function and is part of core module ( ie. rhodecode.* )
299 this function and is part of core module ( ie. rhodecode.* )
291
300
292 :param ignore_modules: list of modules to ignore eg. ['rhodecode.lib']
301 :param ignore_modules: list of modules to ignore eg. ['rhodecode.lib']
293 """
302 """
294
303
295 ignore_modules = ignore_modules or []
304 ignore_modules = ignore_modules or []
296
305
297 f = sys._getframe(2)
306 f = sys._getframe(2)
298 while f.f_back is not None:
307 while f.f_back is not None:
299 name = f.f_globals.get('__name__')
308 name = f.f_globals.get('__name__')
300 if name and name.startswith(__name__.split('.')[0]):
309 if name and name.startswith(__name__.split('.')[0]):
301 if name not in ignore_modules:
310 if name not in ignore_modules:
302 return f
311 return f
303 f = f.f_back
312 f = f.f_back
304 return None
313 return None
305
314
306
315
307 def engine_from_config(configuration, prefix='sqlalchemy.', **kwargs):
316 def engine_from_config(configuration, prefix='sqlalchemy.', **kwargs):
308 """Custom engine_from_config functions."""
317 """Custom engine_from_config functions."""
309 log = logging.getLogger('sqlalchemy.engine')
318 log = logging.getLogger('sqlalchemy.engine')
310 engine = sqlalchemy.engine_from_config(configuration, prefix, **kwargs)
319 engine = sqlalchemy.engine_from_config(configuration, prefix, **kwargs)
311
320
312 def color_sql(sql):
321 def color_sql(sql):
313 color_seq = '\033[1;33m' # This is yellow: code 33
322 color_seq = '\033[1;33m' # This is yellow: code 33
314 normal = '\x1b[0m'
323 normal = '\x1b[0m'
315 return ''.join([color_seq, sql, normal])
324 return ''.join([color_seq, sql, normal])
316
325
317 if configuration['debug']:
326 if configuration['debug']:
318 # attach events only for debug configuration
327 # attach events only for debug configuration
319
328
320 def before_cursor_execute(conn, cursor, statement,
329 def before_cursor_execute(conn, cursor, statement,
321 parameters, context, executemany):
330 parameters, context, executemany):
322 setattr(conn, 'query_start_time', time.time())
331 setattr(conn, 'query_start_time', time.time())
323 log.info(color_sql(">>>>> STARTING QUERY >>>>>"))
332 log.info(color_sql(">>>>> STARTING QUERY >>>>>"))
324 calling_context = find_calling_context(ignore_modules=[
333 calling_context = find_calling_context(ignore_modules=[
325 'rhodecode.lib.caching_query',
334 'rhodecode.lib.caching_query',
326 'rhodecode.model.settings',
335 'rhodecode.model.settings',
327 ])
336 ])
328 if calling_context:
337 if calling_context:
329 log.info(color_sql('call context %s:%s' % (
338 log.info(color_sql('call context %s:%s' % (
330 calling_context.f_code.co_filename,
339 calling_context.f_code.co_filename,
331 calling_context.f_lineno,
340 calling_context.f_lineno,
332 )))
341 )))
333
342
334 def after_cursor_execute(conn, cursor, statement,
343 def after_cursor_execute(conn, cursor, statement,
335 parameters, context, executemany):
344 parameters, context, executemany):
336 delattr(conn, 'query_start_time')
345 delattr(conn, 'query_start_time')
337
346
338 sqlalchemy.event.listen(engine, "before_cursor_execute",
347 sqlalchemy.event.listen(engine, "before_cursor_execute",
339 before_cursor_execute)
348 before_cursor_execute)
340 sqlalchemy.event.listen(engine, "after_cursor_execute",
349 sqlalchemy.event.listen(engine, "after_cursor_execute",
341 after_cursor_execute)
350 after_cursor_execute)
342
351
343 return engine
352 return engine
344
353
345
354
346 def get_encryption_key(config):
355 def get_encryption_key(config):
347 secret = config.get('rhodecode.encrypted_values.secret')
356 secret = config.get('rhodecode.encrypted_values.secret')
348 default = config['beaker.session.secret']
357 default = config['beaker.session.secret']
349 return secret or default
358 return secret or default
350
359
351
360
352 def age(prevdate, now=None, show_short_version=False, show_suffix=True,
361 def age(prevdate, now=None, show_short_version=False, show_suffix=True,
353 short_format=False):
362 short_format=False):
354 """
363 """
355 Turns a datetime into an age string.
364 Turns a datetime into an age string.
356 If show_short_version is True, this generates a shorter string with
365 If show_short_version is True, this generates a shorter string with
357 an approximate age; ex. '1 day ago', rather than '1 day and 23 hours ago'.
366 an approximate age; ex. '1 day ago', rather than '1 day and 23 hours ago'.
358
367
359 * IMPORTANT*
368 * IMPORTANT*
360 The code of this function is written in a special way so it's easier to
369 The code of this function is written in a special way so it's easier to
361 port it to javascript. If you mean to update it, please also update
370 port it to javascript. If you mean to update it, please also update
362 `jquery.timeago-extension.js` file
371 `jquery.timeago-extension.js` file
363
372
364 :param prevdate: datetime object
373 :param prevdate: datetime object
365 :param now: get current time, if not defined we use
374 :param now: get current time, if not defined we use
366 `datetime.datetime.now()`
375 `datetime.datetime.now()`
367 :param show_short_version: if it should approximate the date and
376 :param show_short_version: if it should approximate the date and
368 return a shorter string
377 return a shorter string
369 :param show_suffix:
378 :param show_suffix:
370 :param short_format: show short format, eg 2D instead of 2 days
379 :param short_format: show short format, eg 2D instead of 2 days
371 :rtype: unicode
380 :rtype: unicode
372 :returns: unicode words describing age
381 :returns: unicode words describing age
373 """
382 """
374 from pylons.i18n.translation import _, ungettext
383 from pylons.i18n.translation import _, ungettext
375
384
376 def _get_relative_delta(now, prevdate):
385 def _get_relative_delta(now, prevdate):
377 base = dateutil.relativedelta.relativedelta(now, prevdate)
386 base = dateutil.relativedelta.relativedelta(now, prevdate)
378 return {
387 return {
379 'year': base.years,
388 'year': base.years,
380 'month': base.months,
389 'month': base.months,
381 'day': base.days,
390 'day': base.days,
382 'hour': base.hours,
391 'hour': base.hours,
383 'minute': base.minutes,
392 'minute': base.minutes,
384 'second': base.seconds,
393 'second': base.seconds,
385 }
394 }
386
395
387 def _is_leap_year(year):
396 def _is_leap_year(year):
388 return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
397 return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
389
398
390 def get_month(prevdate):
399 def get_month(prevdate):
391 return prevdate.month
400 return prevdate.month
392
401
393 def get_year(prevdate):
402 def get_year(prevdate):
394 return prevdate.year
403 return prevdate.year
395
404
396 now = now or datetime.datetime.now()
405 now = now or datetime.datetime.now()
397 order = ['year', 'month', 'day', 'hour', 'minute', 'second']
406 order = ['year', 'month', 'day', 'hour', 'minute', 'second']
398 deltas = {}
407 deltas = {}
399 future = False
408 future = False
400
409
401 if prevdate > now:
410 if prevdate > now:
402 now_old = now
411 now_old = now
403 now = prevdate
412 now = prevdate
404 prevdate = now_old
413 prevdate = now_old
405 future = True
414 future = True
406 if future:
415 if future:
407 prevdate = prevdate.replace(microsecond=0)
416 prevdate = prevdate.replace(microsecond=0)
408 # Get date parts deltas
417 # Get date parts deltas
409 for part in order:
418 for part in order:
410 rel_delta = _get_relative_delta(now, prevdate)
419 rel_delta = _get_relative_delta(now, prevdate)
411 deltas[part] = rel_delta[part]
420 deltas[part] = rel_delta[part]
412
421
413 # Fix negative offsets (there is 1 second between 10:59:59 and 11:00:00,
422 # Fix negative offsets (there is 1 second between 10:59:59 and 11:00:00,
414 # not 1 hour, -59 minutes and -59 seconds)
423 # not 1 hour, -59 minutes and -59 seconds)
415 offsets = [[5, 60], [4, 60], [3, 24]]
424 offsets = [[5, 60], [4, 60], [3, 24]]
416 for element in offsets: # seconds, minutes, hours
425 for element in offsets: # seconds, minutes, hours
417 num = element[0]
426 num = element[0]
418 length = element[1]
427 length = element[1]
419
428
420 part = order[num]
429 part = order[num]
421 carry_part = order[num - 1]
430 carry_part = order[num - 1]
422
431
423 if deltas[part] < 0:
432 if deltas[part] < 0:
424 deltas[part] += length
433 deltas[part] += length
425 deltas[carry_part] -= 1
434 deltas[carry_part] -= 1
426
435
427 # Same thing for days except that the increment depends on the (variable)
436 # Same thing for days except that the increment depends on the (variable)
428 # number of days in the month
437 # number of days in the month
429 month_lengths = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
438 month_lengths = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
430 if deltas['day'] < 0:
439 if deltas['day'] < 0:
431 if get_month(prevdate) == 2 and _is_leap_year(get_year(prevdate)):
440 if get_month(prevdate) == 2 and _is_leap_year(get_year(prevdate)):
432 deltas['day'] += 29
441 deltas['day'] += 29
433 else:
442 else:
434 deltas['day'] += month_lengths[get_month(prevdate) - 1]
443 deltas['day'] += month_lengths[get_month(prevdate) - 1]
435
444
436 deltas['month'] -= 1
445 deltas['month'] -= 1
437
446
438 if deltas['month'] < 0:
447 if deltas['month'] < 0:
439 deltas['month'] += 12
448 deltas['month'] += 12
440 deltas['year'] -= 1
449 deltas['year'] -= 1
441
450
442 # Format the result
451 # Format the result
443 if short_format:
452 if short_format:
444 fmt_funcs = {
453 fmt_funcs = {
445 'year': lambda d: u'%dy' % d,
454 'year': lambda d: u'%dy' % d,
446 'month': lambda d: u'%dm' % d,
455 'month': lambda d: u'%dm' % d,
447 'day': lambda d: u'%dd' % d,
456 'day': lambda d: u'%dd' % d,
448 'hour': lambda d: u'%dh' % d,
457 'hour': lambda d: u'%dh' % d,
449 'minute': lambda d: u'%dmin' % d,
458 'minute': lambda d: u'%dmin' % d,
450 'second': lambda d: u'%dsec' % d,
459 'second': lambda d: u'%dsec' % d,
451 }
460 }
452 else:
461 else:
453 fmt_funcs = {
462 fmt_funcs = {
454 'year': lambda d: ungettext(u'%d year', '%d years', d) % d,
463 'year': lambda d: ungettext(u'%d year', '%d years', d) % d,
455 'month': lambda d: ungettext(u'%d month', '%d months', d) % d,
464 'month': lambda d: ungettext(u'%d month', '%d months', d) % d,
456 'day': lambda d: ungettext(u'%d day', '%d days', d) % d,
465 'day': lambda d: ungettext(u'%d day', '%d days', d) % d,
457 'hour': lambda d: ungettext(u'%d hour', '%d hours', d) % d,
466 'hour': lambda d: ungettext(u'%d hour', '%d hours', d) % d,
458 'minute': lambda d: ungettext(u'%d minute', '%d minutes', d) % d,
467 'minute': lambda d: ungettext(u'%d minute', '%d minutes', d) % d,
459 'second': lambda d: ungettext(u'%d second', '%d seconds', d) % d,
468 'second': lambda d: ungettext(u'%d second', '%d seconds', d) % d,
460 }
469 }
461
470
462 i = 0
471 i = 0
463 for part in order:
472 for part in order:
464 value = deltas[part]
473 value = deltas[part]
465 if value != 0:
474 if value != 0:
466
475
467 if i < 5:
476 if i < 5:
468 sub_part = order[i + 1]
477 sub_part = order[i + 1]
469 sub_value = deltas[sub_part]
478 sub_value = deltas[sub_part]
470 else:
479 else:
471 sub_value = 0
480 sub_value = 0
472
481
473 if sub_value == 0 or show_short_version:
482 if sub_value == 0 or show_short_version:
474 _val = fmt_funcs[part](value)
483 _val = fmt_funcs[part](value)
475 if future:
484 if future:
476 if show_suffix:
485 if show_suffix:
477 return _(u'in %s') % _val
486 return _(u'in %s') % _val
478 else:
487 else:
479 return _val
488 return _val
480
489
481 else:
490 else:
482 if show_suffix:
491 if show_suffix:
483 return _(u'%s ago') % _val
492 return _(u'%s ago') % _val
484 else:
493 else:
485 return _val
494 return _val
486
495
487 val = fmt_funcs[part](value)
496 val = fmt_funcs[part](value)
488 val_detail = fmt_funcs[sub_part](sub_value)
497 val_detail = fmt_funcs[sub_part](sub_value)
489
498
490 if short_format:
499 if short_format:
491 datetime_tmpl = u'%s, %s'
500 datetime_tmpl = u'%s, %s'
492 if show_suffix:
501 if show_suffix:
493 datetime_tmpl = _(u'%s, %s ago')
502 datetime_tmpl = _(u'%s, %s ago')
494 if future:
503 if future:
495 datetime_tmpl = _(u'in %s, %s')
504 datetime_tmpl = _(u'in %s, %s')
496 else:
505 else:
497 datetime_tmpl = _(u'%s and %s')
506 datetime_tmpl = _(u'%s and %s')
498 if show_suffix:
507 if show_suffix:
499 datetime_tmpl = _(u'%s and %s ago')
508 datetime_tmpl = _(u'%s and %s ago')
500 if future:
509 if future:
501 datetime_tmpl = _(u'in %s and %s')
510 datetime_tmpl = _(u'in %s and %s')
502
511
503 return datetime_tmpl % (val, val_detail)
512 return datetime_tmpl % (val, val_detail)
504 i += 1
513 i += 1
505 return _(u'just now')
514 return _(u'just now')
506
515
507
516
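Most of the heavy lifting is dateutil's relativedelta, whose parts are then carried and normalised by the loops above. A standalone sketch of that first step (dates are illustrative)::

    import datetime
    import dateutil.relativedelta

    now = datetime.datetime(2016, 5, 10, 12, 0, 0)
    prevdate = datetime.datetime(2016, 3, 1, 9, 30, 0)

    base = dateutil.relativedelta.relativedelta(now, prevdate)
    print('%s months, %s days, %s hours' % (base.months, base.days, base.hours))
    # 2 months, 9 days, 2 hours -> age() would render roughly '2 months and 9 days ago'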
508 def uri_filter(uri):
517 def uri_filter(uri):
509 """
518 """
510 Removes user:password from given url string
519 Removes user:password from given url string
511
520
512 :param uri:
521 :param uri:
513 :rtype: list
522 :rtype: list
514 :returns: filtered list of strings
523 :returns: filtered list of strings
515 """
524 """
516 if not uri:
525 if not uri:
517 return ''
526 return ''
518
527
519 proto = ''
528 proto = ''
520
529
521 for pat in ('https://', 'http://'):
530 for pat in ('https://', 'http://'):
522 if uri.startswith(pat):
531 if uri.startswith(pat):
523 uri = uri[len(pat):]
532 uri = uri[len(pat):]
524 proto = pat
533 proto = pat
525 break
534 break
526
535
527 # remove passwords and username
536 # remove passwords and username
528 uri = uri[uri.find('@') + 1:]
537 uri = uri[uri.find('@') + 1:]
529
538
530 # get the port
539 # get the port
531 cred_pos = uri.find(':')
540 cred_pos = uri.find(':')
532 if cred_pos == -1:
541 if cred_pos == -1:
533 host, port = uri, None
542 host, port = uri, None
534 else:
543 else:
535 host, port = uri[:cred_pos], uri[cred_pos + 1:]
544 host, port = uri[:cred_pos], uri[cred_pos + 1:]
536
545
537 return filter(None, [proto, host, port])
546 return filter(None, [proto, host, port])
538
547
539
548
540 def credentials_filter(uri):
549 def credentials_filter(uri):
541 """
550 """
542 Returns a url with removed credentials
551 Returns a url with removed credentials
543
552
544 :param uri:
553 :param uri:
545 """
554 """
546
555
547 uri = uri_filter(uri)
556 uri = uri_filter(uri)
548 # check if we have port
557 # check if we have port
549 if len(uri) > 2 and uri[2]:
558 if len(uri) > 2 and uri[2]:
550 uri[2] = ':' + uri[2]
559 uri[2] = ':' + uri[2]
551
560
552 return ''.join(uri)
561 return ''.join(uri)
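
# Reviewer sketch (not part of this changeset): expected behaviour of the two
# filters above for a hypothetical clone URL with embedded credentials.
def _example_credentials_filter():
    filtered = credentials_filter('http://user:secret@example.com:8080/repo')
    # user:secret is dropped, scheme/host/port/path are kept
    assert filtered == 'http://example.com:8080/repo'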
553
562
554
563
555 def get_clone_url(uri_tmpl, qualifed_home_url, repo_name, repo_id, **override):
564 def get_clone_url(uri_tmpl, qualifed_home_url, repo_name, repo_id, **override):
556 parsed_url = urlobject.URLObject(qualifed_home_url)
565 parsed_url = urlobject.URLObject(qualifed_home_url)
557 decoded_path = safe_unicode(urllib.unquote(parsed_url.path.rstrip('/')))
566 decoded_path = safe_unicode(urllib.unquote(parsed_url.path.rstrip('/')))
558 args = {
567 args = {
559 'scheme': parsed_url.scheme,
568 'scheme': parsed_url.scheme,
560 'user': '',
569 'user': '',
561 # path if we use proxy-prefix
570 # path if we use proxy-prefix
562 'netloc': parsed_url.netloc+decoded_path,
571 'netloc': parsed_url.netloc+decoded_path,
563 'prefix': decoded_path,
572 'prefix': decoded_path,
564 'repo': repo_name,
573 'repo': repo_name,
565 'repoid': str(repo_id)
574 'repoid': str(repo_id)
566 }
575 }
567 args.update(override)
576 args.update(override)
568 args['user'] = urllib.quote(safe_str(args['user']))
577 args['user'] = urllib.quote(safe_str(args['user']))
569
578
570 for k, v in args.items():
579 for k, v in args.items():
571 uri_tmpl = uri_tmpl.replace('{%s}' % k, v)
580 uri_tmpl = uri_tmpl.replace('{%s}' % k, v)
572
581
573 # remove leading @ sign if it's present. Case of empty user
582 # remove leading @ sign if it's present. Case of empty user
574 url_obj = urlobject.URLObject(uri_tmpl)
583 url_obj = urlobject.URLObject(uri_tmpl)
575 url = url_obj.with_netloc(url_obj.netloc.lstrip('@'))
584 url = url_obj.with_netloc(url_obj.netloc.lstrip('@'))
576
585
577 return safe_unicode(url)
586 return safe_unicode(url)
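
# Reviewer sketch (not part of this changeset): the placeholders substituted by
# get_clone_url() are {scheme}, {user}, {netloc}, {prefix}, {repo} and
# {repoid}. The template and URL below are hypothetical.
def _example_get_clone_url():
    url = get_clone_url(
        uri_tmpl='{scheme}://{user}@{netloc}/{repo}',
        qualifed_home_url='https://code.example.com',
        repo_name='group/my-repo', repo_id=7, user='joe')
    # expected to render as u'https://joe@code.example.com/group/my-repo'
    return url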
578
587
579
588
580 def get_commit_safe(repo, commit_id=None, commit_idx=None, pre_load=None):
589 def get_commit_safe(repo, commit_id=None, commit_idx=None, pre_load=None):
581 """
590 """
582 Safe version of get_commit. If the commit doesn't exist in the
591 Safe version of get_commit. If the commit doesn't exist in the
583 repository, a dummy EmptyCommit is returned instead
592 repository, a dummy EmptyCommit is returned instead
584
593
585 :param repo: repository instance
594 :param repo: repository instance
586 :param commit_id: commit id as str
595 :param commit_id: commit id as str
587 :param pre_load: optional list of commit attributes to load
596 :param pre_load: optional list of commit attributes to load
588 """
597 """
589 # TODO(skreft): remove these circular imports
598 # TODO(skreft): remove these circular imports
590 from rhodecode.lib.vcs.backends.base import BaseRepository, EmptyCommit
599 from rhodecode.lib.vcs.backends.base import BaseRepository, EmptyCommit
591 from rhodecode.lib.vcs.exceptions import RepositoryError
600 from rhodecode.lib.vcs.exceptions import RepositoryError
592 if not isinstance(repo, BaseRepository):
601 if not isinstance(repo, BaseRepository):
593 raise Exception('You must pass a Repository '
602 raise Exception('You must pass a Repository '
594 'object as first argument, got %s' % type(repo))
603 'object as first argument, got %s' % type(repo))
595
604
596 try:
605 try:
597 commit = repo.get_commit(
606 commit = repo.get_commit(
598 commit_id=commit_id, commit_idx=commit_idx, pre_load=pre_load)
607 commit_id=commit_id, commit_idx=commit_idx, pre_load=pre_load)
599 except (RepositoryError, LookupError):
608 except (RepositoryError, LookupError):
600 commit = EmptyCommit()
609 commit = EmptyCommit()
601 return commit
610 return commit
602
611
603
612
604 def datetime_to_time(dt):
613 def datetime_to_time(dt):
605 if dt:
614 if dt:
606 return time.mktime(dt.timetuple())
615 return time.mktime(dt.timetuple())
607
616
608
617
609 def time_to_datetime(tm):
618 def time_to_datetime(tm):
610 if tm:
619 if tm:
611 if isinstance(tm, basestring):
620 if isinstance(tm, basestring):
612 try:
621 try:
613 tm = float(tm)
622 tm = float(tm)
614 except ValueError:
623 except ValueError:
615 return
624 return
616 return datetime.datetime.fromtimestamp(tm)
625 return datetime.datetime.fromtimestamp(tm)
617
626
618
627
619 def time_to_utcdatetime(tm):
628 def time_to_utcdatetime(tm):
620 if tm:
629 if tm:
621 if isinstance(tm, basestring):
630 if isinstance(tm, basestring):
622 try:
631 try:
623 tm = float(tm)
632 tm = float(tm)
624 except ValueError:
633 except ValueError:
625 return
634 return
626 return datetime.datetime.utcfromtimestamp(tm)
635 return datetime.datetime.utcfromtimestamp(tm)
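
# Reviewer sketch (not part of this changeset): these helpers accept a float,
# an int-like string, or None; invalid strings simply return None.
def _example_time_conversion():
    import datetime
    assert time_to_utcdatetime(1451606400) == datetime.datetime(2016, 1, 1)
    assert time_to_utcdatetime('not-a-timestamp') is None
    assert datetime_to_time(datetime.datetime(2016, 1, 1)) is not None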
627
636
628
637
629 MENTIONS_REGEX = re.compile(
638 MENTIONS_REGEX = re.compile(
630 # ^@ or @ without any special chars in front
639 # ^@ or @ without any special chars in front
631 r'(?:^@|[^a-zA-Z0-9\-\_\.]@)'
640 r'(?:^@|[^a-zA-Z0-9\-\_\.]@)'
632 # main body starts with letter, then can be . - _
641 # main body starts with letter, then can be . - _
633 r'([a-zA-Z0-9]{1}[a-zA-Z0-9\-\_\.]+)',
642 r'([a-zA-Z0-9]{1}[a-zA-Z0-9\-\_\.]+)',
634 re.VERBOSE | re.MULTILINE)
643 re.VERBOSE | re.MULTILINE)
635
644
636
645
637 def extract_mentioned_users(s):
646 def extract_mentioned_users(s):
638 """
647 """
639 Returns unique usernames that are @mentioned in the given string s
648 Returns unique usernames that are @mentioned in the given string s
640
649
641 :param s: string to extract mentions from
650 :param s: string to extract mentions from
642 """
651 """
643 usrs = set()
652 usrs = set()
644 for username in MENTIONS_REGEX.findall(s):
653 for username in MENTIONS_REGEX.findall(s):
645 usrs.add(username)
654 usrs.add(username)
646
655
647 return sorted(list(usrs), key=lambda k: k.lower())
656 return sorted(list(usrs), key=lambda k: k.lower())
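
# Reviewer sketch (not part of this changeset): usernames are deduplicated and
# returned sorted case-insensitively; the handles below are hypothetical.
def _example_extract_mentioned_users():
    text = 'thanks @Marcin, please ping @ada_l and @Marcin again'
    assert extract_mentioned_users(text) == ['ada_l', 'Marcin']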
648
657
649
658
650 class AttributeDict(dict):
659 class AttributeDict(dict):
651 def __getattr__(self, attr):
660 def __getattr__(self, attr):
652 return self.get(attr, None)
661 return self.get(attr, None)
653 __setattr__ = dict.__setitem__
662 __setattr__ = dict.__setitem__
654 __delattr__ = dict.__delitem__
663 __delattr__ = dict.__delitem__
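
# Reviewer sketch (not part of this changeset): AttributeDict allows attribute
# style access and returns None for missing keys instead of raising.
def _example_attribute_dict():
    data = AttributeDict(repo_name='my-repo')
    data.owner = 'admin'  # stored as data['owner']
    assert data.repo_name == 'my-repo'
    assert data.missing_key is None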
655
664
656
665
657 def fix_PATH(os_=None):
666 def fix_PATH(os_=None):
658 """
667 """
659 Get the directory of the active python interpreter and prepend it to the
668 Get the directory of the active python interpreter and prepend it to the
660 PATH variable, fixing subprocess calls across different python versions
669 PATH variable, fixing subprocess calls across different python versions
661 """
670 """
662 if os_ is None:
671 if os_ is None:
663 import os
672 import os
664 else:
673 else:
665 os = os_
674 os = os_
666
675
667 cur_path = os.path.split(sys.executable)[0]
676 cur_path = os.path.split(sys.executable)[0]
668 if not os.environ['PATH'].startswith(cur_path):
677 if not os.environ['PATH'].startswith(cur_path):
669 os.environ['PATH'] = '%s:%s' % (cur_path, os.environ['PATH'])
678 os.environ['PATH'] = '%s:%s' % (cur_path, os.environ['PATH'])
670
679
671
680
672 def obfuscate_url_pw(engine):
681 def obfuscate_url_pw(engine):
673 _url = engine or ''
682 _url = engine or ''
674 try:
683 try:
675 _url = sqlalchemy.engine.url.make_url(engine)
684 _url = sqlalchemy.engine.url.make_url(engine)
676 if _url.password:
685 if _url.password:
677 _url.password = 'XXXXX'
686 _url.password = 'XXXXX'
678 except Exception:
687 except Exception:
679 pass
688 pass
680 return unicode(_url)
689 return unicode(_url)
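
# Reviewer sketch (not part of this changeset): with the SQLAlchemy versions
# this module targets, the password portion of a DB url is masked, e.g.
# 'postgresql://rc:secret@localhost/rc' -> 'postgresql://rc:XXXXX@localhost/rc'.
def _example_obfuscate_url_pw():
    return obfuscate_url_pw('postgresql://rc:secret@localhost/rc')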
681
690
682
691
683 def get_server_url(environ):
692 def get_server_url(environ):
684 req = webob.Request(environ)
693 req = webob.Request(environ)
685 return req.host_url + req.script_name
694 return req.host_url + req.script_name
686
695
687
696
688 def unique_id(hexlen=32):
697 def unique_id(hexlen=32):
689 alphabet = "23456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghjklmnpqrstuvwxyz"
698 alphabet = "23456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghjklmnpqrstuvwxyz"
690 return suuid(truncate_to=hexlen, alphabet=alphabet)
699 return suuid(truncate_to=hexlen, alphabet=alphabet)
691
700
692
701
693 def suuid(url=None, truncate_to=22, alphabet=None):
702 def suuid(url=None, truncate_to=22, alphabet=None):
694 """
703 """
695 Generate and return a short URL safe UUID.
704 Generate and return a short URL safe UUID.
696
705
697 If the url parameter is provided, set the namespace to the provided
706 If the url parameter is provided, set the namespace to the provided
698 URL and generate a UUID.
707 URL and generate a UUID.
699
708
700 :param url: url to get the uuid for
709 :param url: url to get the uuid for
701 :param truncate_to: truncate the basic 22 character UUID to a shorter version
710 :param truncate_to: truncate the basic 22 character UUID to a shorter version
702
711
703 The IDs won't be universally unique any longer, but the probability of
712 The IDs won't be universally unique any longer, but the probability of
704 a collision will still be very low.
713 a collision will still be very low.
705 """
714 """
706 # Define our alphabet.
715 # Define our alphabet.
707 _ALPHABET = alphabet or "23456789ABCDEFGHJKLMNPQRSTUVWXYZ"
716 _ALPHABET = alphabet or "23456789ABCDEFGHJKLMNPQRSTUVWXYZ"
708
717
709 # If no URL is given, generate a random UUID.
718 # If no URL is given, generate a random UUID.
710 if url is None:
719 if url is None:
711 unique_id = uuid.uuid4().int
720 unique_id = uuid.uuid4().int
712 else:
721 else:
713 unique_id = uuid.uuid3(uuid.NAMESPACE_URL, url).int
722 unique_id = uuid.uuid3(uuid.NAMESPACE_URL, url).int
714
723
715 alphabet_length = len(_ALPHABET)
724 alphabet_length = len(_ALPHABET)
716 output = []
725 output = []
717 while unique_id > 0:
726 while unique_id > 0:
718 digit = unique_id % alphabet_length
727 digit = unique_id % alphabet_length
719 output.append(_ALPHABET[digit])
728 output.append(_ALPHABET[digit])
720 unique_id = int(unique_id / alphabet_length)
729 unique_id = int(unique_id / alphabet_length)
721 return "".join(output)[:truncate_to]
730 return "".join(output)[:truncate_to]
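
# Reviewer sketch (not part of this changeset): suuid() is random per call,
# while passing a url makes the result deterministic (uuid3 in the URL
# namespace); unique_id() is suuid() with a larger default alphabet.
def _example_suuid():
    assert suuid(url='https://example.com') == suuid(url='https://example.com')
    assert len(unique_id()) <= 32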
722
731
723
732
724 def get_current_rhodecode_user():
733 def get_current_rhodecode_user():
725 """
734 """
726 Gets rhodecode user from threadlocal tmpl_context variable if it's
735 Gets rhodecode user from threadlocal tmpl_context variable if it's
727 defined, else returns None.
736 defined, else returns None.
728 """
737 """
729 from pylons import tmpl_context as c
738 from pylons import tmpl_context as c
730 if hasattr(c, 'rhodecode_user'):
739 if hasattr(c, 'rhodecode_user'):
731 return c.rhodecode_user
740 return c.rhodecode_user
732
741
733 return None
742 return None
734
743
735
744
736 def action_logger_generic(action, namespace=''):
745 def action_logger_generic(action, namespace=''):
737 """
746 """
738 A generic logger for actions useful to the system overview. It tries to find
747 A generic logger for actions useful to the system overview. It tries to find
739 an acting user for the context of the call, otherwise reports an unknown user
748 an acting user for the context of the call, otherwise reports an unknown user
740
749
741 :param action: logging message eg 'comment 5 deleted'
750 :param action: logging message eg 'comment 5 deleted'
742 :type action: string
751 :type action: string
743
752
744 :param namespace: namespace of the logging message eg. 'repo.comments'
753 :param namespace: namespace of the logging message eg. 'repo.comments'
745 :type namespace: string
754 :type namespace: string
746
755
747 """
756 """
748
757
749 logger_name = 'rhodecode.actions'
758 logger_name = 'rhodecode.actions'
750
759
751 if namespace:
760 if namespace:
752 logger_name += '.' + namespace
761 logger_name += '.' + namespace
753
762
754 log = logging.getLogger(logger_name)
763 log = logging.getLogger(logger_name)
755
764
756 # get a user if we can
765 # get a user if we can
757 user = get_current_rhodecode_user()
766 user = get_current_rhodecode_user()
758
767
759 logfunc = log.info
768 logfunc = log.info
760
769
761 if not user:
770 if not user:
762 user = '<unknown user>'
771 user = '<unknown user>'
763 logfunc = log.warning
772 logfunc = log.warning
764
773
765 logfunc('Logging action by {}: {}'.format(user, action))
774 logfunc('Logging action by {}: {}'.format(user, action))
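
# Reviewer sketch (not part of this changeset): a call like the one below logs
# through the 'rhodecode.actions.repo.comments' logger; without a request
# context it falls back to WARNING level with '<unknown user>'.
def _example_action_logger_generic():
    action_logger_generic('comment 5 deleted', namespace='repo.comments')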
766
775
767
776
768 def escape_split(text, sep=',', maxsplit=-1):
777 def escape_split(text, sep=',', maxsplit=-1):
769 r"""
778 r"""
770 Allows for escaping of the separator: e.g. arg='foo\, bar'
779 Allows for escaping of the separator: e.g. arg='foo\, bar'
771
780
772 Note that, given the way bash et al. do command line parsing, those
781 Note that, given the way bash et al. do command line parsing, those
773 single quotes are required.
782 single quotes are required.
774 """
783 """
775 escaped_sep = r'\%s' % sep
784 escaped_sep = r'\%s' % sep
776
785
777 if escaped_sep not in text:
786 if escaped_sep not in text:
778 return text.split(sep, maxsplit)
787 return text.split(sep, maxsplit)
779
788
780 before, _mid, after = text.partition(escaped_sep)
789 before, _mid, after = text.partition(escaped_sep)
781 startlist = before.split(sep, maxsplit) # a regular split is fine here
790 startlist = before.split(sep, maxsplit) # a regular split is fine here
782 unfinished = startlist[-1]
791 unfinished = startlist[-1]
783 startlist = startlist[:-1]
792 startlist = startlist[:-1]
784
793
785 # recurse because there may be more escaped separators
794 # recurse because there may be more escaped separators
786 endlist = escape_split(after, sep, maxsplit)
795 endlist = escape_split(after, sep, maxsplit)
787
796
788 # finish building the escaped value. we use endlist[0] because the first
797 # finish building the escaped value. we use endlist[0] because the first
789 # part of the string sent in recursion is the rest of the escaped value.
798 # part of the string sent in recursion is the rest of the escaped value.
790 unfinished += sep + endlist[0]
799 unfinished += sep + endlist[0]
791
800
792 return startlist + [unfinished] + endlist[1:] # put together all the parts
801 return startlist + [unfinished] + endlist[1:] # put together all the parts
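
# Reviewer sketch (not part of this changeset): an escaped separator is kept
# as part of the value instead of splitting on it.
def _example_escape_split():
    assert escape_split('foo\\, bar,baz') == ['foo, bar', 'baz']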
793
802
794
803
795 class OptionalAttr(object):
804 class OptionalAttr(object):
796 """
805 """
797 Special Optional that refers to another parameter by name. Example::
806 Special Optional that refers to another parameter by name. Example::
798
807
799 def test(apiuser, userid=Optional(OAttr('apiuser'))):
808 def test(apiuser, userid=Optional(OAttr('apiuser'))):
800 user = Optional.extract(userid)
809 user = Optional.extract(userid)
801 # calls
810 # calls
802
811
803 """
812 """
804
813
805 def __init__(self, attr_name):
814 def __init__(self, attr_name):
806 self.attr_name = attr_name
815 self.attr_name = attr_name
807
816
808 def __repr__(self):
817 def __repr__(self):
809 return '<OptionalAttr:%s>' % self.attr_name
818 return '<OptionalAttr:%s>' % self.attr_name
810
819
811 def __call__(self):
820 def __call__(self):
812 return self
821 return self
813
822
814
823
815 # alias
824 # alias
816 OAttr = OptionalAttr
825 OAttr = OptionalAttr
817
826
818
827
819 class Optional(object):
828 class Optional(object):
820 """
829 """
821 Defines an optional parameter::
830 Defines an optional parameter::
822
831
823 param = param.getval() if isinstance(param, Optional) else param
832 param = param.getval() if isinstance(param, Optional) else param
824 param = param() if isinstance(param, Optional) else param
833 param = param() if isinstance(param, Optional) else param
825
834
826 is equivalent to::
835 is equivalent to::
827
836
828 param = Optional.extract(param)
837 param = Optional.extract(param)
829
838
830 """
839 """
831
840
832 def __init__(self, type_):
841 def __init__(self, type_):
833 self.type_ = type_
842 self.type_ = type_
834
843
835 def __repr__(self):
844 def __repr__(self):
836 return '<Optional:%s>' % self.type_.__repr__()
845 return '<Optional:%s>' % self.type_.__repr__()
837
846
838 def __call__(self):
847 def __call__(self):
839 return self.getval()
848 return self.getval()
840
849
841 def getval(self):
850 def getval(self):
842 """
851 """
843 returns value from this Optional instance
852 returns value from this Optional instance
844 """
853 """
845 if isinstance(self.type_, OAttr):
854 if isinstance(self.type_, OAttr):
846 # use params name
855 # use params name
847 return self.type_.attr_name
856 return self.type_.attr_name
848 return self.type_
857 return self.type_
849
858
850 @classmethod
859 @classmethod
851 def extract(cls, val):
860 def extract(cls, val):
852 """
861 """
853 Extracts value from Optional() instance
862 Extracts value from Optional() instance
854
863
855 :param val:
864 :param val:
856 :return: original value if it's not an Optional instance, else
865 :return: original value if it's not an Optional instance, else
857 the value of the instance
866 the value of the instance
858 """
867 """
859 if isinstance(val, cls):
868 if isinstance(val, cls):
860 return val.getval()
869 return val.getval()
861 return val
870 return val
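
# Reviewer sketch (not part of this changeset): how API methods typically
# declare and unwrap optional parameters with Optional/OAttr.
def _example_optional(apiuser='admin', userid=Optional(OAttr('apiuser'))):
    userid = Optional.extract(userid)
    # when the caller omitted userid, it now holds the string 'apiuser'
    return userid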
862
871
863
872
864 def get_routes_generator_for_server_url(server_url):
873 def get_routes_generator_for_server_url(server_url):
865 parsed_url = urlobject.URLObject(server_url)
874 parsed_url = urlobject.URLObject(server_url)
866 netloc = safe_str(parsed_url.netloc)
875 netloc = safe_str(parsed_url.netloc)
867 script_name = safe_str(parsed_url.path)
876 script_name = safe_str(parsed_url.path)
868
877
869 if ':' in netloc:
878 if ':' in netloc:
870 server_name, server_port = netloc.split(':')
879 server_name, server_port = netloc.split(':')
871 else:
880 else:
872 server_name = netloc
881 server_name = netloc
873 server_port = (parsed_url.scheme == 'https' and '443' or '80')
882 server_port = (parsed_url.scheme == 'https' and '443' or '80')
874
883
875 environ = {
884 environ = {
876 'REQUEST_METHOD': 'GET',
885 'REQUEST_METHOD': 'GET',
877 'PATH_INFO': '/',
886 'PATH_INFO': '/',
878 'SERVER_NAME': server_name,
887 'SERVER_NAME': server_name,
879 'SERVER_PORT': server_port,
888 'SERVER_PORT': server_port,
880 'SCRIPT_NAME': script_name,
889 'SCRIPT_NAME': script_name,
881 }
890 }
882 if parsed_url.scheme == 'https':
891 if parsed_url.scheme == 'https':
883 environ['HTTPS'] = 'on'
892 environ['HTTPS'] = 'on'
884 environ['wsgi.url_scheme'] = 'https'
893 environ['wsgi.url_scheme'] = 'https'
885
894
886 return routes.util.URLGenerator(rhodecode.CONFIG['routes.map'], environ)
895 return routes.util.URLGenerator(rhodecode.CONFIG['routes.map'], environ)
@@ -1,740 +1,756 b''
1 # -*- coding: utf-8 -*-
1 # -*- coding: utf-8 -*-
2
2
3 # Copyright (C) 2014-2016 RhodeCode GmbH
3 # Copyright (C) 2014-2016 RhodeCode GmbH
4 #
4 #
5 # This program is free software: you can redistribute it and/or modify
5 # This program is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU Affero General Public License, version 3
6 # it under the terms of the GNU Affero General Public License, version 3
7 # (only), as published by the Free Software Foundation.
7 # (only), as published by the Free Software Foundation.
8 #
8 #
9 # This program is distributed in the hope that it will be useful,
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU General Public License for more details.
12 # GNU General Public License for more details.
13 #
13 #
14 # You should have received a copy of the GNU Affero General Public License
14 # You should have received a copy of the GNU Affero General Public License
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 #
16 #
17 # This program is dual-licensed. If you wish to learn more about the
17 # This program is dual-licensed. If you wish to learn more about the
18 # RhodeCode Enterprise Edition, including its added features, Support services,
18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 # and proprietary license terms, please see https://rhodecode.com/licenses/
19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20
20
21 """
21 """
22 Module holding everything related to vcs nodes, with vcs2 architecture.
22 Module holding everything related to vcs nodes, with vcs2 architecture.
23 """
23 """
24
24
25
25
26 import stat
26 import stat
27
27
28 from zope.cachedescriptors.property import Lazy as LazyProperty
28 from zope.cachedescriptors.property import Lazy as LazyProperty
29
29
30 from rhodecode.config.conf import LANGUAGES_EXTENSIONS_MAP
30 from rhodecode.lib.utils import safe_unicode, safe_str
31 from rhodecode.lib.utils import safe_unicode, safe_str
31 from rhodecode.lib.utils2 import md5
32 from rhodecode.lib.utils2 import md5
32 from rhodecode.lib.vcs import path as vcspath
33 from rhodecode.lib.vcs import path as vcspath
33 from rhodecode.lib.vcs.backends.base import EmptyCommit, FILEMODE_DEFAULT
34 from rhodecode.lib.vcs.backends.base import EmptyCommit, FILEMODE_DEFAULT
34 from rhodecode.lib.vcs.conf.mtypes import get_mimetypes_db
35 from rhodecode.lib.vcs.conf.mtypes import get_mimetypes_db
35 from rhodecode.lib.vcs.exceptions import NodeError, RemovedFileNodeError
36 from rhodecode.lib.vcs.exceptions import NodeError, RemovedFileNodeError
36
37
37 LARGEFILE_PREFIX = '.hglf'
38 LARGEFILE_PREFIX = '.hglf'
38
39
39
40
40 class NodeKind:
41 class NodeKind:
41 SUBMODULE = -1
42 SUBMODULE = -1
42 DIR = 1
43 DIR = 1
43 FILE = 2
44 FILE = 2
44 LARGEFILE = 3
45 LARGEFILE = 3
45
46
46
47
47 class NodeState:
48 class NodeState:
48 ADDED = u'added'
49 ADDED = u'added'
49 CHANGED = u'changed'
50 CHANGED = u'changed'
50 NOT_CHANGED = u'not changed'
51 NOT_CHANGED = u'not changed'
51 REMOVED = u'removed'
52 REMOVED = u'removed'
52
53
53
54
54 class NodeGeneratorBase(object):
55 class NodeGeneratorBase(object):
55 """
56 """
56 Base class for removed, added and changed filenodes; it's a lazy generator
57 Base class for removed, added and changed filenodes; it's a lazy generator
57 class that creates filenodes only on iteration or call
58 class that creates filenodes only on iteration or call
58
59
59 The len method doesn't need to create filenodes at all
60 The len method doesn't need to create filenodes at all
60 """
61 """
61
62
62 def __init__(self, current_paths, cs):
63 def __init__(self, current_paths, cs):
63 self.cs = cs
64 self.cs = cs
64 self.current_paths = current_paths
65 self.current_paths = current_paths
65
66
66 def __call__(self):
67 def __call__(self):
67 return [n for n in self]
68 return [n for n in self]
68
69
69 def __getslice__(self, i, j):
70 def __getslice__(self, i, j):
70 for p in self.current_paths[i:j]:
71 for p in self.current_paths[i:j]:
71 yield self.cs.get_node(p)
72 yield self.cs.get_node(p)
72
73
73 def __len__(self):
74 def __len__(self):
74 return len(self.current_paths)
75 return len(self.current_paths)
75
76
76 def __iter__(self):
77 def __iter__(self):
77 for p in self.current_paths:
78 for p in self.current_paths:
78 yield self.cs.get_node(p)
79 yield self.cs.get_node(p)
79
80
80
81
81 class AddedFileNodesGenerator(NodeGeneratorBase):
82 class AddedFileNodesGenerator(NodeGeneratorBase):
82 """
83 """
83 Class holding added files for current commit
84 Class holding added files for current commit
84 """
85 """
85
86
86
87
87 class ChangedFileNodesGenerator(NodeGeneratorBase):
88 class ChangedFileNodesGenerator(NodeGeneratorBase):
88 """
89 """
89 Class holding changed files for current commit
90 Class holding changed files for current commit
90 """
91 """
91
92
92
93
93 class RemovedFileNodesGenerator(NodeGeneratorBase):
94 class RemovedFileNodesGenerator(NodeGeneratorBase):
94 """
95 """
95 Class holding removed files for current commit
96 Class holding removed files for current commit
96 """
97 """
97 def __iter__(self):
98 def __iter__(self):
98 for p in self.current_paths:
99 for p in self.current_paths:
99 yield RemovedFileNode(path=p)
100 yield RemovedFileNode(path=p)
100
101
101 def __getslice__(self, i, j):
102 def __getslice__(self, i, j):
102 for p in self.current_paths[i:j]:
103 for p in self.current_paths[i:j]:
103 yield RemovedFileNode(path=p)
104 yield RemovedFileNode(path=p)
104
105
105
106
106 class Node(object):
107 class Node(object):
107 """
108 """
108 Simplest class representing file or directory on repository. SCM backends
109 Simplest class representing file or directory on repository. SCM backends
109 should use ``FileNode`` and ``DirNode`` subclasses rather than ``Node``
110 should use ``FileNode`` and ``DirNode`` subclasses rather than ``Node``
110 directly.
111 directly.
111
112
112 Node's ``path`` cannot start with slash as we operate on *relative* paths
113 Node's ``path`` cannot start with slash as we operate on *relative* paths
113 only. Moreover, every single node is identified by the ``path`` attribute,
114 only. Moreover, every single node is identified by the ``path`` attribute,
114 so it cannot end with a slash either. Otherwise, the path could lead to mistakes.
115 so it cannot end with a slash either. Otherwise, the path could lead to mistakes.
115 """
116 """
116
117
117 commit = None
118 commit = None
118
119
119 def __init__(self, path, kind):
120 def __init__(self, path, kind):
120 self._validate_path(path) # can throw exception if path is invalid
121 self._validate_path(path) # can throw exception if path is invalid
121 self.path = safe_str(path.rstrip('/')) # we store paths as str
122 self.path = safe_str(path.rstrip('/')) # we store paths as str
122 if path == '' and kind != NodeKind.DIR:
123 if path == '' and kind != NodeKind.DIR:
123 raise NodeError("Only DirNode and its subclasses may be "
124 raise NodeError("Only DirNode and its subclasses may be "
124 "initialized with empty path")
125 "initialized with empty path")
125 self.kind = kind
126 self.kind = kind
126
127
127 if self.is_root() and not self.is_dir():
128 if self.is_root() and not self.is_dir():
128 raise NodeError("Root node cannot be FILE kind")
129 raise NodeError("Root node cannot be FILE kind")
129
130
130 def _validate_path(self, path):
131 def _validate_path(self, path):
131 if path.startswith('/'):
132 if path.startswith('/'):
132 raise NodeError(
133 raise NodeError(
133 "Cannot initialize Node objects with slash at "
134 "Cannot initialize Node objects with slash at "
134 "the beginning as only relative paths are supported. "
135 "the beginning as only relative paths are supported. "
135 "Got %s" % (path,))
136 "Got %s" % (path,))
136
137
137 @LazyProperty
138 @LazyProperty
138 def parent(self):
139 def parent(self):
139 parent_path = self.get_parent_path()
140 parent_path = self.get_parent_path()
140 if parent_path:
141 if parent_path:
141 if self.commit:
142 if self.commit:
142 return self.commit.get_node(parent_path)
143 return self.commit.get_node(parent_path)
143 return DirNode(parent_path)
144 return DirNode(parent_path)
144 return None
145 return None
145
146
146 @LazyProperty
147 @LazyProperty
147 def unicode_path(self):
148 def unicode_path(self):
148 return safe_unicode(self.path)
149 return safe_unicode(self.path)
149
150
150 @LazyProperty
151 @LazyProperty
151 def dir_path(self):
152 def dir_path(self):
152 """
153 """
153 Returns name of the directory from full path of this vcs node. Empty
154 Returns name of the directory from full path of this vcs node. Empty
154 string is returned if there's no directory in the path
155 string is returned if there's no directory in the path
155 """
156 """
156 _parts = self.path.rstrip('/').rsplit('/', 1)
157 _parts = self.path.rstrip('/').rsplit('/', 1)
157 if len(_parts) == 2:
158 if len(_parts) == 2:
158 return safe_unicode(_parts[0])
159 return safe_unicode(_parts[0])
159 return u''
160 return u''
160
161
161 @LazyProperty
162 @LazyProperty
162 def name(self):
163 def name(self):
163 """
164 """
164 Returns name of the node, so if its path is 'docs/api/index.rst'
165 Returns name of the node, so if its path is 'docs/api/index.rst'
165 then only the last part, 'index.rst', is returned.
166 then only the last part, 'index.rst', is returned.
166 """
167 """
167 return safe_unicode(self.path.rstrip('/').split('/')[-1])
168 return safe_unicode(self.path.rstrip('/').split('/')[-1])
168
169
169 @property
170 @property
170 def kind(self):
171 def kind(self):
171 return self._kind
172 return self._kind
172
173
173 @kind.setter
174 @kind.setter
174 def kind(self, kind):
175 def kind(self, kind):
175 if hasattr(self, '_kind'):
176 if hasattr(self, '_kind'):
176 raise NodeError("Cannot change node's kind")
177 raise NodeError("Cannot change node's kind")
177 else:
178 else:
178 self._kind = kind
179 self._kind = kind
179 # Post setter check (path's trailing slash)
180 # Post setter check (path's trailing slash)
180 if self.path.endswith('/'):
181 if self.path.endswith('/'):
181 raise NodeError("Node's path cannot end with slash")
182 raise NodeError("Node's path cannot end with slash")
182
183
183 def __cmp__(self, other):
184 def __cmp__(self, other):
184 """
185 """
185 Comparator using name of the node, needed for quick list sorting.
186 Comparator using name of the node, needed for quick list sorting.
186 """
187 """
187 kind_cmp = cmp(self.kind, other.kind)
188 kind_cmp = cmp(self.kind, other.kind)
188 if kind_cmp:
189 if kind_cmp:
189 return kind_cmp
190 return kind_cmp
190 return cmp(self.name, other.name)
191 return cmp(self.name, other.name)
191
192
192 def __eq__(self, other):
193 def __eq__(self, other):
193 for attr in ['name', 'path', 'kind']:
194 for attr in ['name', 'path', 'kind']:
194 if getattr(self, attr) != getattr(other, attr):
195 if getattr(self, attr) != getattr(other, attr):
195 return False
196 return False
196 if self.is_file():
197 if self.is_file():
197 if self.content != other.content:
198 if self.content != other.content:
198 return False
199 return False
199 else:
200 else:
200 # For DirNode's check without entering each dir
201 # For DirNode's check without entering each dir
201 self_nodes_paths = list(sorted(n.path for n in self.nodes))
202 self_nodes_paths = list(sorted(n.path for n in self.nodes))
202 other_nodes_paths = list(sorted(n.path for n in other.nodes))
203 other_nodes_paths = list(sorted(n.path for n in other.nodes))
203 if self_nodes_paths != other_nodes_paths:
204 if self_nodes_paths != other_nodes_paths:
204 return False
205 return False
205 return True
206 return True
206
207
207 def __ne__(self, other):
208 def __ne__(self, other):
208 return not self.__eq__(other)
209 return not self.__eq__(other)
209
210
210 def __repr__(self):
211 def __repr__(self):
211 return '<%s %r>' % (self.__class__.__name__, self.path)
212 return '<%s %r>' % (self.__class__.__name__, self.path)
212
213
213 def __str__(self):
214 def __str__(self):
214 return self.__repr__()
215 return self.__repr__()
215
216
216 def __unicode__(self):
217 def __unicode__(self):
217 return self.name
218 return self.name
218
219
219 def get_parent_path(self):
220 def get_parent_path(self):
220 """
221 """
221 Returns node's parent path or empty string if node is root.
222 Returns node's parent path or empty string if node is root.
222 """
223 """
223 if self.is_root():
224 if self.is_root():
224 return ''
225 return ''
225 return vcspath.dirname(self.path.rstrip('/')) + '/'
226 return vcspath.dirname(self.path.rstrip('/')) + '/'
226
227
227 def is_file(self):
228 def is_file(self):
228 """
229 """
229 Returns ``True`` if node's kind is ``NodeKind.FILE``, ``False``
230 Returns ``True`` if node's kind is ``NodeKind.FILE``, ``False``
230 otherwise.
231 otherwise.
231 """
232 """
232 return self.kind == NodeKind.FILE
233 return self.kind == NodeKind.FILE
233
234
234 def is_dir(self):
235 def is_dir(self):
235 """
236 """
236 Returns ``True`` if node's kind is ``NodeKind.DIR``, ``False``
237 Returns ``True`` if node's kind is ``NodeKind.DIR``, ``False``
237 otherwise.
238 otherwise.
238 """
239 """
239 return self.kind == NodeKind.DIR
240 return self.kind == NodeKind.DIR
240
241
241 def is_root(self):
242 def is_root(self):
242 """
243 """
243 Returns ``True`` if node is a root node and ``False`` otherwise.
244 Returns ``True`` if node is a root node and ``False`` otherwise.
244 """
245 """
245 return self.kind == NodeKind.DIR and self.path == ''
246 return self.kind == NodeKind.DIR and self.path == ''
246
247
247 def is_submodule(self):
248 def is_submodule(self):
248 """
249 """
249 Returns ``True`` if node's kind is ``NodeKind.SUBMODULE``, ``False``
250 Returns ``True`` if node's kind is ``NodeKind.SUBMODULE``, ``False``
250 otherwise.
251 otherwise.
251 """
252 """
252 return self.kind == NodeKind.SUBMODULE
253 return self.kind == NodeKind.SUBMODULE
253
254
254 def is_largefile(self):
255 def is_largefile(self):
255 """
256 """
256 Returns ``True`` if node's kind is ``NodeKind.LARGEFILE``, ``False``
257 Returns ``True`` if node's kind is ``NodeKind.LARGEFILE``, ``False``
257 otherwise
258 otherwise
258 """
259 """
259 return self.kind == NodeKind.LARGEFILE
260 return self.kind == NodeKind.LARGEFILE
260
261
261 def is_link(self):
262 def is_link(self):
262 if self.commit:
263 if self.commit:
263 return self.commit.is_link(self.path)
264 return self.commit.is_link(self.path)
264 return False
265 return False
265
266
266 @LazyProperty
267 @LazyProperty
267 def added(self):
268 def added(self):
268 return self.state is NodeState.ADDED
269 return self.state is NodeState.ADDED
269
270
270 @LazyProperty
271 @LazyProperty
271 def changed(self):
272 def changed(self):
272 return self.state is NodeState.CHANGED
273 return self.state is NodeState.CHANGED
273
274
274 @LazyProperty
275 @LazyProperty
275 def not_changed(self):
276 def not_changed(self):
276 return self.state is NodeState.NOT_CHANGED
277 return self.state is NodeState.NOT_CHANGED
277
278
278 @LazyProperty
279 @LazyProperty
279 def removed(self):
280 def removed(self):
280 return self.state is NodeState.REMOVED
281 return self.state is NodeState.REMOVED
281
282
282
283
283 class FileNode(Node):
284 class FileNode(Node):
284 """
285 """
285 Class representing file nodes.
286 Class representing file nodes.
286
287
287 :attribute: path: path to the node, relative to repository's root
288 :attribute: path: path to the node, relative to repository's root
288 :attribute: content: if given, sets the content of the file explicitly
289 :attribute: content: if given, sets the content of the file explicitly
289 :attribute: commit: if given, content is fetched lazily from it on first access
290 :attribute: commit: if given, content is fetched lazily from it on first access
290 :attribute: mode: stat mode for a node. Default is `FILEMODE_DEFAULT`.
291 :attribute: mode: stat mode for a node. Default is `FILEMODE_DEFAULT`.
291 """
292 """
292
293
293 def __init__(self, path, content=None, commit=None, mode=None):
294 def __init__(self, path, content=None, commit=None, mode=None):
294 """
295 """
295 Only one of ``content`` and ``commit`` may be given. Passing both
296 Only one of ``content`` and ``commit`` may be given. Passing both
296 would raise ``NodeError`` exception.
297 would raise ``NodeError`` exception.
297
298
298 :param path: relative path to the node
299 :param path: relative path to the node
299 :param content: content may be passed to constructor
300 :param content: content may be passed to constructor
300 :param commit: if given, will use it to lazily fetch content
301 :param commit: if given, will use it to lazily fetch content
301 :param mode: ST_MODE (i.e. 0100644)
302 :param mode: ST_MODE (i.e. 0100644)
302 """
303 """
303 if content and commit:
304 if content and commit:
304 raise NodeError("Cannot use both content and commit")
305 raise NodeError("Cannot use both content and commit")
305 super(FileNode, self).__init__(path, kind=NodeKind.FILE)
306 super(FileNode, self).__init__(path, kind=NodeKind.FILE)
306 self.commit = commit
307 self.commit = commit
307 self._content = content
308 self._content = content
308 self._mode = mode or FILEMODE_DEFAULT
309 self._mode = mode or FILEMODE_DEFAULT
309
310
310 @LazyProperty
311 @LazyProperty
311 def mode(self):
312 def mode(self):
312 """
313 """
313 Returns lazily mode of the FileNode. If `commit` is not set, would
314 Returns lazily mode of the FileNode. If `commit` is not set, would
314 use value given at initialization or `FILEMODE_DEFAULT` (default).
315 use value given at initialization or `FILEMODE_DEFAULT` (default).
315 """
316 """
316 if self.commit:
317 if self.commit:
317 mode = self.commit.get_file_mode(self.path)
318 mode = self.commit.get_file_mode(self.path)
318 else:
319 else:
319 mode = self._mode
320 mode = self._mode
320 return mode
321 return mode
321
322
322 @LazyProperty
323 @LazyProperty
323 def raw_bytes(self):
324 def raw_bytes(self):
324 """
325 """
325 Returns lazily the raw bytes of the FileNode.
326 Returns lazily the raw bytes of the FileNode.
326 """
327 """
327 if self.commit:
328 if self.commit:
328 if self._content is None:
329 if self._content is None:
329 self._content = self.commit.get_file_content(self.path)
330 self._content = self.commit.get_file_content(self.path)
330 content = self._content
331 content = self._content
331 else:
332 else:
332 content = self._content
333 content = self._content
333 return content
334 return content
334
335
335 @LazyProperty
336 @LazyProperty
336 def md5(self):
337 def md5(self):
337 """
338 """
338 Returns md5 of the file node.
339 Returns md5 of the file node.
339 """
340 """
340 return md5(self.raw_bytes)
341 return md5(self.raw_bytes)
341
342
342 @LazyProperty
343 @LazyProperty
343 def content(self):
344 def content(self):
344 """
345 """
345 Returns lazily content of the FileNode. If possible, would try to
346 Returns lazily content of the FileNode. If possible, would try to
346 decode content from UTF-8.
347 decode content from UTF-8.
347 """
348 """
348 content = self.raw_bytes
349 content = self.raw_bytes
349
350
350 if self.is_binary:
351 if self.is_binary:
351 return content
352 return content
352 return safe_unicode(content)
353 return safe_unicode(content)
353
354
354 @LazyProperty
355 @LazyProperty
355 def size(self):
356 def size(self):
356 if self.commit:
357 if self.commit:
357 return self.commit.get_file_size(self.path)
358 return self.commit.get_file_size(self.path)
358 raise NodeError(
359 raise NodeError(
359 "Cannot retrieve size of the file without related "
360 "Cannot retrieve size of the file without related "
360 "commit attribute")
361 "commit attribute")
361
362
362 @LazyProperty
363 @LazyProperty
363 def message(self):
364 def message(self):
364 if self.commit:
365 if self.commit:
365 return self.last_commit.message
366 return self.last_commit.message
366 raise NodeError(
367 raise NodeError(
367 "Cannot retrieve message of the file without related "
368 "Cannot retrieve message of the file without related "
368 "commit attribute")
369 "commit attribute")
369
370
370 @LazyProperty
371 @LazyProperty
371 def last_commit(self):
372 def last_commit(self):
372 if self.commit:
373 if self.commit:
373 pre_load = ["author", "date", "message"]
374 pre_load = ["author", "date", "message"]
374 return self.commit.get_file_commit(self.path, pre_load=pre_load)
375 return self.commit.get_file_commit(self.path, pre_load=pre_load)
375 raise NodeError(
376 raise NodeError(
376 "Cannot retrieve last commit of the file without "
377 "Cannot retrieve last commit of the file without "
377 "related commit attribute")
378 "related commit attribute")
378
379
379 def get_mimetype(self):
380 def get_mimetype(self):
380 """
381 """
381 Mimetype is calculated based on the file's content. If ``_mimetype``
382 Mimetype is calculated based on the file's content. If ``_mimetype``
382 attribute is available, it will be returned (backends which store
383 attribute is available, it will be returned (backends which store
383 mimetypes or can easily recognize them, should set this private
384 mimetypes or can easily recognize them, should set this private
384 attribute to indicate that type should *NOT* be calculated).
385 attribute to indicate that type should *NOT* be calculated).
385 """
386 """
386
387
387 if hasattr(self, '_mimetype'):
388 if hasattr(self, '_mimetype'):
388 if (isinstance(self._mimetype, (tuple, list,)) and
389 if (isinstance(self._mimetype, (tuple, list,)) and
389 len(self._mimetype) == 2):
390 len(self._mimetype) == 2):
390 return self._mimetype
391 return self._mimetype
391 else:
392 else:
392 raise NodeError('given _mimetype attribute must be a 2-'
393 raise NodeError('given _mimetype attribute must be a 2-'
393 'element list or tuple')
394 'element list or tuple')
394
395
395 db = get_mimetypes_db()
396 db = get_mimetypes_db()
396 mtype, encoding = db.guess_type(self.name)
397 mtype, encoding = db.guess_type(self.name)
397
398
398 if mtype is None:
399 if mtype is None:
399 if self.is_binary:
400 if self.is_binary:
400 mtype = 'application/octet-stream'
401 mtype = 'application/octet-stream'
401 encoding = None
402 encoding = None
402 else:
403 else:
403 mtype = 'text/plain'
404 mtype = 'text/plain'
404 encoding = None
405 encoding = None
405
406
406 # try with pygments
407 # try with pygments
407 try:
408 try:
408 from pygments.lexers import get_lexer_for_filename
409 from pygments.lexers import get_lexer_for_filename
409 mt = get_lexer_for_filename(self.name).mimetypes
410 mt = get_lexer_for_filename(self.name).mimetypes
410 except Exception:
411 except Exception:
411 mt = None
412 mt = None
412
413
413 if mt:
414 if mt:
414 mtype = mt[0]
415 mtype = mt[0]
415
416
416 return mtype, encoding
417 return mtype, encoding
417
418
418 @LazyProperty
419 @LazyProperty
419 def mimetype(self):
420 def mimetype(self):
420 """
421 """
421 Wrapper around full mimetype info. It returns only type of fetched
422 Wrapper around full mimetype info. It returns only type of fetched
422 mimetype without the encoding part. Use the get_mimetype function to fetch
423 mimetype without the encoding part. Use the get_mimetype function to fetch
423 the full (type, encoding) pair
424 the full (type, encoding) pair
424 """
425 """
425 return self.get_mimetype()[0]
426 return self.get_mimetype()[0]
426
427
427 @LazyProperty
428 @LazyProperty
428 def mimetype_main(self):
429 def mimetype_main(self):
429 return self.mimetype.split('/')[0]
430 return self.mimetype.split('/')[0]
430
431
431 @LazyProperty
432 @LazyProperty
432 def lexer(self):
433 def lexer(self):
433 """
434 """
434 Returns a Pygments lexer class. Tries to guess the lexer from the file's
435 Returns a Pygments lexer class. Tries to guess the lexer from the file's
435 content, name and mimetype.
436 content, name and mimetype.
436 """
437 """
437 from pygments import lexers
438 from pygments import lexers
439
440 lexer = None
438 try:
441 try:
439 lexer = lexers.guess_lexer_for_filename(self.name, self.content, stripnl=False)
442 lexer = lexers.guess_lexer_for_filename(
443 self.name, self.content, stripnl=False)
440 except lexers.ClassNotFound:
444 except lexers.ClassNotFound:
445 lexer = None
446
447 # try our LANGUAGES_EXTENSIONS_MAP
448 if not lexer:
449 try:
450 lexer_class = LANGUAGES_EXTENSIONS_MAP.get(self.extension)
451 if lexer_class:
452 lexer = lexers.get_lexer_by_name(lexer_class[0])
453 except lexers.ClassNotFound:
454 lexer = None
455
456 if not lexer:
441 lexer = lexers.TextLexer(stripnl=False)
457 lexer = lexers.TextLexer(stripnl=False)
442 # returns first alias
458
443 return lexer
459 return lexer
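
# Reviewer note (not part of this changeset): with the fallback above, lexer
# resolution order is Pygments' guess_lexer_for_filename() on name + content,
# then a lookup of the file extension in LANGUAGES_EXTENSIONS_MAP, and finally
# the plain TextLexer. So a file whose extension has a custom entry in the map
# gets highlighted even when Pygments cannot classify its content alone, e.g.
# a hypothetical FileNode('scripts/build.xyz', content='...').lexer.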
444
460
445 @LazyProperty
461 @LazyProperty
446 def lexer_alias(self):
462 def lexer_alias(self):
447 """
463 """
448 Returns first alias of the lexer guessed for this file.
464 Returns first alias of the lexer guessed for this file.
449 """
465 """
450 return self.lexer.aliases[0]
466 return self.lexer.aliases[0]
451
467
452 @LazyProperty
468 @LazyProperty
453 def history(self):
469 def history(self):
454 """
470 """
455 Returns a list of commits for this file in which the file was changed
471 Returns a list of commits for this file in which the file was changed
456 """
472 """
457 if self.commit is None:
473 if self.commit is None:
458 raise NodeError('Unable to get commit for this FileNode')
474 raise NodeError('Unable to get commit for this FileNode')
459 return self.commit.get_file_history(self.path)
475 return self.commit.get_file_history(self.path)
460
476
461 @LazyProperty
477 @LazyProperty
462 def annotate(self):
478 def annotate(self):
463 """
479 """
464 Returns a list of three element tuples with lineno, commit and line
480 Returns a list of three element tuples with lineno, commit and line
465 """
481 """
466 if self.commit is None:
482 if self.commit is None:
467 raise NodeError('Unable to get commit for this FileNode')
483 raise NodeError('Unable to get commit for this FileNode')
468 pre_load = ["author", "date", "message"]
484 pre_load = ["author", "date", "message"]
469 return self.commit.get_file_annotate(self.path, pre_load=pre_load)
485 return self.commit.get_file_annotate(self.path, pre_load=pre_load)
470
486
471 @LazyProperty
487 @LazyProperty
472 def state(self):
488 def state(self):
473 if not self.commit:
489 if not self.commit:
474 raise NodeError(
490 raise NodeError(
475 "Cannot check state of the node if it's not "
491 "Cannot check state of the node if it's not "
476 "linked with commit")
492 "linked with commit")
477 elif self.path in (node.path for node in self.commit.added):
493 elif self.path in (node.path for node in self.commit.added):
478 return NodeState.ADDED
494 return NodeState.ADDED
479 elif self.path in (node.path for node in self.commit.changed):
495 elif self.path in (node.path for node in self.commit.changed):
480 return NodeState.CHANGED
496 return NodeState.CHANGED
481 else:
497 else:
482 return NodeState.NOT_CHANGED
498 return NodeState.NOT_CHANGED
483
499
484 @LazyProperty
500 @LazyProperty
485 def is_binary(self):
501 def is_binary(self):
486 """
502 """
487 Returns True if file has binary content.
503 Returns True if file has binary content.
488 """
504 """
489 _bin = self.raw_bytes and '\0' in self.raw_bytes
505 _bin = self.raw_bytes and '\0' in self.raw_bytes
490 return _bin
506 return _bin
491
507
492 @LazyProperty
508 @LazyProperty
493 def extension(self):
509 def extension(self):
494 """Returns filenode extension"""
510 """Returns filenode extension"""
495 return self.name.split('.')[-1]
511 return self.name.split('.')[-1]
496
512
497 @property
513 @property
498 def is_executable(self):
514 def is_executable(self):
499 """
515 """
500 Returns ``True`` if file has executable flag turned on.
516 Returns ``True`` if file has executable flag turned on.
501 """
517 """
502 return bool(self.mode & stat.S_IXUSR)
518 return bool(self.mode & stat.S_IXUSR)
503
519
504 def get_largefile_node(self):
520 def get_largefile_node(self):
505 """
521 """
506 Try to return a Mercurial FileNode from this node. It does internal
522 Try to return a Mercurial FileNode from this node. It does internal
507 checks inside the largefile store; if the file exists there, it will
523 checks inside the largefile store; if the file exists there, it will
508 create a special LargeFileNode instance which can get content from the
524 create a special LargeFileNode instance which can get content from the
509 LF store.
525 LF store.
510 """
526 """
511 if self.commit and self.path.startswith(LARGEFILE_PREFIX):
527 if self.commit and self.path.startswith(LARGEFILE_PREFIX):
512 largefile_path = self.path.split(LARGEFILE_PREFIX)[-1].lstrip('/')
528 largefile_path = self.path.split(LARGEFILE_PREFIX)[-1].lstrip('/')
513 return self.commit.get_largefile_node(largefile_path)
529 return self.commit.get_largefile_node(largefile_path)
514
530
515 def lines(self, count_empty=False):
531 def lines(self, count_empty=False):
516 all_lines, empty_lines = 0, 0
532 all_lines, empty_lines = 0, 0
517
533
518 if not self.is_binary:
534 if not self.is_binary:
519 content = self.content
535 content = self.content
520 if count_empty:
536 if count_empty:
521 all_lines = 0
537 all_lines = 0
522 empty_lines = 0
538 empty_lines = 0
523 for line in content.splitlines(True):
539 for line in content.splitlines(True):
524 if line == '\n':
540 if line == '\n':
525 empty_lines += 1
541 empty_lines += 1
526 all_lines += 1
542 all_lines += 1
527
543
528 return all_lines, all_lines - empty_lines
544 return all_lines, all_lines - empty_lines
529 else:
545 else:
530 # fast method
546 # fast method
531 empty_lines = all_lines = content.count('\n')
547 empty_lines = all_lines = content.count('\n')
532 if all_lines == 0 and content:
548 if all_lines == 0 and content:
533 # one-line without a newline
549 # one-line without a newline
534 empty_lines = all_lines = 1
550 empty_lines = all_lines = 1
535
551
536 return all_lines, empty_lines
552 return all_lines, empty_lines
537
553
538 def __repr__(self):
554 def __repr__(self):
539 return '<%s %r @ %s>' % (self.__class__.__name__, self.path,
555 return '<%s %r @ %s>' % (self.__class__.__name__, self.path,
540 getattr(self.commit, 'short_id', ''))
556 getattr(self.commit, 'short_id', ''))
541
557
542
558
543 class RemovedFileNode(FileNode):
559 class RemovedFileNode(FileNode):
544 """
560 """
545 Dummy FileNode class - trying to access any public attribute except path,
561 Dummy FileNode class - trying to access any public attribute except path,
546 name, kind or state (or methods/attributes checking those) would raise
562 name, kind or state (or methods/attributes checking those) would raise
547 RemovedFileNodeError.
563 RemovedFileNodeError.
548 """
564 """
549 ALLOWED_ATTRIBUTES = [
565 ALLOWED_ATTRIBUTES = [
550 'name', 'path', 'state', 'is_root', 'is_file', 'is_dir', 'kind',
566 'name', 'path', 'state', 'is_root', 'is_file', 'is_dir', 'kind',
551 'added', 'changed', 'not_changed', 'removed'
567 'added', 'changed', 'not_changed', 'removed'
552 ]
568 ]
553
569
554 def __init__(self, path):
570 def __init__(self, path):
555 """
571 """
556 :param path: relative path to the node
572 :param path: relative path to the node
557 """
573 """
558 super(RemovedFileNode, self).__init__(path=path)
574 super(RemovedFileNode, self).__init__(path=path)
559
575
560 def __getattribute__(self, attr):
576 def __getattribute__(self, attr):
561 if attr.startswith('_') or attr in RemovedFileNode.ALLOWED_ATTRIBUTES:
577 if attr.startswith('_') or attr in RemovedFileNode.ALLOWED_ATTRIBUTES:
562 return super(RemovedFileNode, self).__getattribute__(attr)
578 return super(RemovedFileNode, self).__getattribute__(attr)
563 raise RemovedFileNodeError(
579 raise RemovedFileNodeError(
564 "Cannot access attribute %s on RemovedFileNode" % attr)
580 "Cannot access attribute %s on RemovedFileNode" % attr)
565
581
566 @LazyProperty
582 @LazyProperty
567 def state(self):
583 def state(self):
568 return NodeState.REMOVED
584 return NodeState.REMOVED
569
585
570
586
571 class DirNode(Node):
587 class DirNode(Node):
572 """
588 """
573 DirNode stores list of files and directories within this node.
589 DirNode stores list of files and directories within this node.
574 Nodes may be used standalone but within repository context they
590 Nodes may be used standalone but within repository context they
575 lazily fetch data within same repositorty's commit.
575 lazily fetch data within the same repository's commit.
591 lazily fetch data within the same repository's commit.
592 """
577
593
578 def __init__(self, path, nodes=(), commit=None):
594 def __init__(self, path, nodes=(), commit=None):
579 """
595 """
580 Only one of ``nodes`` and ``commit`` may be given. Passing both
596 Only one of ``nodes`` and ``commit`` may be given. Passing both
581 would raise ``NodeError`` exception.
597 would raise ``NodeError`` exception.
582
598
583 :param path: relative path to the node
599 :param path: relative path to the node
584 :param nodes: nodes may be passed to constructor
600 :param nodes: nodes may be passed to constructor
585 :param commit: if given, will use it to lazily fetch content
601 :param commit: if given, will use it to lazily fetch content
586 """
602 """
587 if nodes and commit:
603 if nodes and commit:
588 raise NodeError("Cannot use both nodes and commit")
604 raise NodeError("Cannot use both nodes and commit")
589 super(DirNode, self).__init__(path, NodeKind.DIR)
605 super(DirNode, self).__init__(path, NodeKind.DIR)
590 self.commit = commit
606 self.commit = commit
591 self._nodes = nodes
607 self._nodes = nodes
592
608
593 @LazyProperty
609 @LazyProperty
594 def content(self):
610 def content(self):
595 raise NodeError(
611 raise NodeError(
596 "%s represents a dir and has no `content` attribute" % self)
612 "%s represents a dir and has no `content` attribute" % self)
597
613
598 @LazyProperty
614 @LazyProperty
599 def nodes(self):
615 def nodes(self):
600 if self.commit:
616 if self.commit:
601 nodes = self.commit.get_nodes(self.path)
617 nodes = self.commit.get_nodes(self.path)
602 else:
618 else:
603 nodes = self._nodes
619 nodes = self._nodes
604 self._nodes_dict = dict((node.path, node) for node in nodes)
620 self._nodes_dict = dict((node.path, node) for node in nodes)
605 return sorted(nodes)
621 return sorted(nodes)
606
622
607 @LazyProperty
623 @LazyProperty
608 def files(self):
624 def files(self):
609 return sorted((node for node in self.nodes if node.is_file()))
625 return sorted((node for node in self.nodes if node.is_file()))
610
626
611 @LazyProperty
627 @LazyProperty
612 def dirs(self):
628 def dirs(self):
613 return sorted((node for node in self.nodes if node.is_dir()))
629 return sorted((node for node in self.nodes if node.is_dir()))
614
630
615 def __iter__(self):
631 def __iter__(self):
616 for node in self.nodes:
632 for node in self.nodes:
617 yield node
633 yield node
618
634
619 def get_node(self, path):
635 def get_node(self, path):
620 """
636 """
621 Returns node from within this particular ``DirNode``, so it is now
637 Returns node from within this particular ``DirNode``, so it is now
622 allowed to fetch, i.e. node located at 'docs/api/index.rst' from node
638 allowed to fetch, i.e. node located at 'docs/api/index.rst' from node
623 'docs'. In order to access deeper nodes one must fetch nodes between
639 'docs'. In order to access deeper nodes one must fetch nodes between
624 them first - this would work::
640 them first - this would work::
625
641
626 docs = root.get_node('docs')
642 docs = root.get_node('docs')
627 docs.get_node('api').get_node('index.rst')
643 docs.get_node('api').get_node('index.rst')
628
644
629 :param path: path relative to the current node
645 :param path: path relative to the current node
630
646
631 .. note::
647 .. note::
632 To access nodes lazily (as in the example above) the node has to be
648 To access nodes lazily (as in the example above) the node has to be
633 initialized with a related commit object - without it the node is out of
649 initialized with a related commit object - without it the node is out of
634 context and may know nothing about anything else than nearest
650 context and may know nothing about anything else than nearest
635 (located at same level) nodes.
651 (located at same level) nodes.
636 """
652 """
637 try:
653 try:
638 path = path.rstrip('/')
654 path = path.rstrip('/')
639 if path == '':
655 if path == '':
640 raise NodeError("Cannot retrieve node without path")
656 raise NodeError("Cannot retrieve node without path")
641 self.nodes # access nodes first in order to set _nodes_dict
657 self.nodes # access nodes first in order to set _nodes_dict
642 paths = path.split('/')
658 paths = path.split('/')
643 if len(paths) == 1:
659 if len(paths) == 1:
644 if not self.is_root():
660 if not self.is_root():
645 path = '/'.join((self.path, paths[0]))
661 path = '/'.join((self.path, paths[0]))
646 else:
662 else:
647 path = paths[0]
663 path = paths[0]
648 return self._nodes_dict[path]
664 return self._nodes_dict[path]
649 elif len(paths) > 1:
665 elif len(paths) > 1:
650 if self.commit is None:
666 if self.commit is None:
651 raise NodeError(
667 raise NodeError(
652 "Cannot access deeper nodes without commit")
668 "Cannot access deeper nodes without commit")
653 else:
669 else:
654 path1, path2 = paths[0], '/'.join(paths[1:])
670 path1, path2 = paths[0], '/'.join(paths[1:])
655 return self.get_node(path1).get_node(path2)
671 return self.get_node(path1).get_node(path2)
656 else:
672 else:
657 raise KeyError
673 raise KeyError
658 except KeyError:
674 except KeyError:
659 raise NodeError("Node does not exist at %s" % path)
675 raise NodeError("Node does not exist at %s" % path)
660
676
661 @LazyProperty
677 @LazyProperty
662 def state(self):
678 def state(self):
663 raise NodeError("Cannot access state of DirNode")
679 raise NodeError("Cannot access state of DirNode")
664
680
665 @LazyProperty
681 @LazyProperty
666 def size(self):
682 def size(self):
667 size = 0
683 size = 0
668 for root, dirs, files in self.commit.walk(self.path):
684 for root, dirs, files in self.commit.walk(self.path):
669 for f in files:
685 for f in files:
670 size += f.size
686 size += f.size
671
687
672 return size
688 return size
673
689
674 def __repr__(self):
690 def __repr__(self):
675 return '<%s %r @ %s>' % (self.__class__.__name__, self.path,
691 return '<%s %r @ %s>' % (self.__class__.__name__, self.path,
676 getattr(self.commit, 'short_id', ''))
692 getattr(self.commit, 'short_id', ''))
677
693
678
694
679 class RootNode(DirNode):
695 class RootNode(DirNode):
680 """
696 """
681 DirNode being the root node of the repository.
697 DirNode being the root node of the repository.
682 """
698 """
683
699
684 def __init__(self, nodes=(), commit=None):
700 def __init__(self, nodes=(), commit=None):
685 super(RootNode, self).__init__(path='', nodes=nodes, commit=commit)
701 super(RootNode, self).__init__(path='', nodes=nodes, commit=commit)
686
702
687 def __repr__(self):
703 def __repr__(self):
688 return '<%s>' % self.__class__.__name__
704 return '<%s>' % self.__class__.__name__
689
705
690
706
691 class SubModuleNode(Node):
707 class SubModuleNode(Node):
692 """
708 """
693 Represents a submodule of Git or a subrepo of Mercurial
709 Represents a submodule of Git or a subrepo of Mercurial
694 """
710 """
695 is_binary = False
711 is_binary = False
696 size = 0
712 size = 0
697
713
698 def __init__(self, name, url=None, commit=None, alias=None):
714 def __init__(self, name, url=None, commit=None, alias=None):
699 self.path = name
715 self.path = name
700 self.kind = NodeKind.SUBMODULE
716 self.kind = NodeKind.SUBMODULE
701 self.alias = alias
717 self.alias = alias
702
718
703 # we have to use EmptyCommit here since this can point to svn/git/hg
719 # we have to use EmptyCommit here since this can point to svn/git/hg
704 # submodules we cannot get from repository
720 # submodules we cannot get from repository
705 self.commit = EmptyCommit(str(commit), alias=alias)
721 self.commit = EmptyCommit(str(commit), alias=alias)
706 self.url = url or self._extract_submodule_url()
722 self.url = url or self._extract_submodule_url()
707
723
708 def __repr__(self):
724 def __repr__(self):
709 return '<%s %r @ %s>' % (self.__class__.__name__, self.path,
725 return '<%s %r @ %s>' % (self.__class__.__name__, self.path,
710 getattr(self.commit, 'short_id', ''))
726 getattr(self.commit, 'short_id', ''))
711
727
712 def _extract_submodule_url(self):
728 def _extract_submodule_url(self):
713 # TODO: find a way to parse git's submodule file and extract the
729 # TODO: find a way to parse git's submodule file and extract the
714 # linking URL
730 # linking URL
715 return self.path
731 return self.path
716
732
717 @LazyProperty
733 @LazyProperty
718 def name(self):
734 def name(self):
719 """
735 """
720 Returns the name of the node, so if it has a full path
736 Returns the name of the node, so if it has a full path
721 then only the last part is returned.
737 then only the last part is returned.
722 """
738 """
723 org = safe_unicode(self.path.rstrip('/').split('/')[-1])
739 org = safe_unicode(self.path.rstrip('/').split('/')[-1])
724 return u'%s @ %s' % (org, self.commit.short_id)
740 return u'%s @ %s' % (org, self.commit.short_id)
725
741
726
742
727 class LargeFileNode(FileNode):
743 class LargeFileNode(FileNode):
728
744
729 def _validate_path(self, path):
745 def _validate_path(self, path):
730 """
746 """
731 We override the check since the LargeFileNode path is an absolute system path
747 We override the check since the LargeFileNode path is an absolute system path
732 """
748 """
733
749
734 def raw_bytes(self):
750 def raw_bytes(self):
735 if self.commit:
751 if self.commit:
736 with open(self.path, 'rb') as f:
752 with open(self.path, 'rb') as f:
737 content = f.read()
753 content = f.read()
738 else:
754 else:
739 content = self._content
755 content = self._content
740 return content
\ No newline at end of file
756 return content
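
The recursive branch of ``get_node`` above splits the requested path on its first segment and delegates the remainder to the child directory. A minimal standalone sketch of that split-and-delegate idea, using plain dicts instead of the ``DirNode``/commit machinery (so none of the names below are the actual RhodeCode API)::

    # Minimal standalone sketch of the recursive lookup in get_node(): split
    # off the first path segment, resolve it at the current level, then
    # recurse into the child with the remaining path. Plain dicts stand in
    # for directory nodes.
    tree = {
        'docs': {
            'api': {
                'index.rst': 'contents of index.rst',
            },
        },
    }

    def get_node(node, path):
        path = path.rstrip('/')
        if not path:
            raise KeyError("Cannot retrieve node without path")
        first, _, rest = path.partition('/')
        child = node[first]  # raises KeyError if the segment does not exist
        return get_node(child, rest) if rest else child

    print(get_node(tree, 'docs/api/index.rst'))     # contents of index.rst
    print(get_node(get_node(tree, 'docs'), 'api'))  # {'index.rst': ...}

Run directly, this mirrors the chained ``get_node`` calls shown in the docstring example: one call per level, or a single call with a slash-separated path.
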
@@ -1,70 +1,71 b''
1
1
2 <div id="codeblock" class="codeblock">
2 <div id="codeblock" class="codeblock">
3 <div class="codeblock-header">
3 <div class="codeblock-header">
4 <div class="stats">
4 <div class="stats">
5 <span> <strong>${c.file}</strong></span>
5 <span> <strong>${c.file}</strong></span>
6 <span> | ${c.file.lines()[0]} ${ungettext('line', 'lines', c.file.lines()[0])}</span>
6 <span> | ${c.file.lines()[0]} ${ungettext('line', 'lines', c.file.lines()[0])}</span>
7 <span> | ${h.format_byte_size_binary(c.file.size)}</span>
7 <span> | ${h.format_byte_size_binary(c.file.size)}</span>
8 <span class="item last"> | ${c.file.mimetype}</span>
8 <span> | ${c.file.mimetype} </span>
9 <span class="item last"> | ${h.get_lexer_for_filenode(c.file).__class__.__name__}</span>
9 </div>
10 </div>
10 <div class="buttons">
11 <div class="buttons">
11 <a id="file_history_overview" href="#">
12 <a id="file_history_overview" href="#">
12 ${_('History')}
13 ${_('History')}
13 </a>
14 </a>
14 <a id="file_history_overview_full" style="display: none" href="${h.url('changelog_file_home',repo_name=c.repo_name, revision=c.commit.raw_id, f_path=c.f_path)}">
15 <a id="file_history_overview_full" style="display: none" href="${h.url('changelog_file_home',repo_name=c.repo_name, revision=c.commit.raw_id, f_path=c.f_path)}">
15 ${_('Show Full History')}
16 ${_('Show Full History')}
16 </a> |
17 </a> |
17 %if c.annotate:
18 %if c.annotate:
18 ${h.link_to(_('Source'), h.url('files_home', repo_name=c.repo_name,revision=c.commit.raw_id,f_path=c.f_path))}
19 ${h.link_to(_('Source'), h.url('files_home', repo_name=c.repo_name,revision=c.commit.raw_id,f_path=c.f_path))}
19 %else:
20 %else:
20 ${h.link_to(_('Annotation'), h.url('files_annotate_home',repo_name=c.repo_name,revision=c.commit.raw_id,f_path=c.f_path))}
21 ${h.link_to(_('Annotation'), h.url('files_annotate_home',repo_name=c.repo_name,revision=c.commit.raw_id,f_path=c.f_path))}
21 %endif
22 %endif
22 | ${h.link_to(_('Raw'), h.url('files_raw_home',repo_name=c.repo_name,revision=c.commit.raw_id,f_path=c.f_path))}
23 | ${h.link_to(_('Raw'), h.url('files_raw_home',repo_name=c.repo_name,revision=c.commit.raw_id,f_path=c.f_path))}
23 | <a href="${h.url('files_rawfile_home',repo_name=c.repo_name,revision=c.commit.raw_id,f_path=c.f_path)}">
24 | <a href="${h.url('files_rawfile_home',repo_name=c.repo_name,revision=c.commit.raw_id,f_path=c.f_path)}">
24 ${_('Download')}
25 ${_('Download')}
25 </a>
26 </a>
26
27
27 %if h.HasRepoPermissionAny('repository.write','repository.admin')(c.repo_name):
28 %if h.HasRepoPermissionAny('repository.write','repository.admin')(c.repo_name):
28 |
29 |
29 %if c.on_branch_head and c.branch_or_raw_id and not c.file.is_binary:
30 %if c.on_branch_head and c.branch_or_raw_id and not c.file.is_binary:
30 <a href="${h.url('files_edit_home',repo_name=c.repo_name,revision=c.branch_or_raw_id,f_path=c.f_path, anchor='edit')}">
31 <a href="${h.url('files_edit_home',repo_name=c.repo_name,revision=c.branch_or_raw_id,f_path=c.f_path, anchor='edit')}">
31 ${_('Edit on Branch:%s') % c.branch_or_raw_id}
32 ${_('Edit on Branch:%s') % c.branch_or_raw_id}
32 </a>
33 </a>
33 | <a class="btn-danger btn-link" href="${h.url('files_delete_home',repo_name=c.repo_name,revision=c.branch_or_raw_id,f_path=c.f_path, anchor='edit')}">${_('Delete')}
34 | <a class="btn-danger btn-link" href="${h.url('files_delete_home',repo_name=c.repo_name,revision=c.branch_or_raw_id,f_path=c.f_path, anchor='edit')}">${_('Delete')}
34 </a>
35 </a>
35 %elif c.on_branch_head and c.branch_or_raw_id and c.file.is_binary:
36 %elif c.on_branch_head and c.branch_or_raw_id and c.file.is_binary:
36 ${h.link_to(_('Edit'), '#', class_="btn btn-link disabled tooltip", title=_('Editing binary files not allowed'))}
37 ${h.link_to(_('Edit'), '#', class_="btn btn-link disabled tooltip", title=_('Editing binary files not allowed'))}
37 | ${h.link_to(_('Delete'), h.url('files_delete_home',repo_name=c.repo_name,revision=c.branch_or_raw_id,f_path=c.f_path, anchor='edit'),class_="btn-danger btn-link")}
38 | ${h.link_to(_('Delete'), h.url('files_delete_home',repo_name=c.repo_name,revision=c.branch_or_raw_id,f_path=c.f_path, anchor='edit'),class_="btn-danger btn-link")}
38 %else:
39 %else:
39 ${h.link_to(_('Edit'), '#', class_="btn btn-link disabled tooltip", title=_('Editing files allowed only when on branch head commit'))}
40 ${h.link_to(_('Edit'), '#', class_="btn btn-link disabled tooltip", title=_('Editing files allowed only when on branch head commit'))}
40 | ${h.link_to(_('Delete'), '#', class_="btn btn-danger btn-link disabled tooltip", title=_('Deleting files allowed only when on branch head commit'))}
41 | ${h.link_to(_('Delete'), '#', class_="btn btn-danger btn-link disabled tooltip", title=_('Deleting files allowed only when on branch head commit'))}
41 %endif
42 %endif
42 %endif
43 %endif
43 </div>
44 </div>
44 </div>
45 </div>
45 <div id="file_history_container"></div>
46 <div id="file_history_container"></div>
46 <div class="code-body">
47 <div class="code-body">
47 %if c.file.is_binary:
48 %if c.file.is_binary:
48 <div>
49 <div>
49 ${_('Binary file (%s)') % c.file.mimetype}
50 ${_('Binary file (%s)') % c.file.mimetype}
50 </div>
51 </div>
51 %else:
52 %else:
52 % if c.file.size < c.cut_off_limit:
53 % if c.file.size < c.cut_off_limit:
53 %if c.annotate:
54 %if c.annotate:
54 ${h.pygmentize_annotation(c.repo_name,c.file,linenos=True,anchorlinenos=True,lineanchors='L',cssclass="code-highlight")}
55 ${h.pygmentize_annotation(c.repo_name,c.file,linenos=True,anchorlinenos=True,lineanchors='L',cssclass="code-highlight")}
55 %elif c.renderer:
56 %elif c.renderer:
56 ${h.render(c.file.content, renderer=c.renderer)}
57 ${h.render(c.file.content, renderer=c.renderer)}
57 %else:
58 %else:
58 ${h.pygmentize(c.file,linenos=True,anchorlinenos=True,lineanchors='L',cssclass="code-highlight")}
59 ${h.pygmentize(c.file,linenos=True,anchorlinenos=True,lineanchors='L',cssclass="code-highlight")}
59 %endif
60 %endif
60 %else:
61 %else:
61 ${_('File is too big to display')} ${h.link_to(_('Show as raw'),
62 ${_('File is too big to display')} ${h.link_to(_('Show as raw'),
62 h.url('files_raw_home',repo_name=c.repo_name,revision=c.commit.raw_id,f_path=c.f_path))}
63 h.url('files_raw_home',repo_name=c.repo_name,revision=c.commit.raw_id,f_path=c.f_path))}
63 %endif
64 %endif
64 %endif
65 %endif
65 </div>
66 </div>
66 </div>
67 </div>
67
68
68 <script>
69 <script>
69 var source_page = true;
70 var source_page = true;
70 </script>
71 </script>
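
The template header now also prints the name of the Pygments lexer class resolved for the file. A rough, self-contained sketch of how a small extension-override table could sit in front of Pygments' own filename detection; the 'vbs' entry, the 'vb.net' alias and the ``lexer_for`` helper are illustrative assumptions, not the actual ``get_lexer_for_filenode`` implementation::

    from pygments.lexers import get_lexer_by_name, get_lexer_for_filename
    from pygments.util import ClassNotFound

    # Hypothetical override table: file extension -> Pygments lexer alias.
    # The extension and alias here are assumptions for illustration only.
    EXTRA_EXTENSIONS = {
        'vbs': 'vb.net',
    }

    def lexer_for(filename):
        ext = filename.rsplit('.', 1)[-1].lower()
        alias = EXTRA_EXTENSIONS.get(ext)
        if alias:
            try:
                return get_lexer_by_name(alias)
            except ClassNotFound:
                pass  # fall through to Pygments' filename-based detection
        return get_lexer_for_filename(filename)

    print(lexer_for('login.vbs').__class__.__name__)  # VbNetLexer, if the alias is recognised
    print(lexer_for('setup.py').__class__.__name__)   # PythonLexer

The try/except keeps an unknown alias from breaking highlighting: if Pygments does not recognise it, resolution falls back to plain filename detection.
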