release: merge back stable branch into default
marcink
r2329:65ad46d2 merge default
@@ -0,0 +1,41 b''
|RCE| 4.10.2 |RNS|
------------------

Release Date
^^^^^^^^^^^^

- 2017-11-08


New Features
^^^^^^^^^^^^



General
^^^^^^^



Security
^^^^^^^^



Performance
^^^^^^^^^^^



Fixes
^^^^^


- helpers: support for empty descriptions and metatags.



Upgrade notes
^^^^^^^^^^^^^

- Changed helpers to support empty descriptions; no problems are expected during the upgrade.
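The change itself is not shown in this excerpt; as a rough illustration only, "support
for empty descriptions and metatags" amounts to a guard of roughly this shape in the
description/metatag helpers (the function name and tag pattern below are hypothetical,
not the shipped code)::

    import re

    def extract_metatags(description):
        # hypothetical sketch: return early instead of failing on None or ''
        if not description:
            return [], ''
        tags = re.findall(r'\[([a-z]+)\]', description)
        return tags, description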
@@ -1,25 +1,26 b''
1bd3e92b7e2e2d2024152b34bb88dff1db544a71 v4.0.0
170c5398320ea6cddd50955e88d408794c21d43a v4.0.1
c3fe200198f5aa34cf2e4066df2881a9cefe3704 v4.1.0
7fd5c850745e2ea821fb4406af5f4bff9b0a7526 v4.1.1
41c87da28a179953df86061d817bc35533c66dd2 v4.1.2
baaf9f5bcea3bae0ef12ae20c8b270482e62abb6 v4.2.0
32a70c7e56844a825f61df496ee5eaf8c3c4e189 v4.2.1
fa695cdb411d294679ac081d595ac654e5613b03 v4.3.0
0e4dc11b58cad833c513fe17bac39e6850edf959 v4.3.1
8a876f48f5cb1d018b837db28ff928500cb32cfb v4.4.0
8dd86b410b1aac086ffdfc524ef300f896af5047 v4.4.1
d2514226abc8d3b4f6fb57765f47d1b6fb360a05 v4.4.2
27d783325930af6dad2741476c0d0b1b7c8415c2 v4.5.0
7f2016f352abcbdba4a19d4039c386e9629449da v4.5.1
416fec799314c70a5c780fb28b3357b08869333a v4.5.2
27c3b85fafc83143e6678fbc3da69e1615bcac55 v4.6.0
5ad13deb9118c2a5243d4032d4d9cc174e5872db v4.6.1
2be921e01fa24bb102696ada596f87464c3666f6 v4.7.0
7198bdec29c2872c974431d55200d0398354cdb1 v4.7.1
bd1c8d230fe741c2dfd7100a0ef39fd0774fd581 v4.7.2
9731914f89765d9628dc4dddc84bc9402aa124c8 v4.8.0
c5a2b7d0e4bbdebc4a62d7b624befe375207b659 v4.9.0
d9aa3b27ac9f7e78359775c75fedf7bfece232f1 v4.9.1
4ba4d74981cec5d6b28b158f875a2540952c2f74 v4.10.0
0a6821cbd6b0b3c21503002f88800679fa35ab63 v4.10.1
434ad90ec8d621f4416074b84f6e9ce03964defb v4.10.2
@@ -1,102 +1,103 b''
.. _rhodecode-release-notes-ref:

Release Notes
=============

|RCE| 4.x Versions
------------------

.. toctree::
   :maxdepth: 1

   release-notes-4.10.2.rst
   release-notes-4.10.1.rst
   release-notes-4.10.0.rst
   release-notes-4.9.1.rst
   release-notes-4.9.0.rst
   release-notes-4.8.0.rst
   release-notes-4.7.2.rst
   release-notes-4.7.1.rst
   release-notes-4.7.0.rst
   release-notes-4.6.1.rst
   release-notes-4.6.0.rst
   release-notes-4.5.2.rst
   release-notes-4.5.1.rst
   release-notes-4.5.0.rst
   release-notes-4.4.2.rst
   release-notes-4.4.1.rst
   release-notes-4.4.0.rst
   release-notes-4.3.1.rst
   release-notes-4.3.0.rst
   release-notes-4.2.1.rst
   release-notes-4.2.0.rst
   release-notes-4.1.2.rst
   release-notes-4.1.1.rst
   release-notes-4.1.0.rst
   release-notes-4.0.1.rst
   release-notes-4.0.0.rst

|RCE| 3.x Versions
------------------

.. toctree::
   :maxdepth: 1

   release-notes-3.8.4.rst
   release-notes-3.8.3.rst
   release-notes-3.8.2.rst
   release-notes-3.8.1.rst
   release-notes-3.8.0.rst
   release-notes-3.7.1.rst
   release-notes-3.7.0.rst
   release-notes-3.6.1.rst
   release-notes-3.6.0.rst
   release-notes-3.5.2.rst
   release-notes-3.5.1.rst
   release-notes-3.5.0.rst
   release-notes-3.4.1.rst
   release-notes-3.4.0.rst
   release-notes-3.3.4.rst
   release-notes-3.3.3.rst
   release-notes-3.3.2.rst
   release-notes-3.3.1.rst
   release-notes-3.3.0.rst
   release-notes-3.2.3.rst
   release-notes-3.2.2.rst
   release-notes-3.2.1.rst
   release-notes-3.2.0.rst
   release-notes-3.1.1.rst
   release-notes-3.1.0.rst
   release-notes-3.0.2.rst
   release-notes-3.0.1.rst
   release-notes-3.0.0.rst

|RCE| 2.x Versions
------------------

.. toctree::
   :maxdepth: 1

   release-notes-2.2.8.rst
   release-notes-2.2.7.rst
   release-notes-2.2.6.rst
   release-notes-2.2.5.rst
   release-notes-2.2.4.rst
   release-notes-2.2.3.rst
   release-notes-2.2.2.rst
   release-notes-2.2.1.rst
   release-notes-2.2.0.rst
   release-notes-2.1.0.rst
   release-notes-2.0.2.rst
   release-notes-2.0.1.rst
   release-notes-2.0.0.rst

|RCE| 1.x Versions
------------------

.. toctree::
   :maxdepth: 1

   release-notes-1.7.2.rst
   release-notes-1.7.1.rst
   release-notes-1.7.0.rst
   release-notes-1.6.0.rst
@@ -1,2102 +1,2102 b''
# -*- coding: utf-8 -*-

# Copyright (C) 2010-2017 RhodeCode GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License, version 3
# (only), as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This program is dual-licensed. If you wish to learn more about the
# RhodeCode Enterprise Edition, including its added features, Support services,
# and proprietary license terms, please see https://rhodecode.com/licenses/

"""
Helper functions

Consists of functions to typically be used within templates, but also
available to Controllers. This module is available to both as 'h'.
"""

import random
import hashlib
import StringIO
import urllib
import math
import logging
import re
import urlparse
import time
import string
import hashlib
from collections import OrderedDict

import pygments
import itertools
import fnmatch

from datetime import datetime
from functools import partial
from pygments.formatters.html import HtmlFormatter
from pygments import highlight as code_highlight
from pygments.lexers import (
    get_lexer_by_name, get_lexer_for_filename, get_lexer_for_mimetype)

from pyramid.threadlocal import get_current_request

from webhelpers.html import literal, HTML, escape
from webhelpers.html.tools import *
from webhelpers.html.builder import make_tag
from webhelpers.html.tags import auto_discovery_link, checkbox, css_classes, \
    end_form, file, form as wh_form, hidden, image, javascript_link, link_to, \
    link_to_if, link_to_unless, ol, required_legend, select, stylesheet_link, \
    submit, text, password, textarea, title, ul, xml_declaration, radio
from webhelpers.html.tools import auto_link, button_to, highlight, \
    js_obfuscate, mail_to, strip_links, strip_tags, tag_re
from webhelpers.text import chop_at, collapse, convert_accented_entities, \
    convert_misc_entities, lchop, plural, rchop, remove_formatting, \
    replace_whitespace, urlify, truncate, wrap_paragraphs
from webhelpers.date import time_ago_in_words
from webhelpers.paginate import Page as _Page
from webhelpers.html.tags import _set_input_attrs, _set_id_attr, \
    convert_boolean_attrs, NotGiven, _make_safe_id_component
from webhelpers2.number import format_byte_size

from rhodecode.lib.action_parser import action_parser
from rhodecode.lib.ext_json import json
from rhodecode.lib.utils import repo_name_slug, get_custom_lexer
from rhodecode.lib.utils2 import str2bool, safe_unicode, safe_str, \
    get_commit_safe, datetime_to_time, time_to_datetime, time_to_utcdatetime, \
    AttributeDict, safe_int, md5, md5_safe
from rhodecode.lib.markup_renderer import MarkupRenderer, relative_links
from rhodecode.lib.vcs.exceptions import CommitDoesNotExistError
from rhodecode.lib.vcs.backends.base import BaseChangeset, EmptyCommit
from rhodecode.config.conf import DATE_FORMAT, DATETIME_FORMAT
from rhodecode.model.changeset_status import ChangesetStatusModel
from rhodecode.model.db import Permission, User, Repository
from rhodecode.model.repo_group import RepoGroupModel
from rhodecode.model.settings import IssueTrackerSettingsModel

log = logging.getLogger(__name__)


DEFAULT_USER = User.DEFAULT_USER
DEFAULT_USER_EMAIL = User.DEFAULT_USER_EMAIL


def url(*args, **kw):
    from pylons import url as pylons_url
    return pylons_url(*args, **kw)


def asset(path, ver=None, **kwargs):
    """
    Helper to generate a static asset file path for rhodecode assets

    eg. h.asset('images/image.png', ver='3923')

    :param path: path of asset
    :param ver: optional version query param to append as ?ver=
    """
    request = get_current_request()
    query = {}
    query.update(kwargs)
    if ver:
        query = {'ver': ver}
    return request.static_path(
        'rhodecode:public/{}'.format(path), _query=query)


default_html_escape_table = {
    ord('&'): u'&amp;',
    ord('<'): u'&lt;',
    ord('>'): u'&gt;',
    ord('"'): u'&quot;',
    ord("'"): u'&#39;',
}


def html_escape(text, html_escape_table=default_html_escape_table):
    """Produce entities within text."""
    return text.translate(html_escape_table)
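# Illustrative note (not part of the module): html_escape() relies on
# unicode.translate() with the ord() -> entity mapping above, so for example
#   html_escape(u'<b>"quoted"</b>')
# returns
#   u'&lt;b&gt;&quot;quoted&quot;&lt;/b&gt;'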
def chop_at_smart(s, sub, inclusive=False, suffix_if_chopped=None):
    """
    Truncate string ``s`` at the first occurrence of ``sub``.

    If ``inclusive`` is true, truncate just after ``sub`` rather than at it.
    """
    suffix_if_chopped = suffix_if_chopped or ''
    pos = s.find(sub)
    if pos == -1:
        return s

    if inclusive:
        pos += len(sub)

    chopped = s[:pos]
    left = s[pos:].strip()

    if left and suffix_if_chopped:
        chopped += suffix_if_chopped

    return chopped


def shorter(text, size=20):
    postfix = '...'
    if len(text) > size:
        return text[:size - len(postfix)] + postfix
    return text


def _reset(name, value=None, id=NotGiven, type="reset", **attrs):
    """
    Reset button
    """
    _set_input_attrs(attrs, type, name, value)
    _set_id_attr(attrs, id, name)
    convert_boolean_attrs(attrs, ["disabled"])
    return HTML.input(**attrs)

reset = _reset
safeid = _make_safe_id_component


def branding(name, length=40):
    return truncate(name, length, indicator="")


def FID(raw_id, path):
    """
    Creates a unique ID for filenode based on it's hash of path and commit
    it's safe to use in urls

    :param raw_id:
    :param path:
    """

    return 'c-%s-%s' % (short_id(raw_id), md5_safe(path)[:12])


class _GetError(object):
    """Get error from form_errors, and represent it as span wrapped error
    message

    :param field_name: field to fetch errors for
    :param form_errors: form errors dict
    """

    def __call__(self, field_name, form_errors):
        tmpl = """<span class="error_msg">%s</span>"""
        if form_errors and field_name in form_errors:
            return literal(tmpl % form_errors.get(field_name))

get_error = _GetError()


class _ToolTip(object):

    def __call__(self, tooltip_title, trim_at=50):
        """
        Special function just to wrap our text into nice formatted
        autowrapped text

        :param tooltip_title:
        """
        tooltip_title = escape(tooltip_title)
        tooltip_title = tooltip_title.replace('<', '&lt;').replace('>', '&gt;')
        return tooltip_title
tooltip = _ToolTip()


def files_breadcrumbs(repo_name, commit_id, file_path):
    if isinstance(file_path, str):
        file_path = safe_unicode(file_path)

    # TODO: johbo: Is this always a url like path, or is this operating
    # system dependent?
    path_segments = file_path.split('/')

    repo_name_html = escape(repo_name)
    if len(path_segments) == 1 and path_segments[0] == '':
        url_segments = [repo_name_html]
    else:
        url_segments = [
            link_to(
                repo_name_html,
                route_path(
                    'repo_files',
                    repo_name=repo_name,
                    commit_id=commit_id,
                    f_path=''),
                class_='pjax-link')]

    last_cnt = len(path_segments) - 1
    for cnt, segment in enumerate(path_segments):
        if not segment:
            continue
        segment_html = escape(segment)

        if cnt != last_cnt:
            url_segments.append(
                link_to(
                    segment_html,
                    route_path(
                        'repo_files',
                        repo_name=repo_name,
                        commit_id=commit_id,
                        f_path='/'.join(path_segments[:cnt + 1])),
                    class_='pjax-link'))
        else:
            url_segments.append(segment_html)

    return literal('/'.join(url_segments))


class CodeHtmlFormatter(HtmlFormatter):
    """
    My code Html Formatter for source codes
    """

    def wrap(self, source, outfile):
        return self._wrap_div(self._wrap_pre(self._wrap_code(source)))

    def _wrap_code(self, source):
        for cnt, it in enumerate(source):
            i, t = it
            t = '<div id="L%s">%s</div>' % (cnt + 1, t)
            yield i, t

    def _wrap_tablelinenos(self, inner):
        dummyoutfile = StringIO.StringIO()
        lncount = 0
        for t, line in inner:
            if t:
                lncount += 1
            dummyoutfile.write(line)

        fl = self.linenostart
        mw = len(str(lncount + fl - 1))
        sp = self.linenospecial
        st = self.linenostep
        la = self.lineanchors
        aln = self.anchorlinenos
        nocls = self.noclasses
        if sp:
            lines = []

            for i in range(fl, fl + lncount):
                if i % st == 0:
                    if i % sp == 0:
                        if aln:
                            lines.append('<a href="#%s%d" class="special">%*d</a>' %
                                         (la, i, mw, i))
                        else:
                            lines.append('<span class="special">%*d</span>' % (mw, i))
                    else:
                        if aln:
                            lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
                        else:
                            lines.append('%*d' % (mw, i))
                else:
                    lines.append('')
            ls = '\n'.join(lines)
        else:
            lines = []
            for i in range(fl, fl + lncount):
                if i % st == 0:
                    if aln:
                        lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
                    else:
                        lines.append('%*d' % (mw, i))
                else:
                    lines.append('')
            ls = '\n'.join(lines)

        # in case you wonder about the seemingly redundant <div> here: since the
        # content in the other cell also is wrapped in a div, some browsers in
        # some configurations seem to mess up the formatting...
        if nocls:
            yield 0, ('<table class="%stable">' % self.cssclass +
                      '<tr><td><div class="linenodiv" '
                      'style="background-color: #f0f0f0; padding-right: 10px">'
                      '<pre style="line-height: 125%">' +
                      ls + '</pre></div></td><td id="hlcode" class="code">')
        else:
            yield 0, ('<table class="%stable">' % self.cssclass +
                      '<tr><td class="linenos"><div class="linenodiv"><pre>' +
                      ls + '</pre></div></td><td id="hlcode" class="code">')
        yield 0, dummyoutfile.getvalue()
        yield 0, '</td></tr></table>'


class SearchContentCodeHtmlFormatter(CodeHtmlFormatter):
    def __init__(self, **kw):
        # only show these line numbers if set
        self.only_lines = kw.pop('only_line_numbers', [])
        self.query_terms = kw.pop('query_terms', [])
        self.max_lines = kw.pop('max_lines', 5)
        self.line_context = kw.pop('line_context', 3)
        self.url = kw.pop('url', None)

        super(CodeHtmlFormatter, self).__init__(**kw)

    def _wrap_code(self, source):
        for cnt, it in enumerate(source):
            i, t = it
            t = '<pre>%s</pre>' % t
            yield i, t

    def _wrap_tablelinenos(self, inner):
        yield 0, '<table class="code-highlight %stable">' % self.cssclass

        last_shown_line_number = 0
        current_line_number = 1

        for t, line in inner:
            if not t:
                yield t, line
                continue

            if current_line_number in self.only_lines:
                if last_shown_line_number + 1 != current_line_number:
                    yield 0, '<tr>'
                    yield 0, '<td class="line">...</td>'
                    yield 0, '<td id="hlcode" class="code"></td>'
                    yield 0, '</tr>'

                yield 0, '<tr>'
                if self.url:
                    yield 0, '<td class="line"><a href="%s#L%i">%i</a></td>' % (
                        self.url, current_line_number, current_line_number)
                else:
                    yield 0, '<td class="line"><a href="">%i</a></td>' % (
                        current_line_number)
                yield 0, '<td id="hlcode" class="code">' + line + '</td>'
                yield 0, '</tr>'

                last_shown_line_number = current_line_number

            current_line_number += 1

        yield 0, '</table>'


def extract_phrases(text_query):
    """
    Extracts phrases from search term string making sure phrases
    contained in double quotes are kept together - and discarding empty values
    or fully whitespace values eg.

    'some text "a phrase" more' => ['some', 'text', 'a phrase', 'more']

    """

    in_phrase = False
    buf = ''
    phrases = []
    for char in text_query:
        if in_phrase:
            if char == '"':  # end phrase
                phrases.append(buf)
                buf = ''
                in_phrase = False
                continue
            else:
                buf += char
                continue
        else:
            if char == '"':  # start phrase
                in_phrase = True
                phrases.append(buf)
                buf = ''
                continue
            elif char == ' ':
                phrases.append(buf)
                buf = ''
                continue
            else:
                buf += char

    phrases.append(buf)
    phrases = [phrase.strip() for phrase in phrases if phrase.strip()]
    return phrases


def get_matching_offsets(text, phrases):
    """
    Returns a list of string offsets in `text` that the list of `terms` match

    >>> get_matching_offsets('some text here', ['some', 'here'])
    [(0, 4), (10, 14)]

    """
    offsets = []
    for phrase in phrases:
        for match in re.finditer(phrase, text):
            offsets.append((match.start(), match.end()))

    return offsets


def normalize_text_for_matching(x):
    """
    Replaces all non alnum characters to spaces and lower cases the string,
    useful for comparing two text strings without punctuation
    """
    return re.sub(r'[^\w]', ' ', x.lower())


def get_matching_line_offsets(lines, terms):
    """ Return a set of `lines` indices (starting from 1) matching a
    text search query, along with `context` lines above/below matching lines

    :param lines: list of strings representing lines
    :param terms: search term string to match in lines eg. 'some text'
    :param context: number of lines above/below a matching line to add to result
    :param max_lines: cut off for lines of interest
    eg.

    text = '''
    words words words
    words words words
    some text some
    words words words
    words words words
    text here what
    '''
    get_matching_line_offsets(text, 'text', context=1)
    {3: [(5, 9)], 6: [(0, 4)]]

    """
    matching_lines = {}
    phrases = [normalize_text_for_matching(phrase)
               for phrase in extract_phrases(terms)]

    for line_index, line in enumerate(lines, start=1):
        match_offsets = get_matching_offsets(
            normalize_text_for_matching(line), phrases)
        if match_offsets:
            matching_lines[line_index] = match_offsets

    return matching_lines
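# Usage sketch (not part of the module): the three helpers above compose into
# the search-highlighting pipeline, e.g.
#   lines = ['words words', 'some text here', 'more words']
#   get_matching_line_offsets(lines, 'text here')
# extracts the phrases 'text' and 'here', normalizes each line, and returns
#   {2: [(5, 9), (10, 14)]}
# i.e. line 2 matched at those character offsets.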
def hsv_to_rgb(h, s, v):
    """ Convert hsv color values to rgb """

    if s == 0.0:
        return v, v, v
    i = int(h * 6.0)  # XXX assume int() truncates!
    f = (h * 6.0) - i
    p = v * (1.0 - s)
    q = v * (1.0 - s * f)
    t = v * (1.0 - s * (1.0 - f))
    i = i % 6
    if i == 0:
        return v, t, p
    if i == 1:
        return q, v, p
    if i == 2:
        return p, v, t
    if i == 3:
        return p, q, v
    if i == 4:
        return t, p, v
    if i == 5:
        return v, p, q
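# Quick sanity check (not part of the module), using values that are exact in
# floating point:
#   hsv_to_rgb(0.0, 1.0, 1.0) == (1.0, 0.0, 0.0)   # pure red
#   hsv_to_rgb(0.5, 1.0, 1.0) == (0.0, 1.0, 1.0)   # cyan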
def unique_color_generator(n=10000, saturation=0.10, lightness=0.95):
    """
    Generator for getting n of evenly distributed colors using
    hsv color and golden ratio. It always return same order of colors

    :param n: number of colors to generate
    :param saturation: saturation of returned colors
    :param lightness: lightness of returned colors
    :returns: RGB tuple
    """

    golden_ratio = 0.618033988749895
    h = 0.22717784590367374

    for _ in xrange(n):
        h += golden_ratio
        h %= 1
        HSV_tuple = [h, saturation, lightness]
        RGB_tuple = hsv_to_rgb(*HSV_tuple)
        yield map(lambda x: str(int(x * 256)), RGB_tuple)


def color_hasher(n=10000, saturation=0.10, lightness=0.95):
    """
    Returns a function which when called with an argument returns a unique
    color for that argument, eg.

    :param n: number of colors to generate
    :param saturation: saturation of returned colors
    :param lightness: lightness of returned colors
    :returns: css RGB string

    >>> color_hash = color_hasher()
    >>> color_hash('hello')
    'rgb(34, 12, 59)'
    >>> color_hash('hello')
    'rgb(34, 12, 59)'
    >>> color_hash('other')
    'rgb(90, 224, 159)'
    """

    color_dict = {}
    cgenerator = unique_color_generator(
        saturation=saturation, lightness=lightness)

    def get_color_string(thing):
        if thing in color_dict:
            col = color_dict[thing]
        else:
            col = color_dict[thing] = cgenerator.next()
        return "rgb(%s)" % (', '.join(col))

    return get_color_string


def get_lexer_safe(mimetype=None, filepath=None):
    """
    Tries to return a relevant pygments lexer using mimetype/filepath name,
    defaulting to plain text if none could be found
    """
    lexer = None
    try:
        if mimetype:
            lexer = get_lexer_for_mimetype(mimetype)
        if not lexer:
            lexer = get_lexer_for_filename(filepath)
    except pygments.util.ClassNotFound:
        pass

    if not lexer:
        lexer = get_lexer_by_name('text')

    return lexer
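# Usage sketch (not part of the module): both lookup paths fall back to the
# plain 'text' lexer instead of raising pygments.util.ClassNotFound, e.g.
#   get_lexer_safe(mimetype='text/x-python').name          -> 'Python'
#   get_lexer_safe(filepath='file_without_known_extension') -> TextLexer instance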
def get_lexer_for_filenode(filenode):
    lexer = get_custom_lexer(filenode.extension) or filenode.lexer
    return lexer


def pygmentize(filenode, **kwargs):
    """
    pygmentize function using pygments

    :param filenode:
    """
    lexer = get_lexer_for_filenode(filenode)
    return literal(code_highlight(filenode.content, lexer,
                                  CodeHtmlFormatter(**kwargs)))


def is_following_repo(repo_name, user_id):
    from rhodecode.model.scm import ScmModel
    return ScmModel().is_following_repo(repo_name, user_id)


class _Message(object):
    """A message returned by ``Flash.pop_messages()``.

    Converting the message to a string returns the message text. Instances
    also have the following attributes:

    * ``message``: the message text.
    * ``category``: the category specified when the message was created.
    """

    def __init__(self, category, message):
        self.category = category
        self.message = message

    def __str__(self):
        return self.message

    __unicode__ = __str__

    def __html__(self):
        return escape(safe_unicode(self.message))


class Flash(object):
    # List of allowed categories. If None, allow any category.
    categories = ["warning", "notice", "error", "success"]

    # Default category if none is specified.
    default_category = "notice"

    def __init__(self, session_key="flash", categories=None,
                 default_category=None):
        """
        Instantiate a ``Flash`` object.

        ``session_key`` is the key to save the messages under in the user's
        session.

        ``categories`` is an optional list which overrides the default list
        of categories.

        ``default_category`` overrides the default category used for messages
        when none is specified.
        """
        self.session_key = session_key
        if categories is not None:
            self.categories = categories
        if default_category is not None:
            self.default_category = default_category
        if self.categories and self.default_category not in self.categories:
            raise ValueError(
                "unrecognized default category %r" % (self.default_category,))

    def pop_messages(self, session=None, request=None):
        """
        Return all accumulated messages and delete them from the session.

        The return value is a list of ``Message`` objects.
        """
        messages = []

        if not session:
            if not request:
                request = get_current_request()
            session = request.session

        # Pop the 'old' pylons flash messages. They are tuples of the form
        # (category, message)
        for cat, msg in session.pop(self.session_key, []):
            messages.append(_Message(cat, msg))

        # Pop the 'new' pyramid flash messages for each category as list
        # of strings.
        for cat in self.categories:
            for msg in session.pop_flash(queue=cat):
                messages.append(_Message(cat, msg))
        # Map messages from the default queue to the 'notice' category.
        for msg in session.pop_flash():
            messages.append(_Message('notice', msg))

        session.save()
        return messages

    def json_alerts(self, session=None, request=None):
        payloads = []
        messages = flash.pop_messages(session=session, request=request)
        if messages:
            for message in messages:
                subdata = {}
                if hasattr(message.message, 'rsplit'):
                    flash_data = message.message.rsplit('|DELIM|', 1)
                    org_message = flash_data[0]
                    if len(flash_data) > 1:
                        subdata = json.loads(flash_data[1])
                else:
                    org_message = message.message
                payloads.append({
                    'message': {
                        'message': u'{}'.format(org_message),
                        'level': message.category,
                        'force': True,
                        'subdata': subdata
                    }
                })
        return json.dumps(payloads)

    def __call__(self, message, category=None, ignore_duplicate=False,
                 session=None, request=None):

        if not session:
            if not request:
                request = get_current_request()
            session = request.session

        session.flash(
            message, queue=category, allow_duplicate=not ignore_duplicate)


flash = Flash()
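# Usage sketch (not part of the module): the singleton above is what templates
# and controllers see as h.flash; calling it queues a message in the pyramid
# session for the given category, and pop_messages() drains the queue on the
# next rendered page, e.g.
#   flash(u'Repository created', category='success')
#   flash(u'Action failed', category='error', ignore_duplicate=True)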
735
735
736 #==============================================================================
736 #==============================================================================
737 # SCM FILTERS available via h.
737 # SCM FILTERS available via h.
738 #==============================================================================
738 #==============================================================================
739 from rhodecode.lib.vcs.utils import author_name, author_email
739 from rhodecode.lib.vcs.utils import author_name, author_email
740 from rhodecode.lib.utils2 import credentials_filter, age as _age
740 from rhodecode.lib.utils2 import credentials_filter, age as _age
741 from rhodecode.model.db import User, ChangesetStatus
741 from rhodecode.model.db import User, ChangesetStatus
742
742
743 age = _age
743 age = _age
744 capitalize = lambda x: x.capitalize()
744 capitalize = lambda x: x.capitalize()
745 email = author_email
745 email = author_email
746 short_id = lambda x: x[:12]
746 short_id = lambda x: x[:12]
747 hide_credentials = lambda x: ''.join(credentials_filter(x))
747 hide_credentials = lambda x: ''.join(credentials_filter(x))
748
748
749
749
750 def age_component(datetime_iso, value=None, time_is_local=False):
750 def age_component(datetime_iso, value=None, time_is_local=False):
751 title = value or format_date(datetime_iso)
751 title = value or format_date(datetime_iso)
752 tzinfo = '+00:00'
752 tzinfo = '+00:00'
753
753
754 # detect if we have a timezone info, otherwise, add it
754 # detect if we have a timezone info, otherwise, add it
755 if isinstance(datetime_iso, datetime) and not datetime_iso.tzinfo:
755 if isinstance(datetime_iso, datetime) and not datetime_iso.tzinfo:
756 if time_is_local:
756 if time_is_local:
757 tzinfo = time.strftime("+%H:%M",
757 tzinfo = time.strftime("+%H:%M",
758 time.gmtime(
758 time.gmtime(
759 (datetime.now() - datetime.utcnow()).seconds + 1
759 (datetime.now() - datetime.utcnow()).seconds + 1
760 )
760 )
761 )
761 )
762
762
763 return literal(
763 return literal(
764 '<time class="timeago tooltip" '
764 '<time class="timeago tooltip" '
765 'title="{1}{2}" datetime="{0}{2}">{1}</time>'.format(
765 'title="{1}{2}" datetime="{0}{2}">{1}</time>'.format(
766 datetime_iso, title, tzinfo))
766 datetime_iso, title, tzinfo))
767
767
768
768
769 def _shorten_commit_id(commit_id):
769 def _shorten_commit_id(commit_id):
770 from rhodecode import CONFIG
770 from rhodecode import CONFIG
771 def_len = safe_int(CONFIG.get('rhodecode_show_sha_length', 12))
771 def_len = safe_int(CONFIG.get('rhodecode_show_sha_length', 12))
772 return commit_id[:def_len]
772 return commit_id[:def_len]
773
773
774
774
775 def show_id(commit):
775 def show_id(commit):
776 """
776 """
777 Configurable function that shows ID
777 Configurable function that shows ID
778 by default it's r123:fffeeefffeee
778 by default it's r123:fffeeefffeee
779
779
780 :param commit: commit instance
780 :param commit: commit instance
781 """
781 """
782 from rhodecode import CONFIG
782 from rhodecode import CONFIG
783 show_idx = str2bool(CONFIG.get('rhodecode_show_revision_number', True))
783 show_idx = str2bool(CONFIG.get('rhodecode_show_revision_number', True))
784
784
785 raw_id = _shorten_commit_id(commit.raw_id)
785 raw_id = _shorten_commit_id(commit.raw_id)
786 if show_idx:
786 if show_idx:
787 return 'r%s:%s' % (commit.idx, raw_id)
787 return 'r%s:%s' % (commit.idx, raw_id)
788 else:
788 else:
789 return '%s' % (raw_id, )
789 return '%s' % (raw_id, )
790
790
791
791
792 def format_date(date):
792 def format_date(date):
793 """
793 """
794 use a standardized formatting for dates used in RhodeCode
794 use a standardized formatting for dates used in RhodeCode
795
795
796 :param date: date/datetime object
796 :param date: date/datetime object
797 :return: formatted date
797 :return: formatted date
798 """
798 """
799
799
800 if date:
800 if date:
801 _fmt = "%a, %d %b %Y %H:%M:%S"
801 _fmt = "%a, %d %b %Y %H:%M:%S"
802 return safe_unicode(date.strftime(_fmt))
802 return safe_unicode(date.strftime(_fmt))
803
803
804 return u""
804 return u""
805
805
806
806
807 class _RepoChecker(object):
807 class _RepoChecker(object):
808
808
809 def __init__(self, backend_alias):
809 def __init__(self, backend_alias):
810 self._backend_alias = backend_alias
810 self._backend_alias = backend_alias
811
811
812 def __call__(self, repository):
812 def __call__(self, repository):
813 if hasattr(repository, 'alias'):
813 if hasattr(repository, 'alias'):
814 _type = repository.alias
814 _type = repository.alias
815 elif hasattr(repository, 'repo_type'):
815 elif hasattr(repository, 'repo_type'):
816 _type = repository.repo_type
816 _type = repository.repo_type
817 else:
817 else:
818 _type = repository
818 _type = repository
819 return _type == self._backend_alias
819 return _type == self._backend_alias
820
820
821 is_git = _RepoChecker('git')
821 is_git = _RepoChecker('git')
822 is_hg = _RepoChecker('hg')
822 is_hg = _RepoChecker('hg')
823 is_svn = _RepoChecker('svn')
823 is_svn = _RepoChecker('svn')
824
824
825
825
826 def get_repo_type_by_name(repo_name):
826 def get_repo_type_by_name(repo_name):
827 repo = Repository.get_by_repo_name(repo_name)
827 repo = Repository.get_by_repo_name(repo_name)
828 return repo.repo_type
828 return repo.repo_type
829
829
830
830
831 def is_svn_without_proxy(repository):
831 def is_svn_without_proxy(repository):
832 if is_svn(repository):
832 if is_svn(repository):
833 from rhodecode.model.settings import VcsSettingsModel
833 from rhodecode.model.settings import VcsSettingsModel
834 conf = VcsSettingsModel().get_ui_settings_as_config_obj()
834 conf = VcsSettingsModel().get_ui_settings_as_config_obj()
835 return not str2bool(conf.get('vcs_svn_proxy', 'http_requests_enabled'))
835 return not str2bool(conf.get('vcs_svn_proxy', 'http_requests_enabled'))
836 return False
836 return False
837
837
838
838
839 def discover_user(author):
839 def discover_user(author):
840 """
840 """
841 Tries to discover RhodeCode User based on the autho string. Author string
841 Tries to discover RhodeCode User based on the autho string. Author string
842 is typically `FirstName LastName <email@address.com>`
842 is typically `FirstName LastName <email@address.com>`
843 """
843 """
844
844
845 # if author is already an instance use it for extraction
845 # if author is already an instance use it for extraction
846 if isinstance(author, User):
846 if isinstance(author, User):
847 return author
847 return author
848
848
849 # Valid email in the attribute passed, see if they're in the system
849 # Valid email in the attribute passed, see if they're in the system
850 _email = author_email(author)
850 _email = author_email(author)
851 if _email != '':
851 if _email != '':
852 user = User.get_by_email(_email, case_insensitive=True, cache=True)
852 user = User.get_by_email(_email, case_insensitive=True, cache=True)
853 if user is not None:
853 if user is not None:
854 return user
854 return user
855
855
856 # Maybe it's a username; try to extract it and fetch the user by username
856 # Maybe it's a username; try to extract it and fetch the user by username
857 _author = author_name(author)
857 _author = author_name(author)
858 user = User.get_by_username(_author, case_insensitive=True, cache=True)
858 user = User.get_by_username(_author, case_insensitive=True, cache=True)
859 if user is not None:
859 if user is not None:
860 return user
860 return user
861
861
862 return None
862 return None
863
863
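# Illustrative sketch of the lookup order in `discover_user` (a configured
# database session is required; the author string below is made up):
#
#   discover_user('Jane Doe <jane@example.com>')
#     1. returns the argument untouched if it is already a `User` instance
#     2. otherwise tries User.get_by_email('jane@example.com')
#     3. otherwise tries User.get_by_username on the extracted name part
#     4. returns None when nothing matches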
864
864
865 def email_or_none(author):
865 def email_or_none(author):
866 # extract email from the commit string
866 # extract email from the commit string
867 _email = author_email(author)
867 _email = author_email(author)
868
868
869 # If we have an email, use it, otherwise
869 # If we have an email, use it, otherwise
870 # see if it contains a username we can get an email from
870 # see if it contains a username we can get an email from
871 if _email != '':
871 if _email != '':
872 return _email
872 return _email
873 else:
873 else:
874 user = User.get_by_username(
874 user = User.get_by_username(
875 author_name(author), case_insensitive=True, cache=True)
875 author_name(author), case_insensitive=True, cache=True)
876
876
877 if user is not None:
877 if user is not None:
878 return user.email
878 return user.email
879
879
880 # No valid email, not a valid user in the system, none!
880 # No valid email, not a valid user in the system, none!
881 return None
881 return None
882
882
883
883
884 def link_to_user(author, length=0, **kwargs):
884 def link_to_user(author, length=0, **kwargs):
885 user = discover_user(author)
885 user = discover_user(author)
886 # user can be None, but if we already have it we can re-use it
886 # user can be None, but if we already have it we can re-use it
887 # in the person() function, saving one expensive query
887 # in the person() function, saving one expensive query
888 if user:
888 if user:
889 author = user
889 author = user
890
890
891 display_person = person(author, 'username_or_name_or_email')
891 display_person = person(author, 'username_or_name_or_email')
892 if length:
892 if length:
893 display_person = shorter(display_person, length)
893 display_person = shorter(display_person, length)
894
894
895 if user:
895 if user:
896 return link_to(
896 return link_to(
897 escape(display_person),
897 escape(display_person),
898 route_path('user_profile', username=user.username),
898 route_path('user_profile', username=user.username),
899 **kwargs)
899 **kwargs)
900 else:
900 else:
901 return escape(display_person)
901 return escape(display_person)
902
902
903
903
904 def person(author, show_attr="username_and_name"):
904 def person(author, show_attr="username_and_name"):
905 user = discover_user(author)
905 user = discover_user(author)
906 if user:
906 if user:
907 return getattr(user, show_attr)
907 return getattr(user, show_attr)
908 else:
908 else:
909 _author = author_name(author)
909 _author = author_name(author)
910 _email = email(author)
910 _email = email(author)
911 return _author or _email
911 return _author or _email
912
912
913
913
914 def author_string(email):
914 def author_string(email):
915 if email:
915 if email:
916 user = User.get_by_email(email, case_insensitive=True, cache=True)
916 user = User.get_by_email(email, case_insensitive=True, cache=True)
917 if user:
917 if user:
918 if user.first_name or user.last_name:
918 if user.first_name or user.last_name:
919 return '%s %s &lt;%s&gt;' % (
919 return '%s %s &lt;%s&gt;' % (
920 user.first_name, user.last_name, email)
920 user.first_name, user.last_name, email)
921 else:
921 else:
922 return email
922 return email
923 else:
923 else:
924 return email
924 return email
925 else:
925 else:
926 return None
926 return None
927
927
928
928
929 def person_by_id(id_, show_attr="username_and_name"):
929 def person_by_id(id_, show_attr="username_and_name"):
930 # attr to return from fetched user
930 # attr to return from fetched user
931 person_getter = lambda usr: getattr(usr, show_attr)
931 person_getter = lambda usr: getattr(usr, show_attr)
932
932
933 # maybe it's an ID?
933 # maybe it's an ID?
934 if str(id_).isdigit() or isinstance(id_, int):
934 if str(id_).isdigit() or isinstance(id_, int):
935 id_ = int(id_)
935 id_ = int(id_)
936 user = User.get(id_)
936 user = User.get(id_)
937 if user is not None:
937 if user is not None:
938 return person_getter(user)
938 return person_getter(user)
939 return id_
939 return id_
940
940
941
941
942 def gravatar_with_user(request, author, show_disabled=False):
942 def gravatar_with_user(request, author, show_disabled=False):
943 _render = request.get_partial_renderer(
943 _render = request.get_partial_renderer(
944 'rhodecode:templates/base/base.mako')
944 'rhodecode:templates/base/base.mako')
945 return _render('gravatar_with_user', author, show_disabled=show_disabled)
945 return _render('gravatar_with_user', author, show_disabled=show_disabled)
946
946
947
947
948 tags_paterns = OrderedDict((
948 tags_paterns = OrderedDict((
949 ('lang', (re.compile(r'\[(lang|language)\ \=\&gt;\ *([a-zA-Z\-\/\#\+\.]*)\]'),
949 ('lang', (re.compile(r'\[(lang|language)\ \=\&gt;\ *([a-zA-Z\-\/\#\+\.]*)\]'),
950 '<div class="metatag" tag="lang">\\2</div>')),
950 '<div class="metatag" tag="lang">\\2</div>')),
951
951
952 ('see', (re.compile(r'\[see\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]'),
952 ('see', (re.compile(r'\[see\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]'),
953 '<div class="metatag" tag="see">see: \\1 </div>')),
953 '<div class="metatag" tag="see">see: \\1 </div>')),
954
954
955 ('url', (re.compile(r'\[url\ \=\&gt;\ \[([a-zA-Z0-9\ \.\-\_]+)\]\((.*?)\)\]'),
955 ('url', (re.compile(r'\[url\ \=\&gt;\ \[([a-zA-Z0-9\ \.\-\_]+)\]\((.*?)\)\]'),
956 '<div class="metatag" tag="url"> <a href="\\2">\\1</a> </div>')),
956 '<div class="metatag" tag="url"> <a href="\\2">\\1</a> </div>')),
957
957
958 ('license', (re.compile(r'\[license\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]'),
958 ('license', (re.compile(r'\[license\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]'),
959 '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/\\1">\\1</a></div>')),
959 '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/\\1">\\1</a></div>')),
960
960
961 ('ref', (re.compile(r'\[(requires|recommends|conflicts|base)\ \=\&gt;\ *([a-zA-Z0-9\-\/]*)\]'),
961 ('ref', (re.compile(r'\[(requires|recommends|conflicts|base)\ \=\&gt;\ *([a-zA-Z0-9\-\/]*)\]'),
962 '<div class="metatag" tag="ref \\1">\\1: <a href="/\\2">\\2</a></div>')),
962 '<div class="metatag" tag="ref \\1">\\1: <a href="/\\2">\\2</a></div>')),
963
963
964 ('state', (re.compile(r'\[(stable|featured|stale|dead|dev|deprecated)\]'),
964 ('state', (re.compile(r'\[(stable|featured|stale|dead|dev|deprecated)\]'),
965 '<div class="metatag" tag="state \\1">\\1</div>')),
965 '<div class="metatag" tag="state \\1">\\1</div>')),
966
966
967 # label in grey
967 # label in grey
968 ('label', (re.compile(r'\[([a-z]+)\]'),
968 ('label', (re.compile(r'\[([a-z]+)\]'),
969 '<div class="metatag" tag="label">\\1</div>')),
969 '<div class="metatag" tag="label">\\1</div>')),
970
970
971 # generic catch all in grey
971 # generic catch all in grey
972 ('generic', (re.compile(r'\[([a-zA-Z0-9\.\-\_]+)\]'),
972 ('generic', (re.compile(r'\[([a-zA-Z0-9\.\-\_]+)\]'),
973 '<div class="metatag" tag="generic">\\1</div>')),
973 '<div class="metatag" tag="generic">\\1</div>')),
974 ))
974 ))
975
975
976
976
977 def extract_metatags(value):
977 def extract_metatags(value):
978 """
978 """
979 Extract supported meta-tags from given text value
979 Extract supported meta-tags from given text value
980 """
980 """
981 tags = []
981 if not value:
982 if not value:
982 return ''
983 return tags, ''
983
984
984 tags = []
985 for key, val in tags_paterns.items():
985 for key, val in tags_paterns.items():
986 pat, replace_html = val
986 pat, replace_html = val
987 tags.extend([(key, x.group()) for x in pat.finditer(value)])
987 tags.extend([(key, x.group()) for x in pat.finditer(value)])
988 value = pat.sub('', value)
988 value = pat.sub('', value)
989
989
990 return tags, value
990 return tags, value
991
991
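# Illustrative sketch: `extract_metatags` is typically fed an (already
# HTML-escaped) repository description, since the patterns above match the
# escaped form of `=>` (`=&gt;`). The sample description is made up.
def _example_extract_metatags():
    desc = 'CPython mirror [stable] [lang =&gt; python]'
    tags, remaining = extract_metatags(desc)
    # tags      -> [('lang', '[lang =&gt; python]'), ('state', '[stable]')]
    # remaining -> 'CPython mirror  '  (matched tags are stripped from the text)
    return tags, remaining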
992
992
993 def style_metatag(tag_type, value):
993 def style_metatag(tag_type, value):
994 """
994 """
995 converts tags from value into html equivalent
995 converts tags from value into html equivalent
996 """
996 """
997 if not value:
997 if not value:
998 return ''
998 return ''
999
999
1000 html_value = value
1000 html_value = value
1001 tag_data = tags_paterns.get(tag_type)
1001 tag_data = tags_paterns.get(tag_type)
1002 if tag_data:
1002 if tag_data:
1003 pat, replace_html = tag_data
1003 pat, replace_html = tag_data
1004 # convert to plain `unicode` instead of a markup tag to be used in
1004 # convert to plain `unicode` instead of a markup tag to be used in
1005 # regex expressions. safe_unicode doesn't work here
1005 # regex expressions. safe_unicode doesn't work here
1006 html_value = pat.sub(replace_html, unicode(value))
1006 html_value = pat.sub(replace_html, unicode(value))
1007
1007
1008 return html_value
1008 return html_value
1009
1009
1010
1010
1011 def bool2icon(value):
1011 def bool2icon(value):
1012 """
1012 """
1013 Returns the boolean value of a given value, rendered as an html element
1013 Returns the boolean value of a given value, rendered as an html element
1014 with classes that represent icons
1014 with classes that represent icons
1015
1015
1016 :param value: given value to convert to html node
1016 :param value: given value to convert to html node
1017 """
1017 """
1018
1018
1019 if value: # does bool conversion
1019 if value: # does bool conversion
1020 return HTML.tag('i', class_="icon-true")
1020 return HTML.tag('i', class_="icon-true")
1021 else: # not true as bool
1021 else: # not true as bool
1022 return HTML.tag('i', class_="icon-false")
1022 return HTML.tag('i', class_="icon-false")
1023
1023
1024
1024
1025 #==============================================================================
1025 #==============================================================================
1026 # PERMS
1026 # PERMS
1027 #==============================================================================
1027 #==============================================================================
1028 from rhodecode.lib.auth import HasPermissionAny, HasPermissionAll, \
1028 from rhodecode.lib.auth import HasPermissionAny, HasPermissionAll, \
1029 HasRepoPermissionAny, HasRepoPermissionAll, HasRepoGroupPermissionAll, \
1029 HasRepoPermissionAny, HasRepoPermissionAll, HasRepoGroupPermissionAll, \
1030 HasRepoGroupPermissionAny, HasRepoPermissionAnyApi, get_csrf_token, \
1030 HasRepoGroupPermissionAny, HasRepoPermissionAnyApi, get_csrf_token, \
1031 csrf_token_key
1031 csrf_token_key
1032
1032
1033
1033
1034 #==============================================================================
1034 #==============================================================================
1035 # GRAVATAR URL
1035 # GRAVATAR URL
1036 #==============================================================================
1036 #==============================================================================
1037 class InitialsGravatar(object):
1037 class InitialsGravatar(object):
1038 def __init__(self, email_address, first_name, last_name, size=30,
1038 def __init__(self, email_address, first_name, last_name, size=30,
1039 background=None, text_color='#fff'):
1039 background=None, text_color='#fff'):
1040 self.size = size
1040 self.size = size
1041 self.first_name = first_name
1041 self.first_name = first_name
1042 self.last_name = last_name
1042 self.last_name = last_name
1043 self.email_address = email_address
1043 self.email_address = email_address
1044 self.background = background or self.str2color(email_address)
1044 self.background = background or self.str2color(email_address)
1045 self.text_color = text_color
1045 self.text_color = text_color
1046
1046
1047 def get_color_bank(self):
1047 def get_color_bank(self):
1048 """
1048 """
1049 returns a predefined list of colors that gravatars can use.
1049 returns a predefined list of colors that gravatars can use.
1050 Those are randomized distinct colors that guarantee readability and
1050 Those are randomized distinct colors that guarantee readability and
1051 uniqueness.
1051 uniqueness.
1052
1052
1053 generated with: http://phrogz.net/css/distinct-colors.html
1053 generated with: http://phrogz.net/css/distinct-colors.html
1054 """
1054 """
1055 return [
1055 return [
1056 '#bf3030', '#a67f53', '#00ff00', '#5989b3', '#392040', '#d90000',
1056 '#bf3030', '#a67f53', '#00ff00', '#5989b3', '#392040', '#d90000',
1057 '#402910', '#204020', '#79baf2', '#a700b3', '#bf6060', '#7f5320',
1057 '#402910', '#204020', '#79baf2', '#a700b3', '#bf6060', '#7f5320',
1058 '#008000', '#003059', '#ee00ff', '#ff0000', '#8c4b00', '#007300',
1058 '#008000', '#003059', '#ee00ff', '#ff0000', '#8c4b00', '#007300',
1059 '#005fb3', '#de73e6', '#ff4040', '#ffaa00', '#3df255', '#203140',
1059 '#005fb3', '#de73e6', '#ff4040', '#ffaa00', '#3df255', '#203140',
1060 '#47004d', '#591616', '#664400', '#59b365', '#0d2133', '#83008c',
1060 '#47004d', '#591616', '#664400', '#59b365', '#0d2133', '#83008c',
1061 '#592d2d', '#bf9f60', '#73e682', '#1d3f73', '#73006b', '#402020',
1061 '#592d2d', '#bf9f60', '#73e682', '#1d3f73', '#73006b', '#402020',
1062 '#b2862d', '#397341', '#597db3', '#e600d6', '#a60000', '#736039',
1062 '#b2862d', '#397341', '#597db3', '#e600d6', '#a60000', '#736039',
1063 '#00b318', '#79aaf2', '#330d30', '#ff8080', '#403010', '#16591f',
1063 '#00b318', '#79aaf2', '#330d30', '#ff8080', '#403010', '#16591f',
1064 '#002459', '#8c4688', '#e50000', '#ffbf40', '#00732e', '#102340',
1064 '#002459', '#8c4688', '#e50000', '#ffbf40', '#00732e', '#102340',
1065 '#bf60ac', '#8c4646', '#cc8800', '#00a642', '#1d3473', '#b32d98',
1065 '#bf60ac', '#8c4646', '#cc8800', '#00a642', '#1d3473', '#b32d98',
1066 '#660e00', '#ffd580', '#80ffb2', '#7391e6', '#733967', '#d97b6c',
1066 '#660e00', '#ffd580', '#80ffb2', '#7391e6', '#733967', '#d97b6c',
1067 '#8c5e00', '#59b389', '#3967e6', '#590047', '#73281d', '#665200',
1067 '#8c5e00', '#59b389', '#3967e6', '#590047', '#73281d', '#665200',
1068 '#00e67a', '#2d50b3', '#8c2377', '#734139', '#b2982d', '#16593a',
1068 '#00e67a', '#2d50b3', '#8c2377', '#734139', '#b2982d', '#16593a',
1069 '#001859', '#ff00aa', '#a65e53', '#ffcc00', '#0d3321', '#2d3959',
1069 '#001859', '#ff00aa', '#a65e53', '#ffcc00', '#0d3321', '#2d3959',
1070 '#731d56', '#401610', '#4c3d00', '#468c6c', '#002ca6', '#d936a3',
1070 '#731d56', '#401610', '#4c3d00', '#468c6c', '#002ca6', '#d936a3',
1071 '#d94c36', '#403920', '#36d9a3', '#0d1733', '#592d4a', '#993626',
1071 '#d94c36', '#403920', '#36d9a3', '#0d1733', '#592d4a', '#993626',
1072 '#cca300', '#00734d', '#46598c', '#8c005e', '#7f1100', '#8c7000',
1072 '#cca300', '#00734d', '#46598c', '#8c005e', '#7f1100', '#8c7000',
1073 '#00a66f', '#7382e6', '#b32d74', '#d9896c', '#ffe680', '#1d7362',
1073 '#00a66f', '#7382e6', '#b32d74', '#d9896c', '#ffe680', '#1d7362',
1074 '#364cd9', '#73003d', '#d93a00', '#998a4d', '#59b3a1', '#5965b3',
1074 '#364cd9', '#73003d', '#d93a00', '#998a4d', '#59b3a1', '#5965b3',
1075 '#e5007a', '#73341d', '#665f00', '#00b38f', '#0018b3', '#59163a',
1075 '#e5007a', '#73341d', '#665f00', '#00b38f', '#0018b3', '#59163a',
1076 '#b2502d', '#bfb960', '#00ffcc', '#23318c', '#a6537f', '#734939',
1076 '#b2502d', '#bfb960', '#00ffcc', '#23318c', '#a6537f', '#734939',
1077 '#b2a700', '#104036', '#3d3df2', '#402031', '#e56739', '#736f39',
1077 '#b2a700', '#104036', '#3d3df2', '#402031', '#e56739', '#736f39',
1078 '#79f2ea', '#000059', '#401029', '#4c1400', '#ffee00', '#005953',
1078 '#79f2ea', '#000059', '#401029', '#4c1400', '#ffee00', '#005953',
1079 '#101040', '#990052', '#402820', '#403d10', '#00ffee', '#0000d9',
1079 '#101040', '#990052', '#402820', '#403d10', '#00ffee', '#0000d9',
1080 '#ff80c4', '#a66953', '#eeff00', '#00ccbe', '#8080ff', '#e673a1',
1080 '#ff80c4', '#a66953', '#eeff00', '#00ccbe', '#8080ff', '#e673a1',
1081 '#a62c00', '#474d00', '#1a3331', '#46468c', '#733950', '#662900',
1081 '#a62c00', '#474d00', '#1a3331', '#46468c', '#733950', '#662900',
1082 '#858c23', '#238c85', '#0f0073', '#b20047', '#d9986c', '#becc00',
1082 '#858c23', '#238c85', '#0f0073', '#b20047', '#d9986c', '#becc00',
1083 '#396f73', '#281d73', '#ff0066', '#ff6600', '#dee673', '#59adb3',
1083 '#396f73', '#281d73', '#ff0066', '#ff6600', '#dee673', '#59adb3',
1084 '#6559b3', '#590024', '#b2622d', '#98b32d', '#36ced9', '#332d59',
1084 '#6559b3', '#590024', '#b2622d', '#98b32d', '#36ced9', '#332d59',
1085 '#40001a', '#733f1d', '#526600', '#005359', '#242040', '#bf6079',
1085 '#40001a', '#733f1d', '#526600', '#005359', '#242040', '#bf6079',
1086 '#735039', '#cef23d', '#007780', '#5630bf', '#66001b', '#b24700',
1086 '#735039', '#cef23d', '#007780', '#5630bf', '#66001b', '#b24700',
1087 '#acbf60', '#1d6273', '#25008c', '#731d34', '#a67453', '#50592d',
1087 '#acbf60', '#1d6273', '#25008c', '#731d34', '#a67453', '#50592d',
1088 '#00ccff', '#6600ff', '#ff0044', '#4c1f00', '#8a994d', '#79daf2',
1088 '#00ccff', '#6600ff', '#ff0044', '#4c1f00', '#8a994d', '#79daf2',
1089 '#a173e6', '#d93662', '#402310', '#aaff00', '#2d98b3', '#8c40ff',
1089 '#a173e6', '#d93662', '#402310', '#aaff00', '#2d98b3', '#8c40ff',
1090 '#592d39', '#ff8c40', '#354020', '#103640', '#1a0040', '#331a20',
1090 '#592d39', '#ff8c40', '#354020', '#103640', '#1a0040', '#331a20',
1091 '#331400', '#334d00', '#1d5673', '#583973', '#7f0022', '#4c3626',
1091 '#331400', '#334d00', '#1d5673', '#583973', '#7f0022', '#4c3626',
1092 '#88cc00', '#36a3d9', '#3d0073', '#d9364c', '#33241a', '#698c23',
1092 '#88cc00', '#36a3d9', '#3d0073', '#d9364c', '#33241a', '#698c23',
1093 '#5995b3', '#300059', '#e57382', '#7f3300', '#366600', '#00aaff',
1093 '#5995b3', '#300059', '#e57382', '#7f3300', '#366600', '#00aaff',
1094 '#3a1659', '#733941', '#663600', '#74b32d', '#003c59', '#7f53a6',
1094 '#3a1659', '#733941', '#663600', '#74b32d', '#003c59', '#7f53a6',
1095 '#73000f', '#ff8800', '#baf279', '#79caf2', '#291040', '#a6293a',
1095 '#73000f', '#ff8800', '#baf279', '#79caf2', '#291040', '#a6293a',
1096 '#b2742d', '#587339', '#0077b3', '#632699', '#400009', '#d9a66c',
1096 '#b2742d', '#587339', '#0077b3', '#632699', '#400009', '#d9a66c',
1097 '#294010', '#2d4a59', '#aa00ff', '#4c131b', '#b25f00', '#5ce600',
1097 '#294010', '#2d4a59', '#aa00ff', '#4c131b', '#b25f00', '#5ce600',
1098 '#267399', '#a336d9', '#990014', '#664e33', '#86bf60', '#0088ff',
1098 '#267399', '#a336d9', '#990014', '#664e33', '#86bf60', '#0088ff',
1099 '#7700b3', '#593a16', '#073300', '#1d4b73', '#ac60bf', '#e59539',
1099 '#7700b3', '#593a16', '#073300', '#1d4b73', '#ac60bf', '#e59539',
1100 '#4f8c46', '#368dd9', '#5c0073'
1100 '#4f8c46', '#368dd9', '#5c0073'
1101 ]
1101 ]
1102
1102
1103 def rgb_to_hex_color(self, rgb_tuple):
1103 def rgb_to_hex_color(self, rgb_tuple):
1104 """
1104 """
1105 Converts the rgb_tuple passed in to a hex color.
1105 Converts the rgb_tuple passed in to a hex color.
1106
1106
1107 :param rgb_tuple: tuple of 3 ints representing the rgb color space
1107 :param rgb_tuple: tuple of 3 ints representing the rgb color space
1108 """
1108 """
1109 return '#' + ("".join(map(chr, rgb_tuple)).encode('hex'))
1109 return '#' + ("".join(map(chr, rgb_tuple)).encode('hex'))
1110
1110
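# Worked example (Python 2 only, since str.encode('hex') is used):
# rgb_to_hex_color((121, 134, 203)) -> bytes 0x79, 0x86, 0xcb -> '#7986cb'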
1111 def email_to_int_list(self, email_str):
1111 def email_to_int_list(self, email_str):
1112 """
1112 """
1113 Get every byte of the hex digest value of the email and turn it into an integer.
1113 Get every byte of the hex digest value of the email and turn it into an integer.
1114 Each value is always between 0-255
1114 Each value is always between 0-255
1115 """
1115 """
1116 digest = md5_safe(email_str.lower())
1116 digest = md5_safe(email_str.lower())
1117 return [int(digest[i * 2:i * 2 + 2], 16) for i in range(16)]
1117 return [int(digest[i * 2:i * 2 + 2], 16) for i in range(16)]
1118
1118
1119 def pick_color_bank_index(self, email_str, color_bank):
1119 def pick_color_bank_index(self, email_str, color_bank):
1120 return self.email_to_int_list(email_str)[0] % len(color_bank)
1120 return self.email_to_int_list(email_str)[0] % len(color_bank)
1121
1121
1122 def str2color(self, email_str):
1122 def str2color(self, email_str):
1123 """
1123 """
1124 Tries to map an email to a color using a stable algorithm
1124 Tries to map an email to a color using a stable algorithm
1125
1125
1126 :param email_str:
1126 :param email_str:
1127 """
1127 """
1128 color_bank = self.get_color_bank()
1128 color_bank = self.get_color_bank()
1129 # pick a position (modulo its length so we always find it in the
1129 # pick a position (modulo its length so we always find it in the
1130 # bank even if it's smaller than 256 values)
1130 # bank even if it's smaller than 256 values)
1131 pos = self.pick_color_bank_index(email_str, color_bank)
1131 pos = self.pick_color_bank_index(email_str, color_bank)
1132 return color_bank[pos]
1132 return color_bank[pos]
1133
1133
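# Illustrative derivation of the stable email -> color mapping above:
#   1. md5_safe(email.lower()) yields a 32-char hex digest
#   2. the first byte of that digest (0-255) is taken
#   3. that byte modulo len(color_bank) indexes into the bank above,
#      so the same email always yields the same color
# e.g. if the digest starts with '2a' (42) and the bank holds 256 colors,
# the color at index 42 is used.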
1134 def normalize_email(self, email_address):
1134 def normalize_email(self, email_address):
1135 import unicodedata
1135 import unicodedata
1136 # default host used to fill in the fake/missing email
1136 # default host used to fill in the fake/missing email
1137 default_host = u'localhost'
1137 default_host = u'localhost'
1138
1138
1139 if not email_address:
1139 if not email_address:
1140 email_address = u'%s@%s' % (User.DEFAULT_USER, default_host)
1140 email_address = u'%s@%s' % (User.DEFAULT_USER, default_host)
1141
1141
1142 email_address = safe_unicode(email_address)
1142 email_address = safe_unicode(email_address)
1143
1143
1144 if u'@' not in email_address:
1144 if u'@' not in email_address:
1145 email_address = u'%s@%s' % (email_address, default_host)
1145 email_address = u'%s@%s' % (email_address, default_host)
1146
1146
1147 if email_address.endswith(u'@'):
1147 if email_address.endswith(u'@'):
1148 email_address = u'%s%s' % (email_address, default_host)
1148 email_address = u'%s%s' % (email_address, default_host)
1149
1149
1150 email_address = unicodedata.normalize('NFKD', email_address)\
1150 email_address = unicodedata.normalize('NFKD', email_address)\
1151 .encode('ascii', 'ignore')
1151 .encode('ascii', 'ignore')
1152 return email_address
1152 return email_address
1153
1153
1154 def get_initials(self):
1154 def get_initials(self):
1155 """
1155 """
1156 Returns 2 letter initials calculated based on the input.
1156 Returns 2 letter initials calculated based on the input.
1157 The algorithm picks first given email address, and takes first letter
1157 The algorithm picks first given email address, and takes first letter
1158 of part before @, and then the first letter of server name. In case
1158 of part before @, and then the first letter of server name. In case
1159 the part before @ is in a format of `somestring.somestring2` it replaces
1159 the part before @ is in a format of `somestring.somestring2` it replaces
1160 the server letter with first letter of somestring2
1160 the server letter with first letter of somestring2
1161
1161
1162 In case the function was initialized with both first and last name, this
1162 In case the function was initialized with both first and last name, this
1163 overrides the extraction from email with the first letters of the first and
1163 overrides the extraction from email with the first letters of the first and
1164 last name. Special logic is added for compound full names such as
1164 last name. Special logic is added for compound full names such as
1165 Guido Von Rossum: we use the last part of the last name (here `Rossum`),
1165 Guido Von Rossum: we use the last part of the last name (here `Rossum`),
1166 picking `R`.
1166 picking `R`.
1167
1167
1168 The function also normalizes non-ascii characters to their ascii
1168 The function also normalizes non-ascii characters to their ascii
1169 representation, eg Ą => A
1169 representation, eg Ą => A
1170 """
1170 """
1171 import unicodedata
1171 import unicodedata
1172 # replace non-ascii to ascii
1172 # replace non-ascii to ascii
1173 first_name = unicodedata.normalize(
1173 first_name = unicodedata.normalize(
1174 'NFKD', safe_unicode(self.first_name)).encode('ascii', 'ignore')
1174 'NFKD', safe_unicode(self.first_name)).encode('ascii', 'ignore')
1175 last_name = unicodedata.normalize(
1175 last_name = unicodedata.normalize(
1176 'NFKD', safe_unicode(self.last_name)).encode('ascii', 'ignore')
1176 'NFKD', safe_unicode(self.last_name)).encode('ascii', 'ignore')
1177
1177
1178 # do NFKD encoding, and also make sure email has proper format
1178 # do NFKD encoding, and also make sure email has proper format
1179 email_address = self.normalize_email(self.email_address)
1179 email_address = self.normalize_email(self.email_address)
1180
1180
1181 # first push the email initials
1181 # first push the email initials
1182 prefix, server = email_address.split('@', 1)
1182 prefix, server = email_address.split('@', 1)
1183
1183
1184 # check if prefix is maybe a 'first_name.last_name' syntax
1184 # check if prefix is maybe a 'first_name.last_name' syntax
1185 _dot_split = prefix.rsplit('.', 1)
1185 _dot_split = prefix.rsplit('.', 1)
1186 if len(_dot_split) == 2 and _dot_split[1]:
1186 if len(_dot_split) == 2 and _dot_split[1]:
1187 initials = [_dot_split[0][0], _dot_split[1][0]]
1187 initials = [_dot_split[0][0], _dot_split[1][0]]
1188 else:
1188 else:
1189 initials = [prefix[0], server[0]]
1189 initials = [prefix[0], server[0]]
1190
1190
1191 # then try to replace either first_name or last_name
1191 # then try to replace either first_name or last_name
1192 fn_letter = (first_name or " ")[0].strip()
1192 fn_letter = (first_name or " ")[0].strip()
1193 ln_letter = (last_name.split(' ', 1)[-1] or " ")[0].strip()
1193 ln_letter = (last_name.split(' ', 1)[-1] or " ")[0].strip()
1194
1194
1195 if fn_letter:
1195 if fn_letter:
1196 initials[0] = fn_letter
1196 initials[0] = fn_letter
1197
1197
1198 if ln_letter:
1198 if ln_letter:
1199 initials[1] = ln_letter
1199 initials[1] = ln_letter
1200
1200
1201 return ''.join(initials).upper()
1201 return ''.join(initials).upper()
1202
1202
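# Worked examples of `get_initials` (values made up for illustration):
#   email 'john.doe@example.com' with no first/last name -> 'JD'
#     (the prefix 'john.doe' splits on its last '.', giving 'j' + 'd')
#   email 'guido@python.org' with first_name='Guido', last_name='Von Rossum'
#     -> 'GR' (the name letters override the email ones; the compound last
#     name contributes the 'R' from 'Rossum')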
1203 def get_img_data_by_type(self, font_family, img_type):
1203 def get_img_data_by_type(self, font_family, img_type):
1204 default_user = """
1204 default_user = """
1205 <svg xmlns="http://www.w3.org/2000/svg"
1205 <svg xmlns="http://www.w3.org/2000/svg"
1206 version="1.1" x="0px" y="0px" width="{size}" height="{size}"
1206 version="1.1" x="0px" y="0px" width="{size}" height="{size}"
1207 viewBox="-15 -10 439.165 429.164"
1207 viewBox="-15 -10 439.165 429.164"
1208
1208
1209 xml:space="preserve"
1209 xml:space="preserve"
1210 style="background:{background};" >
1210 style="background:{background};" >
1211
1211
1212 <path d="M204.583,216.671c50.664,0,91.74-48.075,
1212 <path d="M204.583,216.671c50.664,0,91.74-48.075,
1213 91.74-107.378c0-82.237-41.074-107.377-91.74-107.377
1213 91.74-107.378c0-82.237-41.074-107.377-91.74-107.377
1214 c-50.668,0-91.74,25.14-91.74,107.377C112.844,
1214 c-50.668,0-91.74,25.14-91.74,107.377C112.844,
1215 168.596,153.916,216.671,
1215 168.596,153.916,216.671,
1216 204.583,216.671z" fill="{text_color}"/>
1216 204.583,216.671z" fill="{text_color}"/>
1217 <path d="M407.164,374.717L360.88,
1217 <path d="M407.164,374.717L360.88,
1218 270.454c-2.117-4.771-5.836-8.728-10.465-11.138l-71.83-37.392
1218 270.454c-2.117-4.771-5.836-8.728-10.465-11.138l-71.83-37.392
1219 c-1.584-0.823-3.502-0.663-4.926,0.415c-20.316,
1219 c-1.584-0.823-3.502-0.663-4.926,0.415c-20.316,
1220 15.366-44.203,23.488-69.076,23.488c-24.877,
1220 15.366-44.203,23.488-69.076,23.488c-24.877,
1221 0-48.762-8.122-69.078-23.488
1221 0-48.762-8.122-69.078-23.488
1222 c-1.428-1.078-3.346-1.238-4.93-0.415L58.75,
1222 c-1.428-1.078-3.346-1.238-4.93-0.415L58.75,
1223 259.316c-4.631,2.41-8.346,6.365-10.465,11.138L2.001,374.717
1223 259.316c-4.631,2.41-8.346,6.365-10.465,11.138L2.001,374.717
1224 c-3.191,7.188-2.537,15.412,1.75,22.005c4.285,
1224 c-3.191,7.188-2.537,15.412,1.75,22.005c4.285,
1225 6.592,11.537,10.526,19.4,10.526h362.861c7.863,0,15.117-3.936,
1225 6.592,11.537,10.526,19.4,10.526h362.861c7.863,0,15.117-3.936,
1226 19.402-10.527 C409.699,390.129,
1226 19.402-10.527 C409.699,390.129,
1227 410.355,381.902,407.164,374.717z" fill="{text_color}"/>
1227 410.355,381.902,407.164,374.717z" fill="{text_color}"/>
1228 </svg>""".format(
1228 </svg>""".format(
1229 size=self.size,
1229 size=self.size,
1230 background='#979797', # @grey4
1230 background='#979797', # @grey4
1231 text_color=self.text_color,
1231 text_color=self.text_color,
1232 font_family=font_family)
1232 font_family=font_family)
1233
1233
1234 return {
1234 return {
1235 "default_user": default_user
1235 "default_user": default_user
1236 }[img_type]
1236 }[img_type]
1237
1237
1238 def get_img_data(self, svg_type=None):
1238 def get_img_data(self, svg_type=None):
1239 """
1239 """
1240 generates the svg markup for the image
1240 generates the svg markup for the image
1241 """
1241 """
1242
1242
1243 font_family = ','.join([
1243 font_family = ','.join([
1244 'proximanovaregular',
1244 'proximanovaregular',
1245 'Proxima Nova Regular',
1245 'Proxima Nova Regular',
1246 'Proxima Nova',
1246 'Proxima Nova',
1247 'Arial',
1247 'Arial',
1248 'Lucida Grande',
1248 'Lucida Grande',
1249 'sans-serif'
1249 'sans-serif'
1250 ])
1250 ])
1251 if svg_type:
1251 if svg_type:
1252 return self.get_img_data_by_type(font_family, svg_type)
1252 return self.get_img_data_by_type(font_family, svg_type)
1253
1253
1254 initials = self.get_initials()
1254 initials = self.get_initials()
1255 img_data = """
1255 img_data = """
1256 <svg xmlns="http://www.w3.org/2000/svg" pointer-events="none"
1256 <svg xmlns="http://www.w3.org/2000/svg" pointer-events="none"
1257 width="{size}" height="{size}"
1257 width="{size}" height="{size}"
1258 style="width: 100%; height: 100%; background-color: {background}"
1258 style="width: 100%; height: 100%; background-color: {background}"
1259 viewBox="0 0 {size} {size}">
1259 viewBox="0 0 {size} {size}">
1260 <text text-anchor="middle" y="50%" x="50%" dy="0.35em"
1260 <text text-anchor="middle" y="50%" x="50%" dy="0.35em"
1261 pointer-events="auto" fill="{text_color}"
1261 pointer-events="auto" fill="{text_color}"
1262 font-family="{font_family}"
1262 font-family="{font_family}"
1263 style="font-weight: 400; font-size: {f_size}px;">{text}
1263 style="font-weight: 400; font-size: {f_size}px;">{text}
1264 </text>
1264 </text>
1265 </svg>""".format(
1265 </svg>""".format(
1266 size=self.size,
1266 size=self.size,
1267 f_size=self.size/1.85, # scale the text inside the box nicely
1267 f_size=self.size/1.85, # scale the text inside the box nicely
1268 background=self.background,
1268 background=self.background,
1269 text_color=self.text_color,
1269 text_color=self.text_color,
1270 text=initials.upper(),
1270 text=initials.upper(),
1271 font_family=font_family)
1271 font_family=font_family)
1272
1272
1273 return img_data
1273 return img_data
1274
1274
1275 def generate_svg(self, svg_type=None):
1275 def generate_svg(self, svg_type=None):
1276 img_data = self.get_img_data(svg_type)
1276 img_data = self.get_img_data(svg_type)
1277 return "data:image/svg+xml;base64,%s" % img_data.encode('base64')
1277 return "data:image/svg+xml;base64,%s" % img_data.encode('base64')
1278
1278
1279
1279
1280 def initials_gravatar(email_address, first_name, last_name, size=30):
1280 def initials_gravatar(email_address, first_name, last_name, size=30):
1281 svg_type = None
1281 svg_type = None
1282 if email_address == User.DEFAULT_USER_EMAIL:
1282 if email_address == User.DEFAULT_USER_EMAIL:
1283 svg_type = 'default_user'
1283 svg_type = 'default_user'
1284 klass = InitialsGravatar(email_address, first_name, last_name, size)
1284 klass = InitialsGravatar(email_address, first_name, last_name, size)
1285 return klass.generate_svg(svg_type=svg_type)
1285 return klass.generate_svg(svg_type=svg_type)
1286
1286
1287
1287
1288 def gravatar_url(email_address, size=30, request=None):
1288 def gravatar_url(email_address, size=30, request=None):
1289 request = get_current_request()
1289 request = get_current_request()
1290 if request and hasattr(request, 'call_context'):
1290 if request and hasattr(request, 'call_context'):
1291 _use_gravatar = request.call_context.visual.use_gravatar
1291 _use_gravatar = request.call_context.visual.use_gravatar
1292 _gravatar_url = request.call_context.visual.gravatar_url
1292 _gravatar_url = request.call_context.visual.gravatar_url
1293 else:
1293 else:
1294 # doh, we need to re-import those to mock it later
1294 # doh, we need to re-import those to mock it later
1295 from pylons import tmpl_context as c
1295 from pylons import tmpl_context as c
1296
1296
1297 _use_gravatar = c.visual.use_gravatar
1297 _use_gravatar = c.visual.use_gravatar
1298 _gravatar_url = c.visual.gravatar_url
1298 _gravatar_url = c.visual.gravatar_url
1299
1299
1300 _gravatar_url = _gravatar_url or User.DEFAULT_GRAVATAR_URL
1300 _gravatar_url = _gravatar_url or User.DEFAULT_GRAVATAR_URL
1301
1301
1302 email_address = email_address or User.DEFAULT_USER_EMAIL
1302 email_address = email_address or User.DEFAULT_USER_EMAIL
1303 if isinstance(email_address, unicode):
1303 if isinstance(email_address, unicode):
1304 # hashlib crashes on unicode items
1304 # hashlib crashes on unicode items
1305 email_address = safe_str(email_address)
1305 email_address = safe_str(email_address)
1306
1306
1307 # empty email or default user
1307 # empty email or default user
1308 if not email_address or email_address == User.DEFAULT_USER_EMAIL:
1308 if not email_address or email_address == User.DEFAULT_USER_EMAIL:
1309 return initials_gravatar(User.DEFAULT_USER_EMAIL, '', '', size=size)
1309 return initials_gravatar(User.DEFAULT_USER_EMAIL, '', '', size=size)
1310
1310
1311 if _use_gravatar:
1311 if _use_gravatar:
1312 # TODO: Disuse pyramid thread locals. Think about another solution to
1312 # TODO: Disuse pyramid thread locals. Think about another solution to
1313 # get the host and schema here.
1313 # get the host and schema here.
1314 request = get_current_request()
1314 request = get_current_request()
1315 tmpl = safe_str(_gravatar_url)
1315 tmpl = safe_str(_gravatar_url)
1316 tmpl = tmpl.replace('{email}', email_address)\
1316 tmpl = tmpl.replace('{email}', email_address)\
1317 .replace('{md5email}', md5_safe(email_address.lower())) \
1317 .replace('{md5email}', md5_safe(email_address.lower())) \
1318 .replace('{netloc}', request.host)\
1318 .replace('{netloc}', request.host)\
1319 .replace('{scheme}', request.scheme)\
1319 .replace('{scheme}', request.scheme)\
1320 .replace('{size}', safe_str(size))
1320 .replace('{size}', safe_str(size))
1321 return tmpl
1321 return tmpl
1322 else:
1322 else:
1323 return initials_gravatar(email_address, '', '', size=size)
1323 return initials_gravatar(email_address, '', '', size=size)
1324
1324
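# Illustrative sketch of the template substitution above, assuming a typical
# gravatar template such as
#   'https://secure.gravatar.com/avatar/{md5email}?d=identicon&s={size}'
# For email 'jane@example.com' and size 30, {md5email} and {size} are replaced
# with md5_safe('jane@example.com') and '30'; {scheme} and {netloc} come from
# the current request. With use_gravatar disabled, the initials SVG from
# `initials_gravatar` is returned instead.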
1325
1325
1326 class Page(_Page):
1326 class Page(_Page):
1327 """
1327 """
1328 Custom pager to match rendering style with paginator
1328 Custom pager to match rendering style with paginator
1329 """
1329 """
1330
1330
1331 def _get_pos(self, cur_page, max_page, items):
1331 def _get_pos(self, cur_page, max_page, items):
1332 edge = (items / 2) + 1
1332 edge = (items / 2) + 1
1333 if (cur_page <= edge):
1333 if (cur_page <= edge):
1334 radius = max(items / 2, items - cur_page)
1334 radius = max(items / 2, items - cur_page)
1335 elif (max_page - cur_page) < edge:
1335 elif (max_page - cur_page) < edge:
1336 radius = (items - 1) - (max_page - cur_page)
1336 radius = (items - 1) - (max_page - cur_page)
1337 else:
1337 else:
1338 radius = items / 2
1338 radius = items / 2
1339
1339
1340 left = max(1, (cur_page - (radius)))
1340 left = max(1, (cur_page - (radius)))
1341 right = min(max_page, cur_page + (radius))
1341 right = min(max_page, cur_page + (radius))
1342 return left, cur_page, right
1342 return left, cur_page, right
1343
1343
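# Worked example (Python 2 integer division): _get_pos(7, 12, 5)
#   edge = 5 / 2 + 1 = 3; 7 > 3 and (12 - 7) >= 3, so radius = 5 / 2 = 2
#   left = max(1, 7 - 2) = 5, right = min(12, 7 + 2) = 9 -> (5, 7, 9)
# which corresponds to the '1 .. 5 6 [7] 8 9 .. 12' rendering described below.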
1344 def _range(self, regexp_match):
1344 def _range(self, regexp_match):
1345 """
1345 """
1346 Return range of linked pages (e.g. '1 2 [3] 4 5 6 7 8').
1346 Return range of linked pages (e.g. '1 2 [3] 4 5 6 7 8').
1347
1347
1348 Arguments:
1348 Arguments:
1349
1349
1350 regexp_match
1350 regexp_match
1351 A "re" (regular expressions) match object containing the
1351 A "re" (regular expressions) match object containing the
1352 radius of linked pages around the current page in
1352 radius of linked pages around the current page in
1353 regexp_match.group(1) as a string
1353 regexp_match.group(1) as a string
1354
1354
1355 This function is supposed to be called as a callable in
1355 This function is supposed to be called as a callable in
1356 re.sub.
1356 re.sub.
1357
1357
1358 """
1358 """
1359 radius = int(regexp_match.group(1))
1359 radius = int(regexp_match.group(1))
1360
1360
1361 # Compute the first and last page number within the radius
1361 # Compute the first and last page number within the radius
1362 # e.g. '1 .. 5 6 [7] 8 9 .. 12'
1362 # e.g. '1 .. 5 6 [7] 8 9 .. 12'
1363 # -> leftmost_page = 5
1363 # -> leftmost_page = 5
1364 # -> rightmost_page = 9
1364 # -> rightmost_page = 9
1365 leftmost_page, _cur, rightmost_page = self._get_pos(self.page,
1365 leftmost_page, _cur, rightmost_page = self._get_pos(self.page,
1366 self.last_page,
1366 self.last_page,
1367 (radius * 2) + 1)
1367 (radius * 2) + 1)
1368 nav_items = []
1368 nav_items = []
1369
1369
1370 # Create a link to the first page (unless we are on the first page
1370 # Create a link to the first page (unless we are on the first page
1371 # or there would be no need to insert '..' spacers)
1371 # or there would be no need to insert '..' spacers)
1372 if self.page != self.first_page and self.first_page < leftmost_page:
1372 if self.page != self.first_page and self.first_page < leftmost_page:
1373 nav_items.append(self._pagerlink(self.first_page, self.first_page))
1373 nav_items.append(self._pagerlink(self.first_page, self.first_page))
1374
1374
1375 # Insert dots if there are pages between the first page
1375 # Insert dots if there are pages between the first page
1376 # and the currently displayed page range
1376 # and the currently displayed page range
1377 if leftmost_page - self.first_page > 1:
1377 if leftmost_page - self.first_page > 1:
1378 # Wrap in a SPAN tag if nolink_attr is set
1378 # Wrap in a SPAN tag if nolink_attr is set
1379 text = '..'
1379 text = '..'
1380 if self.dotdot_attr:
1380 if self.dotdot_attr:
1381 text = HTML.span(c=text, **self.dotdot_attr)
1381 text = HTML.span(c=text, **self.dotdot_attr)
1382 nav_items.append(text)
1382 nav_items.append(text)
1383
1383
1384 for thispage in xrange(leftmost_page, rightmost_page + 1):
1384 for thispage in xrange(leftmost_page, rightmost_page + 1):
1385 # Highlight the current page number and do not use a link
1385 # Highlight the current page number and do not use a link
1386 if thispage == self.page:
1386 if thispage == self.page:
1387 text = '%s' % (thispage,)
1387 text = '%s' % (thispage,)
1388 # Wrap in a SPAN tag if nolink_attr is set
1388 # Wrap in a SPAN tag if nolink_attr is set
1389 if self.curpage_attr:
1389 if self.curpage_attr:
1390 text = HTML.span(c=text, **self.curpage_attr)
1390 text = HTML.span(c=text, **self.curpage_attr)
1391 nav_items.append(text)
1391 nav_items.append(text)
1392 # Otherwise create just a link to that page
1392 # Otherwise create just a link to that page
1393 else:
1393 else:
1394 text = '%s' % (thispage,)
1394 text = '%s' % (thispage,)
1395 nav_items.append(self._pagerlink(thispage, text))
1395 nav_items.append(self._pagerlink(thispage, text))
1396
1396
1397 # Insert dots if there are pages between the displayed
1397 # Insert dots if there are pages between the displayed
1398 # page numbers and the end of the page range
1398 # page numbers and the end of the page range
1399 if self.last_page - rightmost_page > 1:
1399 if self.last_page - rightmost_page > 1:
1400 text = '..'
1400 text = '..'
1401 # Wrap in a SPAN tag if nolink_attr is set
1401 # Wrap in a SPAN tag if nolink_attr is set
1402 if self.dotdot_attr:
1402 if self.dotdot_attr:
1403 text = HTML.span(c=text, **self.dotdot_attr)
1403 text = HTML.span(c=text, **self.dotdot_attr)
1404 nav_items.append(text)
1404 nav_items.append(text)
1405
1405
1406 # Create a link to the very last page (unless we are on the last
1406 # Create a link to the very last page (unless we are on the last
1407 # page or there would be no need to insert '..' spacers)
1407 # page or there would be no need to insert '..' spacers)
1408 if self.page != self.last_page and rightmost_page < self.last_page:
1408 if self.page != self.last_page and rightmost_page < self.last_page:
1409 nav_items.append(self._pagerlink(self.last_page, self.last_page))
1409 nav_items.append(self._pagerlink(self.last_page, self.last_page))
1410
1410
1411 ## prerender links
1411 ## prerender links
1412 #_page_link = url.current()
1412 #_page_link = url.current()
1413 #nav_items.append(literal('<link rel="prerender" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
1413 #nav_items.append(literal('<link rel="prerender" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
1414 #nav_items.append(literal('<link rel="prefetch" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
1414 #nav_items.append(literal('<link rel="prefetch" href="%s?page=%s">' % (_page_link, str(int(self.page)+1))))
1415 return self.separator.join(nav_items)
1415 return self.separator.join(nav_items)
1416
1416
1417 def pager(self, format='~2~', page_param='page', partial_param='partial',
1417 def pager(self, format='~2~', page_param='page', partial_param='partial',
1418 show_if_single_page=False, separator=' ', onclick=None,
1418 show_if_single_page=False, separator=' ', onclick=None,
1419 symbol_first='<<', symbol_last='>>',
1419 symbol_first='<<', symbol_last='>>',
1420 symbol_previous='<', symbol_next='>',
1420 symbol_previous='<', symbol_next='>',
1421 link_attr={'class': 'pager_link', 'rel': 'prerender'},
1421 link_attr={'class': 'pager_link', 'rel': 'prerender'},
1422 curpage_attr={'class': 'pager_curpage'},
1422 curpage_attr={'class': 'pager_curpage'},
1423 dotdot_attr={'class': 'pager_dotdot'}, **kwargs):
1423 dotdot_attr={'class': 'pager_dotdot'}, **kwargs):
1424
1424
1425 self.curpage_attr = curpage_attr
1425 self.curpage_attr = curpage_attr
1426 self.separator = separator
1426 self.separator = separator
1427 self.pager_kwargs = kwargs
1427 self.pager_kwargs = kwargs
1428 self.page_param = page_param
1428 self.page_param = page_param
1429 self.partial_param = partial_param
1429 self.partial_param = partial_param
1430 self.onclick = onclick
1430 self.onclick = onclick
1431 self.link_attr = link_attr
1431 self.link_attr = link_attr
1432 self.dotdot_attr = dotdot_attr
1432 self.dotdot_attr = dotdot_attr
1433
1433
1434 # Don't show navigator if there is no more than one page
1434 # Don't show navigator if there is no more than one page
1435 if self.page_count == 0 or (self.page_count == 1 and not show_if_single_page):
1435 if self.page_count == 0 or (self.page_count == 1 and not show_if_single_page):
1436 return ''
1436 return ''
1437
1437
1438 from string import Template
1438 from string import Template
1439 # Replace ~...~ in token format by range of pages
1439 # Replace ~...~ in token format by range of pages
1440 result = re.sub(r'~(\d+)~', self._range, format)
1440 result = re.sub(r'~(\d+)~', self._range, format)
1441
1441
1442 # Interpolate '%' variables
1442 # Interpolate '%' variables
1443 result = Template(result).safe_substitute({
1443 result = Template(result).safe_substitute({
1444 'first_page': self.first_page,
1444 'first_page': self.first_page,
1445 'last_page': self.last_page,
1445 'last_page': self.last_page,
1446 'page': self.page,
1446 'page': self.page,
1447 'page_count': self.page_count,
1447 'page_count': self.page_count,
1448 'items_per_page': self.items_per_page,
1448 'items_per_page': self.items_per_page,
1449 'first_item': self.first_item,
1449 'first_item': self.first_item,
1450 'last_item': self.last_item,
1450 'last_item': self.last_item,
1451 'item_count': self.item_count,
1451 'item_count': self.item_count,
1452 'link_first': self.page > self.first_page and \
1452 'link_first': self.page > self.first_page and \
1453 self._pagerlink(self.first_page, symbol_first) or '',
1453 self._pagerlink(self.first_page, symbol_first) or '',
1454 'link_last': self.page < self.last_page and \
1454 'link_last': self.page < self.last_page and \
1455 self._pagerlink(self.last_page, symbol_last) or '',
1455 self._pagerlink(self.last_page, symbol_last) or '',
1456 'link_previous': self.previous_page and \
1456 'link_previous': self.previous_page and \
1457 self._pagerlink(self.previous_page, symbol_previous) \
1457 self._pagerlink(self.previous_page, symbol_previous) \
1458 or HTML.span(symbol_previous, class_="pg-previous disabled"),
1458 or HTML.span(symbol_previous, class_="pg-previous disabled"),
1459 'link_next': self.next_page and \
1459 'link_next': self.next_page and \
1460 self._pagerlink(self.next_page, symbol_next) \
1460 self._pagerlink(self.next_page, symbol_next) \
1461 or HTML.span(symbol_next, class_="pg-next disabled")
1461 or HTML.span(symbol_next, class_="pg-next disabled")
1462 })
1462 })
1463
1463
1464 return literal(result)
1464 return literal(result)
1465
1465
1466
1466
1467 #==============================================================================
1467 #==============================================================================
1468 # REPO PAGER, PAGER FOR REPOSITORY
1468 # REPO PAGER, PAGER FOR REPOSITORY
1469 #==============================================================================
1469 #==============================================================================
1470 class RepoPage(Page):
1470 class RepoPage(Page):
1471
1471
1472 def __init__(self, collection, page=1, items_per_page=20,
1472 def __init__(self, collection, page=1, items_per_page=20,
1473 item_count=None, url=None, **kwargs):
1473 item_count=None, url=None, **kwargs):
1474
1474
1475 """Create a "RepoPage" instance. special pager for paging
1475 """Create a "RepoPage" instance. special pager for paging
1476 repository
1476 repository
1477 """
1477 """
1478 self._url_generator = url
1478 self._url_generator = url
1479
1479
1480 # Save the kwargs class-wide so they can be used in the pager() method
1480 # Save the kwargs class-wide so they can be used in the pager() method
1481 self.kwargs = kwargs
1481 self.kwargs = kwargs
1482
1482
1483 # Save a reference to the collection
1483 # Save a reference to the collection
1484 self.original_collection = collection
1484 self.original_collection = collection
1485
1485
1486 self.collection = collection
1486 self.collection = collection
1487
1487
1488 # The self.page is the number of the current page.
1488 # The self.page is the number of the current page.
1489 # The first page has the number 1!
1489 # The first page has the number 1!
1490 try:
1490 try:
1491 self.page = int(page) # make it int() if we get it as a string
1491 self.page = int(page) # make it int() if we get it as a string
1492 except (ValueError, TypeError):
1492 except (ValueError, TypeError):
1493 self.page = 1
1493 self.page = 1
1494
1494
1495 self.items_per_page = items_per_page
1495 self.items_per_page = items_per_page
1496
1496
1497 # Unless the user tells us how many items the collection has
1497 # Unless the user tells us how many items the collection has
1498 # we calculate that ourselves.
1498 # we calculate that ourselves.
1499 if item_count is not None:
1499 if item_count is not None:
1500 self.item_count = item_count
1500 self.item_count = item_count
1501 else:
1501 else:
1502 self.item_count = len(self.collection)
1502 self.item_count = len(self.collection)
1503
1503
1504 # Compute the number of the first and last available page
1504 # Compute the number of the first and last available page
1505 if self.item_count > 0:
1505 if self.item_count > 0:
1506 self.first_page = 1
1506 self.first_page = 1
1507 self.page_count = int(math.ceil(float(self.item_count) /
1507 self.page_count = int(math.ceil(float(self.item_count) /
1508 self.items_per_page))
1508 self.items_per_page))
1509 self.last_page = self.first_page + self.page_count - 1
1509 self.last_page = self.first_page + self.page_count - 1
1510
1510
1511 # Make sure that the requested page number is in the range of
1511 # Make sure that the requested page number is in the range of
1512 # valid pages
1512 # valid pages
1513 if self.page > self.last_page:
1513 if self.page > self.last_page:
1514 self.page = self.last_page
1514 self.page = self.last_page
1515 elif self.page < self.first_page:
1515 elif self.page < self.first_page:
1516 self.page = self.first_page
1516 self.page = self.first_page
1517
1517
1518 # Note: the number of items on this page can be less than
1518 # Note: the number of items on this page can be less than
1519 # items_per_page if the last page is not full
1519 # items_per_page if the last page is not full
1520 self.first_item = max(0, (self.item_count) - (self.page *
1520 self.first_item = max(0, (self.item_count) - (self.page *
1521 items_per_page))
1521 items_per_page))
1522 self.last_item = ((self.item_count - 1) - items_per_page *
1522 self.last_item = ((self.item_count - 1) - items_per_page *
1523 (self.page - 1))
1523 (self.page - 1))
1524
1524
1525 self.items = list(self.collection[self.first_item:self.last_item + 1])
1525 self.items = list(self.collection[self.first_item:self.last_item + 1])
1526
1526
1527 # Links to previous and next page
1527 # Links to previous and next page
1528 if self.page > self.first_page:
1528 if self.page > self.first_page:
1529 self.previous_page = self.page - 1
1529 self.previous_page = self.page - 1
1530 else:
1530 else:
1531 self.previous_page = None
1531 self.previous_page = None
1532
1532
1533 if self.page < self.last_page:
1533 if self.page < self.last_page:
1534 self.next_page = self.page + 1
1534 self.next_page = self.page + 1
1535 else:
1535 else:
1536 self.next_page = None
1536 self.next_page = None
1537
1537
1538 # No items available
1538 # No items available
1539 else:
1539 else:
1540 self.first_page = None
1540 self.first_page = None
1541 self.page_count = 0
1541 self.page_count = 0
1542 self.last_page = None
1542 self.last_page = None
1543 self.first_item = None
1543 self.first_item = None
1544 self.last_item = None
1544 self.last_item = None
1545 self.previous_page = None
1545 self.previous_page = None
1546 self.next_page = None
1546 self.next_page = None
1547 self.items = []
1547 self.items = []
1548
1548
1549 # This is a subclass of the 'list' type. Initialise the list now.
1549 # This is a subclass of the 'list' type. Initialise the list now.
1550 list.__init__(self, reversed(self.items))
1550 list.__init__(self, reversed(self.items))
1551
1551
1552
1552
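# Illustrative sketch of the slicing above with a plain list standing in for
# the commit collection (45 items, 20 per page, newest shown first):
#   page 1: first_item = max(0, 45 - 20) = 25, last_item = 44
#           -> collection[25:45], reversed
#   page 3: first_item = max(0, 45 - 60) = 0,  last_item = 44 - 40 = 4
#           -> collection[0:5], reversed (the short final page)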
1553 def breadcrumb_repo_link(repo):
1553 def breadcrumb_repo_link(repo):
1554 """
1554 """
1555 Makes a breadcrumbs path link to repo
1555 Makes a breadcrumbs path link to repo
1556
1556
1557 ex::
1557 ex::
1558 group >> subgroup >> repo
1558 group >> subgroup >> repo
1559
1559
1560 :param repo: a Repository instance
1560 :param repo: a Repository instance
1561 """
1561 """
1562
1562
1563 path = [
1563 path = [
1564 link_to(group.name, route_path('repo_group_home', repo_group_name=group.group_name))
1564 link_to(group.name, route_path('repo_group_home', repo_group_name=group.group_name))
1565 for group in repo.groups_with_parents
1565 for group in repo.groups_with_parents
1566 ] + [
1566 ] + [
1567 link_to(repo.just_name, route_path('repo_summary', repo_name=repo.repo_name))
1567 link_to(repo.just_name, route_path('repo_summary', repo_name=repo.repo_name))
1568 ]
1568 ]
1569
1569
1570 return literal(' &raquo; '.join(path))
1570 return literal(' &raquo; '.join(path))
1571
1571
1572
1572
1573 def format_byte_size_binary(file_size):
1573 def format_byte_size_binary(file_size):
1574 """
1574 """
1575 Formats file/folder sizes using binary (base-1024) units.
1575 Formats file/folder sizes using binary (base-1024) units.
1576 """
1576 """
1577 if file_size is None:
1577 if file_size is None:
1578 file_size = 0
1578 file_size = 0
1579
1579
1580 formatted_size = format_byte_size(file_size, binary=True)
1580 formatted_size = format_byte_size(file_size, binary=True)
1581 return formatted_size
1581 return formatted_size
1582
1582
1583
1583
1584 def urlify_text(text_, safe=True):
1584 def urlify_text(text_, safe=True):
1585 """
1585 """
1586 Extract urls from text and make html links out of them
1586 Extract urls from text and make html links out of them
1587
1587
1588 :param text_:
1588 :param text_:
1589 """
1589 """
1590
1590
1591 url_pat = re.compile(r'''(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@#.&+]'''
1591 url_pat = re.compile(r'''(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@#.&+]'''
1592 '''|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)''')
1592 '''|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)''')
1593
1593
1594 def url_func(match_obj):
1594 def url_func(match_obj):
1595 url_full = match_obj.groups()[0]
1595 url_full = match_obj.groups()[0]
1596 return '<a href="%(url)s">%(url)s</a>' % ({'url': url_full})
1596 return '<a href="%(url)s">%(url)s</a>' % ({'url': url_full})
1597 _newtext = url_pat.sub(url_func, text_)
1597 _newtext = url_pat.sub(url_func, text_)
1598 if safe:
1598 if safe:
1599 return literal(_newtext)
1599 return literal(_newtext)
1600 return _newtext
1600 return _newtext
1601
1601
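# Illustrative usage sketch (URL made up for the example):
#
#   >>> urlify_text('see http://example.com/repo for details', safe=False)
#   'see <a href="http://example.com/repo">http://example.com/repo</a> for details'
#
# With the default safe=True the same markup is returned wrapped in literal().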
1602
1602
1603 def urlify_commits(text_, repository):
1603 def urlify_commits(text_, repository):
1604 """
1604 """
1605 Extract commit ids from text and make links out of them
1605 Extract commit ids from text and make links out of them
1606
1606
1607 :param text_:
1607 :param text_:
1608 :param repository: repo name to build the URL with
1608 :param repository: repo name to build the URL with
1609 """
1609 """
1610
1610
1611 URL_PAT = re.compile(r'(^|\s)([0-9a-fA-F]{12,40})($|\s)')
1611 URL_PAT = re.compile(r'(^|\s)([0-9a-fA-F]{12,40})($|\s)')
1612
1612
1613 def url_func(match_obj):
1613 def url_func(match_obj):
1614 commit_id = match_obj.groups()[1]
1614 commit_id = match_obj.groups()[1]
1615 pref = match_obj.groups()[0]
1615 pref = match_obj.groups()[0]
1616 suf = match_obj.groups()[2]
1616 suf = match_obj.groups()[2]
1617
1617
1618 tmpl = (
1618 tmpl = (
1619 '%(pref)s<a class="%(cls)s" href="%(url)s">'
1619 '%(pref)s<a class="%(cls)s" href="%(url)s">'
1620 '%(commit_id)s</a>%(suf)s'
1620 '%(commit_id)s</a>%(suf)s'
1621 )
1621 )
1622 return tmpl % {
1622 return tmpl % {
1623 'pref': pref,
1623 'pref': pref,
1624 'cls': 'revision-link',
1624 'cls': 'revision-link',
1625 'url': route_url('repo_commit', repo_name=repository,
1625 'url': route_url('repo_commit', repo_name=repository,
1626 commit_id=commit_id),
1626 commit_id=commit_id),
1627 'commit_id': commit_id,
1627 'commit_id': commit_id,
1628 'suf': suf
1628 'suf': suf
1629 }
1629 }
1630
1630
1631 newtext = URL_PAT.sub(url_func, text_)
1631 newtext = URL_PAT.sub(url_func, text_)
1632
1632
1633 return newtext
1633 return newtext
1634
1634
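# Illustrative sketch (commit id made up): a whitespace-separated 12-40
# character hex id in a message such as 'fixes deadbeefcafe' is wrapped in an
# <a class="revision-link"> anchor pointing at the `repo_commit` route;
# building that URL requires an active pyramid request/route context.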
1635
1635
1636 def _process_url_func(match_obj, repo_name, uid, entry,
1636 def _process_url_func(match_obj, repo_name, uid, entry,
1637 return_raw_data=False, link_format='html'):
1637 return_raw_data=False, link_format='html'):
1638 pref = ''
1638 pref = ''
1639 if match_obj.group().startswith(' '):
1639 if match_obj.group().startswith(' '):
1640 pref = ' '
1640 pref = ' '
1641
1641
1642 issue_id = ''.join(match_obj.groups())
1642 issue_id = ''.join(match_obj.groups())
1643
1643
1644 if link_format == 'html':
1644 if link_format == 'html':
1645 tmpl = (
1645 tmpl = (
1646 '%(pref)s<a class="%(cls)s" href="%(url)s">'
1646 '%(pref)s<a class="%(cls)s" href="%(url)s">'
1647 '%(issue-prefix)s%(id-repr)s'
1647 '%(issue-prefix)s%(id-repr)s'
1648 '</a>')
1648 '</a>')
1649 elif link_format == 'rst':
1649 elif link_format == 'rst':
1650 tmpl = '`%(issue-prefix)s%(id-repr)s <%(url)s>`_'
1650 tmpl = '`%(issue-prefix)s%(id-repr)s <%(url)s>`_'
1651 elif link_format == 'markdown':
1651 elif link_format == 'markdown':
1652 tmpl = '[%(issue-prefix)s%(id-repr)s](%(url)s)'
1652 tmpl = '[%(issue-prefix)s%(id-repr)s](%(url)s)'
1653 else:
1653 else:
1654 raise ValueError('Bad link_format:{}'.format(link_format))
1654 raise ValueError('Bad link_format:{}'.format(link_format))
1655
1655
1656 (repo_name_cleaned,
1656 (repo_name_cleaned,
1657 parent_group_name) = RepoGroupModel().\
1657 parent_group_name) = RepoGroupModel().\
1658 _get_group_name_and_parent(repo_name)
1658 _get_group_name_and_parent(repo_name)
1659
1659
1660 # variables replacement
1660 # variables replacement
1661 named_vars = {
1661 named_vars = {
1662 'id': issue_id,
1662 'id': issue_id,
1663 'repo': repo_name,
1663 'repo': repo_name,
1664 'repo_name': repo_name_cleaned,
1664 'repo_name': repo_name_cleaned,
1665 'group_name': parent_group_name
1665 'group_name': parent_group_name
1666 }
1666 }
1667 # named regex variables
1667 # named regex variables
1668 named_vars.update(match_obj.groupdict())
1668 named_vars.update(match_obj.groupdict())
1669 _url = string.Template(entry['url']).safe_substitute(**named_vars)
1669 _url = string.Template(entry['url']).safe_substitute(**named_vars)
1670
1670
1671 data = {
1671 data = {
1672 'pref': pref,
1672 'pref': pref,
1673 'cls': 'issue-tracker-link',
1673 'cls': 'issue-tracker-link',
1674 'url': _url,
1674 'url': _url,
1675 'id-repr': issue_id,
1675 'id-repr': issue_id,
1676 'issue-prefix': entry['pref'],
1676 'issue-prefix': entry['pref'],
1677 'serv': entry['url'],
1677 'serv': entry['url'],
1678 }
1678 }
1679 if return_raw_data:
1679 if return_raw_data:
1680 return {
1680 return {
1681 'id': issue_id,
1681 'id': issue_id,
1682 'url': _url
1682 'url': _url
1683 }
1683 }
1684 return tmpl % data
1684 return tmpl % data
1685
1685
1686
1686
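# Illustrative sketch, not part of helpers.py: the core of _process_url_func
# above merges the named groups from the issue pattern with repo variables and
# substitutes them into the URL via string.Template. The pattern, URL template
# and repo name below are invented for the example.
import re
import string

_entry = {
    'pat': r'#(?P<issue_id>\d+)',
    'url': 'https://tracker.example.com/${repo}/issue/${issue_id}',
}
_match_obj = re.search(_entry['pat'], 'fixes #42')
_named_vars = {'repo': 'my-repo', 'repo_name': 'my-repo'}
_named_vars.update(_match_obj.groupdict())
# prints: https://tracker.example.com/my-repo/issue/42
print(string.Template(_entry['url']).safe_substitute(**_named_vars))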
1687 def process_patterns(text_string, repo_name, link_format='html'):
1687 def process_patterns(text_string, repo_name, link_format='html'):
1688 allowed_formats = ['html', 'rst', 'markdown']
1688 allowed_formats = ['html', 'rst', 'markdown']
1689 if link_format not in allowed_formats:
1689 if link_format not in allowed_formats:
1690 raise ValueError('Link format can be only one of:{} got {}'.format(
1690 raise ValueError('Link format can be only one of:{} got {}'.format(
1691 allowed_formats, link_format))
1691 allowed_formats, link_format))
1692
1692
1693 repo = None
1693 repo = None
1694 if repo_name:
1694 if repo_name:
1695 # Retrieve the repo object so that an invalid repo_name does not blow up in
1695 # Retrieve the repo object so that an invalid repo_name does not blow up in
1696 # IssueTrackerSettingsModel; the (possibly invalid) name is still passed further down
1696 # IssueTrackerSettingsModel; the (possibly invalid) name is still passed further down
1697 repo = Repository.get_by_repo_name(repo_name, cache=True)
1697 repo = Repository.get_by_repo_name(repo_name, cache=True)
1698
1698
1699 settings_model = IssueTrackerSettingsModel(repo=repo)
1699 settings_model = IssueTrackerSettingsModel(repo=repo)
1700 active_entries = settings_model.get_settings(cache=True)
1700 active_entries = settings_model.get_settings(cache=True)
1701
1701
1702 issues_data = []
1702 issues_data = []
1703 newtext = text_string
1703 newtext = text_string
1704
1704
1705 for uid, entry in active_entries.items():
1705 for uid, entry in active_entries.items():
1706 log.debug('found issue tracker entry with uid %s' % (uid,))
1706 log.debug('found issue tracker entry with uid %s' % (uid,))
1707
1707
1708 if not (entry['pat'] and entry['url']):
1708 if not (entry['pat'] and entry['url']):
1709 log.debug('skipping due to missing data')
1709 log.debug('skipping due to missing data')
1710 continue
1710 continue
1711
1711
1712 log.debug('issue tracker entry: uid: `%s` PAT:%s URL:%s PREFIX:%s'
1712 log.debug('issue tracker entry: uid: `%s` PAT:%s URL:%s PREFIX:%s'
1713 % (uid, entry['pat'], entry['url'], entry['pref']))
1713 % (uid, entry['pat'], entry['url'], entry['pref']))
1714
1714
1715 try:
1715 try:
1716 pattern = re.compile(r'%s' % entry['pat'])
1716 pattern = re.compile(r'%s' % entry['pat'])
1717 except re.error:
1717 except re.error:
1718 log.exception(
1718 log.exception(
1719 'issue tracker pattern: `%s` failed to compile',
1719 'issue tracker pattern: `%s` failed to compile',
1720 entry['pat'])
1720 entry['pat'])
1721 continue
1721 continue
1722
1722
1723 data_func = partial(
1723 data_func = partial(
1724 _process_url_func, repo_name=repo_name, entry=entry, uid=uid,
1724 _process_url_func, repo_name=repo_name, entry=entry, uid=uid,
1725 return_raw_data=True)
1725 return_raw_data=True)
1726
1726
1727 for match_obj in pattern.finditer(text_string):
1727 for match_obj in pattern.finditer(text_string):
1728 issues_data.append(data_func(match_obj))
1728 issues_data.append(data_func(match_obj))
1729
1729
1730 url_func = partial(
1730 url_func = partial(
1731 _process_url_func, repo_name=repo_name, entry=entry, uid=uid,
1731 _process_url_func, repo_name=repo_name, entry=entry, uid=uid,
1732 link_format=link_format)
1732 link_format=link_format)
1733
1733
1734 newtext = pattern.sub(url_func, newtext)
1734 newtext = pattern.sub(url_func, newtext)
1735 log.debug('processed prefix:uid `%s`' % (uid,))
1735 log.debug('processed prefix:uid `%s`' % (uid,))
1736
1736
1737 return newtext, issues_data
1737 return newtext, issues_data
1738
1738
1739
1739
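# Illustrative sketch, not part of helpers.py: process_patterns above binds the
# per-entry arguments with functools.partial and hands the bound callable to
# pattern.sub(); a minimal standalone version with a made-up tracker URL:
import re
from functools import partial

def _link(match_obj, base_url):
    return '<a href="%s%s">%s</a>' % (base_url, match_obj.group(1), match_obj.group(0))

_url_func = partial(_link, base_url='https://tracker.example.com/')
# prints: see <a href="https://tracker.example.com/12">#12</a> and <a href="https://tracker.example.com/99">#99</a>
print(re.compile(r'#(\d+)').sub(_url_func, 'see #12 and #99'))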
1740 def urlify_commit_message(commit_text, repository=None):
1740 def urlify_commit_message(commit_text, repository=None):
1741 """
1741 """
1742 Parses the given text message and makes proper links.
1742 Parses the given text message and makes proper links.
1743 Issues are linked to the configured issue tracker, and commit ids become commit links
1743 Issues are linked to the configured issue tracker, and commit ids become commit links
1744
1744
1745 :param commit_text:
1745 :param commit_text:
1746 :param repository:
1746 :param repository:
1747 """
1747 """
1748 from pylons import url # doh, we need to re-import url to mock it later
1748 from pylons import url # doh, we need to re-import url to mock it later
1749
1749
1750 def escaper(string):
1750 def escaper(string):
1751 return string.replace('<', '&lt;').replace('>', '&gt;')
1751 return string.replace('<', '&lt;').replace('>', '&gt;')
1752
1752
1753 newtext = escaper(commit_text)
1753 newtext = escaper(commit_text)
1754
1754
1755 # extract http/https links and make them real urls
1755 # extract http/https links and make them real urls
1756 newtext = urlify_text(newtext, safe=False)
1756 newtext = urlify_text(newtext, safe=False)
1757
1757
1758 # urlify commits - extract commit ids and make links out of them, if we have
1758 # urlify commits - extract commit ids and make links out of them, if we have
1759 # the repository scope present.
1759 # the repository scope present.
1760 if repository:
1760 if repository:
1761 newtext = urlify_commits(newtext, repository)
1761 newtext = urlify_commits(newtext, repository)
1762
1762
1763 # process issue tracker patterns
1763 # process issue tracker patterns
1764 newtext, issues = process_patterns(newtext, repository or '')
1764 newtext, issues = process_patterns(newtext, repository or '')
1765
1765
1766 return literal(newtext)
1766 return literal(newtext)
1767
1767
1768
1768
1769 def render_binary(repo_name, file_obj):
1769 def render_binary(repo_name, file_obj):
1770 """
1770 """
1771 Choose how to render a binary file
1771 Choose how to render a binary file
1772 """
1772 """
1773 filename = file_obj.name
1773 filename = file_obj.name
1774
1774
1775 # images
1775 # images
1776 for ext in ['*.png', '*.jpg', '*.ico', '*.gif']:
1776 for ext in ['*.png', '*.jpg', '*.ico', '*.gif']:
1777 if fnmatch.fnmatch(filename, pat=ext):
1777 if fnmatch.fnmatch(filename, pat=ext):
1778 alt = filename
1778 alt = filename
1779 src = route_path(
1779 src = route_path(
1780 'repo_file_raw', repo_name=repo_name,
1780 'repo_file_raw', repo_name=repo_name,
1781 commit_id=file_obj.commit.raw_id, f_path=file_obj.path)
1781 commit_id=file_obj.commit.raw_id, f_path=file_obj.path)
1782 return literal('<img class="rendered-binary" alt="{}" src="{}">'.format(alt, src))
1782 return literal('<img class="rendered-binary" alt="{}" src="{}">'.format(alt, src))
1783
1783
1784
1784
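# Illustrative sketch, not part of helpers.py: render_binary above dispatches
# on glob patterns via fnmatch; the same idea reduced to a predicate.
import fnmatch

def _is_image(filename):
    return any(fnmatch.fnmatch(filename, pat)
               for pat in ['*.png', '*.jpg', '*.ico', '*.gif'])

print(_is_image('logo.png'), _is_image('README.rst'))  # True False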
1785 def renderer_from_filename(filename, exclude=None):
1785 def renderer_from_filename(filename, exclude=None):
1786 """
1786 """
1787 Choose a renderer based on the filename; this works only for text-based files
1787 Choose a renderer based on the filename; this works only for text-based files
1788 """
1788 """
1789
1789
1790 # ipython
1790 # ipython
1791 for ext in ['*.ipynb']:
1791 for ext in ['*.ipynb']:
1792 if fnmatch.fnmatch(filename, pat=ext):
1792 if fnmatch.fnmatch(filename, pat=ext):
1793 return 'jupyter'
1793 return 'jupyter'
1794
1794
1795 is_markup = MarkupRenderer.renderer_from_filename(filename, exclude=exclude)
1795 is_markup = MarkupRenderer.renderer_from_filename(filename, exclude=exclude)
1796 if is_markup:
1796 if is_markup:
1797 return is_markup
1797 return is_markup
1798 return None
1798 return None
1799
1799
1800
1800
1801 def render(source, renderer='rst', mentions=False, relative_urls=None,
1801 def render(source, renderer='rst', mentions=False, relative_urls=None,
1802 repo_name=None):
1802 repo_name=None):
1803
1803
1804 def maybe_convert_relative_links(html_source):
1804 def maybe_convert_relative_links(html_source):
1805 if relative_urls:
1805 if relative_urls:
1806 return relative_links(html_source, relative_urls)
1806 return relative_links(html_source, relative_urls)
1807 return html_source
1807 return html_source
1808
1808
1809 if renderer == 'rst':
1809 if renderer == 'rst':
1810 if repo_name:
1810 if repo_name:
1811 # process patterns on comments if we pass in repo name
1811 # process patterns on comments if we pass in repo name
1812 source, issues = process_patterns(
1812 source, issues = process_patterns(
1813 source, repo_name, link_format='rst')
1813 source, repo_name, link_format='rst')
1814
1814
1815 return literal(
1815 return literal(
1816 '<div class="rst-block">%s</div>' %
1816 '<div class="rst-block">%s</div>' %
1817 maybe_convert_relative_links(
1817 maybe_convert_relative_links(
1818 MarkupRenderer.rst(source, mentions=mentions)))
1818 MarkupRenderer.rst(source, mentions=mentions)))
1819 elif renderer == 'markdown':
1819 elif renderer == 'markdown':
1820 if repo_name:
1820 if repo_name:
1821 # process patterns on comments if we pass in repo name
1821 # process patterns on comments if we pass in repo name
1822 source, issues = process_patterns(
1822 source, issues = process_patterns(
1823 source, repo_name, link_format='markdown')
1823 source, repo_name, link_format='markdown')
1824
1824
1825 return literal(
1825 return literal(
1826 '<div class="markdown-block">%s</div>' %
1826 '<div class="markdown-block">%s</div>' %
1827 maybe_convert_relative_links(
1827 maybe_convert_relative_links(
1828 MarkupRenderer.markdown(source, flavored=True,
1828 MarkupRenderer.markdown(source, flavored=True,
1829 mentions=mentions)))
1829 mentions=mentions)))
1830 elif renderer == 'jupyter':
1830 elif renderer == 'jupyter':
1831 return literal(
1831 return literal(
1832 '<div class="ipynb">%s</div>' %
1832 '<div class="ipynb">%s</div>' %
1833 maybe_convert_relative_links(
1833 maybe_convert_relative_links(
1834 MarkupRenderer.jupyter(source)))
1834 MarkupRenderer.jupyter(source)))
1835
1835
1836 # None means just show the file-source
1836 # None means just show the file-source
1837 return None
1837 return None
1838
1838
1839
1839
1840 def commit_status(repo, commit_id):
1840 def commit_status(repo, commit_id):
1841 return ChangesetStatusModel().get_status(repo, commit_id)
1841 return ChangesetStatusModel().get_status(repo, commit_id)
1842
1842
1843
1843
1844 def commit_status_lbl(commit_status):
1844 def commit_status_lbl(commit_status):
1845 return dict(ChangesetStatus.STATUSES).get(commit_status)
1845 return dict(ChangesetStatus.STATUSES).get(commit_status)
1846
1846
1847
1847
1848 def commit_time(repo_name, commit_id):
1848 def commit_time(repo_name, commit_id):
1849 repo = Repository.get_by_repo_name(repo_name)
1849 repo = Repository.get_by_repo_name(repo_name)
1850 commit = repo.get_commit(commit_id=commit_id)
1850 commit = repo.get_commit(commit_id=commit_id)
1851 return commit.date
1851 return commit.date
1852
1852
1853
1853
1854 def get_permission_name(key):
1854 def get_permission_name(key):
1855 return dict(Permission.PERMS).get(key)
1855 return dict(Permission.PERMS).get(key)
1856
1856
1857
1857
1858 def journal_filter_help(request):
1858 def journal_filter_help(request):
1859 _ = request.translate
1859 _ = request.translate
1860
1860
1861 return _(
1861 return _(
1862 'Example filter terms:\n' +
1862 'Example filter terms:\n' +
1863 ' repository:vcs\n' +
1863 ' repository:vcs\n' +
1864 ' username:marcin\n' +
1864 ' username:marcin\n' +
1865 ' username:(NOT marcin)\n' +
1865 ' username:(NOT marcin)\n' +
1866 ' action:*push*\n' +
1866 ' action:*push*\n' +
1867 ' ip:127.0.0.1\n' +
1867 ' ip:127.0.0.1\n' +
1868 ' date:20120101\n' +
1868 ' date:20120101\n' +
1869 ' date:[20120101100000 TO 20120102]\n' +
1869 ' date:[20120101100000 TO 20120102]\n' +
1870 '\n' +
1870 '\n' +
1871 'Generate wildcards using \'*\' character:\n' +
1871 'Generate wildcards using \'*\' character:\n' +
1872 ' "repository:vcs*" - search everything starting with \'vcs\'\n' +
1872 ' "repository:vcs*" - search everything starting with \'vcs\'\n' +
1873 ' "repository:*vcs*" - search for repository containing \'vcs\'\n' +
1873 ' "repository:*vcs*" - search for repository containing \'vcs\'\n' +
1874 '\n' +
1874 '\n' +
1875 'Optional AND / OR operators in queries\n' +
1875 'Optional AND / OR operators in queries\n' +
1876 ' "repository:vcs OR repository:test"\n' +
1876 ' "repository:vcs OR repository:test"\n' +
1877 ' "username:test AND repository:test*"\n'
1877 ' "username:test AND repository:test*"\n'
1878 )
1878 )
1879
1879
1880
1880
1881 def search_filter_help(searcher, request):
1881 def search_filter_help(searcher, request):
1882 _ = request.translate
1882 _ = request.translate
1883
1883
1884 terms = ''
1884 terms = ''
1885 return _(
1885 return _(
1886 'Example filter terms for `{searcher}` search:\n' +
1886 'Example filter terms for `{searcher}` search:\n' +
1887 '{terms}\n' +
1887 '{terms}\n' +
1888 'Generate wildcards using \'*\' character:\n' +
1888 'Generate wildcards using \'*\' character:\n' +
1889 ' "repo_name:vcs*" - search everything starting with \'vcs\'\n' +
1889 ' "repo_name:vcs*" - search everything starting with \'vcs\'\n' +
1890 ' "repo_name:*vcs*" - search for repository containing \'vcs\'\n' +
1890 ' "repo_name:*vcs*" - search for repository containing \'vcs\'\n' +
1891 '\n' +
1891 '\n' +
1892 'Optional AND / OR operators in queries\n' +
1892 'Optional AND / OR operators in queries\n' +
1893 ' "repo_name:vcs OR repo_name:test"\n' +
1893 ' "repo_name:vcs OR repo_name:test"\n' +
1894 ' "owner:test AND repo_name:test*"\n' +
1894 ' "owner:test AND repo_name:test*"\n' +
1895 'More: {search_doc}'
1895 'More: {search_doc}'
1896 ).format(searcher=searcher.name,
1896 ).format(searcher=searcher.name,
1897 terms=terms, search_doc=searcher.query_lang_doc)
1897 terms=terms, search_doc=searcher.query_lang_doc)
1898
1898
1899
1899
1900 def not_mapped_error(repo_name):
1900 def not_mapped_error(repo_name):
1901 from rhodecode.translation import _
1901 from rhodecode.translation import _
1902 flash(_('%s repository is not mapped to db; perhaps'
1902 flash(_('%s repository is not mapped to db; perhaps'
1903 ' it was created or renamed from the filesystem.'
1903 ' it was created or renamed from the filesystem.'
1904 ' Please run the application again'
1904 ' Please run the application again'
1905 ' in order to rescan repositories') % repo_name, category='error')
1905 ' in order to rescan repositories') % repo_name, category='error')
1906
1906
1907
1907
1908 def ip_range(ip_addr):
1908 def ip_range(ip_addr):
1909 from rhodecode.model.db import UserIpMap
1909 from rhodecode.model.db import UserIpMap
1910 s, e = UserIpMap._get_ip_range(ip_addr)
1910 s, e = UserIpMap._get_ip_range(ip_addr)
1911 return '%s - %s' % (s, e)
1911 return '%s - %s' % (s, e)
1912
1912
1913
1913
1914 def form(url, method='post', needs_csrf_token=True, **attrs):
1914 def form(url, method='post', needs_csrf_token=True, **attrs):
1915 """Wrapper around webhelpers.tags.form to prevent CSRF attacks."""
1915 """Wrapper around webhelpers.tags.form to prevent CSRF attacks."""
1916 if method.lower() != 'get' and needs_csrf_token:
1916 if method.lower() != 'get' and needs_csrf_token:
1917 raise Exception(
1917 raise Exception(
1918 'Forms to POST/PUT/DELETE endpoints should have (in general) a ' +
1918 'Forms to POST/PUT/DELETE endpoints should have (in general) a ' +
1919 'CSRF token. If the endpoint does not require such token you can ' +
1919 'CSRF token. If the endpoint does not require such token you can ' +
1920 'explicitly set the parameter needs_csrf_token to false.')
1920 'explicitly set the parameter needs_csrf_token to false.')
1921
1921
1922 return wh_form(url, method=method, **attrs)
1922 return wh_form(url, method=method, **attrs)
1923
1923
1924
1924
1925 def secure_form(form_url, method="POST", multipart=False, **attrs):
1925 def secure_form(form_url, method="POST", multipart=False, **attrs):
1926 """Start a form tag that points the action to a url. This
1926 """Start a form tag that points the action to a url. This
1927 form tag will also include the hidden field containing
1927 form tag will also include the hidden field containing
1928 the auth token.
1928 the auth token.
1929
1929
1930 The url options should be given either as a string, or as a
1930 The url options should be given either as a string, or as a
1931 ``url()`` function. The method for the form defaults to POST.
1931 ``url()`` function. The method for the form defaults to POST.
1932
1932
1933 Options:
1933 Options:
1934
1934
1935 ``multipart``
1935 ``multipart``
1936 If set to True, the enctype is set to "multipart/form-data".
1936 If set to True, the enctype is set to "multipart/form-data".
1937 ``method``
1937 ``method``
1938 The method to use when submitting the form, usually either
1938 The method to use when submitting the form, usually either
1939 "GET" or "POST". If "PUT", "DELETE", or another verb is used, a
1939 "GET" or "POST". If "PUT", "DELETE", or another verb is used, a
1940 hidden input with name _method is added to simulate the verb
1940 hidden input with name _method is added to simulate the verb
1941 over POST.
1941 over POST.
1942
1942
1943 """
1943 """
1944 from webhelpers.pylonslib.secure_form import insecure_form
1944 from webhelpers.pylonslib.secure_form import insecure_form
1945
1945
1946 session = None
1946 session = None
1947
1947
1948 # TODO(marcink): after pyramid migration require request variable ALWAYS
1948 # TODO(marcink): after pyramid migration require request variable ALWAYS
1949 if 'request' in attrs:
1949 if 'request' in attrs:
1950 session = attrs['request'].session
1950 session = attrs['request'].session
1951 del attrs['request']
1951 del attrs['request']
1952
1952
1953 form = insecure_form(form_url, method, multipart, **attrs)
1953 form = insecure_form(form_url, method, multipart, **attrs)
1954 token = literal(
1954 token = literal(
1955 '<input type="hidden" id="{}" name="{}" value="{}">'.format(
1955 '<input type="hidden" id="{}" name="{}" value="{}">'.format(
1956 csrf_token_key, csrf_token_key, get_csrf_token(session)))
1956 csrf_token_key, csrf_token_key, get_csrf_token(session)))
1957
1957
1958 return literal("%s\n%s" % (form, token))
1958 return literal("%s\n%s" % (form, token))
1959
1959
1960
1960
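# Illustrative sketch, not part of helpers.py: secure_form above appends a
# hidden CSRF input to the opening form tag. The same idea with a hand-rolled
# tag and a placeholder token value (the real helper delegates to webhelpers'
# insecure_form and reads the token from the session):
def _demo_secure_form(action, token, method='POST'):
    form_tag = '<form action="%s" method="%s">' % (action, method)
    hidden = '<input type="hidden" name="csrf_token" value="%s">' % token
    return '%s\n%s' % (form_tag, hidden)

print(_demo_secure_form('/my-repo/settings', 'not-a-real-token'))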
1961 def dropdownmenu(name, selected, options, enable_filter=False, **attrs):
1961 def dropdownmenu(name, selected, options, enable_filter=False, **attrs):
1962 select_html = select(name, selected, options, **attrs)
1962 select_html = select(name, selected, options, **attrs)
1963 select2 = """
1963 select2 = """
1964 <script>
1964 <script>
1965 $(document).ready(function() {
1965 $(document).ready(function() {
1966 $('#%s').select2({
1966 $('#%s').select2({
1967 containerCssClass: 'drop-menu',
1967 containerCssClass: 'drop-menu',
1968 dropdownCssClass: 'drop-menu-dropdown',
1968 dropdownCssClass: 'drop-menu-dropdown',
1969 dropdownAutoWidth: true%s
1969 dropdownAutoWidth: true%s
1970 });
1970 });
1971 });
1971 });
1972 </script>
1972 </script>
1973 """
1973 """
1974 filter_option = """,
1974 filter_option = """,
1975 minimumResultsForSearch: -1
1975 minimumResultsForSearch: -1
1976 """
1976 """
1977 input_id = attrs.get('id') or name
1977 input_id = attrs.get('id') or name
1978 filter_enabled = "" if enable_filter else filter_option
1978 filter_enabled = "" if enable_filter else filter_option
1979 select_script = literal(select2 % (input_id, filter_enabled))
1979 select_script = literal(select2 % (input_id, filter_enabled))
1980
1980
1981 return literal(select_html+select_script)
1981 return literal(select_html+select_script)
1982
1982
1983
1983
1984 def get_visual_attr(tmpl_context_var, attr_name):
1984 def get_visual_attr(tmpl_context_var, attr_name):
1985 """
1985 """
1986 A safe way to get a variable from visual variable of template context
1986 A safe way to get a variable from visual variable of template context
1987
1987
1988 :param tmpl_context_var: instance of tmpl_context, usually present as `c`
1988 :param tmpl_context_var: instance of tmpl_context, usually present as `c`
1989 :param attr_name: name of the attribute we fetch from the c.visual
1989 :param attr_name: name of the attribute we fetch from the c.visual
1990 """
1990 """
1991 visual = getattr(tmpl_context_var, 'visual', None)
1991 visual = getattr(tmpl_context_var, 'visual', None)
1992 if not visual:
1992 if not visual:
1993 return
1993 return
1994 else:
1994 else:
1995 return getattr(visual, attr_name, None)
1995 return getattr(visual, attr_name, None)
1996
1996
1997
1997
1998 def get_last_path_part(file_node):
1998 def get_last_path_part(file_node):
1999 if not file_node.path:
1999 if not file_node.path:
2000 return u''
2000 return u''
2001
2001
2002 path = safe_unicode(file_node.path.split('/')[-1])
2002 path = safe_unicode(file_node.path.split('/')[-1])
2003 return u'../' + path
2003 return u'../' + path
2004
2004
2005
2005
2006 def route_url(*args, **kwargs):
2006 def route_url(*args, **kwargs):
2007 """
2007 """
2008 Wrapper around pyramid's `route_url` (fully qualified URL) function.
2008 Wrapper around pyramid's `route_url` (fully qualified URL) function.
2009 It is used to generate URLs from within pylons views or templates.
2009 It is used to generate URLs from within pylons views or templates.
2010 This will be removed when the pyramid migration is finished.
2010 This will be removed when the pyramid migration is finished.
2011 """
2011 """
2012 req = get_current_request()
2012 req = get_current_request()
2013 return req.route_url(*args, **kwargs)
2013 return req.route_url(*args, **kwargs)
2014
2014
2015
2015
2016 def route_path(*args, **kwargs):
2016 def route_path(*args, **kwargs):
2017 """
2017 """
2018 Wrapper around pyramid's `route_path` function. It is used to generate
2018 Wrapper around pyramid's `route_path` function. It is used to generate
2019 URLs from within pylons views or templates. This will be removed when
2019 URLs from within pylons views or templates. This will be removed when
2020 the pyramid migration is finished.
2020 the pyramid migration is finished.
2021 """
2021 """
2022 req = get_current_request()
2022 req = get_current_request()
2023 return req.route_path(*args, **kwargs)
2023 return req.route_path(*args, **kwargs)
2024
2024
2025
2025
2026 def route_path_or_none(*args, **kwargs):
2026 def route_path_or_none(*args, **kwargs):
2027 try:
2027 try:
2028 return route_path(*args, **kwargs)
2028 return route_path(*args, **kwargs)
2029 except KeyError:
2029 except KeyError:
2030 return None
2030 return None
2031
2031
2032
2032
2033 def current_route_path(request, **kw):
2033 def current_route_path(request, **kw):
2034 new_args = request.GET.mixed()
2034 new_args = request.GET.mixed()
2035 new_args.update(kw)
2035 new_args.update(kw)
2036 return request.current_route_path(_query=new_args)
2036 return request.current_route_path(_query=new_args)
2037
2037
2038
2038
2039 def static_url(*args, **kwds):
2039 def static_url(*args, **kwds):
2040 """
2040 """
2041 Wrapper around pyramid's `static_url` function. It is used to generate
2041 Wrapper around pyramid's `static_url` function. It is used to generate
2042 static asset URLs from within pylons views or templates. This will be removed when
2042 static asset URLs from within pylons views or templates. This will be removed when
2043 the pyramid migration is finished.
2043 the pyramid migration is finished.
2044 """
2044 """
2045 req = get_current_request()
2045 req = get_current_request()
2046 return req.static_url(*args, **kwds)
2046 return req.static_url(*args, **kwds)
2047
2047
2048
2048
2049 def resource_path(*args, **kwds):
2049 def resource_path(*args, **kwds):
2050 """
2050 """
2051 Wrapper around pyramid's `resource_path` function. It is used to generate
2051 Wrapper around pyramid's `resource_path` function. It is used to generate
2052 resource URLs from within pylons views or templates. This will be removed when
2052 resource URLs from within pylons views or templates. This will be removed when
2053 the pyramid migration is finished.
2053 the pyramid migration is finished.
2054 """
2054 """
2055 req = get_current_request()
2055 req = get_current_request()
2056 return req.resource_path(*args, **kwds)
2056 return req.resource_path(*args, **kwds)
2057
2057
2058
2058
2059 def api_call_example(method, args):
2059 def api_call_example(method, args):
2060 """
2060 """
2061 Generates an API call example via CURL
2061 Generates an API call example via CURL
2062 """
2062 """
2063 args_json = json.dumps(OrderedDict([
2063 args_json = json.dumps(OrderedDict([
2064 ('id', 1),
2064 ('id', 1),
2065 ('auth_token', 'SECRET'),
2065 ('auth_token', 'SECRET'),
2066 ('method', method),
2066 ('method', method),
2067 ('args', args)
2067 ('args', args)
2068 ]))
2068 ]))
2069 return literal(
2069 return literal(
2070 "curl {api_url} -X POST -H 'content-type:text/plain' --data-binary '{data}'"
2070 "curl {api_url} -X POST -H 'content-type:text/plain' --data-binary '{data}'"
2071 "<br/><br/>SECRET can be found in <a href=\"{token_url}\">auth-tokens</a> page, "
2071 "<br/><br/>SECRET can be found in <a href=\"{token_url}\">auth-tokens</a> page, "
2072 "and needs to be of `api calls` role."
2072 "and needs to be of `api calls` role."
2073 .format(
2073 .format(
2074 api_url=route_url('apiv2'),
2074 api_url=route_url('apiv2'),
2075 token_url=route_url('my_account_auth_tokens'),
2075 token_url=route_url('my_account_auth_tokens'),
2076 data=args_json))
2076 data=args_json))
2077
2077
2078
2078
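# Illustrative sketch, not part of helpers.py: a standalone rendition of the
# JSON-RPC payload api_call_example builds. The endpoint URL, method name and
# args below are placeholders, not values taken from the application's routes.
import json
from collections import OrderedDict

_payload = json.dumps(OrderedDict([
    ('id', 1),
    ('auth_token', 'SECRET'),
    ('method', 'get_repo'),            # example method name
    ('args', {'repoid': 'my-repo'}),   # example arguments
]))
print("curl https://rhodecode.example.com/_admin/api -X POST "
      "-H 'content-type:text/plain' --data-binary '%s'" % _payload)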
2079 def notification_description(notification, request):
2079 def notification_description(notification, request):
2080 """
2080 """
2081 Generate notification human readable description based on notification type
2081 Generate notification human readable description based on notification type
2082 """
2082 """
2083 from rhodecode.model.notification import NotificationModel
2083 from rhodecode.model.notification import NotificationModel
2084 return NotificationModel().make_description(
2084 return NotificationModel().make_description(
2085 notification, translate=request.translate)
2085 notification, translate=request.translate)
2086
2086
2087
2087
2088 def go_import_header(request, db_repo=None):
2088 def go_import_header(request, db_repo=None):
2089 """
2089 """
2090 Creates a meta header for the go-import functionality used by Go (go get)
2090 Creates a meta header for the go-import functionality used by Go (go get)
2091 """
2091 """
2092
2092
2093 if not db_repo:
2093 if not db_repo:
2094 return
2094 return
2095 if 'go-get' not in request.GET:
2095 if 'go-get' not in request.GET:
2096 return
2096 return
2097
2097
2098 clone_url = db_repo.clone_url()
2098 clone_url = db_repo.clone_url()
2099 prefix = re.split(r'^https?:\/\/', clone_url)[-1]
2099 prefix = re.split(r'^https?:\/\/', clone_url)[-1]
2100 # we have a repo and go-get flag,
2100 # we have a repo and go-get flag,
2101 return literal('<meta name="go-import" content="{} {} {}">'.format(
2101 return literal('<meta name="go-import" content="{} {} {}">'.format(
2102 prefix, db_repo.repo_type, clone_url))
2102 prefix, db_repo.repo_type, clone_url))
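# Illustrative sketch, not part of helpers.py: go_import_header above emits the
# <meta name="go-import"> tag that `go get` looks for. The clone URL and vcs
# type below are invented for the example.
import re

_clone_url = 'https://code.example.com/group/my-repo'
_prefix = re.split(r'^https?:\/\/', _clone_url)[-1]
# prints: <meta name="go-import" content="code.example.com/group/my-repo git https://code.example.com/group/my-repo">
print('<meta name="go-import" content="{} {} {}">'.format(_prefix, 'git', _clone_url))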
@@ -1,623 +1,631 b''
1 # -*- coding: utf-8 -*-
1 # -*- coding: utf-8 -*-
2
2
3 # Copyright (C) 2010-2017 RhodeCode GmbH
3 # Copyright (C) 2010-2017 RhodeCode GmbH
4 #
4 #
5 # This program is free software: you can redistribute it and/or modify
5 # This program is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU Affero General Public License, version 3
6 # it under the terms of the GNU Affero General Public License, version 3
7 # (only), as published by the Free Software Foundation.
7 # (only), as published by the Free Software Foundation.
8 #
8 #
9 # This program is distributed in the hope that it will be useful,
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU General Public License for more details.
12 # GNU General Public License for more details.
13 #
13 #
14 # You should have received a copy of the GNU Affero General Public License
14 # You should have received a copy of the GNU Affero General Public License
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 #
16 #
17 # This program is dual-licensed. If you wish to learn more about the
17 # This program is dual-licensed. If you wish to learn more about the
18 # RhodeCode Enterprise Edition, including its added features, Support services,
18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 # and proprietary license terms, please see https://rhodecode.com/licenses/
19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20
20
21
21
22 """
22 """
23 Package for testing various lib/helper functions in rhodecode
23 Package for testing various lib/helper functions in rhodecode
24 """
24 """
25
25
26 import datetime
26 import datetime
27 import string
27 import string
28 import mock
28 import mock
29 import pytest
29 import pytest
30
30
31 from rhodecode.tests import no_newline_id_generator
31 from rhodecode.tests import no_newline_id_generator
32 from rhodecode.tests.utils import run_test_concurrently
32 from rhodecode.tests.utils import run_test_concurrently
33 from rhodecode.lib.helpers import InitialsGravatar
33 from rhodecode.lib.helpers import InitialsGravatar
34
34
35 from rhodecode.lib.utils2 import AttributeDict
35 from rhodecode.lib.utils2 import AttributeDict
36 from rhodecode.model.db import Repository
36 from rhodecode.model.db import Repository
37
37
38
38
39 def _urls_for_proto(proto):
39 def _urls_for_proto(proto):
40 return [
40 return [
41 ('%s://127.0.0.1' % proto, ['%s://' % proto, '127.0.0.1'],
41 ('%s://127.0.0.1' % proto, ['%s://' % proto, '127.0.0.1'],
42 '%s://127.0.0.1' % proto),
42 '%s://127.0.0.1' % proto),
43 ('%s://marcink@127.0.0.1' % proto, ['%s://' % proto, '127.0.0.1'],
43 ('%s://marcink@127.0.0.1' % proto, ['%s://' % proto, '127.0.0.1'],
44 '%s://127.0.0.1' % proto),
44 '%s://127.0.0.1' % proto),
45 ('%s://marcink:pass@127.0.0.1' % proto, ['%s://' % proto, '127.0.0.1'],
45 ('%s://marcink:pass@127.0.0.1' % proto, ['%s://' % proto, '127.0.0.1'],
46 '%s://127.0.0.1' % proto),
46 '%s://127.0.0.1' % proto),
47 ('%s://127.0.0.1:8080' % proto, ['%s://' % proto, '127.0.0.1', '8080'],
47 ('%s://127.0.0.1:8080' % proto, ['%s://' % proto, '127.0.0.1', '8080'],
48 '%s://127.0.0.1:8080' % proto),
48 '%s://127.0.0.1:8080' % proto),
49 ('%s://domain.org' % proto, ['%s://' % proto, 'domain.org'],
49 ('%s://domain.org' % proto, ['%s://' % proto, 'domain.org'],
50 '%s://domain.org' % proto),
50 '%s://domain.org' % proto),
51 ('%s://user:pass@domain.org:8080' % proto,
51 ('%s://user:pass@domain.org:8080' % proto,
52 ['%s://' % proto, 'domain.org', '8080'],
52 ['%s://' % proto, 'domain.org', '8080'],
53 '%s://domain.org:8080' % proto),
53 '%s://domain.org:8080' % proto),
54 ]
54 ]
55
55
56 TEST_URLS = _urls_for_proto('http') + _urls_for_proto('https')
56 TEST_URLS = _urls_for_proto('http') + _urls_for_proto('https')
57
57
58
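# Illustrative sketch, not part of this test module: the credentials_filter
# cases above expect the user:pass@ part to be stripped while host and port are
# kept. One hedged way to get that behaviour (this is NOT the
# rhodecode.lib.utils2 implementation):
from urlparse import urlsplit, urlunsplit  # Python 2, matching this code base

def _strip_credentials(url):
    parts = urlsplit(url)
    netloc = parts.netloc.rsplit('@', 1)[-1]  # drop any user[:password]@ prefix
    return urlunsplit((parts.scheme, netloc, parts.path, parts.query, parts.fragment))

print(_strip_credentials('http://user:pass@domain.org:8080'))  # http://domain.org:8080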
58
59 @pytest.mark.parametrize("test_url, expected, expected_creds", TEST_URLS)
59 @pytest.mark.parametrize("test_url, expected, expected_creds", TEST_URLS)
60 def test_uri_filter(test_url, expected, expected_creds):
60 def test_uri_filter(test_url, expected, expected_creds):
61 from rhodecode.lib.utils2 import uri_filter
61 from rhodecode.lib.utils2 import uri_filter
62 assert uri_filter(test_url) == expected
62 assert uri_filter(test_url) == expected
63
63
64
64
65 @pytest.mark.parametrize("test_url, expected, expected_creds", TEST_URLS)
65 @pytest.mark.parametrize("test_url, expected, expected_creds", TEST_URLS)
66 def test_credentials_filter(test_url, expected, expected_creds):
66 def test_credentials_filter(test_url, expected, expected_creds):
67 from rhodecode.lib.utils2 import credentials_filter
67 from rhodecode.lib.utils2 import credentials_filter
68 assert credentials_filter(test_url) == expected_creds
68 assert credentials_filter(test_url) == expected_creds
69
69
70
70
71 @pytest.mark.parametrize("str_bool, expected", [
71 @pytest.mark.parametrize("str_bool, expected", [
72 ('t', True),
72 ('t', True),
73 ('true', True),
73 ('true', True),
74 ('y', True),
74 ('y', True),
75 ('yes', True),
75 ('yes', True),
76 ('on', True),
76 ('on', True),
77 ('1', True),
77 ('1', True),
78 ('Y', True),
78 ('Y', True),
79 ('yeS', True),
79 ('yeS', True),
80 ('Y', True),
80 ('Y', True),
81 ('TRUE', True),
81 ('TRUE', True),
82 ('T', True),
82 ('T', True),
83 ('False', False),
83 ('False', False),
84 ('F', False),
84 ('F', False),
85 ('FALSE', False),
85 ('FALSE', False),
86 ('0', False),
86 ('0', False),
87 ('-1', False),
87 ('-1', False),
88 ('', False)
88 ('', False)
89 ])
89 ])
90 def test_str2bool(str_bool, expected):
90 def test_str2bool(str_bool, expected):
91 from rhodecode.lib.utils2 import str2bool
91 from rhodecode.lib.utils2 import str2bool
92 assert str2bool(str_bool) == expected
92 assert str2bool(str_bool) == expected
93
93
94
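# Illustrative sketch, not part of this test module: behaviour inferred from
# the cases above; NOT the rhodecode.lib.utils2 implementation.
def _demo_str2bool(value):
    return str(value).strip().lower() in ('t', 'true', 'y', 'yes', 'on', '1')

print(_demo_str2bool('yeS'), _demo_str2bool('-1'))  # True False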
94
95 @pytest.mark.parametrize("text, expected", reduce(lambda a1,a2:a1+a2, [
95 @pytest.mark.parametrize("text, expected", reduce(lambda a1,a2:a1+a2, [
96 [
96 [
97 (pref+"", []),
97 (pref+"", []),
98 (pref+"Hi there @marcink", ['marcink']),
98 (pref+"Hi there @marcink", ['marcink']),
99 (pref+"Hi there @marcink and @bob", ['bob', 'marcink']),
99 (pref+"Hi there @marcink and @bob", ['bob', 'marcink']),
100 (pref+"Hi there @marcink\n", ['marcink']),
100 (pref+"Hi there @marcink\n", ['marcink']),
101 (pref+"Hi there @marcink and @bob\n", ['bob', 'marcink']),
101 (pref+"Hi there @marcink and @bob\n", ['bob', 'marcink']),
102 (pref+"Hi there marcin@rhodecode.com", []),
102 (pref+"Hi there marcin@rhodecode.com", []),
103 (pref+"Hi there @john.malcovic and @bob\n", ['bob', 'john.malcovic']),
103 (pref+"Hi there @john.malcovic and @bob\n", ['bob', 'john.malcovic']),
104 (pref+"This needs to be reviewed: (@marcink,@john)", ["john", "marcink"]),
104 (pref+"This needs to be reviewed: (@marcink,@john)", ["john", "marcink"]),
105 (pref+"This needs to be reviewed: (@marcink, @john)", ["john", "marcink"]),
105 (pref+"This needs to be reviewed: (@marcink, @john)", ["john", "marcink"]),
106 (pref+"This needs to be reviewed: [@marcink,@john]", ["john", "marcink"]),
106 (pref+"This needs to be reviewed: [@marcink,@john]", ["john", "marcink"]),
107 (pref+"This needs to be reviewed: (@marcink @john)", ["john", "marcink"]),
107 (pref+"This needs to be reviewed: (@marcink @john)", ["john", "marcink"]),
108 (pref+"@john @mary, please review", ["john", "mary"]),
108 (pref+"@john @mary, please review", ["john", "mary"]),
109 (pref+"@john,@mary, please review", ["john", "mary"]),
109 (pref+"@john,@mary, please review", ["john", "mary"]),
110 (pref+"Hej @123, @22john,@mary, please review", ['123', '22john', 'mary']),
110 (pref+"Hej @123, @22john,@mary, please review", ['123', '22john', 'mary']),
111 (pref+"@first hi there @marcink here's my email marcin@email.com "
111 (pref+"@first hi there @marcink here's my email marcin@email.com "
112 "@lukaszb check @one_more22 it pls @ ttwelve @D[] @one@two@three ", ['first', 'lukaszb', 'marcink', 'one', 'one_more22']),
112 "@lukaszb check @one_more22 it pls @ ttwelve @D[] @one@two@three ", ['first', 'lukaszb', 'marcink', 'one', 'one_more22']),
113 (pref+"@MARCIN @maRCiN @2one_more22 @john please see this http://org.pl", ['2one_more22', 'john', 'MARCIN', 'maRCiN']),
113 (pref+"@MARCIN @maRCiN @2one_more22 @john please see this http://org.pl", ['2one_more22', 'john', 'MARCIN', 'maRCiN']),
114 (pref+"@marian.user just do it @marco-polo and next extract @marco_polo", ['marco-polo', 'marco_polo', 'marian.user']),
114 (pref+"@marian.user just do it @marco-polo and next extract @marco_polo", ['marco-polo', 'marco_polo', 'marian.user']),
115 (pref+"user.dot hej ! not-needed maril@domain.org", []),
115 (pref+"user.dot hej ! not-needed maril@domain.org", []),
116 (pref+"\n@marcin", ['marcin']),
116 (pref+"\n@marcin", ['marcin']),
117 ]
117 ]
118 for pref in ['', '\n', 'hi !', '\t', '\n\n']]), ids=no_newline_id_generator)
118 for pref in ['', '\n', 'hi !', '\t', '\n\n']]), ids=no_newline_id_generator)
119 def test_mention_extractor(text, expected):
119 def test_mention_extractor(text, expected):
120 from rhodecode.lib.utils2 import extract_mentioned_users
120 from rhodecode.lib.utils2 import extract_mentioned_users
121 got = extract_mentioned_users(text)
121 got = extract_mentioned_users(text)
122 assert sorted(got, key=lambda x: x.lower()) == got
122 assert sorted(got, key=lambda x: x.lower()) == got
123 assert set(expected) == set(got)
123 assert set(expected) == set(got)
124
124
125 @pytest.mark.parametrize("age_args, expected, kw", [
125 @pytest.mark.parametrize("age_args, expected, kw", [
126 ({}, u'just now', {}),
126 ({}, u'just now', {}),
127 ({'seconds': -1}, u'1 second ago', {}),
127 ({'seconds': -1}, u'1 second ago', {}),
128 ({'seconds': -60 * 2}, u'2 minutes ago', {}),
128 ({'seconds': -60 * 2}, u'2 minutes ago', {}),
129 ({'hours': -1}, u'1 hour ago', {}),
129 ({'hours': -1}, u'1 hour ago', {}),
130 ({'hours': -24}, u'1 day ago', {}),
130 ({'hours': -24}, u'1 day ago', {}),
131 ({'hours': -24 * 5}, u'5 days ago', {}),
131 ({'hours': -24 * 5}, u'5 days ago', {}),
132 ({'months': -1}, u'1 month ago', {}),
132 ({'months': -1}, u'1 month ago', {}),
133 ({'months': -1, 'days': -2}, u'1 month and 2 days ago', {}),
133 ({'months': -1, 'days': -2}, u'1 month and 2 days ago', {}),
134 ({'years': -1, 'months': -1}, u'1 year and 1 month ago', {}),
134 ({'years': -1, 'months': -1}, u'1 year and 1 month ago', {}),
135 ({}, u'just now', {'short_format': True}),
135 ({}, u'just now', {'short_format': True}),
136 ({'seconds': -1}, u'1sec ago', {'short_format': True}),
136 ({'seconds': -1}, u'1sec ago', {'short_format': True}),
137 ({'seconds': -60 * 2}, u'2min ago', {'short_format': True}),
137 ({'seconds': -60 * 2}, u'2min ago', {'short_format': True}),
138 ({'hours': -1}, u'1h ago', {'short_format': True}),
138 ({'hours': -1}, u'1h ago', {'short_format': True}),
139 ({'hours': -24}, u'1d ago', {'short_format': True}),
139 ({'hours': -24}, u'1d ago', {'short_format': True}),
140 ({'hours': -24 * 5}, u'5d ago', {'short_format': True}),
140 ({'hours': -24 * 5}, u'5d ago', {'short_format': True}),
141 ({'months': -1}, u'1m ago', {'short_format': True}),
141 ({'months': -1}, u'1m ago', {'short_format': True}),
142 ({'months': -1, 'days': -2}, u'1m, 2d ago', {'short_format': True}),
142 ({'months': -1, 'days': -2}, u'1m, 2d ago', {'short_format': True}),
143 ({'years': -1, 'months': -1}, u'1y, 1m ago', {'short_format': True}),
143 ({'years': -1, 'months': -1}, u'1y, 1m ago', {'short_format': True}),
144 ])
144 ])
145 def test_age(age_args, expected, kw, pylonsapp):
145 def test_age(age_args, expected, kw, pylonsapp):
146 from rhodecode.lib.utils2 import age
146 from rhodecode.lib.utils2 import age
147 from dateutil import relativedelta
147 from dateutil import relativedelta
148 n = datetime.datetime(year=2012, month=5, day=17)
148 n = datetime.datetime(year=2012, month=5, day=17)
149 delt = lambda *args, **kwargs: relativedelta.relativedelta(*args, **kwargs)
149 delt = lambda *args, **kwargs: relativedelta.relativedelta(*args, **kwargs)
150
150
151 def translate(elem):
151 def translate(elem):
152 return elem.interpolate()
152 return elem.interpolate()
153
153
154 assert translate(age(n + delt(**age_args), now=n, **kw)) == expected
154 assert translate(age(n + delt(**age_args), now=n, **kw)) == expected
155
155
156
156
157 @pytest.mark.parametrize("age_args, expected, kw", [
157 @pytest.mark.parametrize("age_args, expected, kw", [
158 ({}, u'just now', {}),
158 ({}, u'just now', {}),
159 ({'seconds': 1}, u'in 1 second', {}),
159 ({'seconds': 1}, u'in 1 second', {}),
160 ({'seconds': 60 * 2}, u'in 2 minutes', {}),
160 ({'seconds': 60 * 2}, u'in 2 minutes', {}),
161 ({'hours': 1}, u'in 1 hour', {}),
161 ({'hours': 1}, u'in 1 hour', {}),
162 ({'hours': 24}, u'in 1 day', {}),
162 ({'hours': 24}, u'in 1 day', {}),
163 ({'hours': 24 * 5}, u'in 5 days', {}),
163 ({'hours': 24 * 5}, u'in 5 days', {}),
164 ({'months': 1}, u'in 1 month', {}),
164 ({'months': 1}, u'in 1 month', {}),
165 ({'months': 1, 'days': 1}, u'in 1 month and 1 day', {}),
165 ({'months': 1, 'days': 1}, u'in 1 month and 1 day', {}),
166 ({'years': 1, 'months': 1}, u'in 1 year and 1 month', {}),
166 ({'years': 1, 'months': 1}, u'in 1 year and 1 month', {}),
167 ({}, u'just now', {'short_format': True}),
167 ({}, u'just now', {'short_format': True}),
168 ({'seconds': 1}, u'in 1sec', {'short_format': True}),
168 ({'seconds': 1}, u'in 1sec', {'short_format': True}),
169 ({'seconds': 60 * 2}, u'in 2min', {'short_format': True}),
169 ({'seconds': 60 * 2}, u'in 2min', {'short_format': True}),
170 ({'hours': 1}, u'in 1h', {'short_format': True}),
170 ({'hours': 1}, u'in 1h', {'short_format': True}),
171 ({'hours': 24}, u'in 1d', {'short_format': True}),
171 ({'hours': 24}, u'in 1d', {'short_format': True}),
172 ({'hours': 24 * 5}, u'in 5d', {'short_format': True}),
172 ({'hours': 24 * 5}, u'in 5d', {'short_format': True}),
173 ({'months': 1}, u'in 1m', {'short_format': True}),
173 ({'months': 1}, u'in 1m', {'short_format': True}),
174 ({'months': 1, 'days': 1}, u'in 1m, 1d', {'short_format': True}),
174 ({'months': 1, 'days': 1}, u'in 1m, 1d', {'short_format': True}),
175 ({'years': 1, 'months': 1}, u'in 1y, 1m', {'short_format': True}),
175 ({'years': 1, 'months': 1}, u'in 1y, 1m', {'short_format': True}),
176 ])
176 ])
177 def test_age_in_future(age_args, expected, kw, pylonsapp):
177 def test_age_in_future(age_args, expected, kw, pylonsapp):
178 from rhodecode.lib.utils2 import age
178 from rhodecode.lib.utils2 import age
179 from dateutil import relativedelta
179 from dateutil import relativedelta
180 n = datetime.datetime(year=2012, month=5, day=17)
180 n = datetime.datetime(year=2012, month=5, day=17)
181 delt = lambda *args, **kwargs: relativedelta.relativedelta(*args, **kwargs)
181 delt = lambda *args, **kwargs: relativedelta.relativedelta(*args, **kwargs)
182
182
183 def translate(elem):
183 def translate(elem):
184 return elem.interpolate()
184 return elem.interpolate()
185
185
186 assert translate(age(n + delt(**age_args), now=n, **kw)) == expected
186 assert translate(age(n + delt(**age_args), now=n, **kw)) == expected
187
187
188
188
189 @pytest.mark.parametrize("sample, expected_tags", [
189 @pytest.mark.parametrize("sample, expected_tags", [
190 # entry
191 ((
192 ""
193 ),
194 [
195
196 ]),
197 # entry
190 ((
198 ((
191 "hello world [stale]"
199 "hello world [stale]"
192 ),
200 ),
193 [
201 [
194 ('state', '[stale]'),
202 ('state', '[stale]'),
195 ]),
203 ]),
196 # entry
204 # entry
197 ((
205 ((
198 "hello world [v2.0.0] [v1.0.0]"
206 "hello world [v2.0.0] [v1.0.0]"
199 ),
207 ),
200 [
208 [
201 ('generic', '[v2.0.0]'),
209 ('generic', '[v2.0.0]'),
202 ('generic', '[v1.0.0]'),
210 ('generic', '[v1.0.0]'),
203 ]),
211 ]),
204 # entry
212 # entry
205 ((
213 ((
206 "he[ll]o wo[rl]d"
214 "he[ll]o wo[rl]d"
207 ),
215 ),
208 [
216 [
209 ('label', '[ll]'),
217 ('label', '[ll]'),
210 ('label', '[rl]'),
218 ('label', '[rl]'),
211 ]),
219 ]),
212 # entry
220 # entry
213 ((
221 ((
214 "hello world [stale]\n[featured]\n[stale] [dead] [dev]"
222 "hello world [stale]\n[featured]\n[stale] [dead] [dev]"
215 ),
223 ),
216 [
224 [
217 ('state', '[stale]'),
225 ('state', '[stale]'),
218 ('state', '[featured]'),
226 ('state', '[featured]'),
219 ('state', '[stale]'),
227 ('state', '[stale]'),
220 ('state', '[dead]'),
228 ('state', '[dead]'),
221 ('state', '[dev]'),
229 ('state', '[dev]'),
222 ]),
230 ]),
223 # entry
231 # entry
224 ((
232 ((
225 "hello world \n\n [stale] \n [url =&gt; [name](http://rc.com)]"
233 "hello world \n\n [stale] \n [url =&gt; [name](http://rc.com)]"
226 ),
234 ),
227 [
235 [
228 ('state', '[stale]'),
236 ('state', '[stale]'),
229 ('url', '[url =&gt; [name](http://rc.com)]'),
237 ('url', '[url =&gt; [name](http://rc.com)]'),
230 ]),
238 ]),
231 # entry
239 # entry
232 ((
240 ((
233 "hello pta[tag] gog [[]] [[] sda ero[or]d [me =&gt;>< sa]"
241 "hello pta[tag] gog [[]] [[] sda ero[or]d [me =&gt;>< sa]"
234 "[requires] [stale] [see<>=&gt;] [see =&gt; http://url.com]"
242 "[requires] [stale] [see<>=&gt;] [see =&gt; http://url.com]"
235 "[requires =&gt; url] [lang =&gt; python] [just a tag] "
243 "[requires =&gt; url] [lang =&gt; python] [just a tag] "
236 "<html_tag first='abc' attr=\"my.url?attr=&another=\"></html_tag>"
244 "<html_tag first='abc' attr=\"my.url?attr=&another=\"></html_tag>"
237 "[,d] [ =&gt; ULR ] [obsolete] [desc]]"
245 "[,d] [ =&gt; ULR ] [obsolete] [desc]]"
238 ),
246 ),
239 [
247 [
240 ('label', '[desc]'),
248 ('label', '[desc]'),
241 ('label', '[obsolete]'),
249 ('label', '[obsolete]'),
242 ('label', '[or]'),
250 ('label', '[or]'),
243 ('label', '[requires]'),
251 ('label', '[requires]'),
244 ('label', '[tag]'),
252 ('label', '[tag]'),
245 ('state', '[stale]'),
253 ('state', '[stale]'),
246 ('lang', '[lang =&gt; python]'),
254 ('lang', '[lang =&gt; python]'),
247 ('ref', '[requires =&gt; url]'),
255 ('ref', '[requires =&gt; url]'),
248 ('see', '[see =&gt; http://url.com]'),
256 ('see', '[see =&gt; http://url.com]'),
249
257
250 ]),
258 ]),
251
259
252 ], ids=no_newline_id_generator)
260 ], ids=no_newline_id_generator)
253 def test_metatag_extraction(sample, expected_tags):
261 def test_metatag_extraction(sample, expected_tags):
254 from rhodecode.lib.helpers import extract_metatags
262 from rhodecode.lib.helpers import extract_metatags
255 tags, value = extract_metatags(sample)
263 tags, value = extract_metatags(sample)
256 assert sorted(tags) == sorted(expected_tags)
264 assert sorted(tags) == sorted(expected_tags)
257
265
258
266
259 @pytest.mark.parametrize("tag_data, expected_html", [
267 @pytest.mark.parametrize("tag_data, expected_html", [
260
268
261 (('state', '[stable]'), '<div class="metatag" tag="state stable">stable</div>'),
269 (('state', '[stable]'), '<div class="metatag" tag="state stable">stable</div>'),
262 (('state', '[stale]'), '<div class="metatag" tag="state stale">stale</div>'),
270 (('state', '[stale]'), '<div class="metatag" tag="state stale">stale</div>'),
263 (('state', '[featured]'), '<div class="metatag" tag="state featured">featured</div>'),
271 (('state', '[featured]'), '<div class="metatag" tag="state featured">featured</div>'),
264 (('state', '[dev]'), '<div class="metatag" tag="state dev">dev</div>'),
272 (('state', '[dev]'), '<div class="metatag" tag="state dev">dev</div>'),
265 (('state', '[dead]'), '<div class="metatag" tag="state dead">dead</div>'),
273 (('state', '[dead]'), '<div class="metatag" tag="state dead">dead</div>'),
266
274
267 (('label', '[personal]'), '<div class="metatag" tag="label">personal</div>'),
275 (('label', '[personal]'), '<div class="metatag" tag="label">personal</div>'),
268 (('generic', '[v2.0.0]'), '<div class="metatag" tag="generic">v2.0.0</div>'),
276 (('generic', '[v2.0.0]'), '<div class="metatag" tag="generic">v2.0.0</div>'),
269
277
270 (('lang', '[lang =&gt; JavaScript]'), '<div class="metatag" tag="lang">JavaScript</div>'),
278 (('lang', '[lang =&gt; JavaScript]'), '<div class="metatag" tag="lang">JavaScript</div>'),
271 (('lang', '[lang =&gt; C++]'), '<div class="metatag" tag="lang">C++</div>'),
279 (('lang', '[lang =&gt; C++]'), '<div class="metatag" tag="lang">C++</div>'),
272 (('lang', '[lang =&gt; C#]'), '<div class="metatag" tag="lang">C#</div>'),
280 (('lang', '[lang =&gt; C#]'), '<div class="metatag" tag="lang">C#</div>'),
273 (('lang', '[lang =&gt; Delphi/Object]'), '<div class="metatag" tag="lang">Delphi/Object</div>'),
281 (('lang', '[lang =&gt; Delphi/Object]'), '<div class="metatag" tag="lang">Delphi/Object</div>'),
274 (('lang', '[lang =&gt; Objective-C]'), '<div class="metatag" tag="lang">Objective-C</div>'),
282 (('lang', '[lang =&gt; Objective-C]'), '<div class="metatag" tag="lang">Objective-C</div>'),
275 (('lang', '[lang =&gt; .NET]'), '<div class="metatag" tag="lang">.NET</div>'),
283 (('lang', '[lang =&gt; .NET]'), '<div class="metatag" tag="lang">.NET</div>'),
276
284
277 (('license', '[license =&gt; BSD 3-clause]'), '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/BSD 3-clause">BSD 3-clause</a></div>'),
285 (('license', '[license =&gt; BSD 3-clause]'), '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/BSD 3-clause">BSD 3-clause</a></div>'),
278 (('license', '[license =&gt; GPLv3]'), '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/GPLv3">GPLv3</a></div>'),
286 (('license', '[license =&gt; GPLv3]'), '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/GPLv3">GPLv3</a></div>'),
279 (('license', '[license =&gt; MIT]'), '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/MIT">MIT</a></div>'),
287 (('license', '[license =&gt; MIT]'), '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/MIT">MIT</a></div>'),
280 (('license', '[license =&gt; AGPLv3]'), '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/AGPLv3">AGPLv3</a></div>'),
288 (('license', '[license =&gt; AGPLv3]'), '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/AGPLv3">AGPLv3</a></div>'),
281
289
282 (('ref', '[requires =&gt; RepoName]'), '<div class="metatag" tag="ref requires">requires: <a href="/RepoName">RepoName</a></div>'),
290 (('ref', '[requires =&gt; RepoName]'), '<div class="metatag" tag="ref requires">requires: <a href="/RepoName">RepoName</a></div>'),
283 (('ref', '[recommends =&gt; GroupName]'), '<div class="metatag" tag="ref recommends">recommends: <a href="/GroupName">GroupName</a></div>'),
291 (('ref', '[recommends =&gt; GroupName]'), '<div class="metatag" tag="ref recommends">recommends: <a href="/GroupName">GroupName</a></div>'),
284 (('ref', '[conflicts =&gt; SomeName]'), '<div class="metatag" tag="ref conflicts">conflicts: <a href="/SomeName">SomeName</a></div>'),
292 (('ref', '[conflicts =&gt; SomeName]'), '<div class="metatag" tag="ref conflicts">conflicts: <a href="/SomeName">SomeName</a></div>'),
285 (('ref', '[base =&gt; SomeName]'), '<div class="metatag" tag="ref base">base: <a href="/SomeName">SomeName</a></div>'),
293 (('ref', '[base =&gt; SomeName]'), '<div class="metatag" tag="ref base">base: <a href="/SomeName">SomeName</a></div>'),
286
294
287 (('see', '[see =&gt; http://rhodecode.com]'), '<div class="metatag" tag="see">see: http://rhodecode.com </div>'),
295 (('see', '[see =&gt; http://rhodecode.com]'), '<div class="metatag" tag="see">see: http://rhodecode.com </div>'),
288
296
289 (('url', '[url =&gt; [linkName](https://rhodecode.com)]'), '<div class="metatag" tag="url"> <a href="https://rhodecode.com">linkName</a> </div>'),
297 (('url', '[url =&gt; [linkName](https://rhodecode.com)]'), '<div class="metatag" tag="url"> <a href="https://rhodecode.com">linkName</a> </div>'),
290 (('url', '[url =&gt; [example link](https://rhodecode.com)]'), '<div class="metatag" tag="url"> <a href="https://rhodecode.com">example link</a> </div>'),
298 (('url', '[url =&gt; [example link](https://rhodecode.com)]'), '<div class="metatag" tag="url"> <a href="https://rhodecode.com">example link</a> </div>'),
291 (('url', '[url =&gt; [v1.0.0](https://rhodecode.com)]'), '<div class="metatag" tag="url"> <a href="https://rhodecode.com">v1.0.0</a> </div>'),
299 (('url', '[url =&gt; [v1.0.0](https://rhodecode.com)]'), '<div class="metatag" tag="url"> <a href="https://rhodecode.com">v1.0.0</a> </div>'),
292
300
293 ])
301 ])
294 def test_metatags_stylize(tag_data, expected_html):
302 def test_metatags_stylize(tag_data, expected_html):
295 from rhodecode.lib.helpers import style_metatag
303 from rhodecode.lib.helpers import style_metatag
296 tag_type,value = tag_data
304 tag_type,value = tag_data
297 assert style_metatag(tag_type, value) == expected_html
305 assert style_metatag(tag_type, value) == expected_html
298
306
299
307
300 @pytest.mark.parametrize("tmpl_url, email, expected", [
308 @pytest.mark.parametrize("tmpl_url, email, expected", [
301 ('http://test.com/{email}', 'test@foo.com', 'http://test.com/test@foo.com'),
309 ('http://test.com/{email}', 'test@foo.com', 'http://test.com/test@foo.com'),
302
310
303 ('http://test.com/{md5email}', 'test@foo.com', 'http://test.com/3cb7232fcc48743000cb86d0d5022bd9'),
311 ('http://test.com/{md5email}', 'test@foo.com', 'http://test.com/3cb7232fcc48743000cb86d0d5022bd9'),
304 ('http://test.com/{md5email}', 'testąć@foo.com', 'http://test.com/978debb907a3c55cd741872ab293ef30'),
312 ('http://test.com/{md5email}', 'testąć@foo.com', 'http://test.com/978debb907a3c55cd741872ab293ef30'),
305
313
306 ('http://testX.com/{md5email}?s={size}', 'test@foo.com', 'http://testX.com/3cb7232fcc48743000cb86d0d5022bd9?s=24'),
314 ('http://testX.com/{md5email}?s={size}', 'test@foo.com', 'http://testX.com/3cb7232fcc48743000cb86d0d5022bd9?s=24'),
307 ('http://testX.com/{md5email}?s={size}', 'testąć@foo.com', 'http://testX.com/978debb907a3c55cd741872ab293ef30?s=24'),
315 ('http://testX.com/{md5email}?s={size}', 'testąć@foo.com', 'http://testX.com/978debb907a3c55cd741872ab293ef30?s=24'),
308
316
309 ('{scheme}://{netloc}/{md5email}/{size}', 'test@foo.com', 'https://server.com/3cb7232fcc48743000cb86d0d5022bd9/24'),
317 ('{scheme}://{netloc}/{md5email}/{size}', 'test@foo.com', 'https://server.com/3cb7232fcc48743000cb86d0d5022bd9/24'),
310 ('{scheme}://{netloc}/{md5email}/{size}', 'testąć@foo.com', 'https://server.com/978debb907a3c55cd741872ab293ef30/24'),
318 ('{scheme}://{netloc}/{md5email}/{size}', 'testąć@foo.com', 'https://server.com/978debb907a3c55cd741872ab293ef30/24'),
311
319
312 ('http://test.com/{email}', 'testąć@foo.com', 'http://test.com/testąć@foo.com'),
320 ('http://test.com/{email}', 'testąć@foo.com', 'http://test.com/testąć@foo.com'),
313 ('http://test.com/{email}?size={size}', 'test@foo.com', 'http://test.com/test@foo.com?size=24'),
321 ('http://test.com/{email}?size={size}', 'test@foo.com', 'http://test.com/test@foo.com?size=24'),
314 ('http://test.com/{email}?size={size}', 'testąć@foo.com', 'http://test.com/testąć@foo.com?size=24'),
322 ('http://test.com/{email}?size={size}', 'testąć@foo.com', 'http://test.com/testąć@foo.com?size=24'),
315 ])
323 ])
316 def test_gravatar_url_builder(tmpl_url, email, expected, request_stub):
324 def test_gravatar_url_builder(tmpl_url, email, expected, request_stub):
317 from rhodecode.lib.helpers import gravatar_url
325 from rhodecode.lib.helpers import gravatar_url
318
326
319 def fake_tmpl_context(_url):
327 def fake_tmpl_context(_url):
320 _c = AttributeDict()
328 _c = AttributeDict()
321 _c.visual = AttributeDict()
329 _c.visual = AttributeDict()
322 _c.visual.use_gravatar = True
330 _c.visual.use_gravatar = True
323 _c.visual.gravatar_url = _url
331 _c.visual.gravatar_url = _url
324 return _c
332 return _c
325
333
326 # mock pyramid.threadlocals
334 # mock pyramid.threadlocals
327 def fake_get_current_request():
335 def fake_get_current_request():
328 request_stub.scheme = 'https'
336 request_stub.scheme = 'https'
329 request_stub.host = 'server.com'
337 request_stub.host = 'server.com'
330
338
331 request_stub._call_context = fake_tmpl_context(tmpl_url)
339 request_stub._call_context = fake_tmpl_context(tmpl_url)
332 return request_stub
340 return request_stub
333
341
334 with mock.patch('rhodecode.lib.helpers.get_current_request',
342 with mock.patch('rhodecode.lib.helpers.get_current_request',
335 fake_get_current_request):
343 fake_get_current_request):
336
344
337 grav = gravatar_url(email_address=email, size=24)
345 grav = gravatar_url(email_address=email, size=24)
338 assert grav == expected
346 assert grav == expected
339
347
340
348
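# Illustrative only: a minimal sketch of the template expansion the fixtures
# above expect. The real rhodecode.lib.helpers.gravatar_url also consults
# c.visual.use_gravatar and the current request for {scheme}/{netloc}; the
# helper name below is hypothetical and not part of the test suite.
def _example_expand_gravatar_tmpl(tmpl, email, size=24,
                                  scheme='https', netloc='server.com'):
    import hashlib
    # gravatar-style md5 hash of the (utf-8 encoded) e-mail address
    email_bytes = email.encode('utf-8') if isinstance(email, type(u'')) else email
    md5email = hashlib.md5(email_bytes).hexdigest()
    return tmpl.format(scheme=scheme, netloc=netloc, email=email,
                       md5email=md5email, size=size)

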
@pytest.mark.parametrize(
    "email, first_name, last_name, expected_initials, expected_color", [

        ('test@rhodecode.com', '', '', 'TR', '#8a994d'),
        ('marcin.kuzminski@rhodecode.com', '', '', 'MK', '#6559b3'),
        # special cases of email
        ('john.van.dam@rhodecode.com', '', '', 'JD', '#526600'),
        ('Guido.van.Rossum@rhodecode.com', '', '', 'GR', '#990052'),
        ('Guido.van.Rossum@rhodecode.com', 'Guido', 'Van Rossum', 'GR', '#990052'),

        ('rhodecode+Guido.van.Rossum@rhodecode.com', '', '', 'RR', '#46598c'),
        ('pclouds@rhodecode.com', 'Nguyα»…n ThΓ‘i', 'Tgọc Duy', 'ND', '#665200'),

        ('john-brown@foo.com', '', '', 'JF', '#73006b'),
        ('admin@rhodecode.com', 'Marcin', 'Kuzminski', 'MK', '#104036'),
        # partials
        ('admin@rhodecode.com', 'Marcin', '', 'MR', '#104036'),  # fn+email
        ('admin@rhodecode.com', '', 'Kuzminski', 'AK', '#104036'),  # em+ln
        # non-ascii
        ('admin@rhodecode.com', 'Marcin', 'Śuzminski', 'MS', '#104036'),
        ('marcin.Ε›uzminski@rhodecode.com', '', '', 'MS', '#73000f'),

        # special cases, LDAP can provide those...
        ('admin@', 'Marcin', 'Śuzminski', 'MS', '#aa00ff'),
        ('marcin.Ε›uzminski', '', '', 'MS', '#402020'),
        ('null', '', '', 'NL', '#8c4646'),
        ('some.@abc.com', 'some', '', 'SA', '#664e33')
    ])
def test_initials_gravatar_pick_of_initials_and_color_algo(
        email, first_name, last_name, expected_initials, expected_color):
    instance = InitialsGravatar(email, first_name, last_name)
    assert instance.get_initials() == expected_initials
    assert instance.str2color(email) == expected_color


def test_initials_gravatar_mapping_algo():
    pos = set()
    instance = InitialsGravatar('', '', '')
    iterations = 0

    variations = []
    for letter1 in string.ascii_letters:
        for letter2 in string.ascii_letters[::-1][:10]:
            for letter3 in string.ascii_letters[:10]:
                variations.append(
                    '%s@rhodecode.com' % (letter1 + letter2 + letter3))

    max_variations = 4096
    for email in variations[:max_variations]:
        iterations += 1
        pos.add(
            instance.pick_color_bank_index(email,
                                           instance.get_color_bank()))

    # we assume that all 256 possible positions are matched within a
    # reasonable number of different email addresses
    assert len(pos) == 256
    assert iterations == max_variations


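# Illustrative only: the assertions above say that pick_color_bank_index()
# spreads arbitrary e-mail addresses over exactly 256 color-bank slots in a
# deterministic way. A rough sketch of such a bucketing scheme (the actual
# InitialsGravatar implementation may differ):
def _example_pick_color_bank_index(text, color_bank_size=256):
    import hashlib
    digest = hashlib.md5(text.encode('utf-8')).hexdigest()
    return int(digest, 16) % color_bank_size

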
@pytest.mark.parametrize("tmpl, repo_name, overrides, prefix, expected", [
    (Repository.DEFAULT_CLONE_URI, 'group/repo1', {}, '', 'http://vps1:8000/group/repo1'),
    (Repository.DEFAULT_CLONE_URI, 'group/repo1', {'user': 'marcink'}, '', 'http://marcink@vps1:8000/group/repo1'),
    (Repository.DEFAULT_CLONE_URI, 'group/repo1', {}, '/rc', 'http://vps1:8000/rc/group/repo1'),
    (Repository.DEFAULT_CLONE_URI, 'group/repo1', {'user': 'user'}, '/rc', 'http://user@vps1:8000/rc/group/repo1'),
    (Repository.DEFAULT_CLONE_URI, 'group/repo1', {'user': 'marcink'}, '/rc', 'http://marcink@vps1:8000/rc/group/repo1'),
    (Repository.DEFAULT_CLONE_URI, 'group/repo1', {'user': 'user'}, '/rc/', 'http://user@vps1:8000/rc/group/repo1'),
    (Repository.DEFAULT_CLONE_URI, 'group/repo1', {'user': 'marcink'}, '/rc/', 'http://marcink@vps1:8000/rc/group/repo1'),
    ('{scheme}://{user}@{netloc}/_{repoid}', 'group/repo1', {}, '', 'http://vps1:8000/_23'),
    ('{scheme}://{user}@{netloc}/_{repoid}', 'group/repo1', {'user': 'marcink'}, '', 'http://marcink@vps1:8000/_23'),
    ('http://{user}@{netloc}/_{repoid}', 'group/repo1', {'user': 'marcink'}, '', 'http://marcink@vps1:8000/_23'),
    ('http://{netloc}/_{repoid}', 'group/repo1', {'user': 'marcink'}, '', 'http://vps1:8000/_23'),
    ('https://{user}@proxy1.server.com/{repo}', 'group/repo1', {'user': 'marcink'}, '', 'https://marcink@proxy1.server.com/group/repo1'),
    ('https://{user}@proxy1.server.com/{repo}', 'group/repo1', {}, '', 'https://proxy1.server.com/group/repo1'),
    ('https://proxy1.server.com/{user}/{repo}', 'group/repo1', {'user': 'marcink'}, '', 'https://proxy1.server.com/marcink/group/repo1'),
])
def test_clone_url_generator(tmpl, repo_name, overrides, prefix, expected):
    from rhodecode.lib.utils2 import get_clone_url

    class RequestStub(object):
        def request_url(self, name):
            return 'http://vps1:8000' + prefix

        def route_url(self, name):
            return self.request_url(name)

    clone_url = get_clone_url(
        request=RequestStub(),
        uri_tmpl=tmpl,
        repo_name=repo_name, repo_id=23, **overrides)
    assert clone_url == expected


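# Illustrative only: the clone-URL templates above can reference {scheme},
# {user}, {netloc}, {repo} and {repoid}, and the expected values show that an
# absent user also drops the dangling '@'. A hedged sketch of that expansion
# (the real logic lives in rhodecode.lib.utils2.get_clone_url; this helper
# name and the '://@' cleanup are assumptions for demonstration):
def _example_expand_clone_url(tmpl, repo_name, repo_id,
                              scheme='http', netloc='vps1:8000', user=''):
    url = tmpl.format(scheme=scheme, user=user, netloc=netloc,
                      repo=repo_name, repoid=repo_id)
    # an empty user would otherwise leave a dangling '@' in the authority part
    return url.replace('://@', '://')

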
def _quick_url(text, tmpl="""<a class="revision-link" href="%s">%s</a>""", url_=None):
    """
    Changes `some text url[foo]` => `some text <a href="/">foo</a>`

    :param text:
    """
    import re
    # quickly change expected url[] into a link
    URL_PAT = re.compile(r'(?:url\[)(.+?)(?:\])')

    def url_func(match_obj):
        _url = match_obj.groups()[0]
        return tmpl % (url_ or '/some-url', _url)
    return URL_PAT.sub(url_func, text)


@pytest.mark.parametrize("sample, expected", [
    ("",
     ""),
    ("git-svn-id: https://svn.apache.org/repos/asf/libcloud/trunk@1441655 13f79535-47bb-0310-9956-ffa450edef68",
     "git-svn-id: https://svn.apache.org/repos/asf/libcloud/trunk@1441655 13f79535-47bb-0310-9956-ffa450edef68"),
    ("from rev 000000000000",
     "from rev url[000000000000]"),
    ("from rev 000000000000123123 also rev 000000000000",
     "from rev url[000000000000123123] also rev url[000000000000]"),
    ("this should-000 00",
     "this should-000 00"),
    ("longtextffffffffff rev 123123123123",
     "longtextffffffffff rev url[123123123123]"),
    ("rev ffffffffffffffffffffffffffffffffffffffffffffffffff",
     "rev ffffffffffffffffffffffffffffffffffffffffffffffffff"),
    ("ffffffffffff some text traalaa",
     "url[ffffffffffff] some text traalaa"),
    ("""Multi line
123123123123
some text 123123123123
sometimes !
""",
     """Multi line
url[123123123123]
some text url[123123123123]
sometimes !
""")
], ids=no_newline_id_generator)
def test_urlify_commits(sample, expected):
    def fake_url(self, *args, **kwargs):
        return '/some-url'

    expected = _quick_url(expected)

    with mock.patch('rhodecode.lib.helpers.route_url', fake_url):
        from rhodecode.lib.helpers import urlify_commits
        assert urlify_commits(sample, 'repo_name') == expected


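# Illustrative only: the fixtures above imply that urlify_commits() links
# word-bounded hexadecimal runs of roughly 12-40 characters, while plain
# words, dash-joined tokens and over-long runs stay untouched. A rough
# approximation of that matching rule (the real helper builds proper
# revision links via route_url; pattern and helper name are assumptions):
def _example_mark_commit_hashes(text):
    import re
    hash_pat = re.compile(r'(?:^|(?<=\s))([0-9a-f]{12,40})(?=\s|$)')
    return hash_pat.sub(lambda m: 'url[%s]' % m.group(1), text)

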
@pytest.mark.parametrize("sample, expected, url_", [
    ("",
     "",
     ""),
    ("https://svn.apache.org/repos",
     "url[https://svn.apache.org/repos]",
     "https://svn.apache.org/repos"),
    ("http://svn.apache.org/repos",
     "url[http://svn.apache.org/repos]",
     "http://svn.apache.org/repos"),
    ("from rev a also rev http://google.com",
     "from rev a also rev url[http://google.com]",
     "http://google.com"),
    ("""Multi line
https://foo.bar.com
some text lalala""",
     """Multi line
url[https://foo.bar.com]
some text lalala""",
     "https://foo.bar.com")
], ids=no_newline_id_generator)
def test_urlify_test(sample, expected, url_):
    from rhodecode.lib.helpers import urlify_text
    expected = _quick_url(expected, tmpl="""<a href="%s">%s</a>""", url_=url_)
    assert urlify_text(sample) == expected


@pytest.mark.parametrize("test, expected", [
    ("", None),
    ("/_2", '2'),
    ("_2", '2'),
    ("/_2/", '2'),
    ("_2/", '2'),

    ("/_21", '21'),
    ("_21", '21'),
    ("/_21/", '21'),
    ("_21/", '21'),

    ("/_21/foobar", '21'),
    ("_21/121", '21'),
    ("/_21/_12", '21'),
    ("_21/rc/foo", '21'),
])
def test_get_repo_by_id(test, expected):
    from rhodecode.model.repo import RepoModel
    _test = RepoModel()._extract_id_from_repo_name(test)
    assert _test == expected


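# Illustrative only: per the fixtures above, _extract_id_from_repo_name()
# recognises "_<number>" at the start of a path (with or without surrounding
# slashes) and returns the number as a string, otherwise None. A hedged
# sketch of that rule (helper name is hypothetical):
def _example_extract_repo_id(path):
    import re
    match = re.match(r'/?_(\d+)', path)
    return match.group(1) if match else None

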
@pytest.mark.parametrize("test_repo_name, repo_type", [
    ("test_repo_1", None),
    ("repo_group/foobar", None),
    ("test_non_asci_Δ…Δ‡Δ™", None),
    (u"test_non_asci_unicode_Δ…Δ‡Δ™", None),
])
def test_invalidation_context(pylonsapp, test_repo_name, repo_type):
    from beaker.cache import cache_region
    from rhodecode.lib import caches
    from rhodecode.model.db import CacheKey

    @cache_region('long_term')
    def _dummy_func(cache_key):
        return 'result'

    invalidator_context = CacheKey.repo_context_cache(
        _dummy_func, test_repo_name, 'repo')

    with invalidator_context as context:
        invalidated = context.invalidate()
        result = context.compute()

    assert invalidated is True
    assert 'result' == result
    assert isinstance(context, caches.FreshRegionCache)

    assert 'InvalidationContext' in repr(invalidator_context)

    with invalidator_context as context:
        context.invalidate()
        result = context.compute()

    assert 'result' == result
    assert isinstance(context, caches.ActiveRegionCache)


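# Reading of the assertions above: on the first pass the context is a
# caches.FreshRegionCache and compute() re-runs the decorated function, while
# the second pass yields a caches.ActiveRegionCache and the previously
# computed 'result' is served from cache, suggesting that a successful
# compute() marks the cache key valid again.

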
def test_invalidation_context_exception_in_compute(pylonsapp):
    from rhodecode.model.db import CacheKey
    from beaker.cache import cache_region

    @cache_region('long_term')
    def _dummy_func(cache_key):
        # simulate a failure raised inside the cached computation
        raise Exception('ups')

    invalidator_context = CacheKey.repo_context_cache(
        _dummy_func, 'test_repo_2', 'repo')

    with pytest.raises(Exception):
        with invalidator_context as context:
            context.invalidate()
            context.compute()


@pytest.mark.parametrize('execution_number', range(5))
def test_cache_invalidation_race_condition(execution_number, pylonsapp):
    import time
    from beaker.cache import cache_region
    from rhodecode.model.db import CacheKey

    if CacheKey.metadata.bind.url.get_backend_name() == "mysql":
        reason = (
            'Fails on MariaDB due to some locking issues. Investigation'
            ' needed')
        pytest.xfail(reason=reason)

    @run_test_concurrently(25)
    def test_create_and_delete_cache_keys():
        time.sleep(0.2)

        @cache_region('long_term')
        def _dummy_func(cache_key):
            return 'result'

        invalidator_context = CacheKey.repo_context_cache(
            _dummy_func, 'test_repo_1', 'repo')

        with invalidator_context as context:
            context.invalidate()
            context.compute()

        CacheKey.set_invalidate('test_repo_1', delete=True)

    test_create_and_delete_cache_keys()