code: fixes to escape characters improperly used
r5149:ef80e1c5 default
# Copyright (C) 2011-2023 RhodeCode GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License, version 3
# (only), as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This program is dual-licensed. If you wish to learn more about the
# RhodeCode Enterprise Edition, including its added features, Support services,
# and proprietary license terms, please see https://rhodecode.com/licenses/

20 """
20 """
21 Set of diffing helpers, previously part of vcs
21 Set of diffing helpers, previously part of vcs
22 """
22 """
23 import dataclasses
23 import dataclasses
24 import os
24 import os
25 import re
25 import re
26 import bz2
26 import bz2
27 import gzip
27 import gzip
28 import time
28 import time
29
29
30 import difflib
30 import difflib
31 import logging
31 import logging
32 import pickle
32 import pickle
33 from itertools import tee
33 from itertools import tee
34
34
35 from rhodecode.lib.vcs.exceptions import VCSError
35 from rhodecode.lib.vcs.exceptions import VCSError
36 from rhodecode.lib.vcs.nodes import FileNode, SubModuleNode
36 from rhodecode.lib.vcs.nodes import FileNode, SubModuleNode
37 from rhodecode.lib.vcs.backends import base
37 from rhodecode.lib.vcs.backends import base
38 from rhodecode.lib.str_utils import safe_str
38 from rhodecode.lib.str_utils import safe_str
39
39
40 log = logging.getLogger(__name__)
40 log = logging.getLogger(__name__)
41
41
# define max context; a file with more than this number of lines is unusable
# in a browser anyway
MAX_CONTEXT = 20 * 1024
DEFAULT_CONTEXT = 3


def get_diff_context(request):
    return MAX_CONTEXT if request.GET.get('fullcontext', '') == '1' else DEFAULT_CONTEXT


def get_diff_whitespace_flag(request):
    return request.GET.get('ignorews', '') == '1'
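
# Usage sketch (illustrative; `request` stands for the incoming web request
# object with a GET multidict, as passed in by the view layer):
#
#   context = get_diff_context(request)    # DEFAULT_CONTEXT, or MAX_CONTEXT
#                                          # when ?fullcontext=1 was passed
#   ignore_ws = get_diff_whitespace_flag(request)   # True when ?ignorews=1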


@dataclasses.dataclass
class OPS:
    ADD: str = 'A'
    MOD: str = 'M'
    DEL: str = 'D'


@dataclasses.dataclass
class DiffLineNumber:
    old: int | None
    new: int | None

    def __iter__(self):
        yield self.old
        yield self.new
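
# Illustrative note: `__iter__` above makes DiffLineNumber unpackable like a
# tuple, e.g.:
#
#   old, new = DiffLineNumber(old=10, new=None)
#   # old == 10, new is None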


def get_gitdiff(filenode_old, filenode_new, ignore_whitespace=True, context=3):
    """
    Returns git-style diff between given ``filenode_old`` and ``filenode_new``.

    :param ignore_whitespace: ignore whitespace in the diff
    """
    # make sure we pass in default context
    context = context or 3
    # protect against IntOverflow when passing HUGE context
    if context > MAX_CONTEXT:
        context = MAX_CONTEXT

    submodules = [o for o in [filenode_new, filenode_old] if isinstance(o, SubModuleNode)]
    if submodules:
        return ''

    for filenode in (filenode_old, filenode_new):
        if not isinstance(filenode, FileNode):
            raise VCSError(f"Given object should be FileNode object, not {filenode.__class__}")

    repo = filenode_new.commit.repository
    old_commit = filenode_old.commit or repo.EMPTY_COMMIT
    new_commit = filenode_new.commit

    vcs_gitdiff = repo.get_diff(
        old_commit, new_commit, filenode_new.path,
        ignore_whitespace, context, path1=filenode_old.path)
    return vcs_gitdiff
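
# Usage sketch (illustrative; `old_node` and `new_node` are FileNode objects
# taken from two commits of the same repository, not defined in this module):
#
#   raw_diff = get_gitdiff(old_node, new_node,
#                          ignore_whitespace=False, context=5)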

NEW_FILENODE = 1
DEL_FILENODE = 2
MOD_FILENODE = 3
RENAMED_FILENODE = 4
COPIED_FILENODE = 5
CHMOD_FILENODE = 6
BIN_FILENODE = 7


class LimitedDiffContainer(object):

    def __init__(self, diff_limit: int, cur_diff_size, diff):
        self.diff = diff
        self.diff_limit = diff_limit
        self.cur_diff_size = cur_diff_size

    def __getitem__(self, key):
        return self.diff.__getitem__(key)

    def __iter__(self):
        yield from self.diff


class Action(object):
    """
    Contains constants for the action value of the lines in a parsed diff.
    """

    ADD = 'add'
    DELETE = 'del'
    UNMODIFIED = 'unmod'

    CONTEXT = 'context'
    OLD_NO_NL = 'old-no-nl'
    NEW_NO_NL = 'new-no-nl'

class DiffProcessor(object):
    """
    Give it a unified or git diff, and it returns a list of the files that were
    mentioned in the diff together with a dict of meta information that
    can be used to render it in an HTML template.

    .. note:: Unicode handling

       The original diffs are a byte sequence and can contain filenames
       in mixed encodings. This class generally returns `str` objects
       since the result is intended for presentation to the user.

    """
    _chunk_re = re.compile(br'^@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@(.*)')
    _newline_marker = re.compile(br'^\\ No newline at end of file')

    # used for inline highlighter word split
    _token_re = re.compile(br'()(&gt;|&lt;|&amp;|\W+?)')
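
    # Illustrative note: the word-split tokens include `&gt;`, `&lt;` and
    # `&amp;` because by this point the diff text has already been
    # HTML-escaped by `_escaper`. `re.split` keeps the groups, e.g.:
    #
    #   DiffProcessor._token_re.split(b'foo bar')
    #   # -> [b'foo', b'', b' ', b'bar']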

    # collapse ranges of commits over given number
    _collapse_commits_over = 5

    def __init__(self, diff: base.Diff, diff_format='gitdiff', diff_limit: int = 0,
                 file_limit: int = 0, show_full_diff=True):
        """
        :param diff: A `Diff` object representing a diff from a vcs backend
        :param diff_format: format of diff passed, `udiff` or `gitdiff`
        :param diff_limit: size of a diff that is considered "big"; once
            exceeded, the cut-off is triggered. Set to None to show
            the full diff
        :param file_limit: per-file size cut-off, checked against each file
            chunk the same way `diff_limit` is checked against the whole diff
        :param show_full_diff: when True, the diff and file limits are not
            enforced
        """
        self._diff = diff
        self._format = diff_format
        self.adds = 0
        self.removes = 0
        # calculate diff size
        self.diff_limit = diff_limit
        self.file_limit = file_limit
        self.show_full_diff = show_full_diff
        self.cur_diff_size = 0
        self.parsed = False
        self.parsed_diff = []

        log.debug('Initialized DiffProcessor with %s mode', diff_format)
        self.differ = self._highlight_line_udiff
        self._parser = self._new_parse_gitdiff

        if diff_format == 'gitdiff':
            self.differ = self._highlight_line_difflib
            self._parser = self._parse_gitdiff
            raise DeprecationWarning('gitdiff usage is deprecated')
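
    # Usage sketch (illustrative; `vcs_diff` is a `base.Diff` obtained from a
    # repository backend. Note the default diff_format='gitdiff' path is
    # deprecated and raises, so callers pass another format, e.g. 'newdiff'):
    #
    #   processor = DiffProcessor(vcs_diff, diff_format='newdiff',
    #                             diff_limit=1024 * 1024,
    #                             file_limit=1024 * 1024,
    #                             show_full_diff=False)
    #   parsed = processor.prepare()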

    def _copy_iterator(self):
        """
        make a fresh copy of the generator; we should not iterate through
        the original, as it's needed for repeated operations on
        this instance of DiffProcessor
        """
        self.__udiff, iterator_copy = tee(self.__udiff)
        return iterator_copy

    def _escaper(self, diff_string):
        """
        Escapes special HTML chars in the diff and checks the diff limit

        :param diff_string:
        """
        self.cur_diff_size += len(diff_string)

        if not self.show_full_diff and (self.cur_diff_size > self.diff_limit):
            raise DiffLimitExceeded('Diff Limit Exceeded')

        return diff_string \
            .replace(b'&', b'&amp;')\
            .replace(b'<', b'&lt;')\
            .replace(b'>', b'&gt;')
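
    # Illustrative sketch of the escaping (assumes a DiffProcessor instance
    # `proc` built with show_full_diff=True, so no limit is enforced here):
    #
    #   proc._escaper(b'if a < b && c > d:')
    #   # -> b'if a &lt; b &amp;&amp; c &gt; d:'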

    def _line_counter(self, diff_line):
        """
        Checks each line and bumps total adds/removes for this diff

        :param diff_line:
        """
        if diff_line.startswith(b'+') and not diff_line.startswith(b'+++'):
            self.adds += 1
        elif diff_line.startswith(b'-') and not diff_line.startswith(b'---'):
            self.removes += 1
        return diff_line

    def _highlight_line_difflib(self, line, next_):
        """
        Highlight inline changes in both lines.
        """

        if line['action'] == Action.DELETE:
            old, new = line, next_
        else:
            old, new = next_, line

        oldwords = self._token_re.split(old['line'])
        newwords = self._token_re.split(new['line'])
        sequence = difflib.SequenceMatcher(None, oldwords, newwords)

        oldfragments, newfragments = [], []
        for tag, i1, i2, j1, j2 in sequence.get_opcodes():
            # the split tokens are bytes; join them back and coerce to str
            # before wrapping them in the <del>/<ins> markup
            oldfrag = safe_str(b''.join(oldwords[i1:i2]))
            newfrag = safe_str(b''.join(newwords[j1:j2]))
            if tag != 'equal':
                if oldfrag:
                    oldfrag = f'<del>{oldfrag}</del>'
                if newfrag:
                    newfrag = f'<ins>{newfrag}</ins>'
            oldfragments.append(oldfrag)
            newfragments.append(newfrag)

        old['line'] = "".join(oldfragments)
        new['line'] = "".join(newfragments)

    def _highlight_line_udiff(self, line, next_):
        """
        Highlight inline changes in both lines.
        """
        start = 0
        limit = min(len(line['line']), len(next_['line']))
        while start < limit and line['line'][start] == next_['line'][start]:
            start += 1
        end = -1
        limit -= start
        while -end <= limit and line['line'][end] == next_['line'][end]:
            end -= 1
        end += 1
        if start or end:
            def do(l):
                last = end + len(l['line'])
                if l['action'] == Action.ADD:
                    tag = 'ins'
                else:
                    tag = 'del'
                l['line'] = f"{l['line'][:start]}<{tag}>{l['line'][start:last]}</{tag}>{l['line'][last:]}"
            do(line)
            do(next_)
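
    # Illustrative sketch: only the differing middle of two lines sharing a
    # common prefix and suffix gets wrapped (line dicts as parsed by this
    # class, with str content assumed):
    #
    #   line  = {'action': Action.DELETE, 'line': 'color = "red"'}
    #   next_ = {'action': Action.ADD,    'line': 'color = "blue"'}
    #   self._highlight_line_udiff(line, next_)
    #   # line['line']  -> 'color = "<del>red</del>"'
    #   # next_['line'] -> 'color = "<ins>blue</ins>"'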

    def _clean_line(self, line, command: str):
        if command in ['+', '-', ' ']:
            # only modify the line if it's actually a diff thing
            line = line[1:]
        return line

    def _parse_gitdiff(self, inline_diff=True):
        _files = []

        def diff_container(arg):
            return arg

        for chunk in self._diff.chunks():
            head = chunk.header

            diff = map(self._escaper, self.diff_splitter(chunk.diff))
            raw_diff = chunk.raw
            limited_diff = False
            exceeds_limit = False

            op = None
            stats = {
                'added': 0,
                'deleted': 0,
                'binary': False,
                'ops': {},
            }

            if head['deleted_file_mode']:
                op = OPS.DEL
                stats['binary'] = True
                stats['ops'][DEL_FILENODE] = 'deleted file'

            elif head['new_file_mode']:
                op = OPS.ADD
                stats['binary'] = True
                stats['ops'][NEW_FILENODE] = f"new file {safe_str(head['new_file_mode'])}"
            else:  # modify operation, can be: copy, rename or chmod

                # CHMOD
                if head['new_mode'] and head['old_mode']:
                    op = OPS.MOD
                    stats['binary'] = True
                    stats['ops'][CHMOD_FILENODE] = f"modified file chmod {safe_str(head['old_mode'])} => {safe_str(head['new_mode'])}"
                # RENAME
                if head['rename_from'] != head['rename_to']:
                    op = OPS.MOD
                    stats['binary'] = True
                    stats['ops'][RENAMED_FILENODE] = f"file renamed from {safe_str(head['rename_from'])} to {safe_str(head['rename_to'])}"
                # COPY
                if head.get('copy_from') and head.get('copy_to'):
                    op = OPS.MOD
                    stats['binary'] = True
                    stats['ops'][COPIED_FILENODE] = f"file copied from {safe_str(head['copy_from'])} to {safe_str(head['copy_to'])}"

            # If our new parsed headers didn't match anything fall back to
            # old-style detection
            if op is None:
                if not head['a_file'] and head['b_file']:
                    op = OPS.ADD
                    stats['binary'] = True
                    stats['ops'][NEW_FILENODE] = 'new file'

                elif head['a_file'] and not head['b_file']:
                    op = OPS.DEL
                    stats['binary'] = True
                    stats['ops'][DEL_FILENODE] = 'deleted file'

            # it's neither ADD nor DELETE
            if op is None:
                op = OPS.MOD
                stats['binary'] = True
                stats['ops'][MOD_FILENODE] = 'modified file'

            # a real non-binary diff
            if head['a_file'] or head['b_file']:
                try:
                    raw_diff, chunks, _stats = self._parse_lines(diff)
                    stats['binary'] = False
                    stats['added'] = _stats[0]
                    stats['deleted'] = _stats[1]
                    # explicit mark that it's a modified file
                    if op == OPS.MOD:
                        stats['ops'][MOD_FILENODE] = 'modified file'
                    exceeds_limit = len(raw_diff) > self.file_limit

                    # changed from the _escaper function so we validate the
                    # size of each file instead of the whole diff
                    # the diff will hide big files but still show small ones
                    # from my tests, big files are fairly safe to be parsed
                    # but the browser is the bottleneck
                    if not self.show_full_diff and exceeds_limit:
                        raise DiffLimitExceeded('File Limit Exceeded')

                except DiffLimitExceeded:
                    def diff_container(_diff):
                        return LimitedDiffContainer(self.diff_limit, self.cur_diff_size, _diff)

                    exceeds_limit = len(raw_diff) > self.file_limit
                    limited_diff = True
                    chunks = []

            else:  # GIT format binary patch, or possibly empty diff
                if head['bin_patch']:
                    # we have the operation already extracted, but we simply
                    # mark it as a diff we won't show for binary files
                    stats['ops'][BIN_FILENODE] = 'binary diff hidden'
                chunks = []

            if chunks and not self.show_full_diff and op == OPS.DEL:
                # if not in full diff mode, don't show deleted file contents
                # TODO: anderson: if the view is not too big, there is no way
                # to see the content of the file
                chunks = []

            frag = [{
                'old_lineno': '',
                'new_lineno': '',
                'action': Action.CONTEXT,
                'line': msg,
            } for _op, msg in list(stats['ops'].items())
                if _op not in [MOD_FILENODE]]

            chunks.insert(0, frag)

            _files.append({
                'filename': safe_str(head['b_path']),
                'old_revision': head['a_blob_id'],
                'new_revision': head['b_blob_id'],
                'chunks': chunks,
                'raw_diff': safe_str(raw_diff),
                'operation': op,
                'stats': stats,
                'exceeds_limit': exceeds_limit,
                'is_limited_diff': limited_diff,
            })

        def operation_sorter(info):
            return {OPS.ADD: 0, OPS.MOD: 1, OPS.DEL: 2}.get(info['operation'])

        if not inline_diff:
            return diff_container(sorted(_files, key=operation_sorter))

        # highlight inline changes
        for diff_data in _files:
            for chunk in diff_data['chunks']:
                lineiter = iter(chunk)
                try:
                    while 1:
                        line = next(lineiter)
                        if line['action'] not in (
                                Action.UNMODIFIED, Action.CONTEXT):
                            nextline = next(lineiter)
                            if nextline['action'] in ['unmod', 'context'] or \
                                    nextline['action'] == line['action']:
                                continue
                            self.differ(line, nextline)
                except StopIteration:
                    pass

        return diff_container(sorted(_files, key=operation_sorter))

    def _check_large_diff(self):
        if self.diff_limit:
            log.debug('Checking if diff exceeds current diff_limit of %s', self.diff_limit)
            if not self.show_full_diff and (self.cur_diff_size > self.diff_limit):
                raise DiffLimitExceeded(f'Diff Limit `{self.diff_limit}` Exceeded')

    # FIXME: NEWDIFFS: dan: this replaces _parse_gitdiff
    def _new_parse_gitdiff(self, inline_diff=True):
        _files = []

        # this can be overridden later to a LimitedDiffContainer type
        def diff_container(arg):
            return arg

        for chunk in self._diff.chunks():
            head = chunk.header_as_str
            log.debug('parsing diff chunk %r', chunk)

            raw_diff = chunk.raw
            limited_diff = False
            exceeds_limit = False

            op = None
            stats = {
                'added': 0,
                'deleted': 0,
                'binary': False,
                'old_mode': '',
                'new_mode': '',
                'ops': {},
            }
            if head['old_mode']:
                stats['old_mode'] = head['old_mode']
            if head['new_mode']:
                stats['new_mode'] = head['new_mode']
            if head['b_mode']:
                stats['new_mode'] = head['b_mode']

            # delete file
            if head['deleted_file_mode']:
                op = OPS.DEL
                stats['binary'] = True
                stats['ops'][DEL_FILENODE] = 'deleted file'

            # new file
            elif head['new_file_mode']:
                op = OPS.ADD
                stats['binary'] = True
                stats['old_mode'] = ''
                stats['new_mode'] = head['new_file_mode']
                stats['ops'][NEW_FILENODE] = f"new file {head['new_file_mode']}"

            # modify operation, can be: copy, rename or chmod
            else:
                # CHMOD
                if head['new_mode'] and head['old_mode']:
                    op = OPS.MOD
                    stats['binary'] = True
                    stats['ops'][CHMOD_FILENODE] = f"modified file chmod {head['old_mode']} => {head['new_mode']}"

                # RENAME
                if head['rename_from'] != head['rename_to']:
                    op = OPS.MOD
                    stats['binary'] = True
                    stats['renamed'] = (head['rename_from'], head['rename_to'])
                    stats['ops'][RENAMED_FILENODE] = f"file renamed from {head['rename_from']} to {head['rename_to']}"
                # COPY
                if head.get('copy_from') and head.get('copy_to'):
                    op = OPS.MOD
                    stats['binary'] = True
                    stats['copied'] = (head['copy_from'], head['copy_to'])
                    stats['ops'][COPIED_FILENODE] = f"file copied from {head['copy_from']} to {head['copy_to']}"

            # If our new parsed headers didn't match anything fall back to
            # old-style detection
            if op is None:
                if not head['a_file'] and head['b_file']:
                    op = OPS.ADD
                    stats['binary'] = True
                    stats['new_file'] = True
                    stats['ops'][NEW_FILENODE] = 'new file'

                elif head['a_file'] and not head['b_file']:
                    op = OPS.DEL
                    stats['binary'] = True
                    stats['ops'][DEL_FILENODE] = 'deleted file'

            # it's neither ADD nor DELETE
            if op is None:
                op = OPS.MOD
                stats['binary'] = True
                stats['ops'][MOD_FILENODE] = 'modified file'

            # a real non-binary diff
            if head['a_file'] or head['b_file']:
                # simulate splitlines, so we keep the line end part
                diff = self.diff_splitter(chunk.diff)

                # append each file to the diff size
                raw_chunk_size = len(raw_diff)

                exceeds_limit = raw_chunk_size > self.file_limit
                self.cur_diff_size += raw_chunk_size

                try:
                    # Check each file instead of the whole diff.
                    # Diff will hide big files but still show small ones.
                    # From the tests big files are fairly safe to be parsed
                    # but the browser is the bottleneck.
                    if not self.show_full_diff and exceeds_limit:
                        log.debug('File `%s` exceeds current file_limit of %s',
                                  head['b_path'], self.file_limit)
                        raise DiffLimitExceeded(f'File Limit {self.file_limit} Exceeded')

                    self._check_large_diff()

                    raw_diff, chunks, _stats = self._new_parse_lines(diff)
                    stats['binary'] = False
                    stats['added'] = _stats[0]
                    stats['deleted'] = _stats[1]
                    # explicit mark that it's a modified file
                    if op == OPS.MOD:
                        stats['ops'][MOD_FILENODE] = 'modified file'

                except DiffLimitExceeded:
                    def limited_diff_container(_diff):
                        return LimitedDiffContainer(self.diff_limit, self.cur_diff_size, _diff)

                    # re-definition of our container wrapper
                    diff_container = limited_diff_container

                    limited_diff = True
                    chunks = []

            else:  # GIT format binary patch, or possibly empty diff
                if head['bin_patch']:
                    # we have the operation already extracted, but we simply
                    # mark it as a diff we won't show for binary files
                    stats['ops'][BIN_FILENODE] = 'binary diff hidden'
                chunks = []

            # Hide content of deleted node by setting empty chunks
            if chunks and not self.show_full_diff and op == OPS.DEL:
                # if not in full diff mode, don't show deleted file contents
                # TODO: anderson: if the view is not too big, there is no way
                # to see the content of the file
                chunks = []

            frag = [
                {'old_lineno': '',
                 'new_lineno': '',
                 'action': Action.CONTEXT,
                 'line': msg,
                 } for _op, msg in list(stats['ops'].items())
                if _op not in [MOD_FILENODE]]

            chunks.insert(0, frag)

            original_filename = safe_str(head['a_path'])
            _files.append({
                'original_filename': original_filename,
                'filename': safe_str(head['b_path']),
                'old_revision': head['a_blob_id'],
                'new_revision': head['b_blob_id'],
                'chunks': chunks,
                'raw_diff': safe_str(raw_diff),
                'operation': op,
                'stats': stats,
                'exceeds_limit': exceeds_limit,
                'is_limited_diff': limited_diff,
            })

        def sorter(info):
            return {OPS.ADD: 0, OPS.MOD: 1, OPS.DEL: 2}.get(info['operation'])

        return diff_container(sorted(_files, key=sorter))

    # FIXME: NEWDIFFS: dan: this gets replaced by _new_parse_lines
    def _parse_lines(self, diff_iter):
        """
        Parse the diff and return data for the template.
        """

        stats = [0, 0]
        chunks = []
        raw_diff = []

        try:
            line = next(diff_iter)

            while line:
                raw_diff.append(line)
                lines = []
                chunks.append(lines)

                match = self._chunk_re.match(line)

                if not match:
                    break

                gr = match.groups()
                (old_line, old_end,
                 new_line, new_end) = [int(x or 1) for x in gr[:-1]]
                old_line -= 1
                new_line -= 1

                context = len(gr) == 5  # always True: the chunk regex defines 5 groups
                old_end += old_line
                new_end += new_line

                if context:
                    # skip context only if it's first line
                    if int(gr[0]) > 1:
                        lines.append({
                            'old_lineno': '...',
                            'new_lineno': '...',
                            'action': Action.CONTEXT,
                            'line': line,
                        })

                line = next(diff_iter)

                while old_line < old_end or new_line < new_end:
                    command = ' '
                    if line:
                        # lines are bytes; take the first byte as a str command
                        command = chr(line[0])

                    affects_old = affects_new = False

                    # ignore those if we don't expect them
                    if command in '#@':
                        continue
                    elif command == '+':
                        affects_new = True
                        action = Action.ADD
                        stats[0] += 1
                    elif command == '-':
                        affects_old = True
                        action = Action.DELETE
                        stats[1] += 1
                    else:
                        affects_old = affects_new = True
                        action = Action.UNMODIFIED

                    if not self._newline_marker.match(line):
                        old_line += affects_old
                        new_line += affects_new
                        lines.append({
                            'old_lineno': affects_old and old_line or b'',
                            'new_lineno': affects_new and new_line or b'',
                            'action': action,
                            'line': self._clean_line(line, command)
                        })
                        raw_diff.append(line)

                    line = next(diff_iter)

                    if self._newline_marker.match(line):
                        # we need to append to lines, since this is not
                        # counted in the line specs of the diff
                        lines.append({
                            'old_lineno': '...',
                            'new_lineno': '...',
                            'action': Action.CONTEXT,
                            'line': self._clean_line(line, command)
                        })

        except StopIteration:
            pass
        return b''.join(raw_diff), chunks, stats

    # FIXME: NEWDIFFS: dan: this replaces _parse_lines
    def _new_parse_lines(self, diff_iter):
        """
        Parse the diff and return data for the template.
        """

        stats = [0, 0]
        chunks = []
        raw_diff = []

        try:
            line = next(diff_iter)
            assert isinstance(line, bytes)

            while line:
                raw_diff.append(line)
                # match a hunk header, e.g. b'@@ -0,0 +1 @@\n'
                match = self._chunk_re.match(line)

                if not match:
                    break

                gr = match.groups()

                (old_line, old_end,
                 new_line, new_end) = [int(x or 1) for x in gr[:-1]]

                lines = []
                hunk = {
                    'section_header': gr[-1],
                    'source_start': old_line,
                    'source_length': old_end,
                    'target_start': new_line,
                    'target_length': new_end,
                    'lines': lines,
                }
                chunks.append(hunk)

                old_line -= 1
                new_line -= 1

                old_end += old_line
                new_end += new_line

                line = next(diff_iter)

                while old_line < old_end or new_line < new_end:
                    command = ' '
                    if line:
                        # This is bytes, so we need to convert it to a str
                        command: str = chr(line[0])

                    affects_old = affects_new = False

                    # ignore those if we don't expect them
                    if command in '#@':
                        continue
                    elif command == '+':
                        affects_new = True
                        action = Action.ADD
                        stats[0] += 1
                    elif command == '-':
                        affects_old = True
                        action = Action.DELETE
                        stats[1] += 1
                    else:
                        affects_old = affects_new = True
                        action = Action.UNMODIFIED

                    if not self._newline_marker.match(line):
                        old_line += affects_old
                        new_line += affects_new
                        lines.append({
                            'old_lineno': affects_old and old_line or None,
                            'new_lineno': affects_new and new_line or None,
                            'action': action,
                            'line': self._clean_line(line, command)
                        })
                        raw_diff.append(line)

                    line = next(diff_iter)

                    if self._newline_marker.match(line):
                        # we need to append to lines, since this is not
                        # counted in the line specs of the diff
                        if affects_old:
                            action = Action.OLD_NO_NL
                        elif affects_new:
                            action = Action.NEW_NO_NL
                        else:
                            raise Exception('invalid context for no newline')

                        lines.append({
                            'old_lineno': None,
                            'new_lineno': None,
                            'action': action,
                            'line': self._clean_line(line, command)
                        })

        except StopIteration:
            pass

        return b''.join(raw_diff), chunks, stats
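
    # Illustrative sketch of the parsed output for a minimal one-hunk diff
    # (bytes lines, as produced by diff_splitter):
    #
    #   diff_iter = iter([b'@@ -1,2 +1,2 @@\n', b' unchanged\n',
    #                     b'-old line\n', b'+new line\n'])
    #   raw, hunks, (added, deleted) = self._new_parse_lines(diff_iter)
    #   # hunks[0]['source_start'] == 1 and hunks[0]['target_length'] == 2
    #   # hunks[0]['lines'][1]['action'] == Action.DELETE
    #   # added == 1 and deleted == 1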

    def _safe_id(self, idstring):
        r"""Make a string safe for including in an id attribute.

        The HTML spec says that id attributes 'must begin with
        a letter ([A-Za-z]) and may be followed by any number
        of letters, digits ([0-9]), hyphens ("-"), underscores
        ("_"), colons (":"), and periods (".")'. These regexps
        are slightly over-zealous, in that they remove colons
        and periods unnecessarily.

        Whitespace is transformed into underscores, and then
        anything which is not a hyphen or a character that
        matches \w (alphanumerics and underscore) is removed.

        """
        # Transform all whitespace to underscore
        idstring = re.sub(r'\s', "_", f'{idstring}')
        # Remove everything that is not a hyphen or a member of \w
        idstring = re.sub(r'(?!-)\W', "", idstring).lower()
        return idstring
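
    # Illustrative example of the sanitizing above:
    #
    #   self._safe_id('My File v2.txt')
    #   # -> 'my_file_v2txt'   (whitespace -> '_', '.' removed, lowercased)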

    @classmethod
    def diff_splitter(cls, diff_string: bytes):
        """
        Diff split that emulates .splitlines() but works only on \n
        """
        if not diff_string:
            return
        elif diff_string == b'\n':
            yield b'\n'
        else:

            has_newline = diff_string.endswith(b'\n')
            elements = diff_string.split(b'\n')
            if has_newline:
                # skip last element as it's empty string from newlines
                elements = elements[:-1]

            len_elements = len(elements)

            for cnt, line in enumerate(elements, start=1):
                last_line = cnt == len_elements
                if last_line and not has_newline:
                    yield line
                else:
                    yield line + b'\n'
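
    # Illustrative sketch of the split behaviour (line ends are preserved,
    # unlike bytes.splitlines()):
    #
    #   list(DiffProcessor.diff_splitter(b'line1\nline2\n'))
    #   # -> [b'line1\n', b'line2\n']
    #   list(DiffProcessor.diff_splitter(b'line1\nline2'))
    #   # -> [b'line1\n', b'line2']   (no trailing newline on the last line)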

    def prepare(self, inline_diff=True):
        """
        Prepare the passed udiff for HTML rendering.

        :return: A list of dicts with diff information.
        """
        parsed = self._parser(inline_diff=inline_diff)
        self.parsed = True
        self.parsed_diff = parsed
        return parsed

    def as_raw(self, diff_lines=None):
        """
        Returns raw diff as a byte string
        """
        return self._diff.raw.tobytes()

    def stat(self):
        """
        Returns a tuple of added and removed lines for this instance
        """
        return self.adds, self.removes

    def get_context_of_line(
            self, path, diff_line: DiffLineNumber = None, context_before: int = 3, context_after: int = 3):
        """
        Returns the context lines for the specified diff line.
        """
        assert self.parsed, "DiffProcessor is not initialized."

        if None not in diff_line:
            raise ValueError(f"Cannot specify both line numbers in diff_line: {diff_line}")

        file_diff = self._get_file_diff(path)
        chunk, idx = self._find_chunk_line_index(file_diff, diff_line)

        first_line_to_include = max(idx - context_before, 0)
        first_line_after_context = idx + context_after + 1
        context_lines = chunk['lines'][first_line_to_include:first_line_after_context]

        line_contents = [
            _context_line(line) for line in context_lines
            if _is_diff_content(line)
        ]

        # TODO: johbo: Interim fixup, the diff chunks drop the final newline.
        # Once they are fixed, we can drop this line here.
        if line_contents:
            line_contents[-1] = (
                line_contents[-1][0], line_contents[-1][1].rstrip(b'\n') + b'\n')
        return line_contents

    def find_context(self, path, context, offset=0):
        """
        Finds the given `context` inside of the diff.

        Use the parameter `offset` to specify which offset the target line has
        inside of the given `context`. This way the correct diff line will be
        returned.

        :param offset: Shall be used to specify the offset of the main line
            within the given `context`.
        """
        if offset < 0 or offset >= len(context):
            raise ValueError(
                "Only positive values up to the length of the context "
                "minus one are allowed.")

        matches = []
        file_diff = self._get_file_diff(path)

        for chunk in file_diff['chunks']:
            if not isinstance(chunk, dict):
                continue
            context_iter = iter(context)
            for line_idx, line in enumerate(chunk['lines']):
                try:
                    if _context_line(line) == next(context_iter):
                        continue
                except StopIteration:
                    matches.append((line_idx, chunk))
                    context_iter = iter(context)

            # Increment position and trigger StopIteration
            # if we had a match at the end
            line_idx += 1
            try:
                next(context_iter)
            except StopIteration:
                matches.append((line_idx, chunk))

        effective_offset = len(context) - offset
        found_at_diff_lines = [
            _line_to_diff_line_number(chunk['lines'][idx - effective_offset])
            for idx, chunk in matches]

        return found_at_diff_lines
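
    # Illustrative sketch (assumes the diff was already prepared): locate a
    # two-line context in the parsed diff of 'setup.py', where the line of
    # interest is the second one (offset=1):
    #
    #   context = [(Action.UNMODIFIED, 'import os\n'),
    #              (Action.ADD, 'import sys\n')]
    #   hits = self.find_context('setup.py', context, offset=1)
    #   # -> a list of DiffLineNumber entries,
    #   #    e.g. [DiffLineNumber(old=None, new=2)]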

    def _get_file_diff(self, path):
        for file_diff in self.parsed_diff:
            if file_diff['filename'] == path:
                break
        else:
            raise FileNotInDiffException(f"File {path} not in diff")
        return file_diff

    def _find_chunk_line_index(self, file_diff, diff_line):
        for chunk in file_diff['chunks']:
            if not isinstance(chunk, dict):
                continue
            for line_idx, line in enumerate(chunk['lines']):
                if diff_line.old and line['old_lineno'] == diff_line.old:
                    return chunk, line_idx
                if diff_line.new and line['new_lineno'] == diff_line.new:
                    return chunk, line_idx
        raise LineNotInDiffException(f"The line {diff_line} is not part of the diff.")
981
981
982
982
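# A minimal, standalone sketch of the context-scanning idea used by
# ``find_context`` above: walk the lines, restart the context iterator on a
# mismatch, and record the position one past a completed match. The toy
# ``lines``/``context`` values below are hypothetical, not RhodeCode diff data.
def _find_context_end_positions(lines, context):
    matches = []
    context_iter = iter(context)
    for idx, line in enumerate(lines):
        try:
            if line == next(context_iter):
                continue
        except StopIteration:
            matches.append(idx)
        # mismatch or completed match: start looking for the context again
        context_iter = iter(context)
    # flush a match that runs up to the very last line
    try:
        next(context_iter)
    except StopIteration:
        matches.append(len(lines))
    return matches

assert _find_context_end_positions(list('abcabc'), list('bc')) == [3, 6]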
983 def _is_diff_content(line):
983 def _is_diff_content(line):
984 return line['action'] in (
984 return line['action'] in (
985 Action.UNMODIFIED, Action.ADD, Action.DELETE)
985 Action.UNMODIFIED, Action.ADD, Action.DELETE)
986
986
987
987
988 def _context_line(line):
988 def _context_line(line):
989 return line['action'], line['line']
989 return line['action'], line['line']
990
990
991
991
992 def _line_to_diff_line_number(line):
992 def _line_to_diff_line_number(line):
993 new_line_no = line['new_lineno'] or None
993 new_line_no = line['new_lineno'] or None
994 old_line_no = line['old_lineno'] or None
994 old_line_no = line['old_lineno'] or None
995 return DiffLineNumber(old=old_line_no, new=new_line_no)
995 return DiffLineNumber(old=old_line_no, new=new_line_no)
996
996
997
997
998 class FileNotInDiffException(Exception):
998 class FileNotInDiffException(Exception):
999 """
999 """
1000 Raised when the context for a missing file is requested.
1000 Raised when the context for a missing file is requested.
1001
1001
1002 If you request the context for a line in a file which is not part of the
1002 If you request the context for a line in a file which is not part of the
1003 given diff, then this exception is raised.
1003 given diff, then this exception is raised.
1004 """
1004 """
1005
1005
1006
1006
1007 class LineNotInDiffException(Exception):
1007 class LineNotInDiffException(Exception):
1008 """
1008 """
1009 Raised when the context for a missing line is requested.
1009 Raised when the context for a missing line is requested.
1010
1010
1011 If you request the context for a line in a file and this line is not
1011 If you request the context for a line in a file and this line is not
1012 part of the given diff, then this exception is raised.
1012 part of the given diff, then this exception is raised.
1013 """
1013 """
1014
1014
1015
1015
1016 class DiffLimitExceeded(Exception):
1016 class DiffLimitExceeded(Exception):
1017 pass
1017 pass
1018
1018
1019
1019
1020 # NOTE(marcink): if diffs.mako changes, this probably
1020 # NOTE(marcink): if diffs.mako changes, this probably
1021 # needs a bump to the next version
1021 # needs a bump to the next version
1022 CURRENT_DIFF_VERSION = 'v5'
1022 CURRENT_DIFF_VERSION = 'v5'
1023
1023
1024
1024
1025 def _cleanup_cache_file(cached_diff_file):
1025 def _cleanup_cache_file(cached_diff_file):
1026 # clean up the file so we do not keep it "damaged"
1026 # clean up the file so we do not keep it "damaged"
1027 try:
1027 try:
1028 os.remove(cached_diff_file)
1028 os.remove(cached_diff_file)
1029 except Exception:
1029 except Exception:
1030 log.exception('Failed to cleanup path %s', cached_diff_file)
1030 log.exception('Failed to cleanup path %s', cached_diff_file)
1031
1031
1032
1032
1033 def _get_compression_mode(cached_diff_file):
1033 def _get_compression_mode(cached_diff_file):
1034 mode = 'bz2'
1034 mode = 'bz2'
1035 if 'mode:plain' in cached_diff_file:
1035 if 'mode:plain' in cached_diff_file:
1036 mode = 'plain'
1036 mode = 'plain'
1037 elif 'mode:gzip' in cached_diff_file:
1037 elif 'mode:gzip' in cached_diff_file:
1038 mode = 'gzip'
1038 mode = 'gzip'
1039 return mode
1039 return mode
1040
1040
1041
1041
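# Hedged illustration of the mode detection above; the cache file names
# below are made up for the example.
assert _get_compression_mode('diff_cache_mode:plain_abcd') == 'plain'
assert _get_compression_mode('diff_cache_mode:gzip_abcd') == 'gzip'
assert _get_compression_mode('diff_cache_abcd') == 'bz2'  # the default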
1042 def cache_diff(cached_diff_file, diff, commits):
1042 def cache_diff(cached_diff_file, diff, commits):
1043 compression_mode = _get_compression_mode(cached_diff_file)
1043 compression_mode = _get_compression_mode(cached_diff_file)
1044
1044
1045 struct = {
1045 struct = {
1046 'version': CURRENT_DIFF_VERSION,
1046 'version': CURRENT_DIFF_VERSION,
1047 'diff': diff,
1047 'diff': diff,
1048 'commits': commits
1048 'commits': commits
1049 }
1049 }
1050
1050
1051 start = time.time()
1051 start = time.time()
1052 try:
1052 try:
1053 if compression_mode == 'plain':
1053 if compression_mode == 'plain':
1054 with open(cached_diff_file, 'wb') as f:
1054 with open(cached_diff_file, 'wb') as f:
1055 pickle.dump(struct, f)
1055 pickle.dump(struct, f)
1056 elif compression_mode == 'gzip':
1056 elif compression_mode == 'gzip':
1057 with gzip.GzipFile(cached_diff_file, 'wb') as f:
1057 with gzip.GzipFile(cached_diff_file, 'wb') as f:
1058 pickle.dump(struct, f)
1058 pickle.dump(struct, f)
1059 else:
1059 else:
1060 with bz2.BZ2File(cached_diff_file, 'wb') as f:
1060 with bz2.BZ2File(cached_diff_file, 'wb') as f:
1061 pickle.dump(struct, f)
1061 pickle.dump(struct, f)
1062 except Exception:
1062 except Exception:
1063 log.warning('Failed to save cache', exc_info=True)
1063 log.warning('Failed to save cache', exc_info=True)
1064 _cleanup_cache_file(cached_diff_file)
1064 _cleanup_cache_file(cached_diff_file)
1065
1065
1066 log.debug('Saved diff cache under %s in %.4fs', cached_diff_file, time.time() - start)
1066 log.debug('Saved diff cache under %s in %.4fs', cached_diff_file, time.time() - start)
1067
1067
1068
1068
1069 def load_cached_diff(cached_diff_file):
1069 def load_cached_diff(cached_diff_file):
1070 compression_mode = _get_compression_mode(cached_diff_file)
1070 compression_mode = _get_compression_mode(cached_diff_file)
1071
1071
1072 default_struct = {
1072 default_struct = {
1073 'version': CURRENT_DIFF_VERSION,
1073 'version': CURRENT_DIFF_VERSION,
1074 'diff': None,
1074 'diff': None,
1075 'commits': None
1075 'commits': None
1076 }
1076 }
1077
1077
1078 has_cache = os.path.isfile(cached_diff_file)
1078 has_cache = os.path.isfile(cached_diff_file)
1079 if not has_cache:
1079 if not has_cache:
1080 log.debug('Diff cache file %s does not exist', cached_diff_file)
1080 log.debug('Diff cache file %s does not exist', cached_diff_file)
1081 return default_struct
1081 return default_struct
1082
1082
1083 data = None
1083 data = None
1084
1084
1085 start = time.time()
1085 start = time.time()
1086 try:
1086 try:
1087 if compression_mode == 'plain':
1087 if compression_mode == 'plain':
1088 with open(cached_diff_file, 'rb') as f:
1088 with open(cached_diff_file, 'rb') as f:
1089 data = pickle.load(f)
1089 data = pickle.load(f)
1090 elif compression_mode == 'gzip':
1090 elif compression_mode == 'gzip':
1091 with gzip.GzipFile(cached_diff_file, 'rb') as f:
1091 with gzip.GzipFile(cached_diff_file, 'rb') as f:
1092 data = pickle.load(f)
1092 data = pickle.load(f)
1093 else:
1093 else:
1094 with bz2.BZ2File(cached_diff_file, 'rb') as f:
1094 with bz2.BZ2File(cached_diff_file, 'rb') as f:
1095 data = pickle.load(f)
1095 data = pickle.load(f)
1096 except Exception:
1096 except Exception:
1097 log.warning('Failed to read diff cache file', exc_info=True)
1097 log.warning('Failed to read diff cache file', exc_info=True)
1098
1098
1099 if not data:
1099 if not data:
1100 data = default_struct
1100 data = default_struct
1101
1101
1102 if not isinstance(data, dict):
1102 if not isinstance(data, dict):
1103 # old version of data ?
1103 # old version of data ?
1104 data = default_struct
1104 data = default_struct
1105
1105
1106 # check version
1106 # check version
1107 if data.get('version') != CURRENT_DIFF_VERSION:
1107 if data.get('version') != CURRENT_DIFF_VERSION:
1108 # purge cache
1108 # purge cache
1109 _cleanup_cache_file(cached_diff_file)
1109 _cleanup_cache_file(cached_diff_file)
1110 return default_struct
1110 return default_struct
1111
1111
1112 log.debug('Loaded diff cache from %s in %.4fs', cached_diff_file, time.time() - start)
1112 log.debug('Loaded diff cache from %s in %.4fs', cached_diff_file, time.time() - start)
1113
1113
1114 return data
1114 return data
1115
1115
1116
1116
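# A minimal round-trip sketch, assuming a writable temp dir; the 'mode:gzip'
# marker in the (hypothetical) file name selects gzip for both cache_diff()
# and load_cached_diff().
import tempfile

_example_file = os.path.join(tempfile.mkdtemp(), 'diff_cache_mode:gzip_example')
cache_diff(_example_file, diff='raw-diff-text', commits=['deadbeef'])
_loaded = load_cached_diff(_example_file)
assert _loaded['version'] == CURRENT_DIFF_VERSION
assert _loaded['diff'] == 'raw-diff-text'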
1117 def generate_diff_cache_key(*args):
1117 def generate_diff_cache_key(*args):
1118 """
1118 """
1119 Helper to generate a cache key using arguments
1119 Helper to generate a cache key using arguments
1120 """
1120 """
1121 def arg_mapper(input_param):
1121 def arg_mapper(input_param):
1122 input_param = safe_str(input_param)
1122 input_param = safe_str(input_param)
1123 # we cannot allow '/' in arguments since it would allow
1123 # we cannot allow '/' in arguments since it would allow
1124 # subdirectory usage
1124 # subdirectory usage
1125 input_param = input_param.replace('/', '_')
1125 input_param = input_param.replace('/', '_')
1126 return input_param or None # prevent empty string arguments
1126 return input_param or None # prevent empty string arguments
1127
1127
1128 return '_'.join([
1128 return '_'.join([
1129 '{}' for _i in range(len(args))]).format(*list(map(arg_mapper, args)))
1129 '{}' for _i in range(len(args))]).format(*list(map(arg_mapper, args)))
1130
1130
1131
1131
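# Example (hypothetical arguments): thanks to the '/' sanitization above, a
# path-like argument cannot introduce sub-directories into the key.
assert generate_diff_cache_key('repo', 'feature/x', 'v1') == 'repo_feature_x_v1'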
1132 def diff_cache_exist(cache_storage, *args):
1132 def diff_cache_exist(cache_storage, *args):
1133 """
1133 """
1134 Based on all generated arguments check and return a cache path
1134 Based on all generated arguments check and return a cache path
1135 """
1135 """
1136 args = list(args) + ['mode:gzip']
1136 args = list(args) + ['mode:gzip']
1137 cache_key = generate_diff_cache_key(*args)
1137 cache_key = generate_diff_cache_key(*args)
1138 cache_file_path = os.path.join(cache_storage, cache_key)
1138 cache_file_path = os.path.join(cache_storage, cache_key)
1139 # prevent path traversal attacks from params that contain e.g. '../../'
1139 # prevent path traversal attacks from params that contain e.g. '../../'
1140 if not os.path.abspath(cache_file_path).startswith(cache_storage):
1140 if not os.path.abspath(cache_file_path).startswith(cache_storage):
1141 raise ValueError(f'Final path must be within {cache_storage}')
1141 raise ValueError(f'Final path must be within {cache_storage}')
1142
1142
1143 return cache_file_path
1143 return cache_file_path
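# Sketch with hypothetical POSIX paths: because the arguments are sanitized
# and the resolved absolute path is validated, the generated cache path
# stays inside the given storage directory.
_cache_path = diff_cache_exist('/var/cache/diffs', 'repo', '../../etc/passwd')
assert _cache_path.startswith('/var/cache/diffs')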
@@ -1,2160 +1,2198 b''
1 # Copyright (C) 2010-2023 RhodeCode GmbH
1 # Copyright (C) 2010-2023 RhodeCode GmbH
2 #
2 #
3 # This program is free software: you can redistribute it and/or modify
3 # This program is free software: you can redistribute it and/or modify
4 # it under the terms of the GNU Affero General Public License, version 3
4 # it under the terms of the GNU Affero General Public License, version 3
5 # (only), as published by the Free Software Foundation.
5 # (only), as published by the Free Software Foundation.
6 #
6 #
7 # This program is distributed in the hope that it will be useful,
7 # This program is distributed in the hope that it will be useful,
8 # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 # but WITHOUT ANY WARRANTY; without even the implied warranty of
9 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 # GNU General Public License for more details.
10 # GNU General Public License for more details.
11 #
11 #
12 # You should have received a copy of the GNU Affero General Public License
12 # You should have received a copy of the GNU Affero General Public License
13 # along with this program. If not, see <http://www.gnu.org/licenses/>.
13 # along with this program. If not, see <http://www.gnu.org/licenses/>.
14 #
14 #
15 # This program is dual-licensed. If you wish to learn more about the
15 # This program is dual-licensed. If you wish to learn more about the
16 # RhodeCode Enterprise Edition, including its added features, Support services,
16 # RhodeCode Enterprise Edition, including its added features, Support services,
17 # and proprietary license terms, please see https://rhodecode.com/licenses/
17 # and proprietary license terms, please see https://rhodecode.com/licenses/
18
18
19 """
19 """
20 Helper functions
20 Helper functions
21
21
22 Consists of functions typically used within templates, but also
22 Consists of functions typically used within templates, but also
23 available to Controllers. This module is available to both as 'h'.
23 available to Controllers. This module is available to both as 'h'.
24 """
24 """
25 import base64
25 import base64
26 import collections
26 import collections
27
27
28 import os
28 import os
29 import random
29 import random
30 import hashlib
30 import hashlib
31 import io
31 import io
32 import textwrap
32 import textwrap
33 import urllib.request
33 import urllib.request
34 import urllib.parse
34 import urllib.parse
35 import urllib.error
35 import urllib.error
36 import math
36 import math
37 import logging
37 import logging
38 import re
38 import re
39 import time
39 import time
40 import string
40 import string
41 import regex
41 import regex
42 from collections import OrderedDict
42 from collections import OrderedDict
43
43
44 import pygments
44 import pygments
45 import itertools
45 import itertools
46 import fnmatch
46 import fnmatch
47
47
48 from datetime import datetime
48 from datetime import datetime
49 from functools import partial
49 from functools import partial
50 from pygments.formatters.html import HtmlFormatter
50 from pygments.formatters.html import HtmlFormatter
51 from pygments.lexers import (
51 from pygments.lexers import (
52 get_lexer_by_name, get_lexer_for_filename, get_lexer_for_mimetype)
52 get_lexer_by_name, get_lexer_for_filename, get_lexer_for_mimetype)
53
53
54 from pyramid.threadlocal import get_current_request
54 from pyramid.threadlocal import get_current_request
55 from tempita import looper
55 from tempita import looper
56 from webhelpers2.html import literal, HTML, escape
56 from webhelpers2.html import literal, HTML, escape
57 from webhelpers2.html._autolink import _auto_link_urls
57 from webhelpers2.html._autolink import _auto_link_urls
58 from webhelpers2.html.tools import (
58 from webhelpers2.html.tools import (
59 button_to, highlight, js_obfuscate, strip_links, strip_tags)
59 button_to, highlight, js_obfuscate, strip_links, strip_tags)
60
60
61 from webhelpers2.text import (
61 from webhelpers2.text import (
62 chop_at, collapse, convert_accented_entities,
62 chop_at, collapse, convert_accented_entities,
63 convert_misc_entities, lchop, plural, rchop, remove_formatting,
63 convert_misc_entities, lchop, plural, rchop, remove_formatting,
64 replace_whitespace, urlify, truncate, wrap_paragraphs)
64 replace_whitespace, urlify, truncate, wrap_paragraphs)
65 from webhelpers2.date import time_ago_in_words
65 from webhelpers2.date import time_ago_in_words
66
66
67 from webhelpers2.html.tags import (
67 from webhelpers2.html.tags import (
68 _input, NotGiven, _make_safe_id_component as safeid,
68 _input, NotGiven, _make_safe_id_component as safeid,
69 form as insecure_form,
69 form as insecure_form,
70 auto_discovery_link, checkbox, end_form, file,
70 auto_discovery_link, checkbox, end_form, file,
71 hidden, image, javascript_link, link_to, link_to_if, link_to_unless, ol,
71 hidden, image, javascript_link, link_to, link_to_if, link_to_unless, ol,
72 stylesheet_link, submit, text, password, textarea,
72 stylesheet_link, submit, text, password, textarea,
73 ul, radio, Options)
73 ul, radio, Options)
74
74
75 from webhelpers2.number import format_byte_size
75 from webhelpers2.number import format_byte_size
76 # python3.11 backport fixes for webhelpers2
76 # python3.11 backport fixes for webhelpers2
77 from rhodecode.lib._vendor.webhelpers_backports import raw_select
77 from rhodecode.lib._vendor.webhelpers_backports import raw_select
78
78
79 from rhodecode.lib.action_parser import action_parser
79 from rhodecode.lib.action_parser import action_parser
80 from rhodecode.lib.html_filters import sanitize_html
80 from rhodecode.lib.html_filters import sanitize_html
81 from rhodecode.lib.pagination import Page, RepoPage, SqlPage
81 from rhodecode.lib.pagination import Page, RepoPage, SqlPage
82 from rhodecode.lib import ext_json
82 from rhodecode.lib import ext_json
83 from rhodecode.lib.ext_json import json
83 from rhodecode.lib.ext_json import json
84 from rhodecode.lib.str_utils import safe_bytes, convert_special_chars, base64_to_str
84 from rhodecode.lib.str_utils import safe_bytes, convert_special_chars, base64_to_str
85 from rhodecode.lib.utils import repo_name_slug, get_custom_lexer
85 from rhodecode.lib.utils import repo_name_slug, get_custom_lexer
86 from rhodecode.lib.str_utils import safe_str
86 from rhodecode.lib.str_utils import safe_str
87 from rhodecode.lib.utils2 import (
87 from rhodecode.lib.utils2 import (
88 str2bool,
88 str2bool,
89 get_commit_safe, datetime_to_time, time_to_datetime, time_to_utcdatetime,
89 get_commit_safe, datetime_to_time, time_to_datetime, time_to_utcdatetime,
90 AttributeDict, safe_int, md5, md5_safe, get_host_info)
90 AttributeDict, safe_int, md5, md5_safe, get_host_info)
91 from rhodecode.lib.markup_renderer import MarkupRenderer, relative_links
91 from rhodecode.lib.markup_renderer import MarkupRenderer, relative_links
92 from rhodecode.lib.vcs.exceptions import CommitDoesNotExistError
92 from rhodecode.lib.vcs.exceptions import CommitDoesNotExistError
93 from rhodecode.lib.vcs.backends.base import BaseChangeset, EmptyCommit
93 from rhodecode.lib.vcs.backends.base import BaseChangeset, EmptyCommit
94 from rhodecode.lib.vcs.conf.settings import ARCHIVE_SPECS
94 from rhodecode.lib.vcs.conf.settings import ARCHIVE_SPECS
95 from rhodecode.lib.index.search_utils import get_matching_line_offsets
95 from rhodecode.lib.index.search_utils import get_matching_line_offsets
96 from rhodecode.config.conf import DATE_FORMAT, DATETIME_FORMAT
96 from rhodecode.config.conf import DATE_FORMAT, DATETIME_FORMAT
97 from rhodecode.model.changeset_status import ChangesetStatusModel
97 from rhodecode.model.changeset_status import ChangesetStatusModel
98 from rhodecode.model.db import Permission, User, Repository, UserApiKeys, FileStore
98 from rhodecode.model.db import Permission, User, Repository, UserApiKeys, FileStore
99 from rhodecode.model.repo_group import RepoGroupModel
99 from rhodecode.model.repo_group import RepoGroupModel
100 from rhodecode.model.settings import IssueTrackerSettingsModel
100 from rhodecode.model.settings import IssueTrackerSettingsModel
101
101
102
102
103 log = logging.getLogger(__name__)
103 log = logging.getLogger(__name__)
104
104
105
105
106 DEFAULT_USER = User.DEFAULT_USER
106 DEFAULT_USER = User.DEFAULT_USER
107 DEFAULT_USER_EMAIL = User.DEFAULT_USER_EMAIL
107 DEFAULT_USER_EMAIL = User.DEFAULT_USER_EMAIL
108
108
109
109
110 def asset(path, ver=None, **kwargs):
110 def asset(path, ver=None, **kwargs):
111 """
111 """
112 Helper to generate a static asset file path for rhodecode assets
112 Helper to generate a static asset file path for rhodecode assets
113
113
114 e.g. h.asset('images/image.png', ver='3923')
114 e.g. h.asset('images/image.png', ver='3923')
115
115
116 :param path: path of asset
116 :param path: path of asset
117 :param ver: optional version query param to append as ?ver=
117 :param ver: optional version query param to append as ?ver=
118 """
118 """
119 request = get_current_request()
119 request = get_current_request()
120 query = {}
120 query = {}
121 query.update(kwargs)
121 query.update(kwargs)
122 if ver:
122 if ver:
123 query = {'ver': ver}
123 query = {'ver': ver}
124 return request.static_path(
124 return request.static_path(
125 f'rhodecode:public/{path}', _query=query)
125 f'rhodecode:public/{path}', _query=query)
126
126
127
127
128 default_html_escape_table = {
128 default_html_escape_table = {
129 ord('&'): '&amp;',
129 ord('&'): '&amp;',
130 ord('<'): '&lt;',
130 ord('<'): '&lt;',
131 ord('>'): '&gt;',
131 ord('>'): '&gt;',
132 ord('"'): '&quot;',
132 ord('"'): '&quot;',
133 ord("'"): '&#39;',
133 ord("'"): '&#39;',
134 }
134 }
135
135
136
136
137 def html_escape(text, html_escape_table=default_html_escape_table):
137 def html_escape(text, html_escape_table=default_html_escape_table):
138 """Produce entities within text."""
138 """Produce entities within text."""
139 return text.translate(html_escape_table)
139 return text.translate(html_escape_table)
140
140
141
141
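# Quick illustration of the translation table above:
assert html_escape('<a href="#">Tom & "Jerry"</a>') == (
    '&lt;a href=&quot;#&quot;&gt;Tom &amp; &quot;Jerry&quot;&lt;/a&gt;')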
142 def str_json(*args, **kwargs):
142 def str_json(*args, **kwargs):
143 return ext_json.str_json(*args, **kwargs)
143 return ext_json.str_json(*args, **kwargs)
144
144
145
145
146 def formatted_str_json(*args, **kwargs):
146 def formatted_str_json(*args, **kwargs):
147 return ext_json.formatted_str_json(*args, **kwargs)
147 return ext_json.formatted_str_json(*args, **kwargs)
148
148
149
149
150 def chop_at_smart(s, sub, inclusive=False, suffix_if_chopped=None):
150 def chop_at_smart(s, sub, inclusive=False, suffix_if_chopped=None):
151 """
151 """
152 Truncate string ``s`` at the first occurrence of ``sub``.
152 Truncate string ``s`` at the first occurrence of ``sub``.
153
153
154 If ``inclusive`` is true, truncate just after ``sub`` rather than at it.
154 If ``inclusive`` is true, truncate just after ``sub`` rather than at it.
155 """
155 """
156 suffix_if_chopped = suffix_if_chopped or ''
156 suffix_if_chopped = suffix_if_chopped or ''
157 pos = s.find(sub)
157 pos = s.find(sub)
158 if pos == -1:
158 if pos == -1:
159 return s
159 return s
160
160
161 if inclusive:
161 if inclusive:
162 pos += len(sub)
162 pos += len(sub)
163
163
164 chopped = s[:pos]
164 chopped = s[:pos]
165 left = s[pos:].strip()
165 left = s[pos:].strip()
166
166
167 if left and suffix_if_chopped:
167 if left and suffix_if_chopped:
168 chopped += suffix_if_chopped
168 chopped += suffix_if_chopped
169
169
170 return chopped
170 return chopped
171
171
172
172
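# Behavior sketch: the suffix is only appended when something was chopped off.
assert chop_at_smart('pick a boo', ' a', suffix_if_chopped='...') == 'pick...'
assert chop_at_smart('no match here', '|', suffix_if_chopped='...') == 'no match here'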
173 def shorter(text, size=20, prefix=False):
173 def shorter(text, size=20, prefix=False):
174 postfix = '...'
174 postfix = '...'
175 if len(text) > size:
175 if len(text) > size:
176 if prefix:
176 if prefix:
177 # shorten in front
177 # shorten in front
178 return postfix + text[-(size - len(postfix)):]
178 return postfix + text[-(size - len(postfix)):]
179 else:
179 else:
180 return text[:size - len(postfix)] + postfix
180 return text[:size - len(postfix)] + postfix
181 return text
181 return text
182
182
183
183
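# Behavior sketch: the '...' postfix counts towards the requested size.
assert shorter('abcdefghijklmnop', size=10) == 'abcdefg...'
assert shorter('abcdefghijklmnop', size=10, prefix=True) == '...jklmnop'
assert shorter('short', size=10) == 'short'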
184 def reset(name, value=None, id=NotGiven, type="reset", **attrs):
184 def reset(name, value=None, id=NotGiven, type="reset", **attrs):
185 """
185 """
186 Reset button
186 Reset button
187 """
187 """
188 return _input(type, name, value, id, attrs)
188 return _input(type, name, value, id, attrs)
189
189
190
190
191 def select(name, selected_values, options, id=NotGiven, **attrs):
191 def select(name, selected_values, options, id=NotGiven, **attrs):
192
192
193 if isinstance(options, (list, tuple)):
193 if isinstance(options, (list, tuple)):
194 options_iter = options
194 options_iter = options
195 # Handle old value,label lists ... where value also can be value,label lists
195 # Handle old (value, label) lists ... where value itself can also be a (value, label) list
195 # Handle old (value, label) lists ... where value itself can also be a (value, label) list
196 options = Options()
197 for opt in options_iter:
197 for opt in options_iter:
198 if isinstance(opt, tuple) and len(opt) == 2:
198 if isinstance(opt, tuple) and len(opt) == 2:
199 value, label = opt
199 value, label = opt
200 elif isinstance(opt, str):
200 elif isinstance(opt, str):
201 value = label = opt
201 value = label = opt
202 else:
202 else:
203 raise ValueError('invalid select option type %r' % type(opt))
203 raise ValueError('invalid select option type %r' % type(opt))
204
204
205 if isinstance(value, (list, tuple)):
205 if isinstance(value, (list, tuple)):
206 option_group = options.add_optgroup(label)
206 option_group = options.add_optgroup(label)
207 for opt2 in value:
207 for opt2 in value:
208 if isinstance(opt2, tuple) and len(opt2) == 2:
208 if isinstance(opt2, tuple) and len(opt2) == 2:
209 group_value, group_label = opt2
209 group_value, group_label = opt2
210 elif isinstance(opt2, str):
210 elif isinstance(opt2, str):
211 group_value = group_label = opt2
211 group_value = group_label = opt2
212 else:
212 else:
213 raise ValueError('invalid select option type %r' % type(opt2))
213 raise ValueError('invalid select option type %r' % type(opt2))
214
214
215 option_group.add_option(group_label, group_value)
215 option_group.add_option(group_label, group_value)
216 else:
216 else:
217 options.add_option(label, value)
217 options.add_option(label, value)
218
218
219 return raw_select(name, selected_values, options, id=id, **attrs)
219 return raw_select(name, selected_values, options, id=id, **attrs)
220
220
221
221
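# Usage sketch (hypothetical options): plain (value, label) pairs and nested
# (value-list, group-label) pairs are normalized into webhelpers2 Options
# before rendering.
_select_html = select('lang', ['py'], [
    ('py', 'Python'),
    ('rb', 'Ruby'),
    ((('go', 'Go'), ('rs', 'Rust')), 'Compiled'),
])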
222 def branding(name, length=40):
222 def branding(name, length=40):
223 return truncate(name, length, indicator="")
223 return truncate(name, length, indicator="")
224
224
225
225
226 def FID(raw_id, path):
226 def FID(raw_id, path):
227 """
227 """
228 Creates a unique ID for a filenode based on a hash of its path and commit;
228 Creates a unique ID for a filenode based on a hash of its path and commit;
229 it is safe to use in URLs.
229 it is safe to use in URLs.
230
230
231 :param raw_id:
231 :param raw_id:
232 :param path:
232 :param path:
233 """
233 """
234
234
235 return 'c-%s-%s' % (short_id(raw_id), md5_safe(path)[:12])
235 return 'c-%s-%s' % (short_id(raw_id), md5_safe(path)[:12])
236
236
237
237
238 class _GetError(object):
238 class _GetError(object):
239 """Get error from form_errors, and represent it as span wrapped error
239 """Get error from form_errors, and represent it as span wrapped error
240 message
240 message
241
241
242 :param field_name: field to fetch errors for
242 :param field_name: field to fetch errors for
243 :param form_errors: form errors dict
243 :param form_errors: form errors dict
244 """
244 """
245
245
246 def __call__(self, field_name, form_errors):
246 def __call__(self, field_name, form_errors):
247 tmpl = """<span class="error_msg">%s</span>"""
247 tmpl = """<span class="error_msg">%s</span>"""
248 if form_errors and field_name in form_errors:
248 if form_errors and field_name in form_errors:
249 return literal(tmpl % form_errors.get(field_name))
249 return literal(tmpl % form_errors.get(field_name))
250
250
251
251
252 get_error = _GetError()
252 get_error = _GetError()
253
253
254
254
255 class _ToolTip(object):
255 class _ToolTip(object):
256
256
257 def __call__(self, tooltip_title, trim_at=50):
257 def __call__(self, tooltip_title, trim_at=50):
258 """
258 """
259 Helper to wrap tooltip text into nicely formatted,
259 Helper to wrap tooltip text into nicely formatted,
260 auto-wrapped text
260 auto-wrapped text
261
261
262 :param tooltip_title:
262 :param tooltip_title:
263 """
263 """
264 tooltip_title = escape(tooltip_title)
264 tooltip_title = escape(tooltip_title)
265 tooltip_title = tooltip_title.replace('<', '&lt;').replace('>', '&gt;')
265 tooltip_title = tooltip_title.replace('<', '&lt;').replace('>', '&gt;')
266 return tooltip_title
266 return tooltip_title
267
267
268
268
269 tooltip = _ToolTip()
269 tooltip = _ToolTip()
270
270
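# Usage sketch: markup in a tooltip title is escaped so it renders as text,
# e.g. with a hypothetical title:
_safe_title = tooltip('<b>hint</b>')  # contains no raw '<' or '>' characters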
271 files_icon = '<i class="file-breadcrumb-copy tooltip icon-clipboard clipboard-action" data-clipboard-text="{}" title="Copy file path"></i>'
271 files_icon = '<i class="file-breadcrumb-copy tooltip icon-clipboard clipboard-action" data-clipboard-text="{}" title="Copy file path"></i>'
272
272
273
273
274 def files_breadcrumbs(repo_name, repo_type, commit_id, file_path, landing_ref_name=None, at_ref=None,
274 def files_breadcrumbs(repo_name, repo_type, commit_id, file_path, landing_ref_name=None, at_ref=None,
275 limit_items=False, linkify_last_item=False, hide_last_item=False,
275 limit_items=False, linkify_last_item=False, hide_last_item=False,
276 copy_path_icon=True):
276 copy_path_icon=True):
277
277
278 if at_ref:
278 if at_ref:
279 route_qry = {'at': at_ref}
279 route_qry = {'at': at_ref}
280 default_landing_ref = at_ref or landing_ref_name or commit_id
280 default_landing_ref = at_ref or landing_ref_name or commit_id
281 else:
281 else:
282 route_qry = None
282 route_qry = None
283 default_landing_ref = commit_id
283 default_landing_ref = commit_id
284
284
285 # first segment is a `HOME` link to repo files root location
285 # first segment is a `HOME` link to repo files root location
286 root_name = literal('<i class="icon-home"></i>')
286 root_name = literal('<i class="icon-home"></i>')
287
287
288 url_segments = [
288 url_segments = [
289 link_to(
289 link_to(
290 root_name,
290 root_name,
291 repo_files_by_ref_url(
291 repo_files_by_ref_url(
292 repo_name,
292 repo_name,
293 repo_type,
293 repo_type,
294 f_path=None, # None here is a special case for SVN repos,
294 f_path=None, # None here is a special case for SVN repos,
295 # that won't prefix with a ref
295 # that won't prefix with a ref
296 ref_name=default_landing_ref,
296 ref_name=default_landing_ref,
297 commit_id=commit_id,
297 commit_id=commit_id,
298 query=route_qry
298 query=route_qry
299 )
299 )
300 )]
300 )]
301
301
302 path_segments = file_path.split('/')
302 path_segments = file_path.split('/')
303 last_cnt = len(path_segments) - 1
303 last_cnt = len(path_segments) - 1
304 for cnt, segment in enumerate(path_segments):
304 for cnt, segment in enumerate(path_segments):
305 if not segment:
305 if not segment:
306 continue
306 continue
307 segment_html = escape(segment)
307 segment_html = escape(segment)
308
308
309 last_item = cnt == last_cnt
309 last_item = cnt == last_cnt
310
310
311 if last_item and hide_last_item:
311 if last_item and hide_last_item:
312 # iterate over and hide last element
312 # iterate over and hide last element
313 continue
313 continue
314
314
315 if last_item and linkify_last_item is False:
315 if last_item and linkify_last_item is False:
316 # plain version
316 # plain version
317 url_segments.append(segment_html)
317 url_segments.append(segment_html)
318 else:
318 else:
319 url_segments.append(
319 url_segments.append(
320 link_to(
320 link_to(
321 segment_html,
321 segment_html,
322 repo_files_by_ref_url(
322 repo_files_by_ref_url(
323 repo_name,
323 repo_name,
324 repo_type,
324 repo_type,
325 f_path='/'.join(path_segments[:cnt + 1]),
325 f_path='/'.join(path_segments[:cnt + 1]),
326 ref_name=default_landing_ref,
326 ref_name=default_landing_ref,
327 commit_id=commit_id,
327 commit_id=commit_id,
328 query=route_qry
328 query=route_qry
329 ),
329 ),
330 ))
330 ))
331
331
332 limited_url_segments = url_segments[:1] + ['...'] + url_segments[-5:]
332 limited_url_segments = url_segments[:1] + ['...'] + url_segments[-5:]
333 if limit_items and len(limited_url_segments) < len(url_segments):
333 if limit_items and len(limited_url_segments) < len(url_segments):
334 url_segments = limited_url_segments
334 url_segments = limited_url_segments
335
335
336 full_path = file_path
336 full_path = file_path
337 if copy_path_icon:
337 if copy_path_icon:
338 icon = files_icon.format(escape(full_path))
338 icon = files_icon.format(escape(full_path))
339 else:
339 else:
340 icon = ''
340 icon = ''
341
341
342 if file_path == '':
342 if file_path == '':
343 return root_name
343 return root_name
344 else:
344 else:
345 return literal(' / '.join(url_segments) + icon)
345 return literal(' / '.join(url_segments) + icon)
346
346
347
347
348 def files_url_data(request):
348 def files_url_data(request):
349 matchdict = request.matchdict
349 matchdict = request.matchdict
350
350
351 if 'f_path' not in matchdict:
351 if 'f_path' not in matchdict:
352 matchdict['f_path'] = ''
352 matchdict['f_path'] = ''
353 else:
353 else:
354 matchdict['f_path'] = urllib.parse.quote(safe_str(matchdict['f_path']))
354 matchdict['f_path'] = urllib.parse.quote(safe_str(matchdict['f_path']))
355 if 'commit_id' not in matchdict:
355 if 'commit_id' not in matchdict:
356 matchdict['commit_id'] = 'tip'
356 matchdict['commit_id'] = 'tip'
357
357
358 return ext_json.str_json(matchdict)
358 return ext_json.str_json(matchdict)
359
359
360
360
361 def repo_files_by_ref_url(db_repo_name, db_repo_type, f_path, ref_name, commit_id, query=None, ):
361 def repo_files_by_ref_url(db_repo_name, db_repo_type, f_path, ref_name, commit_id, query=None, ):
362 _is_svn = is_svn(db_repo_type)
362 _is_svn = is_svn(db_repo_type)
363 final_f_path = f_path
363 final_f_path = f_path
364
364
365 if _is_svn:
365 if _is_svn:
366 """
366 """
367 For SVN the ref_name cannot be used as a commit_id; the URL needs the
367 For SVN the ref_name cannot be used as a commit_id; the URL needs the
368 actual commit_id followed by the ref_name. This should be done only when
368 actual commit_id followed by the ref_name. This should be done only when
369 this is an initial landing URL, without additional paths.
369 this is an initial landing URL, without additional paths.
370
370
371 like: /1000/tags/1.0.0/?at=tags/1.0.0
371 like: /1000/tags/1.0.0/?at=tags/1.0.0
372 """
372 """
373
373
374 if ref_name and ref_name != 'tip':
374 if ref_name and ref_name != 'tip':
375 # NOTE(marcink): for svn the ref_name is actually the stored path, so we prefix it
375 # NOTE(marcink): for svn the ref_name is actually the stored path, so we prefix it
376 # for SVN we only do this magic prefix if it's the root, e.g. the landing revision
376 # for SVN we only do this magic prefix if it's the root, e.g. the landing revision
377 # of files link. If we are in the tree we don't need this since we traverse the url
377 # of files link. If we are in the tree we don't need this since we traverse the url
378 # that has everything stored
378 # that has everything stored
379 if f_path in ['', '/']:
379 if f_path in ['', '/']:
380 final_f_path = '/'.join([ref_name, f_path])
380 final_f_path = '/'.join([ref_name, f_path])
381
381
382 # SVN always needs a commit_id explicitly, without a named REF
382 # SVN always needs a commit_id explicitly, without a named REF
383 default_commit_id = commit_id
383 default_commit_id = commit_id
384 else:
384 else:
385 """
385 """
386 For git and mercurial we construct a new URL using the names instead of commit_id
386 For git and mercurial we construct a new URL using the names instead of commit_id
387 like: /master/some_path?at=master
387 like: /master/some_path?at=master
388 """
388 """
389 # We currently do not support branches with slashes
389 # We currently do not support branches with slashes
390 if '/' in ref_name:
390 if '/' in ref_name:
391 default_commit_id = commit_id
391 default_commit_id = commit_id
392 else:
392 else:
393 default_commit_id = ref_name
393 default_commit_id = ref_name
394
394
395 # sometimes we pass f_path as None to indicate explicitly no prefix;
395 # sometimes we pass f_path as None to indicate explicitly no prefix;
396 # we translate it to a string to avoid None
396 # we translate it to a string to avoid None
397 final_f_path = final_f_path or ''
397 final_f_path = final_f_path or ''
398
398
399 files_url = route_path(
399 files_url = route_path(
400 'repo_files',
400 'repo_files',
401 repo_name=db_repo_name,
401 repo_name=db_repo_name,
402 commit_id=default_commit_id,
402 commit_id=default_commit_id,
403 f_path=final_f_path,
403 f_path=final_f_path,
404 _query=query
404 _query=query
405 )
405 )
406 return files_url
406 return files_url
407
407
408
408
409 def code_highlight(code, lexer, formatter, use_hl_filter=False):
409 def code_highlight(code, lexer, formatter, use_hl_filter=False):
410 """
410 """
411 Lex ``code`` with ``lexer`` and format it with the formatter ``formatter``.
411 Lex ``code`` with ``lexer`` and format it with the formatter ``formatter``.
412
412
413 If ``outfile`` is given and a valid file object (an object
413 If ``outfile`` is given and a valid file object (an object
414 with a ``write`` method), the result will be written to it, otherwise
414 with a ``write`` method), the result will be written to it, otherwise
415 it is returned as a string.
415 it is returned as a string.
416 """
416 """
417 if use_hl_filter:
417 if use_hl_filter:
418 # add HL filter
418 # add HL filter
419 from rhodecode.lib.index import search_utils
419 from rhodecode.lib.index import search_utils
420 lexer.add_filter(search_utils.ElasticSearchHLFilter())
420 lexer.add_filter(search_utils.ElasticSearchHLFilter())
421 return pygments.format(pygments.lex(code, lexer), formatter)
421 return pygments.format(pygments.lex(code, lexer), formatter)
422
422
423
423
424 class CodeHtmlFormatter(HtmlFormatter):
424 class CodeHtmlFormatter(HtmlFormatter):
425 """
425 """
426 My code Html Formatter for source codes
426 My code Html Formatter for source codes
427 """
427 """
428
428
429 def wrap(self, source):
429 def wrap(self, source):
430 return self._wrap_div(self._wrap_pre(self._wrap_code(source)))
430 return self._wrap_div(self._wrap_pre(self._wrap_code(source)))
431
431
432 def _wrap_code(self, source):
432 def _wrap_code(self, source):
433 for cnt, it in enumerate(source):
433 for cnt, it in enumerate(source):
434 i, t = it
434 i, t = it
435 t = f'<div id="L{cnt+1}">{t}</div>'
435 t = f'<div id="L{cnt+1}">{t}</div>'
436 yield i, t
436 yield i, t
437
437
438 def _wrap_tablelinenos(self, inner):
438 def _wrap_tablelinenos(self, inner):
439 dummyoutfile = io.StringIO()
439 dummyoutfile = io.StringIO()
440 lncount = 0
440 lncount = 0
441 for t, line in inner:
441 for t, line in inner:
442 if t:
442 if t:
443 lncount += 1
443 lncount += 1
444 dummyoutfile.write(line)
444 dummyoutfile.write(line)
445
445
446 fl = self.linenostart
446 fl = self.linenostart
447 mw = len(str(lncount + fl - 1))
447 mw = len(str(lncount + fl - 1))
448 sp = self.linenospecial
448 sp = self.linenospecial
449 st = self.linenostep
449 st = self.linenostep
450 la = self.lineanchors
450 la = self.lineanchors
451 aln = self.anchorlinenos
451 aln = self.anchorlinenos
452 nocls = self.noclasses
452 nocls = self.noclasses
453 if sp:
453 if sp:
454 lines = []
454 lines = []
455
455
456 for i in range(fl, fl + lncount):
456 for i in range(fl, fl + lncount):
457 if i % st == 0:
457 if i % st == 0:
458 if i % sp == 0:
458 if i % sp == 0:
459 if aln:
459 if aln:
460 lines.append('<a href="#%s%d" class="special">%*d</a>' %
460 lines.append('<a href="#%s%d" class="special">%*d</a>' %
461 (la, i, mw, i))
461 (la, i, mw, i))
462 else:
462 else:
463 lines.append('<span class="special">%*d</span>' % (mw, i))
463 lines.append('<span class="special">%*d</span>' % (mw, i))
464 else:
464 else:
465 if aln:
465 if aln:
466 lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
466 lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
467 else:
467 else:
468 lines.append('%*d' % (mw, i))
468 lines.append('%*d' % (mw, i))
469 else:
469 else:
470 lines.append('')
470 lines.append('')
471 ls = '\n'.join(lines)
471 ls = '\n'.join(lines)
472 else:
472 else:
473 lines = []
473 lines = []
474 for i in range(fl, fl + lncount):
474 for i in range(fl, fl + lncount):
475 if i % st == 0:
475 if i % st == 0:
476 if aln:
476 if aln:
477 lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
477 lines.append('<a href="#%s%d">%*d</a>' % (la, i, mw, i))
478 else:
478 else:
479 lines.append('%*d' % (mw, i))
479 lines.append('%*d' % (mw, i))
480 else:
480 else:
481 lines.append('')
481 lines.append('')
482 ls = '\n'.join(lines)
482 ls = '\n'.join(lines)
483
483
484 # in case you wonder about the seemingly redundant <div> here: since the
484 # in case you wonder about the seemingly redundant <div> here: since the
485 # content in the other cell also is wrapped in a div, some browsers in
485 # content in the other cell also is wrapped in a div, some browsers in
486 # some configurations seem to mess up the formatting...
486 # some configurations seem to mess up the formatting...
487 if nocls:
487 if nocls:
488 yield 0, ('<table class="%stable">' % self.cssclass +
488 yield 0, ('<table class="%stable">' % self.cssclass +
489 '<tr><td><div class="linenodiv" '
489 '<tr><td><div class="linenodiv" '
490 'style="background-color: #f0f0f0; padding-right: 10px">'
490 'style="background-color: #f0f0f0; padding-right: 10px">'
491 '<pre style="line-height: 125%">' +
491 '<pre style="line-height: 125%">' +
492 ls + '</pre></div></td><td id="hlcode" class="code">')
492 ls + '</pre></div></td><td id="hlcode" class="code">')
493 else:
493 else:
494 yield 0, ('<table class="%stable">' % self.cssclass +
494 yield 0, ('<table class="%stable">' % self.cssclass +
495 '<tr><td class="linenos"><div class="linenodiv"><pre>' +
495 '<tr><td class="linenos"><div class="linenodiv"><pre>' +
496 ls + '</pre></div></td><td id="hlcode" class="code">')
496 ls + '</pre></div></td><td id="hlcode" class="code">')
497 yield 0, dummyoutfile.getvalue()
497 yield 0, dummyoutfile.getvalue()
498 yield 0, '</td></tr></table>'
498 yield 0, '</td></tr></table>'
499
499
500
500
501 class SearchContentCodeHtmlFormatter(CodeHtmlFormatter):
501 class SearchContentCodeHtmlFormatter(CodeHtmlFormatter):
502 def __init__(self, **kw):
502 def __init__(self, **kw):
503 # only show these line numbers if set
503 # only show these line numbers if set
504 self.only_lines = kw.pop('only_line_numbers', [])
504 self.only_lines = kw.pop('only_line_numbers', [])
505 self.query_terms = kw.pop('query_terms', [])
505 self.query_terms = kw.pop('query_terms', [])
506 self.max_lines = kw.pop('max_lines', 5)
506 self.max_lines = kw.pop('max_lines', 5)
507 self.line_context = kw.pop('line_context', 3)
507 self.line_context = kw.pop('line_context', 3)
508 self.url = kw.pop('url', None)
508 self.url = kw.pop('url', None)
509
509
510 super(CodeHtmlFormatter, self).__init__(**kw)
510 super(CodeHtmlFormatter, self).__init__(**kw)
511
511
512 def _wrap_code(self, source):
512 def _wrap_code(self, source):
513 for cnt, it in enumerate(source):
513 for cnt, it in enumerate(source):
514 i, t = it
514 i, t = it
515 t = '<pre>%s</pre>' % t
515 t = '<pre>%s</pre>' % t
516 yield i, t
516 yield i, t
517
517
518 def _wrap_tablelinenos(self, inner):
518 def _wrap_tablelinenos(self, inner):
519 yield 0, '<table class="code-highlight %stable">' % self.cssclass
519 yield 0, '<table class="code-highlight %stable">' % self.cssclass
520
520
521 last_shown_line_number = 0
521 last_shown_line_number = 0
522 current_line_number = 1
522 current_line_number = 1
523
523
524 for t, line in inner:
524 for t, line in inner:
525 if not t:
525 if not t:
526 yield t, line
526 yield t, line
527 continue
527 continue
528
528
529 if current_line_number in self.only_lines:
529 if current_line_number in self.only_lines:
530 if last_shown_line_number + 1 != current_line_number:
530 if last_shown_line_number + 1 != current_line_number:
531 yield 0, '<tr>'
531 yield 0, '<tr>'
532 yield 0, '<td class="line">...</td>'
532 yield 0, '<td class="line">...</td>'
533 yield 0, '<td id="hlcode" class="code"></td>'
533 yield 0, '<td id="hlcode" class="code"></td>'
534 yield 0, '</tr>'
534 yield 0, '</tr>'
535
535
536 yield 0, '<tr>'
536 yield 0, '<tr>'
537 if self.url:
537 if self.url:
538 yield 0, '<td class="line"><a href="%s#L%i">%i</a></td>' % (
538 yield 0, '<td class="line"><a href="%s#L%i">%i</a></td>' % (
539 self.url, current_line_number, current_line_number)
539 self.url, current_line_number, current_line_number)
540 else:
540 else:
541 yield 0, '<td class="line"><a href="">%i</a></td>' % (
541 yield 0, '<td class="line"><a href="">%i</a></td>' % (
542 current_line_number)
542 current_line_number)
543 yield 0, '<td id="hlcode" class="code">' + line + '</td>'
543 yield 0, '<td id="hlcode" class="code">' + line + '</td>'
544 yield 0, '</tr>'
544 yield 0, '</tr>'
545
545
546 last_shown_line_number = current_line_number
546 last_shown_line_number = current_line_number
547
547
548 current_line_number += 1
548 current_line_number += 1
549
549
550 yield 0, '</table>'
550 yield 0, '</table>'
551
551
552
552
553 def hsv_to_rgb(h, s, v):
553 def hsv_to_rgb(h, s, v):
554 """ Convert hsv color values to rgb """
554 """ Convert hsv color values to rgb """
555
555
556 if s == 0.0:
556 if s == 0.0:
557 return v, v, v
557 return v, v, v
558 i = int(h * 6.0) # XXX assume int() truncates!
558 i = int(h * 6.0) # XXX assume int() truncates!
559 f = (h * 6.0) - i
559 f = (h * 6.0) - i
560 p = v * (1.0 - s)
560 p = v * (1.0 - s)
561 q = v * (1.0 - s * f)
561 q = v * (1.0 - s * f)
562 t = v * (1.0 - s * (1.0 - f))
562 t = v * (1.0 - s * (1.0 - f))
563 i = i % 6
563 i = i % 6
564 if i == 0:
564 if i == 0:
565 return v, t, p
565 return v, t, p
566 if i == 1:
566 if i == 1:
567 return q, v, p
567 return q, v, p
568 if i == 2:
568 if i == 2:
569 return p, v, t
569 return p, v, t
570 if i == 3:
570 if i == 3:
571 return p, q, v
571 return p, q, v
572 if i == 4:
572 if i == 4:
573 return t, p, v
573 return t, p, v
574 if i == 5:
574 if i == 5:
575 return v, p, q
575 return v, p, q
576
576
577
577
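# Sanity checks: hue 0 at full saturation/value is pure red; zero saturation
# collapses to a grey determined by the value channel.
assert hsv_to_rgb(0.0, 1.0, 1.0) == (1.0, 0.0, 0.0)
assert hsv_to_rgb(0.5, 0.0, 0.25) == (0.25, 0.25, 0.25)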
578 def unique_color_generator(n=10000, saturation=0.10, lightness=0.95):
578 def unique_color_generator(n=10000, saturation=0.10, lightness=0.95):
579 """
579 """
580 Generator for getting n evenly distributed colors using
580 Generator for getting n evenly distributed colors using
581 HSV color space and the golden ratio. It always returns the same order of colors.
581 HSV color space and the golden ratio. It always returns the same order of colors.
582
582
583 :param n: number of colors to generate
583 :param n: number of colors to generate
584 :param saturation: saturation of returned colors
584 :param saturation: saturation of returned colors
585 :param lightness: lightness of returned colors
585 :param lightness: lightness of returned colors
586 :returns: RGB tuple
586 :returns: RGB tuple
587 """
587 """
588
588
589 golden_ratio = 0.618033988749895
589 golden_ratio = 0.618033988749895
590 h = 0.22717784590367374
590 h = 0.22717784590367374
591
591
592 for _ in range(n):
592 for _ in range(n):
593 h += golden_ratio
593 h += golden_ratio
594 h %= 1
594 h %= 1
595 HSV_tuple = [h, saturation, lightness]
595 HSV_tuple = [h, saturation, lightness]
596 RGB_tuple = hsv_to_rgb(*HSV_tuple)
596 RGB_tuple = hsv_to_rgb(*HSV_tuple)
597 yield [str(int(x * 256)) for x in RGB_tuple]
597 yield [str(int(x * 256)) for x in RGB_tuple]
598
598
599
599
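# The sequence is deterministic: two fresh generators yield identical colors.
_gen_a = unique_color_generator()
_gen_b = unique_color_generator()
assert [next(_gen_a) for _ in range(3)] == [next(_gen_b) for _ in range(3)]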
600 def color_hasher(n=10000, saturation=0.10, lightness=0.95):
600 def color_hasher(n=10000, saturation=0.10, lightness=0.95):
601 """
601 """
602 Returns a function which, when called with an argument, returns a unique
602 Returns a function which, when called with an argument, returns a unique
603 color for that argument, e.g.:
603 color for that argument, e.g.:
604
604
605 :param n: number of colors to generate
605 :param n: number of colors to generate
606 :param saturation: saturation of returned colors
606 :param saturation: saturation of returned colors
607 :param lightness: lightness of returned colors
607 :param lightness: lightness of returned colors
608 :returns: css RGB string
608 :returns: css RGB string
609
609
610 >>> color_hash = color_hasher()
610 >>> color_hash = color_hasher()
611 >>> color_hash('hello')
611 >>> color_hash('hello')
612 'rgb(34, 12, 59)'
612 'rgb(34, 12, 59)'
613 >>> color_hash('hello')
613 >>> color_hash('hello')
614 'rgb(34, 12, 59)'
614 'rgb(34, 12, 59)'
615 >>> color_hash('other')
615 >>> color_hash('other')
616 'rgb(90, 224, 159)'
616 'rgb(90, 224, 159)'
617 """
617 """
618
618
619 color_dict = {}
619 color_dict = {}
620 cgenerator = unique_color_generator(
620 cgenerator = unique_color_generator(
621 saturation=saturation, lightness=lightness)
621 saturation=saturation, lightness=lightness)
622
622
623 def get_color_string(thing):
623 def get_color_string(thing):
624 if thing in color_dict:
624 if thing in color_dict:
625 col = color_dict[thing]
625 col = color_dict[thing]
626 else:
626 else:
627 col = color_dict[thing] = next(cgenerator)
627 col = color_dict[thing] = next(cgenerator)
628 return "rgb(%s)" % (', '.join(col))
628 return "rgb(%s)" % (', '.join(col))
629
629
630 return get_color_string
630 return get_color_string
631
631
632
632
633 def get_lexer_safe(mimetype=None, filepath=None):
633 def get_lexer_safe(mimetype=None, filepath=None):
634 """
634 """
635 Tries to return a relevant pygments lexer using mimetype/filepath name,
635 Tries to return a relevant pygments lexer using mimetype/filepath name,
636 defaulting to plain text if none could be found
636 defaulting to plain text if none could be found
637 """
637 """
638 lexer = None
638 lexer = None
639 try:
639 try:
640 if mimetype:
640 if mimetype:
641 lexer = get_lexer_for_mimetype(mimetype)
641 lexer = get_lexer_for_mimetype(mimetype)
642 if not lexer:
642 if not lexer:
643 lexer = get_lexer_for_filename(filepath)
643 lexer = get_lexer_for_filename(filepath)
644 except pygments.util.ClassNotFound:
644 except pygments.util.ClassNotFound:
645 pass
645 pass
646
646
647 if not lexer:
647 if not lexer:
648 lexer = get_lexer_by_name('text')
648 lexer = get_lexer_by_name('text')
649
649
650 return lexer
650 return lexer
651
651
652
652
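# Resolution order sketch: mimetype first, then filename, then plain text
# (pygments ships both lexers asserted below).
assert get_lexer_safe(mimetype='text/x-python').name == 'Python'
assert get_lexer_safe(filepath='an-unknown-file').name == 'Text only'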
653 def get_lexer_for_filenode(filenode):
653 def get_lexer_for_filenode(filenode):
654 lexer = get_custom_lexer(filenode.extension) or filenode.lexer
654 lexer = get_custom_lexer(filenode.extension) or filenode.lexer
655 return lexer
655 return lexer
656
656
657
657
658 def pygmentize(filenode, **kwargs):
658 def pygmentize(filenode, **kwargs):
659 """
659 """
660 Highlight the content of ``filenode`` using pygments
660 Highlight the content of ``filenode`` using pygments
661
661
662 :param filenode:
662 :param filenode:
663 """
663 """
664 lexer = get_lexer_for_filenode(filenode)
664 lexer = get_lexer_for_filenode(filenode)
665 return literal(code_highlight(filenode.content, lexer,
665 return literal(code_highlight(filenode.content, lexer,
666 CodeHtmlFormatter(**kwargs)))
666 CodeHtmlFormatter(**kwargs)))
667
667
668
668
669 def is_following_repo(repo_name, user_id):
669 def is_following_repo(repo_name, user_id):
670 from rhodecode.model.scm import ScmModel
670 from rhodecode.model.scm import ScmModel
671 return ScmModel().is_following_repo(repo_name, user_id)
671 return ScmModel().is_following_repo(repo_name, user_id)
672
672
673
673
674 class _Message(object):
674 class _Message(object):
675 """A message returned by ``Flash.pop_messages()``.
675 """A message returned by ``Flash.pop_messages()``.
676
676
677 Converting the message to a string returns the message text. Instances
677 Converting the message to a string returns the message text. Instances
678 also have the following attributes:
678 also have the following attributes:
679
679
680 * ``message``: the message text.
680 * ``message``: the message text.
681 * ``category``: the category specified when the message was created.
681 * ``category``: the category specified when the message was created.
682 """
682 """
683
683
684 def __init__(self, category, message, sub_data=None):
684 def __init__(self, category, message, sub_data=None):
685 self.category = category
685 self.category = category
686 self.message = message
686 self.message = message
687 self.sub_data = sub_data or {}
687 self.sub_data = sub_data or {}
688
688
689 def __str__(self):
689 def __str__(self):
690 return self.message
690 return self.message
691
691
692 __unicode__ = __str__
692 __unicode__ = __str__
693
693
694 def __html__(self):
694 def __html__(self):
695 return escape(safe_str(self.message))
695 return escape(safe_str(self.message))
696
696
697
697
698 class Flash(object):
698 class Flash(object):
699 # List of allowed categories. If None, allow any category.
699 # List of allowed categories. If None, allow any category.
700 categories = ["warning", "notice", "error", "success"]
700 categories = ["warning", "notice", "error", "success"]
701
701
702 # Default category if none is specified.
702 # Default category if none is specified.
703 default_category = "notice"
703 default_category = "notice"
704
704
705 def __init__(self, session_key="flash", categories=None,
705 def __init__(self, session_key="flash", categories=None,
706 default_category=None):
706 default_category=None):
707 """
707 """
708 Instantiate a ``Flash`` object.
708 Instantiate a ``Flash`` object.
709
709
710 ``session_key`` is the key to save the messages under in the user's
710 ``session_key`` is the key to save the messages under in the user's
711 session.
711 session.
712
712
713 ``categories`` is an optional list which overrides the default list
713 ``categories`` is an optional list which overrides the default list
714 of categories.
714 of categories.
715
715
716 ``default_category`` overrides the default category used for messages
716 ``default_category`` overrides the default category used for messages
717 when none is specified.
717 when none is specified.
718 """
718 """
719 self.session_key = session_key
719 self.session_key = session_key
720 if categories is not None:
720 if categories is not None:
721 self.categories = categories
721 self.categories = categories
722 if default_category is not None:
722 if default_category is not None:
723 self.default_category = default_category
723 self.default_category = default_category
724 if self.categories and self.default_category not in self.categories:
724 if self.categories and self.default_category not in self.categories:
725 raise ValueError(
725 raise ValueError(
726 "unrecognized default category %r" % (self.default_category,))
726 "unrecognized default category %r" % (self.default_category,))
727
727
728 def pop_messages(self, session=None, request=None):
728 def pop_messages(self, session=None, request=None):
729 """
729 """
730 Return all accumulated messages and delete them from the session.
730 Return all accumulated messages and delete them from the session.
731
731
732 The return value is a list of ``Message`` objects.
732 The return value is a list of ``Message`` objects.
733 """
733 """
734 messages = []
734 messages = []
735
735
736 if not session:
736 if not session:
737 if not request:
737 if not request:
738 request = get_current_request()
738 request = get_current_request()
739 session = request.session
739 session = request.session
740
740
741 # Pop the 'old' pylons flash messages. They are tuples of the form
741 # Pop the 'old' pylons flash messages. They are tuples of the form
742 # (category, message)
742 # (category, message)
743 for cat, msg in session.pop(self.session_key, []):
743 for cat, msg in session.pop(self.session_key, []):
744 messages.append(_Message(cat, msg))
744 messages.append(_Message(cat, msg))
745
745
746 # Pop the 'new' pyramid flash messages for each category as list
746 # Pop the 'new' pyramid flash messages for each category as list
747 # of strings.
747 # of strings.
748 for cat in self.categories:
748 for cat in self.categories:
749 for msg in session.pop_flash(queue=cat):
749 for msg in session.pop_flash(queue=cat):
750 sub_data = {}
750 sub_data = {}
751 if hasattr(msg, 'rsplit'):
751 if hasattr(msg, 'rsplit'):
752 flash_data = msg.rsplit('|DELIM|', 1)
752 flash_data = msg.rsplit('|DELIM|', 1)
753 org_message = flash_data[0]
753 org_message = flash_data[0]
754 if len(flash_data) > 1:
754 if len(flash_data) > 1:
755 sub_data = json.loads(flash_data[1])
755 sub_data = json.loads(flash_data[1])
756 else:
756 else:
757 org_message = msg
757 org_message = msg
758
758
759 messages.append(_Message(cat, org_message, sub_data=sub_data))
759 messages.append(_Message(cat, org_message, sub_data=sub_data))
760
760
761 # Map messages from the default queue to the 'notice' category.
761 # Map messages from the default queue to the 'notice' category.
762 for msg in session.pop_flash():
762 for msg in session.pop_flash():
763 messages.append(_Message('notice', msg))
763 messages.append(_Message('notice', msg))
764
764
765 session.save()
765 session.save()
766 return messages
766 return messages
767
767
768 def json_alerts(self, session=None, request=None):
768 def json_alerts(self, session=None, request=None):
769 payloads = []
769 payloads = []
770 messages = flash.pop_messages(session=session, request=request) or []
770 messages = flash.pop_messages(session=session, request=request) or []
771 for message in messages:
771 for message in messages:
772 payloads.append({
772 payloads.append({
773 'message': {
773 'message': {
774 'message': '{}'.format(message.message),
774 'message': '{}'.format(message.message),
775 'level': message.category,
775 'level': message.category,
776 'force': True,
776 'force': True,
777 'subdata': message.sub_data
777 'subdata': message.sub_data
778 }
778 }
779 })
779 })
780 return safe_str(json.dumps(payloads))
780 return safe_str(json.dumps(payloads))
781
781
782 def __call__(self, message, category=None, ignore_duplicate=True,
782 def __call__(self, message, category=None, ignore_duplicate=True,
783 session=None, request=None):
783 session=None, request=None):
784
784
785 if not session:
785 if not session:
786 if not request:
786 if not request:
787 request = get_current_request()
787 request = get_current_request()
788 session = request.session
788 session = request.session
789
789
790 session.flash(
790 session.flash(
791 message, queue=category, allow_duplicate=not ignore_duplicate)
791 message, queue=category, allow_duplicate=not ignore_duplicate)
792
792
793
793
794 flash = Flash()
794 flash = Flash()
795
795
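# A minimal usage sketch of the flash API above (illustrative only;
# assumes pyramid session wiring and that 'success' and 'notice' are
# among the configured categories):
#
#   flash('Repository created', category='success')
#   # structured sub_data travels encoded the way pop_messages()
#   # splits it back apart:
#   flash('Task finished|DELIM|' + json.dumps({'task_id': 42}),
#         category='notice')
#   flash.json_alerts(request=request)  # JSON payload for the UI layer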
796 #==============================================================================
796 #==============================================================================
797 # SCM FILTERS available via h.
797 # SCM FILTERS available via h.
798 #==============================================================================
798 #==============================================================================
799 from rhodecode.lib.vcs.utils import author_name, author_email
799 from rhodecode.lib.vcs.utils import author_name, author_email
800 from rhodecode.lib.utils2 import age, age_from_seconds
800 from rhodecode.lib.utils2 import age, age_from_seconds
801 from rhodecode.model.db import User, ChangesetStatus
801 from rhodecode.model.db import User, ChangesetStatus
802
802
803
803
804 email = author_email
804 email = author_email
805
805
806
806
807 def capitalize(raw_text):
807 def capitalize(raw_text):
808 return raw_text.capitalize()
808 return raw_text.capitalize()
809
809
810
810
811 def short_id(long_id):
811 def short_id(long_id):
812 return long_id[:12]
812 return long_id[:12]
813
813
814
814
815 def hide_credentials(url):
815 def hide_credentials(url):
816 from rhodecode.lib.utils2 import credentials_filter
816 from rhodecode.lib.utils2 import credentials_filter
817 return credentials_filter(url)
817 return credentials_filter(url)
818
818
819 import zoneinfo
819 import zoneinfo
820 import tzlocal
820 import tzlocal
821 local_timezone = tzlocal.get_localzone()
821 local_timezone = tzlocal.get_localzone()
822
822
823
823
824 def get_timezone(datetime_iso, time_is_local=False):
824 def get_timezone(datetime_iso, time_is_local=False):
825 tzinfo = '+00:00'
825 tzinfo = '+00:00'
826
826
827 # detect if we have timezone info; otherwise, add it
827 # detect if we have timezone info; otherwise, add it
828 if time_is_local and isinstance(datetime_iso, datetime) and not datetime_iso.tzinfo:
828 if time_is_local and isinstance(datetime_iso, datetime) and not datetime_iso.tzinfo:
829 force_timezone = os.environ.get('RC_TIMEZONE', '')
829 force_timezone = os.environ.get('RC_TIMEZONE', '')
830 if force_timezone:
830 if force_timezone:
831 force_timezone = zoneinfo.ZoneInfo(force_timezone)
831 force_timezone = zoneinfo.ZoneInfo(force_timezone)
832 timezone = force_timezone or local_timezone
832 timezone = force_timezone or local_timezone
833
833
834 offset = datetime_iso.replace(tzinfo=timezone).strftime('%z')
834 offset = datetime_iso.replace(tzinfo=timezone).strftime('%z')
835 tzinfo = '{}:{}'.format(offset[:-2], offset[-2:])
835 tzinfo = '{}:{}'.format(offset[:-2], offset[-2:])
836 return tzinfo
836 return tzinfo
837
837
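# A worked example of the offset formatting above, assuming a naive
# datetime and a UTC+2 zone such as Europe/Berlin in summer:
#
#   naive = datetime(2023, 7, 1, 12, 0)       # no tzinfo attached
#   # strftime('%z') yields '+0200'; the slicing above rewrites it
#   # into the ISO form '+02:00':
#   get_timezone(naive, time_is_local=True)   # -> '+02:00'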
838
838
839 def age_component(datetime_iso, value=None, time_is_local=False, tooltip=True):
839 def age_component(datetime_iso, value=None, time_is_local=False, tooltip=True):
840 title = value or format_date(datetime_iso)
840 title = value or format_date(datetime_iso)
841 tzinfo = get_timezone(datetime_iso, time_is_local=time_is_local)
841 tzinfo = get_timezone(datetime_iso, time_is_local=time_is_local)
842
842
843 return literal(
843 return literal(
844 '<time class="timeago {cls}" title="{tt_title}" datetime="{dt}{tzinfo}">{title}</time>'.format(
844 '<time class="timeago {cls}" title="{tt_title}" datetime="{dt}{tzinfo}">{title}</time>'.format(
845 cls='tooltip' if tooltip else '',
845 cls='tooltip' if tooltip else '',
846 tt_title=('{title}{tzinfo}'.format(title=title, tzinfo=tzinfo)) if tooltip else '',
846 tt_title=('{title}{tzinfo}'.format(title=title, tzinfo=tzinfo)) if tooltip else '',
847 title=title, dt=datetime_iso, tzinfo=tzinfo
847 title=title, dt=datetime_iso, tzinfo=tzinfo
848 ))
848 ))
849
849
850
850
851 def _shorten_commit_id(commit_id, commit_len=None):
851 def _shorten_commit_id(commit_id, commit_len=None):
852 if commit_len is None:
852 if commit_len is None:
853 request = get_current_request()
853 request = get_current_request()
854 commit_len = request.call_context.visual.show_sha_length
854 commit_len = request.call_context.visual.show_sha_length
855 return commit_id[:commit_len]
855 return commit_id[:commit_len]
856
856
857
857
858 def show_id(commit, show_idx=None, commit_len=None):
858 def show_id(commit, show_idx=None, commit_len=None):
859 """
859 """
860 Configurable function that shows the commit ID;
860 Configurable function that shows the commit ID;
861 by default it's r123:fffeeefffeee
861 by default it's r123:fffeeefffeee
862
862
863 :param commit: commit instance
863 :param commit: commit instance
864 """
864 """
865 if show_idx is None:
865 if show_idx is None:
866 request = get_current_request()
866 request = get_current_request()
867 show_idx = request.call_context.visual.show_revision_number
867 show_idx = request.call_context.visual.show_revision_number
868
868
869 raw_id = _shorten_commit_id(commit.raw_id, commit_len=commit_len)
869 raw_id = _shorten_commit_id(commit.raw_id, commit_len=commit_len)
870 if show_idx:
870 if show_idx:
871 return 'r%s:%s' % (commit.idx, raw_id)
871 return 'r%s:%s' % (commit.idx, raw_id)
872 else:
872 else:
873 return '%s' % (raw_id, )
873 return '%s' % (raw_id, )
874
874
875
875
876 def format_date(date):
876 def format_date(date):
877 """
877 """
878 use a standardized formatting for dates used in RhodeCode
878 use a standardized formatting for dates used in RhodeCode
879
879
880 :param date: date/datetime object
880 :param date: date/datetime object
881 :return: formatted date
881 :return: formatted date
882 """
882 """
883
883
884 if date:
884 if date:
885 _fmt = "%a, %d %b %Y %H:%M:%S"
885 _fmt = "%a, %d %b %Y %H:%M:%S"
886 return safe_str(date.strftime(_fmt))
886 return safe_str(date.strftime(_fmt))
887
887
888 return ""
888 return ""
889
889
890
890
891 class _RepoChecker(object):
891 class _RepoChecker(object):
892
892
893 def __init__(self, backend_alias):
893 def __init__(self, backend_alias):
894 self._backend_alias = backend_alias
894 self._backend_alias = backend_alias
895
895
896 def __call__(self, repository):
896 def __call__(self, repository):
897 if hasattr(repository, 'alias'):
897 if hasattr(repository, 'alias'):
898 _type = repository.alias
898 _type = repository.alias
899 elif hasattr(repository, 'repo_type'):
899 elif hasattr(repository, 'repo_type'):
900 _type = repository.repo_type
900 _type = repository.repo_type
901 else:
901 else:
902 _type = repository
902 _type = repository
903 return _type == self._backend_alias
903 return _type == self._backend_alias
904
904
905
905
906 is_git = _RepoChecker('git')
906 is_git = _RepoChecker('git')
907 is_hg = _RepoChecker('hg')
907 is_hg = _RepoChecker('hg')
908 is_svn = _RepoChecker('svn')
908 is_svn = _RepoChecker('svn')
909
909
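# The checkers accept a Repository/backend instance (via its `alias` or
# `repo_type` attribute) or a plain alias string; a quick sketch:
#
#   is_git('git')   # -> True
#   is_hg('git')    # -> False
#   repo = Repository.get_by_repo_name('some/repo')  # hypothetical repo
#   if repo and is_svn(repo):
#       ...  # svn-specific handling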
910
910
911 def get_repo_type_by_name(repo_name):
911 def get_repo_type_by_name(repo_name):
912 repo = Repository.get_by_repo_name(repo_name)
912 repo = Repository.get_by_repo_name(repo_name)
913 if repo:
913 if repo:
914 return repo.repo_type
914 return repo.repo_type
915
915
916
916
917 def is_svn_without_proxy(repository):
917 def is_svn_without_proxy(repository):
918 if is_svn(repository):
918 if is_svn(repository):
919 from rhodecode.model.settings import VcsSettingsModel
919 from rhodecode.model.settings import VcsSettingsModel
920 conf = VcsSettingsModel().get_ui_settings_as_config_obj()
920 conf = VcsSettingsModel().get_ui_settings_as_config_obj()
921 return not str2bool(conf.get('vcs_svn_proxy', 'http_requests_enabled'))
921 return not str2bool(conf.get('vcs_svn_proxy', 'http_requests_enabled'))
922 return False
922 return False
923
923
924
924
925 def discover_user(author):
925 def discover_user(author):
926 """
926 """
927 Tries to discover RhodeCode User based on the author string. Author string
927 Tries to discover RhodeCode User based on the author string. Author string
928 is typically `FirstName LastName <email@address.com>`
928 is typically `FirstName LastName <email@address.com>`
929 """
929 """
930
930
931 # if author is already a User instance, use it directly
931 # if author is already a User instance, use it directly
932 if isinstance(author, User):
932 if isinstance(author, User):
933 return author
933 return author
934
934
935 # Valid email in the passed attribute; see if that user is in the system
935 # Valid email in the passed attribute; see if that user is in the system
936 _email = author_email(author)
936 _email = author_email(author)
937 if _email != '':
937 if _email != '':
938 user = User.get_by_email(_email, case_insensitive=True, cache=True)
938 user = User.get_by_email(_email, case_insensitive=True, cache=True)
939 if user is not None:
939 if user is not None:
940 return user
940 return user
941
941
942 # Maybe it's a username? Try to extract it and fetch by username.
942 # Maybe it's a username? Try to extract it and fetch by username.
943 _author = author_name(author)
943 _author = author_name(author)
944 user = User.get_by_username(_author, case_insensitive=True, cache=True)
944 user = User.get_by_username(_author, case_insensitive=True, cache=True)
945 if user is not None:
945 if user is not None:
946 return user
946 return user
947
947
948 return None
948 return None
949
949
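# A sketch of the lookup order above, with hypothetical user records:
#
#   discover_user('Jane Doe <jane@example.com>')  # User matched by email
#   discover_user('jane')                         # falls back to username
#   discover_user('Nobody <x@nowhere.invalid>')   # -> None when unknown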
950
950
951 def email_or_none(author):
951 def email_or_none(author):
952 # extract email from the commit string
952 # extract email from the commit string
953 _email = author_email(author)
953 _email = author_email(author)
954
954
955 # If we have an email, use it, otherwise
955 # If we have an email, use it, otherwise
956 # see if it contains a username we can get an email from
956 # see if it contains a username we can get an email from
957 if _email != '':
957 if _email != '':
958 return _email
958 return _email
959 else:
959 else:
960 user = User.get_by_username(
960 user = User.get_by_username(
961 author_name(author), case_insensitive=True, cache=True)
961 author_name(author), case_insensitive=True, cache=True)
962
962
963 if user is not None:
963 if user is not None:
964 return user.email
964 return user.email
965
965
966 # No valid email, not a valid user in the system, none!
966 # No valid email, not a valid user in the system, none!
967 return None
967 return None
968
968
969
969
970 def link_to_user(author, length=0, **kwargs):
970 def link_to_user(author, length=0, **kwargs):
971 user = discover_user(author)
971 user = discover_user(author)
972 # user can be None, but if we already have it we can re-use it
972 # user can be None, but if we already have it we can re-use it
973 # in the person() function, saving one expensive query
973 # in the person() function, saving one expensive query
974 if user:
974 if user:
975 author = user
975 author = user
976
976
977 display_person = person(author, 'username_or_name_or_email')
977 display_person = person(author, 'username_or_name_or_email')
978 if length:
978 if length:
979 display_person = shorter(display_person, length)
979 display_person = shorter(display_person, length)
980
980
981 if user and user.username != user.DEFAULT_USER:
981 if user and user.username != user.DEFAULT_USER:
982 return link_to(
982 return link_to(
983 escape(display_person),
983 escape(display_person),
984 route_path('user_profile', username=user.username),
984 route_path('user_profile', username=user.username),
985 **kwargs)
985 **kwargs)
986 else:
986 else:
987 return escape(display_person)
987 return escape(display_person)
988
988
989
989
990 def link_to_group(users_group_name, **kwargs):
990 def link_to_group(users_group_name, **kwargs):
991 return link_to(
991 return link_to(
992 escape(users_group_name),
992 escape(users_group_name),
993 route_path('user_group_profile', user_group_name=users_group_name),
993 route_path('user_group_profile', user_group_name=users_group_name),
994 **kwargs)
994 **kwargs)
995
995
996
996
997 def person(author, show_attr="username_and_name"):
997 def person(author, show_attr="username_and_name"):
998 user = discover_user(author)
998 user = discover_user(author)
999 if user:
999 if user:
1000 return getattr(user, show_attr)
1000 return getattr(user, show_attr)
1001 else:
1001 else:
1002 _author = author_name(author)
1002 _author = author_name(author)
1003 _email = email(author)
1003 _email = email(author)
1004 return _author or _email
1004 return _author or _email
1005
1005
1006
1006
1007 def author_string(email):
1007 def author_string(email):
1008 if email:
1008 if email:
1009 user = User.get_by_email(email, case_insensitive=True, cache=True)
1009 user = User.get_by_email(email, case_insensitive=True, cache=True)
1010 if user:
1010 if user:
1011 if user.first_name or user.last_name:
1011 if user.first_name or user.last_name:
1012 return '%s %s &lt;%s&gt;' % (
1012 return '%s %s &lt;%s&gt;' % (
1013 user.first_name, user.last_name, email)
1013 user.first_name, user.last_name, email)
1014 else:
1014 else:
1015 return email
1015 return email
1016 else:
1016 else:
1017 return email
1017 return email
1018 else:
1018 else:
1019 return None
1019 return None
1020
1020
1021
1021
1022 def person_by_id(id_, show_attr="username_and_name"):
1022 def person_by_id(id_, show_attr="username_and_name"):
1023 # attr to return from fetched user
1023 # attr to return from fetched user
1024 def person_getter(usr):
1024 def person_getter(usr):
1025 return getattr(usr, show_attr)
1025 return getattr(usr, show_attr)
1026
1026
1027 # maybe it's an ID?
1027 # maybe it's an ID?
1028 if str(id_).isdigit() or isinstance(id_, int):
1028 if str(id_).isdigit() or isinstance(id_, int):
1029 id_ = int(id_)
1029 id_ = int(id_)
1030 user = User.get(id_)
1030 user = User.get(id_)
1031 if user is not None:
1031 if user is not None:
1032 return person_getter(user)
1032 return person_getter(user)
1033 return id_
1033 return id_
1034
1034
1035
1035
1036 def gravatar_with_user(request, author, show_disabled=False, tooltip=False):
1036 def gravatar_with_user(request, author, show_disabled=False, tooltip=False):
1037 _render = request.get_partial_renderer('rhodecode:templates/base/base.mako')
1037 _render = request.get_partial_renderer('rhodecode:templates/base/base.mako')
1038 return _render('gravatar_with_user', author, show_disabled=show_disabled, tooltip=tooltip)
1038 return _render('gravatar_with_user', author, show_disabled=show_disabled, tooltip=tooltip)
1039
1039
1040
1040
1041 tags_paterns = OrderedDict((
1041 tags_patterns = OrderedDict(
1042 ('lang', (re.compile(r'\[(lang|language)\ \=\&gt;\ *([a-zA-Z\-\/\#\+\.]*)\]'),
1042 (
1043 '<div class="metatag" tag="lang">\\2</div>')),
1043 (
1044
1044 "lang",
1045 ('see', (re.compile(r'\[see\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]'),
1045 (
1046 '<div class="metatag" tag="see">see: \\1 </div>')),
1046 re.compile(r"\[(lang|language)\ \=\&gt;\ *([a-zA-Z\-\/\#\+\.]*)\]"),
1047
1047 '<div class="metatag" tag="lang">\\2</div>',
1048 ('url', (re.compile(r'\[url\ \=\&gt;\ \[([a-zA-Z0-9\ \.\-\_]+)\]\((http://|https://|/)(.*?)\)\]'),
1048 ),
1049 '<div class="metatag" tag="url"> <a href="\\2\\3">\\1</a> </div>')),
1049 ),
1050
1050 (
1051 ('license', (re.compile(r'\[license\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]'),
1051 "see",
1052 '<div class="metatag" tag="license"><a href="http:\/\/www.opensource.org/licenses/\\1">\\1</a></div>')),
1052 (
1053
1053 re.compile(r"\[see\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]"),
1054 ('ref', (re.compile(r'\[(requires|recommends|conflicts|base)\ \=\&gt;\ *([a-zA-Z0-9\-\/]*)\]'),
1054 '<div class="metatag" tag="see">see: \\1 </div>',
1055 '<div class="metatag" tag="ref \\1">\\1: <a href="/\\2">\\2</a></div>')),
1055 ),
1056
1056 ),
1057 ('state', (re.compile(r'\[(stable|featured|stale|dead|dev|deprecated)\]'),
1057 (
1058 '<div class="metatag" tag="state \\1">\\1</div>')),
1058 "url",
1059
1059 (
1060 re.compile(
1061 r"\[url\ \=\&gt;\ \[([a-zA-Z0-9\ \.\-\_]+)\]\((http://|https://|/)(.*?)\)\]"
1062 ),
1063 '<div class="metatag" tag="url"> <a href="\\2\\3">\\1</a> </div>',
1064 ),
1065 ),
1066 (
1067 "license",
1068 (
1069 re.compile(
1070 r"\[license\ \=\&gt;\ *([a-zA-Z0-9\/\=\?\&amp;\ \:\/\.\-]*)\]"
1071 ),
1072 r'<div class="metatag" tag="license"><a href="http://www.opensource.org/licenses/\1">\1</a></div>',
1073 ),
1074 ),
1075 (
1076 "ref",
1077 (
1078 re.compile(
1079 r"\[(requires|recommends|conflicts|base)\ \=\&gt;\ *([a-zA-Z0-9\-\/]*)\]"
1080 ),
1081 '<div class="metatag" tag="ref \\1">\\1: <a href="/\\2">\\2</a></div>',
1082 ),
1083 ),
1084 (
1085 "state",
1086 (
1087 re.compile(r"\[(stable|featured|stale|dead|dev|deprecated)\]"),
1088 '<div class="metatag" tag="state \\1">\\1</div>',
1089 ),
1090 ),
1060 # label in grey
1091 # label in grey
1061 ('label', (re.compile(r'\[([a-z]+)\]'),
1092 (
1062 '<div class="metatag" tag="label">\\1</div>')),
1093 "label",
1063
1094 (re.compile(r"\[([a-z]+)\]"), '<div class="metatag" tag="label">\\1</div>'),
1095 ),
1064 # generic catch all in grey
1096 # generic catch all in grey
1065 ('generic', (re.compile(r'\[([a-zA-Z0-9\.\-\_]+)\]'),
1097 (
1066 '<div class="metatag" tag="generic">\\1</div>')),
1098 "generic",
1067 ))
1099 (
1100 re.compile(r"\[([a-zA-Z0-9\.\-\_]+)\]"),
1101 '<div class="metatag" tag="generic">\\1</div>',
1102 ),
1103 ),
1104 )
1105 )
1068
1106
1069
1107
1070 def extract_metatags(value):
1108 def extract_metatags(value):
1071 """
1109 """
1072 Extract supported meta-tags from given text value
1110 Extract supported meta-tags from given text value
1073 """
1111 """
1074 tags = []
1112 tags = []
1075 if not value:
1113 if not value:
1076 return tags, ''
1114 return tags, ''
1077
1115
1078 for key, val in list(tags_paterns.items()):
1116 for key, val in list(tags_patterns.items()):
1079 pat, replace_html = val
1117 pat, replace_html = val
1080 tags.extend([(key, x.group()) for x in pat.finditer(value)])
1118 tags.extend([(key, x.group()) for x in pat.finditer(value)])
1081 value = pat.sub('', value)
1119 value = pat.sub('', value)
1082
1120
1083 return tags, value
1121 return tags, value
1084
1122
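# The patterns above match the HTML-escaped form (`=&gt;`), so values
# are expected to be escaped before extraction; a minimal sketch:
#
#   tags, rest = extract_metatags('[stable] [lang =&gt; python] my repo')
#   # tags -> [('lang', '[lang =&gt; python]'), ('state', '[stable]')]
#   # rest -> '  my repo'  (matched tags stripped from the text)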
1085
1123
1086 def style_metatag(tag_type, value):
1124 def style_metatag(tag_type, value):
1087 """
1125 """
1088 converts tags from value into html equivalent
1126 converts tags from value into html equivalent
1089 """
1127 """
1090 if not value:
1128 if not value:
1091 return ''
1129 return ''
1092
1130
1093 html_value = value
1131 html_value = value
1094 tag_data = tags_paterns.get(tag_type)
1132 tag_data = tags_patterns.get(tag_type)
1095 if tag_data:
1133 if tag_data:
1096 pat, replace_html = tag_data
1134 pat, replace_html = tag_data
1097 # convert to plain `str` instead of a markup tag to be used in
1135 # convert to plain `str` instead of a markup tag to be used in
1098 # regex expressions. safe_str doesn't work here
1136 # regex expressions. safe_str doesn't work here
1099 html_value = pat.sub(replace_html, value)
1137 html_value = pat.sub(replace_html, value)
1100
1138
1101 return html_value
1139 return html_value
1102
1140
1103
1141
1104 def bool2icon(value, show_at_false=True):
1142 def bool2icon(value, show_at_false=True):
1105 """
1143 """
1106 Returns the boolean value of a given value, represented as an html
1144 Returns the boolean value of a given value, represented as an html
1107 element with icon classes
1145 element with icon classes
1108
1146
1109 :param value: given value to convert to html node
1147 :param value: given value to convert to html node
1110 """
1148 """
1111
1149
1112 if value: # does bool conversion
1150 if value: # does bool conversion
1113 return HTML.tag('i', class_="icon-true", title='True')
1151 return HTML.tag('i', class_="icon-true", title='True')
1114 else: # not true as bool
1152 else: # not true as bool
1115 if show_at_false:
1153 if show_at_false:
1116 return HTML.tag('i', class_="icon-false", title='False')
1154 return HTML.tag('i', class_="icon-false", title='False')
1117 return HTML.tag('i')
1155 return HTML.tag('i')
1118
1156
1119
1157
1120 def b64(inp):
1158 def b64(inp):
1121 return base64.b64encode(safe_bytes(inp))
1159 return base64.b64encode(safe_bytes(inp))
1122
1160
1123 #==============================================================================
1161 #==============================================================================
1124 # PERMS
1162 # PERMS
1125 #==============================================================================
1163 #==============================================================================
1126 from rhodecode.lib.auth import (
1164 from rhodecode.lib.auth import (
1127 HasPermissionAny, HasPermissionAll,
1165 HasPermissionAny, HasPermissionAll,
1128 HasRepoPermissionAny, HasRepoPermissionAll, HasRepoGroupPermissionAll,
1166 HasRepoPermissionAny, HasRepoPermissionAll, HasRepoGroupPermissionAll,
1129 HasRepoGroupPermissionAny, HasRepoPermissionAnyApi, get_csrf_token,
1167 HasRepoGroupPermissionAny, HasRepoPermissionAnyApi, get_csrf_token,
1130 csrf_token_key, AuthUser)
1168 csrf_token_key, AuthUser)
1131
1169
1132
1170
1133 #==============================================================================
1171 #==============================================================================
1134 # GRAVATAR URL
1172 # GRAVATAR URL
1135 #==============================================================================
1173 #==============================================================================
1136 class InitialsGravatar(object):
1174 class InitialsGravatar(object):
1137 def __init__(self, email_address, first_name, last_name, size=30,
1175 def __init__(self, email_address, first_name, last_name, size=30,
1138 background=None, text_color='#fff'):
1176 background=None, text_color='#fff'):
1139 self.size = size
1177 self.size = size
1140 self.first_name = first_name
1178 self.first_name = first_name
1141 self.last_name = last_name
1179 self.last_name = last_name
1142 self.email_address = email_address
1180 self.email_address = email_address
1143 self.background = background or self.str2color(email_address)
1181 self.background = background or self.str2color(email_address)
1144 self.text_color = text_color
1182 self.text_color = text_color
1145
1183
1146 def get_color_bank(self):
1184 def get_color_bank(self):
1147 """
1185 """
1148 returns a predefined list of colors that gravatars can use.
1186 returns a predefined list of colors that gravatars can use.
1149 Those are randomized distinct colors that guarantee readability and
1187 Those are randomized distinct colors that guarantee readability and
1150 uniqueness.
1188 uniqueness.
1151
1189
1152 generated with: http://phrogz.net/css/distinct-colors.html
1190 generated with: http://phrogz.net/css/distinct-colors.html
1153 """
1191 """
1154 return [
1192 return [
1155 '#bf3030', '#a67f53', '#00ff00', '#5989b3', '#392040', '#d90000',
1193 '#bf3030', '#a67f53', '#00ff00', '#5989b3', '#392040', '#d90000',
1156 '#402910', '#204020', '#79baf2', '#a700b3', '#bf6060', '#7f5320',
1194 '#402910', '#204020', '#79baf2', '#a700b3', '#bf6060', '#7f5320',
1157 '#008000', '#003059', '#ee00ff', '#ff0000', '#8c4b00', '#007300',
1195 '#008000', '#003059', '#ee00ff', '#ff0000', '#8c4b00', '#007300',
1158 '#005fb3', '#de73e6', '#ff4040', '#ffaa00', '#3df255', '#203140',
1196 '#005fb3', '#de73e6', '#ff4040', '#ffaa00', '#3df255', '#203140',
1159 '#47004d', '#591616', '#664400', '#59b365', '#0d2133', '#83008c',
1197 '#47004d', '#591616', '#664400', '#59b365', '#0d2133', '#83008c',
1160 '#592d2d', '#bf9f60', '#73e682', '#1d3f73', '#73006b', '#402020',
1198 '#592d2d', '#bf9f60', '#73e682', '#1d3f73', '#73006b', '#402020',
1161 '#b2862d', '#397341', '#597db3', '#e600d6', '#a60000', '#736039',
1199 '#b2862d', '#397341', '#597db3', '#e600d6', '#a60000', '#736039',
1162 '#00b318', '#79aaf2', '#330d30', '#ff8080', '#403010', '#16591f',
1200 '#00b318', '#79aaf2', '#330d30', '#ff8080', '#403010', '#16591f',
1163 '#002459', '#8c4688', '#e50000', '#ffbf40', '#00732e', '#102340',
1201 '#002459', '#8c4688', '#e50000', '#ffbf40', '#00732e', '#102340',
1164 '#bf60ac', '#8c4646', '#cc8800', '#00a642', '#1d3473', '#b32d98',
1202 '#bf60ac', '#8c4646', '#cc8800', '#00a642', '#1d3473', '#b32d98',
1165 '#660e00', '#ffd580', '#80ffb2', '#7391e6', '#733967', '#d97b6c',
1203 '#660e00', '#ffd580', '#80ffb2', '#7391e6', '#733967', '#d97b6c',
1166 '#8c5e00', '#59b389', '#3967e6', '#590047', '#73281d', '#665200',
1204 '#8c5e00', '#59b389', '#3967e6', '#590047', '#73281d', '#665200',
1167 '#00e67a', '#2d50b3', '#8c2377', '#734139', '#b2982d', '#16593a',
1205 '#00e67a', '#2d50b3', '#8c2377', '#734139', '#b2982d', '#16593a',
1168 '#001859', '#ff00aa', '#a65e53', '#ffcc00', '#0d3321', '#2d3959',
1206 '#001859', '#ff00aa', '#a65e53', '#ffcc00', '#0d3321', '#2d3959',
1169 '#731d56', '#401610', '#4c3d00', '#468c6c', '#002ca6', '#d936a3',
1207 '#731d56', '#401610', '#4c3d00', '#468c6c', '#002ca6', '#d936a3',
1170 '#d94c36', '#403920', '#36d9a3', '#0d1733', '#592d4a', '#993626',
1208 '#d94c36', '#403920', '#36d9a3', '#0d1733', '#592d4a', '#993626',
1171 '#cca300', '#00734d', '#46598c', '#8c005e', '#7f1100', '#8c7000',
1209 '#cca300', '#00734d', '#46598c', '#8c005e', '#7f1100', '#8c7000',
1172 '#00a66f', '#7382e6', '#b32d74', '#d9896c', '#ffe680', '#1d7362',
1210 '#00a66f', '#7382e6', '#b32d74', '#d9896c', '#ffe680', '#1d7362',
1173 '#364cd9', '#73003d', '#d93a00', '#998a4d', '#59b3a1', '#5965b3',
1211 '#364cd9', '#73003d', '#d93a00', '#998a4d', '#59b3a1', '#5965b3',
1174 '#e5007a', '#73341d', '#665f00', '#00b38f', '#0018b3', '#59163a',
1212 '#e5007a', '#73341d', '#665f00', '#00b38f', '#0018b3', '#59163a',
1175 '#b2502d', '#bfb960', '#00ffcc', '#23318c', '#a6537f', '#734939',
1213 '#b2502d', '#bfb960', '#00ffcc', '#23318c', '#a6537f', '#734939',
1176 '#b2a700', '#104036', '#3d3df2', '#402031', '#e56739', '#736f39',
1214 '#b2a700', '#104036', '#3d3df2', '#402031', '#e56739', '#736f39',
1177 '#79f2ea', '#000059', '#401029', '#4c1400', '#ffee00', '#005953',
1215 '#79f2ea', '#000059', '#401029', '#4c1400', '#ffee00', '#005953',
1178 '#101040', '#990052', '#402820', '#403d10', '#00ffee', '#0000d9',
1216 '#101040', '#990052', '#402820', '#403d10', '#00ffee', '#0000d9',
1179 '#ff80c4', '#a66953', '#eeff00', '#00ccbe', '#8080ff', '#e673a1',
1217 '#ff80c4', '#a66953', '#eeff00', '#00ccbe', '#8080ff', '#e673a1',
1180 '#a62c00', '#474d00', '#1a3331', '#46468c', '#733950', '#662900',
1218 '#a62c00', '#474d00', '#1a3331', '#46468c', '#733950', '#662900',
1181 '#858c23', '#238c85', '#0f0073', '#b20047', '#d9986c', '#becc00',
1219 '#858c23', '#238c85', '#0f0073', '#b20047', '#d9986c', '#becc00',
1182 '#396f73', '#281d73', '#ff0066', '#ff6600', '#dee673', '#59adb3',
1220 '#396f73', '#281d73', '#ff0066', '#ff6600', '#dee673', '#59adb3',
1183 '#6559b3', '#590024', '#b2622d', '#98b32d', '#36ced9', '#332d59',
1221 '#6559b3', '#590024', '#b2622d', '#98b32d', '#36ced9', '#332d59',
1184 '#40001a', '#733f1d', '#526600', '#005359', '#242040', '#bf6079',
1222 '#40001a', '#733f1d', '#526600', '#005359', '#242040', '#bf6079',
1185 '#735039', '#cef23d', '#007780', '#5630bf', '#66001b', '#b24700',
1223 '#735039', '#cef23d', '#007780', '#5630bf', '#66001b', '#b24700',
1186 '#acbf60', '#1d6273', '#25008c', '#731d34', '#a67453', '#50592d',
1224 '#acbf60', '#1d6273', '#25008c', '#731d34', '#a67453', '#50592d',
1187 '#00ccff', '#6600ff', '#ff0044', '#4c1f00', '#8a994d', '#79daf2',
1225 '#00ccff', '#6600ff', '#ff0044', '#4c1f00', '#8a994d', '#79daf2',
1188 '#a173e6', '#d93662', '#402310', '#aaff00', '#2d98b3', '#8c40ff',
1226 '#a173e6', '#d93662', '#402310', '#aaff00', '#2d98b3', '#8c40ff',
1189 '#592d39', '#ff8c40', '#354020', '#103640', '#1a0040', '#331a20',
1227 '#592d39', '#ff8c40', '#354020', '#103640', '#1a0040', '#331a20',
1190 '#331400', '#334d00', '#1d5673', '#583973', '#7f0022', '#4c3626',
1228 '#331400', '#334d00', '#1d5673', '#583973', '#7f0022', '#4c3626',
1191 '#88cc00', '#36a3d9', '#3d0073', '#d9364c', '#33241a', '#698c23',
1229 '#88cc00', '#36a3d9', '#3d0073', '#d9364c', '#33241a', '#698c23',
1192 '#5995b3', '#300059', '#e57382', '#7f3300', '#366600', '#00aaff',
1230 '#5995b3', '#300059', '#e57382', '#7f3300', '#366600', '#00aaff',
1193 '#3a1659', '#733941', '#663600', '#74b32d', '#003c59', '#7f53a6',
1231 '#3a1659', '#733941', '#663600', '#74b32d', '#003c59', '#7f53a6',
1194 '#73000f', '#ff8800', '#baf279', '#79caf2', '#291040', '#a6293a',
1232 '#73000f', '#ff8800', '#baf279', '#79caf2', '#291040', '#a6293a',
1195 '#b2742d', '#587339', '#0077b3', '#632699', '#400009', '#d9a66c',
1233 '#b2742d', '#587339', '#0077b3', '#632699', '#400009', '#d9a66c',
1196 '#294010', '#2d4a59', '#aa00ff', '#4c131b', '#b25f00', '#5ce600',
1234 '#294010', '#2d4a59', '#aa00ff', '#4c131b', '#b25f00', '#5ce600',
1197 '#267399', '#a336d9', '#990014', '#664e33', '#86bf60', '#0088ff',
1235 '#267399', '#a336d9', '#990014', '#664e33', '#86bf60', '#0088ff',
1198 '#7700b3', '#593a16', '#073300', '#1d4b73', '#ac60bf', '#e59539',
1236 '#7700b3', '#593a16', '#073300', '#1d4b73', '#ac60bf', '#e59539',
1199 '#4f8c46', '#368dd9', '#5c0073'
1237 '#4f8c46', '#368dd9', '#5c0073'
1200 ]
1238 ]
1201
1239
1202 def rgb_to_hex_color(self, rgb_tuple):
1240 def rgb_to_hex_color(self, rgb_tuple):
1203 """
1241 """
1204 Converts a passed rgb_tuple to a hex color string.
1242 Converts a passed rgb_tuple to a hex color string.
1205 
1243 
1206 :param rgb_tuple: tuple with 3 ints representing the rgb color space
1244 :param rgb_tuple: tuple with 3 ints representing the rgb color space
1207 """
1245 """
1208 return '#' + bytes(rgb_tuple).hex()
1246 return '#' + bytes(rgb_tuple).hex()
1209
1247
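# A quick check of the byte-to-hex conversion above (assumes a tuple of
# three ints in 0-255):
#
#   InitialsGravatar('x@y.com', '', '').rgb_to_hex_color((191, 48, 48))
#   # -> '#bf3030', the first entry of the color bank above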
1210 def email_to_int_list(self, email_str):
1248 def email_to_int_list(self, email_str):
1211 """
1249 """
1211 Get every byte of the hex digest value of the email and turn it into an integer.
1249 Get every byte of the hex digest value of the email and turn it into an integer.
1212 Each value is always between 0-255.
1250 Each value is always between 0-255.
1214 """
1252 """
1215 digest = md5_safe(email_str.lower())
1253 digest = md5_safe(email_str.lower())
1216 return [int(digest[i * 2:i * 2 + 2], 16) for i in range(16)]
1254 return [int(digest[i * 2:i * 2 + 2], 16) for i in range(16)]
1217
1255
1218 def pick_color_bank_index(self, email_str, color_bank):
1256 def pick_color_bank_index(self, email_str, color_bank):
1219 return self.email_to_int_list(email_str)[0] % len(color_bank)
1257 return self.email_to_int_list(email_str)[0] % len(color_bank)
1220
1258
1221 def str2color(self, email_str):
1259 def str2color(self, email_str):
1222 """
1260 """
1223 Tries to map an email to a color using a stable algorithm
1261 Tries to map an email to a color using a stable algorithm
1224
1262
1225 :param email_str:
1263 :param email_str:
1226 """
1264 """
1227 color_bank = self.get_color_bank()
1265 color_bank = self.get_color_bank()
1228 # pick position (modulo its length, so we always find it in the
1266 # pick position (modulo its length, so we always find it in the
1229 # bank even if it's smaller than 256 values)
1267 # bank even if it's smaller than 256 values)
1230 pos = self.pick_color_bank_index(email_str, color_bank)
1268 pos = self.pick_color_bank_index(email_str, color_bank)
1231 return color_bank[pos]
1269 return color_bank[pos]
1232
1270
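# The mapping is deterministic: the same email always selects the same
# bank entry, so avatar colors stay stable across requests:
#
#   ig = InitialsGravatar('jane@example.com', 'Jane', 'Doe')
#   ig.str2color('jane@example.com') == ig.str2color('jane@example.com')  # True
#   ig.background in ig.get_color_bank()                                  # True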
1233 def normalize_email(self, email_address):
1271 def normalize_email(self, email_address):
1234 # default host used to fill in the fake/missing email
1272 # default host used to fill in the fake/missing email
1235 default_host = 'localhost'
1273 default_host = 'localhost'
1236
1274
1237 if not email_address:
1275 if not email_address:
1238 email_address = f'{User.DEFAULT_USER}@{default_host}'
1276 email_address = f'{User.DEFAULT_USER}@{default_host}'
1239
1277
1240 email_address = safe_str(email_address)
1278 email_address = safe_str(email_address)
1241
1279
1242 if '@' not in email_address:
1280 if '@' not in email_address:
1243 email_address = f'{email_address}@{default_host}'
1281 email_address = f'{email_address}@{default_host}'
1244
1282
1245 if email_address.endswith('@'):
1283 if email_address.endswith('@'):
1246 email_address = f'{email_address}{default_host}'
1284 email_address = f'{email_address}{default_host}'
1247
1285
1248 email_address = convert_special_chars(email_address)
1286 email_address = convert_special_chars(email_address)
1249
1287
1250 return email_address
1288 return email_address
1251
1289
1252 def get_initials(self):
1290 def get_initials(self):
1253 """
1291 """
1254 Returns 2 letter initials calculated based on the input.
1292 Returns 2 letter initials calculated based on the input.
1255 The algorithm picks the given email address and takes the first letter
1293 The algorithm picks the given email address and takes the first letter
1256 of the part before @, then the first letter of the server name. In case
1294 of the part before @, then the first letter of the server name. In case
1257 the part before @ has the format `somestring.somestring2`, it replaces
1295 the part before @ has the format `somestring.somestring2`, it replaces
1258 the server letter with the first letter of somestring2.
1296 the server letter with the first letter of somestring2.
1259 
1297 
1260 In case the function was initialized with both first and last name, this
1298 In case the function was initialized with both first and last name, this
1261 overrides the email extraction with the first letters of the first and
1299 overrides the email extraction with the first letters of the first and
1262 last name. One special case: if the full name is compound,
1300 last name. One special case: if the full name is compound,
1263 like Guido Von Rossum, we use the last part of the last name
1301 like Guido Von Rossum, we use the last part of the last name
1264 (Von Rossum), picking `R`.
1302 (Von Rossum), picking `R`.
1265 
1303 
1266 The function also normalizes non-ascii characters to their ascii
1304 The function also normalizes non-ascii characters to their ascii
1267 representation, e.g. Ą => A
1305 representation, e.g. Ą => A
1268 """
1306 """
1269 # replace non-ascii to ascii
1307 # replace non-ascii to ascii
1270 first_name = convert_special_chars(self.first_name)
1308 first_name = convert_special_chars(self.first_name)
1271 last_name = convert_special_chars(self.last_name)
1309 last_name = convert_special_chars(self.last_name)
1272 # multi word last names, Guido Von Rossum, we take the last part only
1310 # multi word last names, Guido Von Rossum, we take the last part only
1273 last_name = last_name.split(' ', 1)[-1]
1311 last_name = last_name.split(' ', 1)[-1]
1274
1312
1275 # do NFKD encoding, and also make sure email has proper format
1313 # do NFKD encoding, and also make sure email has proper format
1276 email_address = self.normalize_email(self.email_address)
1314 email_address = self.normalize_email(self.email_address)
1277
1315
1278 # first push the email initials
1316 # first push the email initials
1279 prefix, server = email_address.split('@', 1)
1317 prefix, server = email_address.split('@', 1)
1280
1318
1281 # check if prefix is maybe a 'first_name.last_name' syntax
1319 # check if prefix is maybe a 'first_name.last_name' syntax
1282 _dot_split = prefix.rsplit('.', 1)
1320 _dot_split = prefix.rsplit('.', 1)
1283 if len(_dot_split) == 2 and _dot_split[1]:
1321 if len(_dot_split) == 2 and _dot_split[1]:
1284 initials = [_dot_split[0][0], _dot_split[1][0]]
1322 initials = [_dot_split[0][0], _dot_split[1][0]]
1285 else:
1323 else:
1286 initials = [prefix[0], server[0]]
1324 initials = [prefix[0], server[0]]
1287
1325
1288 # get first letter of first and last names to create initials
1326 # get first letter of first and last names to create initials
1289 fn_letter = (first_name or " ")[0].strip()
1327 fn_letter = (first_name or " ")[0].strip()
1290 ln_letter = (last_name or " ")[0].strip()
1328 ln_letter = (last_name or " ")[0].strip()
1291
1329
1292 if fn_letter:
1330 if fn_letter:
1293 initials[0] = fn_letter
1331 initials[0] = fn_letter
1294
1332
1295 if ln_letter:
1333 if ln_letter:
1296 initials[1] = ln_letter
1334 initials[1] = ln_letter
1297
1335
1298 return ''.join(initials).upper()
1336 return ''.join(initials).upper()
1299
1337
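# Illustrative outcomes of the rules described in the docstring:
#
#   InitialsGravatar('john.doe@example.com', '', '').get_initials()    # 'JD'
#   InitialsGravatar('john@example.com', '', '').get_initials()        # 'JE'
#   InitialsGravatar('x@y.com', 'Guido', 'Von Rossum').get_initials()  # 'GR'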
1300 def get_img_data_by_type(self, font_family, img_type):
1338 def get_img_data_by_type(self, font_family, img_type):
1301 default_user = """
1339 default_user = """
1302 <svg xmlns="http://www.w3.org/2000/svg"
1340 <svg xmlns="http://www.w3.org/2000/svg"
1303 version="1.1" x="0px" y="0px" width="{size}" height="{size}"
1341 version="1.1" x="0px" y="0px" width="{size}" height="{size}"
1304 viewBox="-15 -10 439.165 429.164"
1342 viewBox="-15 -10 439.165 429.164"
1305
1343
1306 xml:space="preserve"
1344 xml:space="preserve"
1307 font-family="{font_family}"
1345 font-family="{font_family}"
1308 style="background:{background};" >
1346 style="background:{background};" >
1309
1347
1310 <path d="M204.583,216.671c50.664,0,91.74-48.075,
1348 <path d="M204.583,216.671c50.664,0,91.74-48.075,
1311 91.74-107.378c0-82.237-41.074-107.377-91.74-107.377
1349 91.74-107.378c0-82.237-41.074-107.377-91.74-107.377
1312 c-50.668,0-91.74,25.14-91.74,107.377C112.844,
1350 c-50.668,0-91.74,25.14-91.74,107.377C112.844,
1313 168.596,153.916,216.671,
1351 168.596,153.916,216.671,
1314 204.583,216.671z" fill="{text_color}"/>
1352 204.583,216.671z" fill="{text_color}"/>
1315 <path d="M407.164,374.717L360.88,
1353 <path d="M407.164,374.717L360.88,
1316 270.454c-2.117-4.771-5.836-8.728-10.465-11.138l-71.83-37.392
1354 270.454c-2.117-4.771-5.836-8.728-10.465-11.138l-71.83-37.392
1317 c-1.584-0.823-3.502-0.663-4.926,0.415c-20.316,
1355 c-1.584-0.823-3.502-0.663-4.926,0.415c-20.316,
1318 15.366-44.203,23.488-69.076,23.488c-24.877,
1356 15.366-44.203,23.488-69.076,23.488c-24.877,
1319 0-48.762-8.122-69.078-23.488
1357 0-48.762-8.122-69.078-23.488
1320 c-1.428-1.078-3.346-1.238-4.93-0.415L58.75,
1358 c-1.428-1.078-3.346-1.238-4.93-0.415L58.75,
1321 259.316c-4.631,2.41-8.346,6.365-10.465,11.138L2.001,374.717
1359 259.316c-4.631,2.41-8.346,6.365-10.465,11.138L2.001,374.717
1322 c-3.191,7.188-2.537,15.412,1.75,22.005c4.285,
1360 c-3.191,7.188-2.537,15.412,1.75,22.005c4.285,
1323 6.592,11.537,10.526,19.4,10.526h362.861c7.863,0,15.117-3.936,
1361 6.592,11.537,10.526,19.4,10.526h362.861c7.863,0,15.117-3.936,
1324 19.402-10.527 C409.699,390.129,
1362 19.402-10.527 C409.699,390.129,
1325 410.355,381.902,407.164,374.717z" fill="{text_color}"/>
1363 410.355,381.902,407.164,374.717z" fill="{text_color}"/>
1326 </svg>""".format(
1364 </svg>""".format(
1327 size=self.size,
1365 size=self.size,
1328 background='#979797', # @grey4
1366 background='#979797', # @grey4
1329 text_color=self.text_color,
1367 text_color=self.text_color,
1330 font_family=font_family)
1368 font_family=font_family)
1331
1369
1332 return {
1370 return {
1333 "default_user": default_user
1371 "default_user": default_user
1334 }[img_type]
1372 }[img_type]
1335
1373
1336 def get_img_data(self, svg_type=None):
1374 def get_img_data(self, svg_type=None):
1337 """
1375 """
1338 generates the svg metadata for image
1376 generates the svg metadata for image
1339 """
1377 """
1340 fonts = [
1378 fonts = [
1341 '-apple-system',
1379 '-apple-system',
1342 'BlinkMacSystemFont',
1380 'BlinkMacSystemFont',
1343 'Segoe UI',
1381 'Segoe UI',
1344 'Roboto',
1382 'Roboto',
1345 'Oxygen-Sans',
1383 'Oxygen-Sans',
1346 'Ubuntu',
1384 'Ubuntu',
1347 'Cantarell',
1385 'Cantarell',
1348 'Helvetica Neue',
1386 'Helvetica Neue',
1349 'sans-serif'
1387 'sans-serif'
1350 ]
1388 ]
1351 font_family = ','.join(fonts)
1389 font_family = ','.join(fonts)
1352 if svg_type:
1390 if svg_type:
1353 return self.get_img_data_by_type(font_family, svg_type)
1391 return self.get_img_data_by_type(font_family, svg_type)
1354
1392
1355 initials = self.get_initials()
1393 initials = self.get_initials()
1356 img_data = """
1394 img_data = """
1357 <svg xmlns="http://www.w3.org/2000/svg" pointer-events="none"
1395 <svg xmlns="http://www.w3.org/2000/svg" pointer-events="none"
1358 width="{size}" height="{size}"
1396 width="{size}" height="{size}"
1359 style="width: 100%; height: 100%; background-color: {background}"
1397 style="width: 100%; height: 100%; background-color: {background}"
1360 viewBox="0 0 {size} {size}">
1398 viewBox="0 0 {size} {size}">
1361 <text text-anchor="middle" y="50%" x="50%" dy="0.35em"
1399 <text text-anchor="middle" y="50%" x="50%" dy="0.35em"
1362 pointer-events="auto" fill="{text_color}"
1400 pointer-events="auto" fill="{text_color}"
1363 font-family="{font_family}"
1401 font-family="{font_family}"
1364 style="font-weight: 400; font-size: {f_size}px;">{text}
1402 style="font-weight: 400; font-size: {f_size}px;">{text}
1365 </text>
1403 </text>
1366 </svg>""".format(
1404 </svg>""".format(
1367 size=self.size,
1405 size=self.size,
1368 f_size=self.size/2.05, # scale the text inside the box nicely
1406 f_size=self.size/2.05, # scale the text inside the box nicely
1369 background=self.background,
1407 background=self.background,
1370 text_color=self.text_color,
1408 text_color=self.text_color,
1371 text=initials.upper(),
1409 text=initials.upper(),
1372 font_family=font_family)
1410 font_family=font_family)
1373
1411
1374 return img_data
1412 return img_data
1375
1413
1376 def generate_svg(self, svg_type=None):
1414 def generate_svg(self, svg_type=None):
1377 img_data = base64_to_str(self.get_img_data(svg_type))
1415 img_data = base64_to_str(self.get_img_data(svg_type))
1378 return "data:image/svg+xml;base64,{}".format(img_data)
1416 return "data:image/svg+xml;base64,{}".format(img_data)
1379
1417
1380
1418
1381 def initials_gravatar(request, email_address, first_name, last_name, size=30, store_on_disk=False):
1419 def initials_gravatar(request, email_address, first_name, last_name, size=30, store_on_disk=False):
1382
1420
1383 svg_type = None
1421 svg_type = None
1384 if email_address == User.DEFAULT_USER_EMAIL:
1422 if email_address == User.DEFAULT_USER_EMAIL:
1385 svg_type = 'default_user'
1423 svg_type = 'default_user'
1386
1424
1387 klass = InitialsGravatar(email_address, first_name, last_name, size)
1425 klass = InitialsGravatar(email_address, first_name, last_name, size)
1388
1426
1389 if store_on_disk:
1427 if store_on_disk:
1390 from rhodecode.apps.file_store import utils as store_utils
1428 from rhodecode.apps.file_store import utils as store_utils
1391 from rhodecode.apps.file_store.exceptions import FileNotAllowedException, \
1429 from rhodecode.apps.file_store.exceptions import FileNotAllowedException, \
1392 FileOverSizeException
1430 FileOverSizeException
1393 from rhodecode.model.db import Session
1431 from rhodecode.model.db import Session
1394
1432
1395 image_key = md5_safe(email_address.lower()
1433 image_key = md5_safe(email_address.lower()
1396 + first_name.lower() + last_name.lower())
1434 + first_name.lower() + last_name.lower())
1397
1435
1398 storage = store_utils.get_file_storage(request.registry.settings)
1436 storage = store_utils.get_file_storage(request.registry.settings)
1399 filename = '{}.svg'.format(image_key)
1437 filename = '{}.svg'.format(image_key)
1400 subdir = 'gravatars'
1438 subdir = 'gravatars'
1401 # since the final name carries a counter, we apply counter 0 here
1439 # since the final name carries a counter, we apply counter 0 here
1403 store_uid = os.path.join(subdir, uid)
1441 store_uid = os.path.join(subdir, uid)
1404
1442
1405 db_entry = FileStore.get_by_store_uid(store_uid)
1443 db_entry = FileStore.get_by_store_uid(store_uid)
1406 if db_entry:
1444 if db_entry:
1407 return request.route_path('download_file', fid=store_uid)
1445 return request.route_path('download_file', fid=store_uid)
1408
1446
1409 img_data = klass.get_img_data(svg_type=svg_type)
1447 img_data = klass.get_img_data(svg_type=svg_type)
1410 img_file = store_utils.bytes_to_file_obj(img_data)
1448 img_file = store_utils.bytes_to_file_obj(img_data)
1411
1449
1412 try:
1450 try:
1413 store_uid, metadata = storage.save_file(
1451 store_uid, metadata = storage.save_file(
1414 img_file, filename, directory=subdir,
1452 img_file, filename, directory=subdir,
1415 extensions=['.svg'], randomized_name=False)
1453 extensions=['.svg'], randomized_name=False)
1416 except (FileNotAllowedException, FileOverSizeException):
1454 except (FileNotAllowedException, FileOverSizeException):
1417 raise
1455 raise
1418
1456
1419 try:
1457 try:
1420 entry = FileStore.create(
1458 entry = FileStore.create(
1421 file_uid=store_uid, filename=metadata["filename"],
1459 file_uid=store_uid, filename=metadata["filename"],
1422 file_hash=metadata["sha256"], file_size=metadata["size"],
1460 file_hash=metadata["sha256"], file_size=metadata["size"],
1423 file_display_name=filename,
1461 file_display_name=filename,
1424 file_description=f'user gravatar `{safe_str(filename)}`',
1462 file_description=f'user gravatar `{safe_str(filename)}`',
1425 hidden=True, check_acl=False, user_id=1
1463 hidden=True, check_acl=False, user_id=1
1426 )
1464 )
1427 Session().add(entry)
1465 Session().add(entry)
1428 Session().commit()
1466 Session().commit()
1429 log.debug('Stored upload in DB as %s', entry)
1467 log.debug('Stored upload in DB as %s', entry)
1430 except Exception:
1468 except Exception:
1431 raise
1469 raise
1432
1470
1433 return request.route_path('download_file', fid=store_uid)
1471 return request.route_path('download_file', fid=store_uid)
1434
1472
1435 else:
1473 else:
1436 return klass.generate_svg(svg_type=svg_type)
1474 return klass.generate_svg(svg_type=svg_type)
1437
1475
1438
1476
1439 def gravatar_external(request, gravatar_url_tmpl, email_address, size=30):
1477 def gravatar_external(request, gravatar_url_tmpl, email_address, size=30):
1440 return safe_str(gravatar_url_tmpl)\
1478 return safe_str(gravatar_url_tmpl)\
1441 .replace('{email}', email_address) \
1479 .replace('{email}', email_address) \
1442 .replace('{md5email}', md5_safe(email_address.lower())) \
1480 .replace('{md5email}', md5_safe(email_address.lower())) \
1443 .replace('{netloc}', request.host) \
1481 .replace('{netloc}', request.host) \
1444 .replace('{scheme}', request.scheme) \
1482 .replace('{scheme}', request.scheme) \
1445 .replace('{size}', safe_str(size))
1483 .replace('{size}', safe_str(size))
1446
1484
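# A sketch of the template substitution, using a hypothetical template
# (the real one comes from the visual settings or User.DEFAULT_GRAVATAR_URL):
#
#   tmpl = 'https://example.com/avatar/{md5email}?s={size}'
#   gravatar_external(request, tmpl, 'jane@example.com', size=64)
#   # -> 'https://example.com/avatar/<md5 of lowercased email>?s=64'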
1447
1485
1448 def gravatar_url(email_address, size=30, request=None):
1486 def gravatar_url(email_address, size=30, request=None):
1449 request = request or get_current_request()
1487 request = request or get_current_request()
1450 _use_gravatar = request.call_context.visual.use_gravatar
1488 _use_gravatar = request.call_context.visual.use_gravatar
1451
1489
1452 email_address = email_address or User.DEFAULT_USER_EMAIL
1490 email_address = email_address or User.DEFAULT_USER_EMAIL
1453 if isinstance(email_address, str):
1491 if isinstance(email_address, str):
1454 # hashlib crashes on unicode items
1492 # hashlib crashes on unicode items
1455 email_address = safe_str(email_address)
1493 email_address = safe_str(email_address)
1456
1494
1457 # empty email or default user
1495 # empty email or default user
1458 if not email_address or email_address == User.DEFAULT_USER_EMAIL:
1496 if not email_address or email_address == User.DEFAULT_USER_EMAIL:
1459 return initials_gravatar(request, User.DEFAULT_USER_EMAIL, '', '', size=size)
1497 return initials_gravatar(request, User.DEFAULT_USER_EMAIL, '', '', size=size)
1460
1498
1461 if _use_gravatar:
1499 if _use_gravatar:
1462 gravatar_url_tmpl = request.call_context.visual.gravatar_url \
1500 gravatar_url_tmpl = request.call_context.visual.gravatar_url \
1463 or User.DEFAULT_GRAVATAR_URL
1501 or User.DEFAULT_GRAVATAR_URL
1464 return gravatar_external(request, gravatar_url_tmpl, email_address, size=size)
1502 return gravatar_external(request, gravatar_url_tmpl, email_address, size=size)
1465
1503
1466 else:
1504 else:
1467 return initials_gravatar(request, email_address, '', '', size=size)
1505 return initials_gravatar(request, email_address, '', '', size=size)
1468
1506
1469
1507
1470 def breadcrumb_repo_link(repo):
1508 def breadcrumb_repo_link(repo):
1471 """
1509 """
1472 Makes a breadcrumbs path link to repo
1510 Makes a breadcrumbs path link to repo
1473
1511
1474 ex::
1512 ex::
1475 group >> subgroup >> repo
1513 group >> subgroup >> repo
1476
1514
1477 :param repo: a Repository instance
1515 :param repo: a Repository instance
1478 """
1516 """
1479
1517
1480 path = [
1518 path = [
1481 link_to(group.name, route_path('repo_group_home', repo_group_name=group.group_name),
1519 link_to(group.name, route_path('repo_group_home', repo_group_name=group.group_name),
1482 title='last change:{}'.format(format_date(group.last_commit_change)))
1520 title='last change:{}'.format(format_date(group.last_commit_change)))
1483 for group in repo.groups_with_parents
1521 for group in repo.groups_with_parents
1484 ] + [
1522 ] + [
1485 link_to(repo.just_name, route_path('repo_summary', repo_name=repo.repo_name),
1523 link_to(repo.just_name, route_path('repo_summary', repo_name=repo.repo_name),
1486 title='last change:{}'.format(format_date(repo.last_commit_change)))
1524 title='last change:{}'.format(format_date(repo.last_commit_change)))
1487 ]
1525 ]
1488
1526
1489 return literal(' &raquo; '.join(path))
1527 return literal(' &raquo; '.join(path))
1490
1528
1491
1529
1492 def breadcrumb_repo_group_link(repo_group):
1530 def breadcrumb_repo_group_link(repo_group):
1493 """
1531 """
1494 Makes a breadcrumbs path link to a repo group
1532 Makes a breadcrumbs path link to a repo group
1495
1533
1496 ex::
1534 ex::
1497 group >> subgroup
1535 group >> subgroup
1498
1536
1499 :param repo_group: a Repository Group instance
1537 :param repo_group: a Repository Group instance
1500 """
1538 """
1501
1539
1502 path = [
1540 path = [
1503 link_to(group.name,
1541 link_to(group.name,
1504 route_path('repo_group_home', repo_group_name=group.group_name),
1542 route_path('repo_group_home', repo_group_name=group.group_name),
1505 title='last change:{}'.format(format_date(group.last_commit_change)))
1543 title='last change:{}'.format(format_date(group.last_commit_change)))
1506 for group in repo_group.parents
1544 for group in repo_group.parents
1507 ] + [
1545 ] + [
1508 link_to(repo_group.name,
1546 link_to(repo_group.name,
1509 route_path('repo_group_home', repo_group_name=repo_group.group_name),
1547 route_path('repo_group_home', repo_group_name=repo_group.group_name),
1510 title='last change:{}'.format(format_date(repo_group.last_commit_change)))
1548 title='last change:{}'.format(format_date(repo_group.last_commit_change)))
1511 ]
1549 ]
1512
1550
1513 return literal(' &raquo; '.join(path))
1551 return literal(' &raquo; '.join(path))
1514
1552
1515
1553
1516 def format_byte_size_binary(file_size):
1554 def format_byte_size_binary(file_size):
1517 """
1555 """
1518 Formats file/folder sizes to a standard human-readable form.
1556 Formats file/folder sizes to a standard human-readable form.
1519 """
1557 """
1520 if file_size is None:
1558 if file_size is None:
1521 file_size = 0
1559 file_size = 0
1522
1560
1523 formatted_size = format_byte_size(file_size, binary=True)
1561 formatted_size = format_byte_size(file_size, binary=True)
1524 return formatted_size
1562 return formatted_size
1525
1563
1526
1564
1527 def urlify_text(text_, safe=True, **href_attrs):
1565 def urlify_text(text_, safe=True, **href_attrs):
1528 """
1566 """
1529 Extract urls from text and make html links out of them
1567 Extract urls from text and make html links out of them
1530 """
1568 """
1531
1569
1532 url_pat = re.compile(r'''(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@#.&+]'''
1570 url_pat = re.compile(r'''(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@#.&+]'''
1533 '''|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)''')
1571 r'''|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)''')
1534
1572
1535 def url_func(match_obj):
1573 def url_func(match_obj):
1536 url_full = match_obj.groups()[0]
1574 url_full = match_obj.groups()[0]
1537 a_options = dict(href_attrs)
1575 a_options = dict(href_attrs)
1538 a_options['href'] = url_full
1576 a_options['href'] = url_full
1539 a_text = url_full
1577 a_text = url_full
1540 return HTML.tag("a", a_text, **a_options)
1578 return HTML.tag("a", a_text, **a_options)
1541
1579
1542 _new_text = url_pat.sub(url_func, text_)
1580 _new_text = url_pat.sub(url_func, text_)
1543
1581
1544 if safe:
1582 if safe:
1545 return literal(_new_text)
1583 return literal(_new_text)
1546 return _new_text
1584 return _new_text
1547
1585
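# A minimal example of the URL extraction above; the exact attribute
# order in the generated tag depends on HTML.tag:
#
#   urlify_text('docs at https://example.com/guide', target='_blank')
#   # -> literal('docs at <a href="https://example.com/guide"
#   #     target="_blank">https://example.com/guide</a>')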
1548
1586
1549 def urlify_commits(text_, repo_name):
1587 def urlify_commits(text_, repo_name):
1550 """
1588 """
1551 Extract commit ids from text and make link from them
1589 Extract commit ids from text and make link from them
1552
1590
1553 :param text_:
1591 :param text_:
1554 :param repo_name: repo name to build the URL with
1592 :param repo_name: repo name to build the URL with
1555 """
1593 """
1556
1594
1557 url_pat = re.compile(r'(^|\s)([0-9a-fA-F]{12,40})($|\s)')
1595 url_pat = re.compile(r'(^|\s)([0-9a-fA-F]{12,40})($|\s)')
1558
1596
1559 def url_func(match_obj):
1597 def url_func(match_obj):
1560 commit_id = match_obj.groups()[1]
1598 commit_id = match_obj.groups()[1]
1561 pref = match_obj.groups()[0]
1599 pref = match_obj.groups()[0]
1562 suf = match_obj.groups()[2]
1600 suf = match_obj.groups()[2]
1563
1601
1564 tmpl = (
1602 tmpl = (
1565 '%(pref)s<a class="tooltip-hovercard %(cls)s" href="%(url)s" data-hovercard-alt="%(hovercard_alt)s" data-hovercard-url="%(hovercard_url)s">'
1603 '%(pref)s<a class="tooltip-hovercard %(cls)s" href="%(url)s" data-hovercard-alt="%(hovercard_alt)s" data-hovercard-url="%(hovercard_url)s">'
1566 '%(commit_id)s</a>%(suf)s'
1604 '%(commit_id)s</a>%(suf)s'
1567 )
1605 )
1568 return tmpl % {
1606 return tmpl % {
1569 'pref': pref,
1607 'pref': pref,
1570 'cls': 'revision-link',
1608 'cls': 'revision-link',
1571 'url': route_url(
1609 'url': route_url(
1572 'repo_commit', repo_name=repo_name, commit_id=commit_id),
1610 'repo_commit', repo_name=repo_name, commit_id=commit_id),
1573 'commit_id': commit_id,
1611 'commit_id': commit_id,
1574 'suf': suf,
1612 'suf': suf,
1575 'hovercard_alt': 'Commit: {}'.format(commit_id),
1613 'hovercard_alt': 'Commit: {}'.format(commit_id),
1576 'hovercard_url': route_url(
1614 'hovercard_url': route_url(
1577 'hovercard_repo_commit', repo_name=repo_name, commit_id=commit_id)
1615 'hovercard_repo_commit', repo_name=repo_name, commit_id=commit_id)
1578 }
1616 }
1579
1617
1580 new_text = url_pat.sub(url_func, text_)
1618 new_text = url_pat.sub(url_func, text_)
1581
1619
1582 return new_text
1620 return new_text
1583
1621
1584
1622
1585 def _process_url_func(match_obj, repo_name, uid, entry,
1623 def _process_url_func(match_obj, repo_name, uid, entry,
1586 return_raw_data=False, link_format='html'):
1624 return_raw_data=False, link_format='html'):
1587 pref = ''
1625 pref = ''
1588 if match_obj.group().startswith(' '):
1626 if match_obj.group().startswith(' '):
1589 pref = ' '
1627 pref = ' '
1590
1628
1591 issue_id = ''.join(match_obj.groups())
1629 issue_id = ''.join(match_obj.groups())
1592
1630
1593 if link_format == 'html':
1631 if link_format == 'html':
1594 tmpl = (
1632 tmpl = (
1595 '%(pref)s<a class="tooltip %(cls)s" href="%(url)s" title="%(title)s">'
1633 '%(pref)s<a class="tooltip %(cls)s" href="%(url)s" title="%(title)s">'
1596 '%(issue-prefix)s%(id-repr)s'
1634 '%(issue-prefix)s%(id-repr)s'
1597 '</a>')
1635 '</a>')
1598 elif link_format == 'html+hovercard':
1636 elif link_format == 'html+hovercard':
1599 tmpl = (
1637 tmpl = (
1600 '%(pref)s<a class="tooltip-hovercard %(cls)s" href="%(url)s" data-hovercard-url="%(hovercard_url)s">'
1638 '%(pref)s<a class="tooltip-hovercard %(cls)s" href="%(url)s" data-hovercard-url="%(hovercard_url)s">'
1601 '%(issue-prefix)s%(id-repr)s'
1639 '%(issue-prefix)s%(id-repr)s'
1602 '</a>')
1640 '</a>')
1603 elif link_format in ['rst', 'rst+hovercard']:
1641 elif link_format in ['rst', 'rst+hovercard']:
1604 tmpl = '`%(issue-prefix)s%(id-repr)s <%(url)s>`_'
1642 tmpl = '`%(issue-prefix)s%(id-repr)s <%(url)s>`_'
1605 elif link_format in ['markdown', 'markdown+hovercard']:
1643 elif link_format in ['markdown', 'markdown+hovercard']:
1606 tmpl = '[%(pref)s%(issue-prefix)s%(id-repr)s](%(url)s)'
1644 tmpl = '[%(pref)s%(issue-prefix)s%(id-repr)s](%(url)s)'
1607 else:
1645 else:
1608 raise ValueError('Bad link_format:{}'.format(link_format))
1646 raise ValueError('Bad link_format:{}'.format(link_format))
1609
1647
1610 (repo_name_cleaned,
1648 (repo_name_cleaned,
1611 parent_group_name) = RepoGroupModel()._get_group_name_and_parent(repo_name)
1649 parent_group_name) = RepoGroupModel()._get_group_name_and_parent(repo_name)
1612
1650
1613 # variables replacement
1651 # variables replacement
1614 named_vars = {
1652 named_vars = {
1615 'id': issue_id,
1653 'id': issue_id,
1616 'repo': repo_name,
1654 'repo': repo_name,
1617 'repo_name': repo_name_cleaned,
1655 'repo_name': repo_name_cleaned,
1618 'group_name': parent_group_name,
1656 'group_name': parent_group_name,
1619 # set dummy keys so we always have them
1657 # set dummy keys so we always have them
1620 'hostname': '',
1658 'hostname': '',
1621 'netloc': '',
1659 'netloc': '',
1622 'scheme': ''
1660 'scheme': ''
1623 }
1661 }
1624
1662
1625 request = get_current_request()
1663 request = get_current_request()
1626 if request:
1664 if request:
1627 # exposes hostname, netloc, scheme
1665 # exposes hostname, netloc, scheme
1628 host_data = get_host_info(request)
1666 host_data = get_host_info(request)
1629 named_vars.update(host_data)
1667 named_vars.update(host_data)
1630
1668
1631 # named regex variables
1669 # named regex variables
1632 named_vars.update(match_obj.groupdict())
1670 named_vars.update(match_obj.groupdict())
1633 _url = string.Template(entry['url']).safe_substitute(**named_vars)
1671 _url = string.Template(entry['url']).safe_substitute(**named_vars)
1634 desc = string.Template(escape(entry['desc'])).safe_substitute(**named_vars)
1672 desc = string.Template(escape(entry['desc'])).safe_substitute(**named_vars)
1635 hovercard_url = string.Template(entry.get('hovercard_url', '')).safe_substitute(**named_vars)
1673 hovercard_url = string.Template(entry.get('hovercard_url', '')).safe_substitute(**named_vars)
1636
1674
1637 def quote_cleaner(input_str):
1675 def quote_cleaner(input_str):
1638 """Remove quotes as it's HTML"""
1676 """Remove quotes as it's HTML"""
1639 return input_str.replace('"', '')
1677 return input_str.replace('"', '')
1640
1678
1641 data = {
1679 data = {
1642 'pref': pref,
1680 'pref': pref,
1643 'cls': quote_cleaner('issue-tracker-link'),
1681 'cls': quote_cleaner('issue-tracker-link'),
1644 'url': quote_cleaner(_url),
1682 'url': quote_cleaner(_url),
1645 'id-repr': issue_id,
1683 'id-repr': issue_id,
1646 'issue-prefix': entry['pref'],
1684 'issue-prefix': entry['pref'],
1647 'serv': entry['url'],
1685 'serv': entry['url'],
1648 'title': sanitize_html(desc, strip=True),
1686 'title': sanitize_html(desc, strip=True),
1649 'hovercard_url': hovercard_url
1687 'hovercard_url': hovercard_url
1650 }
1688 }
1651
1689
1652 if return_raw_data:
1690 if return_raw_data:
1653 return {
1691 return {
1654 'id': issue_id,
1692 'id': issue_id,
1655 'url': _url
1693 'url': _url
1656 }
1694 }
1657 return tmpl % data
1695 return tmpl % data
1658
1696
1659
1697
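# A minimal, self-contained sketch of the ${var} substitution performed by
# _process_url_func above, using only stdlib string.Template; the tracker URL
# and variable values here are hypothetical.
import string

url_pattern = 'https://tracker.example.com/${repo}/issue/${id}'
named_vars = {'repo': 'my-repo', 'id': '42'}
# safe_substitute leaves unknown ${placeholders} intact instead of raising
assert (string.Template(url_pattern).safe_substitute(**named_vars)
        == 'https://tracker.example.com/my-repo/issue/42')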
1660 def get_active_pattern_entries(repo_name):
1698 def get_active_pattern_entries(repo_name):
1661 repo = None
1699 repo = None
1662 if repo_name:
1700 if repo_name:
1663 # Look up the repo so that an invalid repo_name does not explode in
1701 # Look up the repo so that an invalid repo_name does not explode in
1664 # IssueTrackerSettingsModel; the (possibly invalid) name is still passed further down
1702 # IssueTrackerSettingsModel; the (possibly invalid) name is still passed further down
1665 repo = Repository.get_by_repo_name(repo_name, cache=True)
1703 repo = Repository.get_by_repo_name(repo_name, cache=True)
1666
1704
1667 settings_model = IssueTrackerSettingsModel(repo=repo)
1705 settings_model = IssueTrackerSettingsModel(repo=repo)
1668 active_entries = settings_model.get_settings(cache=True)
1706 active_entries = settings_model.get_settings(cache=True)
1669 return active_entries
1707 return active_entries
1670
1708
1671
1709
1672 pr_pattern_re = regex.compile(r'(?:(?:^!)|(?: !))(\d+)')
1710 pr_pattern_re = regex.compile(r'(?:(?:^!)|(?: !))(\d+)')
1673
1711
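# A quick illustration of what pr_pattern_re captures: `!<digits>` at the
# start of the text or preceded by a space; the sample strings are made up.
assert pr_pattern_re.findall('!12 merged, see also !34') == ['12', '34']
assert pr_pattern_re.findall('shout!99') == []  # `!` neither at start nor after a space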
1674 allowed_link_formats = [
1712 allowed_link_formats = [
1675 'html', 'rst', 'markdown', 'html+hovercard', 'rst+hovercard', 'markdown+hovercard']
1713 'html', 'rst', 'markdown', 'html+hovercard', 'rst+hovercard', 'markdown+hovercard']
1676
1714
1677 compile_cache = {
1715 compile_cache = {
1678
1716
1679 }
1717 }
1680
1718
1681
1719
1682 def process_patterns(text_string, repo_name, link_format='html', active_entries=None):
1720 def process_patterns(text_string, repo_name, link_format='html', active_entries=None):
1683
1721
1684 if link_format not in allowed_link_formats:
1722 if link_format not in allowed_link_formats:
1685 raise ValueError('Link format can only be one of: {}, got: {}'.format(
1723 raise ValueError('Link format can only be one of: {}, got: {}'.format(
1686 allowed_link_formats, link_format))
1724 allowed_link_formats, link_format))
1687 issues_data = []
1725 issues_data = []
1688 errors = []
1726 errors = []
1689 new_text = text_string
1727 new_text = text_string
1690
1728
1691 if active_entries is None:
1729 if active_entries is None:
1692 log.debug('Fetch active issue tracker patterns for repo: %s', repo_name)
1730 log.debug('Fetch active issue tracker patterns for repo: %s', repo_name)
1693 active_entries = get_active_pattern_entries(repo_name)
1731 active_entries = get_active_pattern_entries(repo_name)
1694
1732
1695 log.debug('Got %s pattern entries to process', len(active_entries))
1733 log.debug('Got %s pattern entries to process', len(active_entries))
1696
1734
1697 for uid, entry in list(active_entries.items()):
1735 for uid, entry in list(active_entries.items()):
1698
1736
1699 if not (entry['pat'] and entry['url']):
1737 if not (entry['pat'] and entry['url']):
1700 log.debug('skipping due to missing data')
1738 log.debug('skipping due to missing data')
1701 continue
1739 continue
1702
1740
1703 log.debug('issue tracker entry: uid: `%s` PAT:%s URL:%s PREFIX:%s',
1741 log.debug('issue tracker entry: uid: `%s` PAT:%s URL:%s PREFIX:%s',
1704 uid, entry['pat'], entry['url'], entry['pref'])
1742 uid, entry['pat'], entry['url'], entry['pref'])
1705
1743
1706 if entry.get('pat_compiled'):
1744 if entry.get('pat_compiled'):
1707 pattern = entry['pat_compiled']
1745 pattern = entry['pat_compiled']
1708 elif entry['pat'] in compile_cache:
1746 elif entry['pat'] in compile_cache:
1709 pattern = compile_cache[entry['pat']]
1747 pattern = compile_cache[entry['pat']]
1710 else:
1748 else:
1711 try:
1749 try:
1712 pattern = regex.compile(r'%s' % entry['pat'])
1750 pattern = regex.compile(r'%s' % entry['pat'])
1713 except regex.error as e:
1751 except regex.error as e:
1714 regex_err = ValueError('{}:{}'.format(entry['pat'], e))
1752 regex_err = ValueError('{}:{}'.format(entry['pat'], e))
1715 log.exception('issue tracker pattern: `%s` failed to compile', regex_err)
1753 log.exception('issue tracker pattern: `%s` failed to compile', regex_err)
1716 errors.append(regex_err)
1754 errors.append(regex_err)
1717 continue
1755 continue
1718 compile_cache[entry['pat']] = pattern
1756 compile_cache[entry['pat']] = pattern
1719
1757
1720 data_func = partial(
1758 data_func = partial(
1721 _process_url_func, repo_name=repo_name, entry=entry, uid=uid,
1759 _process_url_func, repo_name=repo_name, entry=entry, uid=uid,
1722 return_raw_data=True)
1760 return_raw_data=True)
1723
1761
1724 for match_obj in pattern.finditer(text_string):
1762 for match_obj in pattern.finditer(text_string):
1725 issues_data.append(data_func(match_obj))
1763 issues_data.append(data_func(match_obj))
1726
1764
1727 url_func = partial(
1765 url_func = partial(
1728 _process_url_func, repo_name=repo_name, entry=entry, uid=uid,
1766 _process_url_func, repo_name=repo_name, entry=entry, uid=uid,
1729 link_format=link_format)
1767 link_format=link_format)
1730
1768
1731 new_text = pattern.sub(url_func, new_text)
1769 new_text = pattern.sub(url_func, new_text)
1732 log.debug('processed prefix:uid `%s`', uid)
1770 log.debug('processed prefix:uid `%s`', uid)
1733
1771
1734 # finally use a global replace, e.g. !123 -> pr-link; these will not trigger
1772 # finally use a global replace, e.g. !123 -> pr-link; these will not trigger
1735 # if a similar pattern was already matched above
1773 # if a similar pattern was already matched above
1736 server_url = '${scheme}://${netloc}'
1774 server_url = '${scheme}://${netloc}'
1737 pr_entry = {
1775 pr_entry = {
1738 'pref': '!',
1776 'pref': '!',
1739 'url': server_url + '/_admin/pull-requests/${id}',
1777 'url': server_url + '/_admin/pull-requests/${id}',
1740 'desc': 'Pull Request !${id}',
1778 'desc': 'Pull Request !${id}',
1741 'hovercard_url': server_url + '/_hovercard/pull_request/${id}'
1779 'hovercard_url': server_url + '/_hovercard/pull_request/${id}'
1742 }
1780 }
1743 pr_url_func = partial(
1781 pr_url_func = partial(
1744 _process_url_func, repo_name=repo_name, entry=pr_entry, uid=None,
1782 _process_url_func, repo_name=repo_name, entry=pr_entry, uid=None,
1745 link_format=link_format+'+hovercard')
1783 link_format=link_format+'+hovercard')
1746 new_text = pr_pattern_re.sub(pr_url_func, new_text)
1784 new_text = pr_pattern_re.sub(pr_url_func, new_text)
1747 log.debug('processed !pr pattern')
1785 log.debug('processed !pr pattern')
1748
1786
1749 return new_text, issues_data, errors
1787 return new_text, issues_data, errors
1750
1788
1751
1789
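# Hedged usage sketch for process_patterns (assumes a configured RhodeCode
# environment; the repo name and message below are made up):
#
#   text, issues, errors = process_patterns(
#       'Fixes #42 via !7', 'my-repo', link_format='markdown')
#
# `text` is the message with tracker and !PR references rewritten as links,
# `issues` holds one {'id': ..., 'url': ...} dict per matched reference, and
# `errors` collects the ValueError objects for patterns that failed to compile.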
1752 def urlify_commit_message(commit_text, repository=None, active_pattern_entries=None,
1790 def urlify_commit_message(commit_text, repository=None, active_pattern_entries=None,
1753 issues_container_callback=None, error_container=None):
1791 issues_container_callback=None, error_container=None):
1754 """
1792 """
1755 Parses the given text message and makes proper links.
1793 Parses the given text message and makes proper links.
1756 Issues are linked to the given issue server; the rest become commit links.
1794 Issues are linked to the given issue server; the rest become commit links.
1757 """
1795 """
1758
1796
1759 def escaper(_text):
1797 def escaper(_text):
1760 return _text.replace('<', '&lt;').replace('>', '&gt;')
1798 return _text.replace('<', '&lt;').replace('>', '&gt;')
1761
1799
1762 new_text = escaper(commit_text)
1800 new_text = escaper(commit_text)
1763
1801
1764 # extract http/https links and make them real urls
1802 # extract http/https links and make them real urls
1765 new_text = urlify_text(new_text, safe=False)
1803 new_text = urlify_text(new_text, safe=False)
1766
1804
1767 # urlify commits - extract commit ids and make links out of them, if we
1805 # urlify commits - extract commit ids and make links out of them, if we
1768 # have the repository scope present.
1806 # have the repository scope present.
1769 if repository:
1807 if repository:
1770 new_text = urlify_commits(new_text, repository)
1808 new_text = urlify_commits(new_text, repository)
1771
1809
1772 # process issue tracker patterns
1810 # process issue tracker patterns
1773 new_text, issues, errors = process_patterns(
1811 new_text, issues, errors = process_patterns(
1774 new_text, repository or '', active_entries=active_pattern_entries)
1812 new_text, repository or '', active_entries=active_pattern_entries)
1775
1813
1776 if issues_container_callback is not None:
1814 if issues_container_callback is not None:
1777 for issue in issues:
1815 for issue in issues:
1778 issues_container_callback(issue)
1816 issues_container_callback(issue)
1779
1817
1780 if error_container is not None:
1818 if error_container is not None:
1781 error_container.extend(errors)
1819 error_container.extend(errors)
1782
1820
1783 return literal(new_text)
1821 return literal(new_text)
1784
1822
1785
1823
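# The nested escaper above only neutralizes angle brackets; ampersand handling
# is left to urlify_text. A tiny self-contained check mirroring that substitution:
assert ('<b>fix</b>'.replace('<', '&lt;').replace('>', '&gt;')
        == '&lt;b&gt;fix&lt;/b&gt;')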
1786 def render_binary(repo_name, file_obj):
1824 def render_binary(repo_name, file_obj):
1787 """
1825 """
1788 Choose how to render a binary file
1826 Choose how to render a binary file
1789 """
1827 """
1790
1828
1791 # filename is a str (unicode)
1829 # filename is a str (unicode)
1792 filename = file_obj.name
1830 filename = file_obj.name
1793
1831
1794 # images
1832 # images
1795 for ext in ['*.png', '*.jpeg', '*.jpg', '*.ico', '*.gif']:
1833 for ext in ['*.png', '*.jpeg', '*.jpg', '*.ico', '*.gif']:
1796 if fnmatch.fnmatch(filename, pat=ext):
1834 if fnmatch.fnmatch(filename, pat=ext):
1797 src = route_path(
1835 src = route_path(
1798 'repo_file_raw', repo_name=repo_name,
1836 'repo_file_raw', repo_name=repo_name,
1799 commit_id=file_obj.commit.raw_id,
1837 commit_id=file_obj.commit.raw_id,
1800 f_path=file_obj.path)
1838 f_path=file_obj.path)
1801
1839
1802 return literal(
1840 return literal(
1803 '<img class="rendered-binary" alt="rendered-image" src="{}">'.format(src))
1841 '<img class="rendered-binary" alt="rendered-image" src="{}">'.format(src))
1804
1842
1805
1843
1806 def renderer_from_filename(filename, exclude=None):
1844 def renderer_from_filename(filename, exclude=None):
1807 """
1845 """
1808 Choose a renderer based on the filename; this works only for text-based files.
1846 Choose a renderer based on the filename; this works only for text-based files.
1809 """
1847 """
1810
1848
1811 # ipython
1849 # ipython
1812 for ext in ['*.ipynb']:
1850 for ext in ['*.ipynb']:
1813 if fnmatch.fnmatch(filename, pat=ext):
1851 if fnmatch.fnmatch(filename, pat=ext):
1814 return 'jupyter'
1852 return 'jupyter'
1815
1853
1816 is_markup = MarkupRenderer.renderer_from_filename(filename, exclude=exclude)
1854 is_markup = MarkupRenderer.renderer_from_filename(filename, exclude=exclude)
1817 if is_markup:
1855 if is_markup:
1818 return is_markup
1856 return is_markup
1819 return None
1857 return None
1820
1858
1821
1859
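# Per the fnmatch dispatch above, notebook files map to the 'jupyter' renderer
# before MarkupRenderer is consulted; the filename is made up.
assert renderer_from_filename('analysis.ipynb') == 'jupyter'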
1822 def render(source, renderer='rst', mentions=False, relative_urls=None,
1860 def render(source, renderer='rst', mentions=False, relative_urls=None,
1823 repo_name=None, active_pattern_entries=None, issues_container_callback=None):
1861 repo_name=None, active_pattern_entries=None, issues_container_callback=None):
1824
1862
1825 def maybe_convert_relative_links(html_source):
1863 def maybe_convert_relative_links(html_source):
1826 if relative_urls:
1864 if relative_urls:
1827 return relative_links(html_source, relative_urls)
1865 return relative_links(html_source, relative_urls)
1828 return html_source
1866 return html_source
1829
1867
1830 if renderer == 'plain':
1868 if renderer == 'plain':
1831 return literal(
1869 return literal(
1832 MarkupRenderer.plain(source, leading_newline=False))
1870 MarkupRenderer.plain(source, leading_newline=False))
1833
1871
1834 elif renderer == 'rst':
1872 elif renderer == 'rst':
1835 if repo_name:
1873 if repo_name:
1836 # process patterns on comments if we pass in repo name
1874 # process patterns on comments if we pass in repo name
1837 source, issues, errors = process_patterns(
1875 source, issues, errors = process_patterns(
1838 source, repo_name, link_format='rst',
1876 source, repo_name, link_format='rst',
1839 active_entries=active_pattern_entries)
1877 active_entries=active_pattern_entries)
1840 if issues_container_callback is not None:
1878 if issues_container_callback is not None:
1841 for issue in issues:
1879 for issue in issues:
1842 issues_container_callback(issue)
1880 issues_container_callback(issue)
1843
1881
1844 rendered_block = maybe_convert_relative_links(
1882 rendered_block = maybe_convert_relative_links(
1845 MarkupRenderer.rst(source, mentions=mentions))
1883 MarkupRenderer.rst(source, mentions=mentions))
1846
1884
1847 return literal(f'<div class="rst-block">{rendered_block}</div>')
1885 return literal(f'<div class="rst-block">{rendered_block}</div>')
1848
1886
1849 elif renderer == 'markdown':
1887 elif renderer == 'markdown':
1850 if repo_name:
1888 if repo_name:
1851 # process patterns on comments if we pass in repo name
1889 # process patterns on comments if we pass in repo name
1852 source, issues, errors = process_patterns(
1890 source, issues, errors = process_patterns(
1853 source, repo_name, link_format='markdown',
1891 source, repo_name, link_format='markdown',
1854 active_entries=active_pattern_entries)
1892 active_entries=active_pattern_entries)
1855 if issues_container_callback is not None:
1893 if issues_container_callback is not None:
1856 for issue in issues:
1894 for issue in issues:
1857 issues_container_callback(issue)
1895 issues_container_callback(issue)
1858
1896
1859 rendered_block = maybe_convert_relative_links(
1897 rendered_block = maybe_convert_relative_links(
1860 MarkupRenderer.markdown(source, flavored=True, mentions=mentions))
1898 MarkupRenderer.markdown(source, flavored=True, mentions=mentions))
1861 return literal(f'<div class="markdown-block">{rendered_block}</div>')
1899 return literal(f'<div class="markdown-block">{rendered_block}</div>')
1862
1900
1863 elif renderer == 'jupyter':
1901 elif renderer == 'jupyter':
1864 rendered_block = maybe_convert_relative_links(
1902 rendered_block = maybe_convert_relative_links(
1865 MarkupRenderer.jupyter(source))
1903 MarkupRenderer.jupyter(source))
1866 return literal(f'<div class="ipynb">{rendered_block}</div>')
1904 return literal(f'<div class="ipynb">{rendered_block}</div>')
1867
1905
1868 # None means just show the file-source
1906 # None means just show the file-source
1869 return None
1907 return None
1870
1908
1871
1909
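# Hedged usage sketch of render(); the markdown source and repo name are made
# up. With repo_name set, issue-tracker patterns are expanded before rendering:
#
#   html = render('Fixes #42', renderer='markdown', repo_name='my-repo')
#   # -> literal('<div class="markdown-block">...</div>')
#
# Any unknown renderer value falls through to `return None`, which callers
# treat as "show the raw file source".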
1872 def commit_status(repo, commit_id):
1910 def commit_status(repo, commit_id):
1873 return ChangesetStatusModel().get_status(repo, commit_id)
1911 return ChangesetStatusModel().get_status(repo, commit_id)
1874
1912
1875
1913
1876 def commit_status_lbl(commit_status):
1914 def commit_status_lbl(commit_status):
1877 return dict(ChangesetStatus.STATUSES).get(commit_status)
1915 return dict(ChangesetStatus.STATUSES).get(commit_status)
1878
1916
1879
1917
1880 def commit_time(repo_name, commit_id):
1918 def commit_time(repo_name, commit_id):
1881 repo = Repository.get_by_repo_name(repo_name)
1919 repo = Repository.get_by_repo_name(repo_name)
1882 commit = repo.get_commit(commit_id=commit_id)
1920 commit = repo.get_commit(commit_id=commit_id)
1883 return commit.date
1921 return commit.date
1884
1922
1885
1923
1886 def get_permission_name(key):
1924 def get_permission_name(key):
1887 return dict(Permission.PERMS).get(key)
1925 return dict(Permission.PERMS).get(key)
1888
1926
1889
1927
1890 def journal_filter_help(request):
1928 def journal_filter_help(request):
1891 _ = request.translate
1929 _ = request.translate
1892 from rhodecode.lib.audit_logger import ACTIONS
1930 from rhodecode.lib.audit_logger import ACTIONS
1893 actions = '\n'.join(textwrap.wrap(', '.join(sorted(ACTIONS.keys())), 80))
1931 actions = '\n'.join(textwrap.wrap(', '.join(sorted(ACTIONS.keys())), 80))
1894
1932
1895 return _(
1933 return _(
1896 'Example filter terms:\n' +
1934 'Example filter terms:\n' +
1897 ' repository:vcs\n' +
1935 ' repository:vcs\n' +
1898 ' username:marcin\n' +
1936 ' username:marcin\n' +
1899 ' username:(NOT marcin)\n' +
1937 ' username:(NOT marcin)\n' +
1900 ' action:*push*\n' +
1938 ' action:*push*\n' +
1901 ' ip:127.0.0.1\n' +
1939 ' ip:127.0.0.1\n' +
1902 ' date:20120101\n' +
1940 ' date:20120101\n' +
1903 ' date:[20120101100000 TO 20120102]\n' +
1941 ' date:[20120101100000 TO 20120102]\n' +
1904 '\n' +
1942 '\n' +
1905 'Actions: {actions}\n' +
1943 'Actions: {actions}\n' +
1906 '\n' +
1944 '\n' +
1907 'Generate wildcards using \'*\' character:\n' +
1945 'Generate wildcards using \'*\' character:\n' +
1908 ' "repository:vcs*" - search everything starting with \'vcs\'\n' +
1946 ' "repository:vcs*" - search everything starting with \'vcs\'\n' +
1909 ' "repository:*vcs*" - search for repository containing \'vcs\'\n' +
1947 ' "repository:*vcs*" - search for repository containing \'vcs\'\n' +
1910 '\n' +
1948 '\n' +
1911 'Optional AND / OR operators in queries\n' +
1949 'Optional AND / OR operators in queries\n' +
1912 ' "repository:vcs OR repository:test"\n' +
1950 ' "repository:vcs OR repository:test"\n' +
1913 ' "username:test AND repository:test*"\n'
1951 ' "username:test AND repository:test*"\n'
1914 ).format(actions=actions)
1952 ).format(actions=actions)
1915
1953
1916
1954
1917 def not_mapped_error(repo_name):
1955 def not_mapped_error(repo_name):
1918 from rhodecode.translation import _
1956 from rhodecode.translation import _
1919 flash(_('%s repository is not mapped to db, perhaps'
1957 flash(_('%s repository is not mapped to db, perhaps'
1920 ' it was created or renamed from the filesystem.'
1958 ' it was created or renamed from the filesystem.'
1921 ' Please run the application again'
1959 ' Please run the application again'
1922 ' in order to rescan repositories') % repo_name, category='error')
1960 ' in order to rescan repositories') % repo_name, category='error')
1923
1961
1924
1962
1925 def ip_range(ip_addr):
1963 def ip_range(ip_addr):
1926 from rhodecode.model.db import UserIpMap
1964 from rhodecode.model.db import UserIpMap
1927 s, e = UserIpMap._get_ip_range(ip_addr)
1965 s, e = UserIpMap._get_ip_range(ip_addr)
1928 return '%s - %s' % (s, e)
1966 return '%s - %s' % (s, e)
1929
1967
1930
1968
1931 def form(url, method='post', needs_csrf_token=True, **attrs):
1969 def form(url, method='post', needs_csrf_token=True, **attrs):
1932 """Wrapper around webhelpers.tags.form to prevent CSRF attacks."""
1970 """Wrapper around webhelpers.tags.form to prevent CSRF attacks."""
1933 if method.lower() != 'get' and needs_csrf_token:
1971 if method.lower() != 'get' and needs_csrf_token:
1934 raise Exception(
1972 raise Exception(
1935 'Forms to POST/PUT/DELETE endpoints should have (in general) a ' +
1973 'Forms to POST/PUT/DELETE endpoints should have (in general) a ' +
1936 'CSRF token. If the endpoint does not require such a token, you can ' +
1974 'CSRF token. If the endpoint does not require such a token, you can ' +
1937 'explicitly set the parameter needs_csrf_token to false.')
1975 'explicitly set the parameter needs_csrf_token to false.')
1938
1976
1939 return insecure_form(url, method=method, **attrs)
1977 return insecure_form(url, method=method, **attrs)
1940
1978
1941
1979
1942 def secure_form(form_url, method="POST", multipart=False, **attrs):
1980 def secure_form(form_url, method="POST", multipart=False, **attrs):
1943 """Start a form tag that points the action to an url. This
1981 """Start a form tag that points the action to an url. This
1944 form tag will also include the hidden field containing
1982 form tag will also include the hidden field containing
1945 the auth token.
1983 the auth token.
1946
1984
1947 The url options should be given either as a string, or as a
1985 The url options should be given either as a string, or as a
1948 ``url()`` function. The method for the form defaults to POST.
1986 ``url()`` function. The method for the form defaults to POST.
1949
1987
1950 Options:
1988 Options:
1951
1989
1952 ``multipart``
1990 ``multipart``
1953 If set to True, the enctype is set to "multipart/form-data".
1991 If set to True, the enctype is set to "multipart/form-data".
1954 ``method``
1992 ``method``
1955 The method to use when submitting the form, usually either
1993 The method to use when submitting the form, usually either
1956 "GET" or "POST". If "PUT", "DELETE", or another verb is used, a
1994 "GET" or "POST". If "PUT", "DELETE", or another verb is used, a
1957 hidden input with name _method is added to simulate the verb
1995 hidden input with name _method is added to simulate the verb
1958 over POST.
1996 over POST.
1959
1997
1960 """
1998 """
1961
1999
1962 if 'request' in attrs:
2000 if 'request' in attrs:
1963 session = attrs['request'].session
2001 session = attrs['request'].session
1964 del attrs['request']
2002 del attrs['request']
1965 else:
2003 else:
1966 raise ValueError(
2004 raise ValueError(
1967 'Calling this form requires request= to be passed as argument')
2005 'Calling this form requires request= to be passed as argument')
1968
2006
1969 _form = insecure_form(form_url, method, multipart, **attrs)
2007 _form = insecure_form(form_url, method, multipart, **attrs)
1970 token = literal(
2008 token = literal(
1971 '<input type="hidden" name="{}" value="{}">'.format(
2009 '<input type="hidden" name="{}" value="{}">'.format(
1972 csrf_token_key, get_csrf_token(session)))
2010 csrf_token_key, get_csrf_token(session)))
1973
2011
1974 return literal("%s\n%s" % (_form, token))
2012 return literal("%s\n%s" % (_form, token))
1975
2013
1976
2014
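# Indicative shape of secure_form output (assuming csrf_token_key is
# 'csrf_token'; the action URL below is made up):
#
#   <form action="/my-repo/settings" method="POST">
#   <input type="hidden" name="csrf_token" value="...session token...">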
1977 def dropdownmenu(name, selected, options, enable_filter=False, **attrs):
2015 def dropdownmenu(name, selected, options, enable_filter=False, **attrs):
1978 select_html = select(name, selected, options, **attrs)
2016 select_html = select(name, selected, options, **attrs)
1979
2017
1980 select2 = """
2018 select2 = """
1981 <script>
2019 <script>
1982 $(document).ready(function() {
2020 $(document).ready(function() {
1983 $('#%s').select2({
2021 $('#%s').select2({
1984 containerCssClass: 'drop-menu %s',
2022 containerCssClass: 'drop-menu %s',
1985 dropdownCssClass: 'drop-menu-dropdown',
2023 dropdownCssClass: 'drop-menu-dropdown',
1986 dropdownAutoWidth: true%s
2024 dropdownAutoWidth: true%s
1987 });
2025 });
1988 });
2026 });
1989 </script>
2027 </script>
1990 """
2028 """
1991
2029
1992 filter_option = """,
2030 filter_option = """,
1993 minimumResultsForSearch: -1
2031 minimumResultsForSearch: -1
1994 """
2032 """
1995 input_id = attrs.get('id') or name
2033 input_id = attrs.get('id') or name
1996 extra_classes = ' '.join(attrs.pop('extra_classes', []))
2034 extra_classes = ' '.join(attrs.pop('extra_classes', []))
1997 filter_enabled = "" if enable_filter else filter_option
2035 filter_enabled = "" if enable_filter else filter_option
1998 select_script = literal(select2 % (input_id, extra_classes, filter_enabled))
2036 select_script = literal(select2 % (input_id, extra_classes, filter_enabled))
1999
2037
2000 return literal(select_html+select_script)
2038 return literal(select_html+select_script)
2001
2039
2002
2040
2003 def get_visual_attr(tmpl_context_var, attr_name):
2041 def get_visual_attr(tmpl_context_var, attr_name):
2004 """
2042 """
2005 A safe way to get a variable from visual variable of template context
2043 A safe way to get a variable from visual variable of template context
2006
2044
2007 :param tmpl_context_var: instance of tmpl_context, usually present as `c`
2045 :param tmpl_context_var: instance of tmpl_context, usually present as `c`
2008 :param attr_name: name of the attribute we fetch from the c.visual
2046 :param attr_name: name of the attribute we fetch from the c.visual
2009 """
2047 """
2010 visual = getattr(tmpl_context_var, 'visual', None)
2048 visual = getattr(tmpl_context_var, 'visual', None)
2011 if not visual:
2049 if not visual:
2012 return
2050 return
2013 else:
2051 else:
2014 return getattr(visual, attr_name, None)
2052 return getattr(visual, attr_name, None)
2015
2053
2016
2054
2017 def get_last_path_part(file_node):
2055 def get_last_path_part(file_node):
2018 if not file_node.path:
2056 if not file_node.path:
2019 return '/'
2057 return '/'
2020
2058
2021 path = safe_str(file_node.path.split('/')[-1])
2059 path = safe_str(file_node.path.split('/')[-1])
2022 return '../' + path
2060 return '../' + path
2023
2061
2024
2062
2025 def route_url(*args, **kwargs):
2063 def route_url(*args, **kwargs):
2026 """
2064 """
2027 Wrapper around Pyramid's `route_url` (fully qualified URL) function.
2065 Wrapper around Pyramid's `route_url` (fully qualified URL) function.
2028 """
2066 """
2029 req = get_current_request()
2067 req = get_current_request()
2030 return req.route_url(*args, **kwargs)
2068 return req.route_url(*args, **kwargs)
2031
2069
2032
2070
2033 def route_path(*args, **kwargs):
2071 def route_path(*args, **kwargs):
2034 """
2072 """
2035 Wrapper around Pyramid's `route_path` function.
2073 Wrapper around Pyramid's `route_path` function.
2036 """
2074 """
2037 req = get_current_request()
2075 req = get_current_request()
2038 return req.route_path(*args, **kwargs)
2076 return req.route_path(*args, **kwargs)
2039
2077
2040
2078
2041 def route_path_or_none(*args, **kwargs):
2079 def route_path_or_none(*args, **kwargs):
2042 try:
2080 try:
2043 return route_path(*args, **kwargs)
2081 return route_path(*args, **kwargs)
2044 except KeyError:
2082 except KeyError:
2045 return None
2083 return None
2046
2084
2047
2085
2048 def current_route_path(request, **kw):
2086 def current_route_path(request, **kw):
2049 new_args = request.GET.mixed()
2087 new_args = request.GET.mixed()
2050 new_args.update(kw)
2088 new_args.update(kw)
2051 return request.current_route_path(_query=new_args)
2089 return request.current_route_path(_query=new_args)
2052
2090
2053
2091
2054 def curl_api_example(method, args):
2092 def curl_api_example(method, args):
2055 args_json = json.dumps(OrderedDict([
2093 args_json = json.dumps(OrderedDict([
2056 ('id', 1),
2094 ('id', 1),
2057 ('auth_token', 'SECRET'),
2095 ('auth_token', 'SECRET'),
2058 ('method', method),
2096 ('method', method),
2059 ('args', args)
2097 ('args', args)
2060 ]))
2098 ]))
2061
2099
2062 return "curl {api_url} -X POST -H 'content-type:text/plain' --data-binary '{args_json}'".format(
2100 return "curl {api_url} -X POST -H 'content-type:text/plain' --data-binary '{args_json}'".format(
2063 api_url=route_url('apiv2'),
2101 api_url=route_url('apiv2'),
2064 args_json=args_json
2102 args_json=args_json
2065 )
2103 )
2066
2104
2067
2105
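# Indicative output of curl_api_example; the server URL depends on
# route_url('apiv2'), and the method/args here are hypothetical:
#
#   curl_api_example('get_repo', {'repoid': 'my-repo'})
#   # curl https://server/_admin/api -X POST -H 'content-type:text/plain' \
#   #   --data-binary '{"id": 1, "auth_token": "SECRET", "method": "get_repo",
#   #                   "args": {"repoid": "my-repo"}}'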
2068 def api_call_example(method, args):
2106 def api_call_example(method, args):
2069 """
2107 """
2070 Generates an API call example via CURL
2108 Generates an API call example via CURL
2071 """
2109 """
2072 curl_call = curl_api_example(method, args)
2110 curl_call = curl_api_example(method, args)
2073
2111
2074 return literal(
2112 return literal(
2075 curl_call +
2113 curl_call +
2076 "<br/><br/>SECRET can be found in <a href=\"{token_url}\">auth-tokens</a> page, "
2114 "<br/><br/>SECRET can be found in <a href=\"{token_url}\">auth-tokens</a> page, "
2077 "and needs to be of `api calls` role."
2115 "and needs to be of `api calls` role."
2078 .format(token_url=route_url('my_account_auth_tokens')))
2116 .format(token_url=route_url('my_account_auth_tokens')))
2079
2117
2080
2118
2081 def notification_description(notification, request):
2119 def notification_description(notification, request):
2082 """
2120 """
2083 Generate a human-readable notification description based on the notification type
2121 Generate a human-readable notification description based on the notification type
2084 """
2122 """
2085 from rhodecode.model.notification import NotificationModel
2123 from rhodecode.model.notification import NotificationModel
2086 return NotificationModel().make_description(
2124 return NotificationModel().make_description(
2087 notification, translate=request.translate)
2125 notification, translate=request.translate)
2088
2126
2089
2127
2090 def go_import_header(request, db_repo=None):
2128 def go_import_header(request, db_repo=None):
2091 """
2129 """
2092 Creates a go-import meta header used by the Go toolchain (go get)
2130 Creates a go-import meta header used by the Go toolchain (go get)
2093 """
2131 """
2094
2132
2095 if not db_repo:
2133 if not db_repo:
2096 return
2134 return
2097 if 'go-get' not in request.GET:
2135 if 'go-get' not in request.GET:
2098 return
2136 return
2099
2137
2100 clone_url = db_repo.clone_url()
2138 clone_url = db_repo.clone_url()
2101 prefix = re.split(r'^https?://', clone_url)[-1]
2139 prefix = re.split(r'^https?://', clone_url)[-1]
2102 # we have a repo and go-get flag,
2140 # we have a repo and go-get flag,
2103 return literal('<meta name="go-import" content="{} {} {}">'.format(
2141 return literal('<meta name="go-import" content="{} {} {}">'.format(
2104 prefix, db_repo.repo_type, clone_url))
2142 prefix, db_repo.repo_type, clone_url))
2105
2143
2106
2144
2107 def reviewer_as_json(*args, **kwargs):
2145 def reviewer_as_json(*args, **kwargs):
2108 from rhodecode.apps.repository.utils import reviewer_as_json as _reviewer_as_json
2146 from rhodecode.apps.repository.utils import reviewer_as_json as _reviewer_as_json
2109 return _reviewer_as_json(*args, **kwargs)
2147 return _reviewer_as_json(*args, **kwargs)
2110
2148
2111
2149
2112 def get_repo_view_type(request):
2150 def get_repo_view_type(request):
2113 route_name = request.matched_route.name
2151 route_name = request.matched_route.name
2114 route_to_view_type = {
2152 route_to_view_type = {
2115 'repo_changelog': 'commits',
2153 'repo_changelog': 'commits',
2116 'repo_commits': 'commits',
2154 'repo_commits': 'commits',
2117 'repo_files': 'files',
2155 'repo_files': 'files',
2118 'repo_summary': 'summary',
2156 'repo_summary': 'summary',
2119 'repo_commit': 'commit'
2157 'repo_commit': 'commit'
2120 }
2158 }
2121
2159
2122 return route_to_view_type.get(route_name)
2160 return route_to_view_type.get(route_name)
2123
2161
2124
2162
2125 def is_active(menu_entry, selected):
2163 def is_active(menu_entry, selected):
2126 """
2164 """
2127 Returns active class for selecting menus in templates
2165 Returns active class for selecting menus in templates
2128 <li class=${h.is_active('settings', current_active)}></li>
2166 <li class=${h.is_active('settings', current_active)}></li>
2129 """
2167 """
2130 if not isinstance(menu_entry, list):
2168 if not isinstance(menu_entry, list):
2131 menu_entry = [menu_entry]
2169 menu_entry = [menu_entry]
2132
2170
2133 if selected in menu_entry:
2171 if selected in menu_entry:
2134 return "active"
2172 return "active"
2135
2173
2136
2174
2137 class IssuesRegistry(object):
2175 class IssuesRegistry(object):
2138 """
2176 """
2139 issues_registry = IssuesRegistry()
2177 issues_registry = IssuesRegistry()
2140 some_func(issues_callback=issues_registry(...))
2178 some_func(issues_callback=issues_registry(...))
2141 """
2179 """
2142
2180
2143 def __init__(self):
2181 def __init__(self):
2144 self.issues = []
2182 self.issues = []
2145 self.unique_issues = collections.defaultdict(lambda: [])
2183 self.unique_issues = collections.defaultdict(lambda: [])
2146
2184
2147 def __call__(self, commit_dict=None):
2185 def __call__(self, commit_dict=None):
2148 def callback(issue):
2186 def callback(issue):
2149 if commit_dict and issue:
2187 if commit_dict and issue:
2150 issue['commit'] = commit_dict
2188 issue['commit'] = commit_dict
2151 self.issues.append(issue)
2189 self.issues.append(issue)
2152 self.unique_issues[issue['id']].append(issue)
2190 self.unique_issues[issue['id']].append(issue)
2153 return callback
2191 return callback
2154
2192
2155 def get_issues(self):
2193 def get_issues(self):
2156 return self.issues
2194 return self.issues
2157
2195
2158 @property
2196 @property
2159 def issues_unique_count(self):
2197 def issues_unique_count(self):
2160 return len(set(i['id'] for i in self.issues))
2198 return len(set(i['id'] for i in self.issues))
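# A runnable sketch of the registry mechanics in isolation; the issue dicts
# below are made up and would normally be produced by process_patterns().
registry = IssuesRegistry()
callback = registry(commit_dict={'raw_id': 'deadbeef'})
callback({'id': '42', 'url': 'https://tracker.example.com/i/42'})
callback({'id': '42', 'url': 'https://tracker.example.com/i/42'})
assert registry.issues_unique_count == 1  # de-duplicated by issue id
assert registry.get_issues()[0]['commit']['raw_id'] == 'deadbeef'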
@@ -1,197 +1,197 b''
1
1
2
2
3 # Copyright (C) 2012-2023 RhodeCode GmbH
3 # Copyright (C) 2012-2023 RhodeCode GmbH
4 #
4 #
5 # This program is free software: you can redistribute it and/or modify
5 # This program is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU Affero General Public License, version 3
6 # it under the terms of the GNU Affero General Public License, version 3
7 # (only), as published by the Free Software Foundation.
7 # (only), as published by the Free Software Foundation.
8 #
8 #
9 # This program is distributed in the hope that it will be useful,
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU General Public License for more details.
12 # GNU General Public License for more details.
13 #
13 #
14 # You should have received a copy of the GNU Affero General Public License
14 # You should have received a copy of the GNU Affero General Public License
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 #
16 #
17 # This program is dual-licensed. If you wish to learn more about the
17 # This program is dual-licensed. If you wish to learn more about the
18 # RhodeCode Enterprise Edition, including its added features, Support services,
18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 # and proprietary license terms, please see https://rhodecode.com/licenses/
19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20 import re
20 import re
21
21
22 import pygments.filter
22 import pygments.filter
23 import pygments.filters
23 import pygments.filters
24 from pygments.token import Comment
24 from pygments.token import Comment
25
25
26 HL_BEG_MARKER = '__RCSearchHLMarkBEG__'
26 HL_BEG_MARKER = '__RCSearchHLMarkBEG__'
27 HL_END_MARKER = '__RCSearchHLMarkEND__'
27 HL_END_MARKER = '__RCSearchHLMarkEND__'
28 HL_MARKER_RE = '{}(.*?){}'.format(HL_BEG_MARKER, HL_END_MARKER)
28 HL_MARKER_RE = '{}(.*?){}'.format(HL_BEG_MARKER, HL_END_MARKER)
29
29
30
30
31 class ElasticSearchHLFilter(pygments.filters.Filter):
31 class ElasticSearchHLFilter(pygments.filters.Filter):
32 _names = [HL_BEG_MARKER, HL_END_MARKER]
32 _names = [HL_BEG_MARKER, HL_END_MARKER]
33
33
34 def __init__(self, **options):
34 def __init__(self, **options):
35 pygments.filters.Filter.__init__(self, **options)
35 pygments.filters.Filter.__init__(self, **options)
36
36
37 def filter(self, lexer, stream):
37 def filter(self, lexer, stream):
38 def tokenize(_value):
38 def tokenize(_value):
39 for token in re.split('({}|{})'.format(
39 for token in re.split('({}|{})'.format(
40 self._names[0], self._names[1]), _value):
40 self._names[0], self._names[1]), _value):
41 if token:
41 if token:
42 yield token
42 yield token
43
43
44 hl = False
44 hl = False
45 for ttype, value in stream:
45 for ttype, value in stream:
46
46
47 if self._names[0] in value or self._names[1] in value:
47 if self._names[0] in value or self._names[1] in value:
48 for item in tokenize(value):
48 for item in tokenize(value):
49 if item == self._names[0]:
49 if item == self._names[0]:
50 # skip marker, but start HL
50 # skip marker, but start HL
51 hl = True
51 hl = True
52 continue
52 continue
53 elif item == self._names[1]:
53 elif item == self._names[1]:
54 hl = False
54 hl = False
55 continue
55 continue
56
56
57 if hl:
57 if hl:
58 yield Comment.ElasticMatch, item
58 yield Comment.ElasticMatch, item
59 else:
59 else:
60 yield ttype, item
60 yield ttype, item
61 else:
61 else:
62 if hl:
62 if hl:
63 yield Comment.ElasticMatch, value
63 yield Comment.ElasticMatch, value
64 else:
64 else:
65 yield ttype, value
65 yield ttype, value
66
66
67
67
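# Sketch of the marker protocol: the search backend wraps each hit in the
# BEG/END markers, and the filter re-emits wrapped text as Comment.ElasticMatch
# tokens while passing everything else through unchanged.
marked = '{}match{}'.format(HL_BEG_MARKER, HL_END_MARKER)
# A lexer configured with ElasticSearchHLFilter() would turn `marked` into a
# single (Comment.ElasticMatch, 'match') token, with both markers stripped.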
68 def extract_phrases(text_query):
68 def extract_phrases(text_query):
69 """
69 """
70 Extracts phrases from a search term string, making sure phrases
70 Extracts phrases from a search term string, making sure phrases
71 contained in double quotes are kept together, and discarding empty
71 contained in double quotes are kept together, and discarding empty
72 or whitespace-only values, e.g.
72 or whitespace-only values, e.g.
73
73
74 'some text "a phrase" more' => ['some', 'text', 'a phrase', 'more']
74 'some text "a phrase" more' => ['some', 'text', 'a phrase', 'more']
75
75
76 """
76 """
77
77
78 in_phrase = False
78 in_phrase = False
79 buf = ''
79 buf = ''
80 phrases = []
80 phrases = []
81 for char in text_query:
81 for char in text_query:
82 if in_phrase:
82 if in_phrase:
83 if char == '"': # end phrase
83 if char == '"': # end phrase
84 phrases.append(buf)
84 phrases.append(buf)
85 buf = ''
85 buf = ''
86 in_phrase = False
86 in_phrase = False
87 continue
87 continue
88 else:
88 else:
89 buf += char
89 buf += char
90 continue
90 continue
91 else:
91 else:
92 if char == '"': # start phrase
92 if char == '"': # start phrase
93 in_phrase = True
93 in_phrase = True
94 phrases.append(buf)
94 phrases.append(buf)
95 buf = ''
95 buf = ''
96 continue
96 continue
97 elif char == ' ':
97 elif char == ' ':
98 phrases.append(buf)
98 phrases.append(buf)
99 buf = ''
99 buf = ''
100 continue
100 continue
101 else:
101 else:
102 buf += char
102 buf += char
103
103
104 phrases.append(buf)
104 phrases.append(buf)
105 phrases = [phrase.strip() for phrase in phrases if phrase.strip()]
105 phrases = [phrase.strip() for phrase in phrases if phrase.strip()]
106 return phrases
106 return phrases
107
107
108
108
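# Two edge cases implied by the parser above: an unterminated quote keeps the
# tail as one phrase, and empty or whitespace-only chunks are dropped.
assert extract_phrases('a "b c') == ['a', 'b c']
assert extract_phrases('   ') == []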
109 def get_matching_phrase_offsets(text, phrases):
109 def get_matching_phrase_offsets(text, phrases):
110 """
110 """
111 Returns a list of string offsets in `text` where the given `phrases` match
111 Returns a list of string offsets in `text` where the given `phrases` match
112
112
113 >>> get_matching_phrase_offsets('some text here', ['some', 'here'])
113 >>> get_matching_phrase_offsets('some text here', ['some', 'here'])
114 [(0, 4), (10, 14)]
114 [(0, 4), (10, 14)]
115
115
116 """
116 """
117 phrases = phrases or []
117 phrases = phrases or []
118 offsets = []
118 offsets = []
119
119
120 for phrase in phrases:
120 for phrase in phrases:
121 for match in re.finditer(phrase, text):
121 for match in re.finditer(phrase, text):
122 offsets.append((match.start(), match.end()))
122 offsets.append((match.start(), match.end()))
123
123
124 return offsets
124 return offsets
125
125
126
126
127 def get_matching_markers_offsets(text, markers=None):
127 def get_matching_markers_offsets(text, markers=None):
128 """
128 """
129 Returns a list of string offsets in `text` that are between matching markers
129 Returns a list of string offsets in `text` that are between matching markers
130
130
131 >>> get_matching_markers_offsets('$1some$2 text $1here$2 marked', [r'\$1(.*?)\$2'])
131 >>> get_matching_markers_offsets('$1some$2 text $1here$2 marked', [r'\$1(.*?)\$2'])
132 [(0, 8), (14, 22)]
132 [(0, 8), (14, 22)]
133
133
134 """
134 """
135 markers = markers or [HL_MARKER_RE]
135 markers = markers or [HL_MARKER_RE]
136 offsets = []
136 offsets = []
137
137
138 if markers:
138 if markers:
139 for mark in markers:
139 for mark in markers:
140 for match in re.finditer(mark, text):
140 for match in re.finditer(mark, text):
141 offsets.append((match.start(), match.end()))
141 offsets.append((match.start(), match.end()))
142
142
143 return offsets
143 return offsets
144
144
145
145
146 def normalize_text_for_matching(x):
146 def normalize_text_for_matching(x):
147 """
147 """
148 Replaces all non-alphanumeric characters with spaces and lower-cases the string;
148 Replaces all non-alphanumeric characters with spaces and lower-cases the string;
149 useful for comparing two text strings without punctuation
149 useful for comparing two text strings without punctuation
150 """
150 """
151 return re.sub(r'[^\w]', ' ', x.lower())
151 return re.sub(r'\W', ' ', x.lower())
152
152
153
153
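# A quick check of the normalization; note that marker matching in
# get_matching_line_offsets below deliberately runs on the raw line, since
# normalization would mangle the markers themselves.
assert normalize_text_for_matching('Fix: bug #42!') == 'fix  bug  42 '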
154 def get_matching_line_offsets(lines, terms=None, markers=None):
154 def get_matching_line_offsets(lines, terms=None, markers=None):
155 """ Return a set of `lines` indices (starting from 1) matching a
155 """ Return a set of `lines` indices (starting from 1) matching a
156 text search query, along with `context` lines above/below matching lines
156 text search query, along with `context` lines above/below matching lines
157
157
158 :param lines: list of strings representing lines
158 :param lines: list of strings representing lines
159 :param terms: search term string to match in lines, e.g. 'some text'
159 :param terms: search term string to match in lines, e.g. 'some text'
160 :param markers: instead of terms, use highlight markers that
160 :param markers: instead of terms, use highlight markers that
161 mark the beginning and end of a matched item, e.g. ['START(.*?)END']
161 mark the beginning and end of a matched item, e.g. ['START(.*?)END']
162
162
163 eg.
163 eg.
164
164
165 text = '''
165 text = '''
166 words words words
166 words words words
167 words words words
167 words words words
168 some text some
168 some text some
169 words words words
169 words words words
170 words words words
170 words words words
171 text here what
171 text here what
172 '''
172 '''
173 get_matching_line_offsets(text, 'text')
173 get_matching_line_offsets(text, 'text')
174 (6, {3: [(5, 9)], 6: [(0, 4)]})
174 (6, {3: [(5, 9)], 6: [(0, 4)]})
175
175
176 """
176 """
177 matching_lines = {}
177 matching_lines = {}
178 line_index = 0
178 line_index = 0
179
179
180 if terms:
180 if terms:
181 phrases = [normalize_text_for_matching(phrase)
181 phrases = [normalize_text_for_matching(phrase)
182 for phrase in extract_phrases(terms)]
182 for phrase in extract_phrases(terms)]
183
183
184 for line_index, line in enumerate(lines.splitlines(), start=1):
184 for line_index, line in enumerate(lines.splitlines(), start=1):
185 normalized_line = normalize_text_for_matching(line)
185 normalized_line = normalize_text_for_matching(line)
186 match_offsets = get_matching_phrase_offsets(normalized_line, phrases)
186 match_offsets = get_matching_phrase_offsets(normalized_line, phrases)
187 if match_offsets:
187 if match_offsets:
188 matching_lines[line_index] = match_offsets
188 matching_lines[line_index] = match_offsets
189
189
190 else:
190 else:
191 markers = markers or [HL_MARKER_RE]
191 markers = markers or [HL_MARKER_RE]
192 for line_index, line in enumerate(lines.splitlines(), start=1):
192 for line_index, line in enumerate(lines.splitlines(), start=1):
193 match_offsets = get_matching_markers_offsets(line, markers=markers)
193 match_offsets = get_matching_markers_offsets(line, markers=markers)
194 if match_offsets:
194 if match_offsets:
195 matching_lines[line_index] = match_offsets
195 matching_lines[line_index] = match_offsets
196
196
197 return line_index, matching_lines
197 return line_index, matching_lines
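# End-to-end sketch of the term path through this module (extract_phrases ->
# normalize_text_for_matching -> get_matching_phrase_offsets); the sample
# text is made up.
last_line, matches = get_matching_line_offsets('alpha\nsome text\n', 'text')
assert last_line == 2 and matches == {2: [(5, 9)]}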
@@ -1,240 +1,240 b''
1
1
2 # Copyright (C) 2010-2023 RhodeCode GmbH
2 # Copyright (C) 2010-2023 RhodeCode GmbH
3 #
3 #
4 # This program is free software: you can redistribute it and/or modify
4 # This program is free software: you can redistribute it and/or modify
5 # it under the terms of the GNU Affero General Public License, version 3
5 # it under the terms of the GNU Affero General Public License, version 3
6 # (only), as published by the Free Software Foundation.
6 # (only), as published by the Free Software Foundation.
7 #
7 #
8 # This program is distributed in the hope that it will be useful,
8 # This program is distributed in the hope that it will be useful,
9 # but WITHOUT ANY WARRANTY; without even the implied warranty of
9 # but WITHOUT ANY WARRANTY; without even the implied warranty of
10 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 # GNU General Public License for more details.
11 # GNU General Public License for more details.
12 #
12 #
13 # You should have received a copy of the GNU Affero General Public License
13 # You should have received a copy of the GNU Affero General Public License
14 # along with this program. If not, see <http://www.gnu.org/licenses/>.
14 # along with this program. If not, see <http://www.gnu.org/licenses/>.
15 #
15 #
16 # This program is dual-licensed. If you wish to learn more about the
16 # This program is dual-licensed. If you wish to learn more about the
17 # RhodeCode Enterprise Edition, including its added features, Support services,
17 # RhodeCode Enterprise Edition, including its added features, Support services,
18 # and proprietary license terms, please see https://rhodecode.com/licenses/
18 # and proprietary license terms, please see https://rhodecode.com/licenses/
19
19
20 import copy
20 import copy
21 import mock
21 import mock
22 import pytest
22 import pytest
23
23
24 from rhodecode.lib import helpers
24 from rhodecode.lib import helpers
25 from rhodecode.lib.utils2 import AttributeDict
25 from rhodecode.lib.utils2 import AttributeDict
26 from rhodecode.model.settings import IssueTrackerSettingsModel
26 from rhodecode.model.settings import IssueTrackerSettingsModel
27 from rhodecode.tests import no_newline_id_generator
27 from rhodecode.tests import no_newline_id_generator
28
28
29
29
30 @pytest.mark.parametrize('url, expected_url', [
30 @pytest.mark.parametrize('url, expected_url', [
31 ('http://rc.com', '<a href="http://rc.com">http://rc.com</a>'),
31 (r'https://rc.com', '<a href="https://rc.com">https://rc.com</a>'),
32 ('http://rc.com/test', '<a href="http://rc.com/test">http://rc.com/test</a>'),
32 (r'https://rc.com/test', '<a href="https://rc.com/test">https://rc.com/test</a>'),
33 ('http://rc.com/!foo', '<a href="http://rc.com/!foo">http://rc.com/!foo</a>'),
33 (r'https://rc.com/!foo', '<a href="https://rc.com/!foo">https://rc.com/!foo</a>'),
34 ('http://rc.com/&foo', '<a href="http://rc.com/&amp;foo">http://rc.com/&amp;foo</a>'),
34 (r'https://rc.com/&foo', '<a href="https://rc.com/&amp;foo">https://rc.com/&amp;foo</a>'),
35 ('http://rc.com/?foo-1&bar=1', '<a href="http://rc.com/?foo-1&amp;bar=1">http://rc.com/?foo-1&amp;bar=1</a>'),
35 (r'https://rc.com/?foo-1&bar=1', '<a href="https://rc.com/?foo-1&amp;bar=1">https://rc.com/?foo-1&amp;bar=1</a>'),
36 ('http://rc.com?foo-1&bar=1', '<a href="http://rc.com?foo-1&amp;bar=1">http://rc.com?foo-1&amp;bar=1</a>'),
36 (r'https://rc.com?foo-1&bar=1', '<a href="https://rc.com?foo-1&amp;bar=1">https://rc.com?foo-1&amp;bar=1</a>'),
37 ('http://rc.com/#foo', '<a href="http://rc.com/#foo">http://rc.com/#foo</a>'),
37 (r'https://rc.com/#foo', '<a href="https://rc.com/#foo">https://rc.com/#foo</a>'),
38 ('http://rc.com/@foo', '<a href="http://rc.com/@foo">http://rc.com/@foo</a>'),
38 (r'https://rc.com/@foo', '<a href="https://rc.com/@foo">https://rc.com/@foo</a>'),
39 ])
39 ])
40 def test_urlify_text(url, expected_url):
40 def test_urlify_text(url, expected_url):
41 assert helpers.urlify_text(url) == expected_url
41 assert helpers.urlify_text(url) == expected_url
42
42
43
43
44 @pytest.mark.parametrize('repo_name, commit_id, path, expected_result', [
44 @pytest.mark.parametrize('repo_name, commit_id, path, expected_result', [
45 # Simple case 1
45 # Simple case 1
46 ('repo', 'commit', 'a/b',
46 ('repo', 'commit', 'a/b',
47 '<a href="/repo/files/commit/"><i class="icon-home"></i></a>'
47 '<a href="/repo/files/commit/"><i class="icon-home"></i></a>'
48 ' / '
48 ' / '
49 '<a href="/repo/files/commit/a">a</a>'
49 '<a href="/repo/files/commit/a">a</a>'
50 ' / '
50 ' / '
51 'b'),
51 'b'),
52
52
53 # Simple case
53 # Simple case
54 ('rX<X', 'cX<X', 'pX<X/aX<X/bX<X',
54 ('rX<X', 'cX<X', 'pX<X/aX<X/bX<X',
55 '<a href="/rX%3CX/files/cX%3CX/"><i class="icon-home"></i></a>'
55 '<a href="/rX%3CX/files/cX%3CX/"><i class="icon-home"></i></a>'
56 ' / '
56 ' / '
57 '<a href="/rX%3CX/files/cX%3CX/pX%3CX">pX&lt;X</a>'
57 '<a href="/rX%3CX/files/cX%3CX/pX%3CX">pX&lt;X</a>'
58 ' / '
58 ' / '
59 '<a href="/rX%3CX/files/cX%3CX/pX%3CX/aX%3CX">aX&lt;X</a>'
59 '<a href="/rX%3CX/files/cX%3CX/pX%3CX/aX%3CX">aX&lt;X</a>'
60 ' / '
60 ' / '
61 'bX&lt;X'),
61 'bX&lt;X'),
62
62
63 # Path with only one segment
63 # Path with only one segment
64 ('rX<X', 'cX<X', 'pX<X',
64 ('rX<X', 'cX<X', 'pX<X',
65 '<a href="/rX%3CX/files/cX%3CX/"><i class="icon-home"></i></a>'
65 '<a href="/rX%3CX/files/cX%3CX/"><i class="icon-home"></i></a>'
66 ' / '
66 ' / '
67 'pX&lt;X'),
67 'pX&lt;X'),
68
68
69 # Empty path
69 # Empty path
70 ('rX<X', 'cX<X', '',
70 ('rX<X', 'cX<X', '',
71 '<i class="icon-home"></i>'),
71 '<i class="icon-home"></i>'),
72
72
73 # simple quote
73 # simple quote
74 ('rX"X', 'cX"X', 'pX"X/aX"X/bX"X',
74 ('rX"X', 'cX"X', 'pX"X/aX"X/bX"X',
75 '<a href="/rX%22X/files/cX%22X/"><i class="icon-home"></i></a>'
75 '<a href="/rX%22X/files/cX%22X/"><i class="icon-home"></i></a>'
76 ' / '
76 ' / '
77 '<a href="/rX%22X/files/cX%22X/pX%22X">pX&#34;X</a>'
77 '<a href="/rX%22X/files/cX%22X/pX%22X">pX&#34;X</a>'
78 ' / '
78 ' / '
79 '<a href="/rX%22X/files/cX%22X/pX%22X/aX%22X">aX&#34;X</a>'
79 '<a href="/rX%22X/files/cX%22X/pX%22X/aX%22X">aX&#34;X</a>'
80 ' / '
80 ' / '
81 'bX&#34;X'),
81 'bX&#34;X'),
82
82
83 ], ids=['simple1', 'simple2', 'one_segment', 'empty_path', 'simple_quote'])
83 ], ids=['simple1', 'simple2', 'one_segment', 'empty_path', 'simple_quote'])
84 def test_files_breadcrumbs_xss(repo_name, commit_id, path, app, expected_result):
84 def test_files_breadcrumbs_xss(repo_name, commit_id, path, app, expected_result):
85 result = helpers.files_breadcrumbs(repo_name, 'hg', commit_id, path)
85 result = helpers.files_breadcrumbs(repo_name, 'hg', commit_id, path)
86 # Expect it to encode all path fragments properly. This is important
86 # Expect it to encode all path fragments properly. This is important
87 # because it returns an instance of `literal`.
87 # because it returns an instance of `literal`.
88 if path != '':
88 if path != '':
89 expected_result = expected_result + helpers.files_icon.format(helpers.escape(path))
89 expected_result = expected_result + helpers.files_icon.format(helpers.escape(path))
90 assert result == expected_result
90 assert result == expected_result
91
91
92
92
93 def test_format_binary():
93 def test_format_binary():
94 assert helpers.format_byte_size_binary(298489462784) == '278.0 GiB'
94 assert helpers.format_byte_size_binary(298489462784) == '278.0 GiB'
95
95
96
96
97 @pytest.mark.parametrize('text_string, pattern, expected', [
97 @pytest.mark.parametrize('text_string, pattern, expected', [
98 ('No issue here', '(?:#)(?P<issue_id>\d+)', []),
98 ('No issue here', r'(?:#)(?P<issue_id>\d+)', []),
99 ('Fix #42', r'(?:#)(?P<issue_id>\d+)',
99 ('Fix #42', r'(?:#)(?P<issue_id>\d+)',
100 [{'url': 'http://r.io/{repo}/i/42', 'id': '42'}]),
100 [{'url': 'https://r.io/{repo}/i/42', 'id': '42'}]),
101 ('Fix #42, #53', r'(?:#)(?P<issue_id>\d+)', [
101 ('Fix #42, #53', r'(?:#)(?P<issue_id>\d+)', [
102 {'url': 'http://r.io/{repo}/i/42', 'id': '42'},
102 {'url': 'https://r.io/{repo}/i/42', 'id': '42'},
103 {'url': 'http://r.io/{repo}/i/53', 'id': '53'}]),
103 {'url': 'https://r.io/{repo}/i/53', 'id': '53'}]),
104 ('Fix #42', r'(?:#)?<issue_id>\d+)', []), # Broken regex
104 ('Fix #42', r'(?:#)?<issue_id>\d+)', []), # Broken regex
105 ])
105 ])
106 def test_extract_issues(backend, text_string, pattern, expected):
106 def test_extract_issues(backend, text_string, pattern, expected):
107 repo = backend.create_repo()
107 repo = backend.create_repo()
108 config = {
108 config = {
109 '123': {
109 '123': {
110 'uid': '123',
110 'uid': '123',
111 'pat': pattern,
111 'pat': pattern,
112 'url': 'http://r.io/${repo}/i/${issue_id}',
112 'url': r'https://r.io/${repo}/i/${issue_id}',
113 'pref': '#',
113 'pref': '#',
114 'desc': 'Test Pattern'
114 'desc': 'Test Pattern'
115 }
115 }
116 }
116 }
117
117
118 def get_settings_mock(self, cache=True):
118 def get_settings_mock(self, cache=True):
119 return config
119 return config
120
120
121 with mock.patch.object(IssueTrackerSettingsModel,
121 with mock.patch.object(IssueTrackerSettingsModel,
122 'get_settings', get_settings_mock):
122 'get_settings', get_settings_mock):
123 text, issues, errors = helpers.process_patterns(text_string, repo.repo_name)
123 text, issues, errors = helpers.process_patterns(text_string, repo.repo_name)
124
124
125 expected = copy.deepcopy(expected)
125 expected = copy.deepcopy(expected)
126 for item in expected:
126 for item in expected:
127 item['url'] = item['url'].format(repo=repo.repo_name)
127 item['url'] = item['url'].format(repo=repo.repo_name)
128
128
129 assert issues == expected
129 assert issues == expected
130
130
131
131
132 @pytest.mark.parametrize('text_string, pattern, link_format, expected_text', [
132 @pytest.mark.parametrize('text_string, pattern, link_format, expected_text', [
133 ('Fix #42', r'(?:#)(?P<issue_id>\d+)', 'html',
133 ('Fix #42', r'(?:#)(?P<issue_id>\d+)', 'html',
134 'Fix <a class="tooltip issue-tracker-link" href="http://r.io/{repo}/i/42" title="Test Pattern">#42</a>'),
134 'Fix <a class="tooltip issue-tracker-link" href="http://r.io/{repo}/i/42" title="Test Pattern">#42</a>'),
135
135
136 ('Fix #42', r'(?:#)(?P<issue_id>\d+)', 'markdown',
136 ('Fix #42', r'(?:#)(?P<issue_id>\d+)', 'markdown',
137 'Fix [#42](http://r.io/{repo}/i/42)'),
137 'Fix [#42](http://r.io/{repo}/i/42)'),
138
138
139 ('Fix #42', r'(?:#)(?P<issue_id>\d+)', 'rst',
139 ('Fix #42', r'(?:#)(?P<issue_id>\d+)', 'rst',
140 'Fix `#42 <http://r.io/{repo}/i/42>`_'),
140 'Fix `#42 <http://r.io/{repo}/i/42>`_'),
141
141
142 ('Fix #42', r'(?:#)?<issue_id>\d+)', 'html',
142 ('Fix #42', r'(?:#)?<issue_id>\d+)', 'html',
143 'Fix #42'), # Broken regex
143 'Fix #42'), # Broken regex
144 ])
144 ])
145 def test_process_patterns_repo(backend, text_string, pattern, expected_text, link_format):
145 def test_process_patterns_repo(backend, text_string, pattern, expected_text, link_format):
146 repo = backend.create_repo()
146 repo = backend.create_repo()
147
147
148 def get_settings_mock(self, cache=True):
148 def get_settings_mock(self, cache=True):
149 return {
149 return {
150 '123': {
150 '123': {
151 'uid': '123',
151 'uid': '123',
152 'pat': pattern,
152 'pat': pattern,
153 'url': 'http://r.io/${repo}/i/${issue_id}',
153 'url': 'http://r.io/${repo}/i/${issue_id}',
154 'pref': '#',
154 'pref': '#',
155 'desc': 'Test Pattern'
155 'desc': 'Test Pattern'
156 }
156 }
157 }
157 }
158
158
159 with mock.patch.object(IssueTrackerSettingsModel,
159 with mock.patch.object(IssueTrackerSettingsModel,
160 'get_settings', get_settings_mock):
160 'get_settings', get_settings_mock):
161 processed_text, issues, error = helpers.process_patterns(
161 processed_text, issues, error = helpers.process_patterns(
162 text_string, repo.repo_name, link_format)
162 text_string, repo.repo_name, link_format)
163
163
164 assert processed_text == expected_text.format(repo=repo.repo_name)
164 assert processed_text == expected_text.format(repo=repo.repo_name)
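Side note, not part of the changeset: the ${repo} and ${issue_id} placeholders in the configured url above are filled from the repository name and the regex's named capture group. A minimal sketch of that substitution style using only the standard library; the real helpers.process_patterns implementation may differ, and link_issues plus url_tmpl are hypothetical names:

import re
from string import Template

pat = re.compile(r'(?:#)(?P<issue_id>\d+)')
url_tmpl = Template('http://r.io/${repo}/i/${issue_id}')

def link_issues(text, repo_name):
    # Rewrite each '#<digits>' token as a markdown link, mirroring the
    # 'markdown' link_format expectation in the parametrized cases above.
    def _sub(match):
        issue_id = match.group('issue_id')
        url = url_tmpl.safe_substitute(repo=repo_name, issue_id=issue_id)
        return f'[#{issue_id}]({url})'
    return pat.sub(_sub, text)

assert link_issues('Fix #42', 'my-repo') == 'Fix [#42](http://r.io/my-repo/i/42)'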
165
165
166
166
167 @pytest.mark.parametrize('text_string, pattern, expected_text', [
167 @pytest.mark.parametrize('text_string, pattern, expected_text', [
168 ('Fix #42', '(?:#)(?P<issue_id>\d+)',
168 ('Fix #42', '(?:#)(?P<issue_id>\d+)',
169 'Fix <a class="tooltip issue-tracker-link" href="http://r.io/i/42" title="Test Pattern">#42</a>'),
169 'Fix <a class="tooltip issue-tracker-link" href="http://r.io/i/42" title="Test Pattern">#42</a>'),
170 ('Fix #42', '(?:#)?<issue_id>\d+)',
170 ('Fix #42', '(?:#)?<issue_id>\d+)',
171 'Fix #42'), # Broken regex
171 'Fix #42'), # Broken regex
172 ])
172 ])
173 def test_process_patterns_no_repo(text_string, pattern, expected_text):
173 def test_process_patterns_no_repo(text_string, pattern, expected_text):
174
174
175 def get_settings_mock(self, cache=True):
175 def get_settings_mock(self, cache=True):
176 return {
176 return {
177 '123': {
177 '123': {
178 'uid': '123',
178 'uid': '123',
179 'pat': pattern,
179 'pat': pattern,
180 'url': 'http://r.io/i/${issue_id}',
180 'url': 'http://r.io/i/${issue_id}',
181 'pref': '#',
181 'pref': '#',
182 'desc': 'Test Pattern'
182 'desc': 'Test Pattern'
183 }
183 }
184 }
184 }
185
185
186 with mock.patch.object(IssueTrackerSettingsModel,
186 with mock.patch.object(IssueTrackerSettingsModel,
187 'get_global_settings', get_settings_mock):
187 'get_global_settings', get_settings_mock):
188 processed_text, issues, errors = helpers.process_patterns(
188 processed_text, issues, errors = helpers.process_patterns(
189 text_string, '')
189 text_string, '')
190
190
191 assert processed_text == expected_text
191 assert processed_text == expected_text
192
192
193
193
194 def test_process_patterns_non_existent_repo_name(backend):
194 def test_process_patterns_non_existent_repo_name(backend):
195 text_string = 'Fix #42'
195 text_string = 'Fix #42'
196 pattern = '(?:#)(?P<issue_id>\d+)'
196 pattern = '(?:#)(?P<issue_id>\d+)'
197 expected_text = ('Fix <a class="tooltip issue-tracker-link" '
197 expected_text = ('Fix <a class="tooltip issue-tracker-link" '
198 'href="http://r.io/do-not-exist/i/42" title="Test Pattern">#42</a>')
198 'href="http://r.io/do-not-exist/i/42" title="Test Pattern">#42</a>')
199
199
200 def get_settings_mock(self, cache=True):
200 def get_settings_mock(self, cache=True):
201 return {
201 return {
202 '123': {
202 '123': {
203 'uid': '123',
203 'uid': '123',
204 'pat': pattern,
204 'pat': pattern,
205 'url': 'http://r.io/${repo}/i/${issue_id}',
205 'url': 'http://r.io/${repo}/i/${issue_id}',
206 'pref': '#',
206 'pref': '#',
207 'desc': 'Test Pattern'
207 'desc': 'Test Pattern'
208 }
208 }
209 }
209 }
210
210
211 with mock.patch.object(IssueTrackerSettingsModel,
211 with mock.patch.object(IssueTrackerSettingsModel,
212 'get_global_settings', get_settings_mock):
212 'get_global_settings', get_settings_mock):
213 processed_text, issues, errors = helpers.process_patterns(
213 processed_text, issues, errors = helpers.process_patterns(
214 text_string, 'do-not-exist')
214 text_string, 'do-not-exist')
215
215
216 assert processed_text == expected_text
216 assert processed_text == expected_text
217
217
218
218
219 def test_get_visual_attr(baseapp):
219 def test_get_visual_attr(baseapp):
220 from rhodecode.apps._base import TemplateArgs
220 from rhodecode.apps._base import TemplateArgs
221 c = TemplateArgs()
221 c = TemplateArgs()
222 assert None is helpers.get_visual_attr(c, 'fake')
222 assert None is helpers.get_visual_attr(c, 'fake')
223
223
224 # emulate the c.visual behaviour
224 # emulate the c.visual behaviour
225 c.visual = AttributeDict({})
225 c.visual = AttributeDict({})
226 assert None is helpers.get_visual_attr(c, 'some_var')
226 assert None is helpers.get_visual_attr(c, 'some_var')
227
227
228 c.visual.some_var = 'foobar'
228 c.visual.some_var = 'foobar'
229 assert 'foobar' == helpers.get_visual_attr(c, 'some_var')
229 assert 'foobar' == helpers.get_visual_attr(c, 'some_var')
230
230
231
231
232 @pytest.mark.parametrize('test_text, inclusive, expected_text', [
232 @pytest.mark.parametrize('test_text, inclusive, expected_text', [
233 ('just a string', False, 'just a string'),
233 ('just a string', False, 'just a string'),
234 ('just a string\n', False, 'just a string'),
234 ('just a string\n', False, 'just a string'),
235 ('just a string\n next line', False, 'just a string...'),
235 ('just a string\n next line', False, 'just a string...'),
236 ('just a string\n next line', True, 'just a string\n...'),
236 ('just a string\n next line', True, 'just a string\n...'),
237 ], ids=no_newline_id_generator)
237 ], ids=no_newline_id_generator)
238 def test_chop_at(test_text, inclusive, expected_text):
238 def test_chop_at(test_text, inclusive, expected_text):
239 assert helpers.chop_at_smart(
239 assert helpers.chop_at_smart(
240 test_text, '\n', inclusive, '...') == expected_text
240 test_text, '\n', inclusive, '...') == expected_text
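An aside on the escape-character fixes this changeset is about (illustrative only, not part of the diff): a plain literal with a doubled backslash and a raw literal spell the same regex, and the raw form is the one that stays readable and warning-free.

import re

plain = '(?:#)(?P<issue_id>\\d+)'  # escaped backslash: correct but noisy
raw = r'(?:#)(?P<issue_id>\d+)'    # raw string: reads like the regex itself
assert plain == raw                # identical pattern text

assert re.search(raw, 'Fix #42').group('issue_id') == '42'

# A single backslash without the r prefix also happens to work, because
# '\d' is not a recognized string escape and Python keeps the backslash,
# but such literals emit DeprecationWarning since Python 3.6 and
# SyntaxWarning since 3.12. That is the usage this commit cleans up.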
@@ -1,102 +1,102 b''
1
1
2
2
3 # Copyright (C) 2016-2023 RhodeCode GmbH
3 # Copyright (C) 2016-2023 RhodeCode GmbH
4 #
4 #
5 # This program is free software: you can redistribute it and/or modify
5 # This program is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU Affero General Public License, version 3
6 # it under the terms of the GNU Affero General Public License, version 3
7 # (only), as published by the Free Software Foundation.
7 # (only), as published by the Free Software Foundation.
8 #
8 #
9 # This program is distributed in the hope that it will be useful,
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU General Public License for more details.
12 # GNU General Public License for more details.
13 #
13 #
14 # You should have received a copy of the GNU Affero General Public License
14 # You should have received a copy of the GNU Affero General Public License
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 #
16 #
17 # This program is dual-licensed. If you wish to learn more about the
17 # This program is dual-licensed. If you wish to learn more about the
18 # RhodeCode Enterprise Edition, including its added features, Support services,
18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 # and proprietary license terms, please see https://rhodecode.com/licenses/
19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20
20
21 import colander
21 import colander
22 import pytest
22 import pytest
23
23
24 from rhodecode.model.validation_schema.types import (
24 from rhodecode.model.validation_schema.types import (
25 GroupNameType, RepoNameType, StringBooleanType)
25 GroupNameType, RepoNameType, StringBooleanType)
26
26
27
27
28 class TestGroupNameType(object):
28 class TestGroupNameType(object):
29 @pytest.mark.parametrize('given, expected', [
29 @pytest.mark.parametrize('given, expected', [
30 ('//group1/group2//', 'group1/group2'),
30 ('//group1/group2//', 'group1/group2'),
31 ('//group1///group2//', 'group1/group2'),
31 ('//group1///group2//', 'group1/group2'),
32 ('group1/group2///group3', 'group1/group2/group3'),
32 ('group1/group2///group3', 'group1/group2/group3'),
33 ])
33 ])
34 def test_normalize_path(self, given, expected):
34 def test_normalize_path(self, given, expected):
35 result = GroupNameType()._normalize(given)
35 result = GroupNameType()._normalize(given)
36 assert result == expected
36 assert result == expected
37
37
38 @pytest.mark.parametrize('given, expected', [
38 @pytest.mark.parametrize('given, expected', [
39 ('//group1/group2//', 'group1/group2'),
39 (r'//group1/group2//', 'group1/group2'),
40 ('//group1///group2//', 'group1/group2'),
40 (r'//group1///group2//', 'group1/group2'),
41 ('group1/group2///group3', 'group1/group2/group3'),
41 (r'group1/group2///group3', 'group1/group2/group3'),
42 ('v1.2', 'v1.2'),
42 (r'v1.2', 'v1.2'),
43 ('/v1.2', 'v1.2'),
43 (r'/v1.2', 'v1.2'),
44 ('.dirs', '.dirs'),
44 (r'.dirs', '.dirs'),
45 ('..dirs', '.dirs'),
45 (r'..dirs', '.dirs'),
46 ('./..dirs', '.dirs'),
46 (r'./..dirs', '.dirs'),
47 ('dir/;name;/;[];/sub', 'dir/name/sub'),
47 (r'dir/;name;/;[];/sub', 'dir/name/sub'),
48 (',/,/,d,,,', 'd'),
48 (r',/,/,d,,,', 'd'),
49 ('/;/#/,d,,,', 'd'),
49 (r'/;/#/,d,,,', 'd'),
50 ('long../../..name', 'long./.name'),
50 (r'long../../..name', 'long./.name'),
51 ('long../..name', 'long./.name'),
51 (r'long../..name', 'long./.name'),
52 ('../', ''),
52 (r'../', ''),
53 ('\'../"../', ''),
53 (r'\'../"../', ''),
54 ('c,/,/..//./,c,,,/.d/../.........c', 'c/c/.d/.c'),
54 (r'c,/,/..//./,c,,,/.d/../.........c', 'c/c/.d/.c'),
55 ('c,/,/..//./,c,,,', 'c/c'),
55 (r'c,/,/..//./,c,,,', 'c/c'),
56 ('d../..d', 'd./.d'),
56 (r'd../..d', 'd./.d'),
57 ('d../../d', 'd./d'),
57 (r'd../../d', 'd./d'),
58
58
59 ('d\;\./\,\./d', 'd./d'),
59 (r'd\;\./\,\./d', 'd./d'),
60 ('d\.\./\.\./d', 'd./d'),
60 (r'd\.\./\.\./d', 'd./d'),
61 ('d\.\./\..\../d', 'd./d'),
61 (r'd\.\./\..\../d', 'd./d'),
62 ])
62 ])
63 def test_deserialize_clean_up_name(self, given, expected):
63 def test_deserialize_clean_up_name(self, given, expected):
64 class TestSchema(colander.Schema):
64 class TestSchema(colander.Schema):
65 field_group = colander.SchemaNode(GroupNameType())
65 field_group = colander.SchemaNode(GroupNameType())
66 field_repo = colander.SchemaNode(RepoNameType())
66 field_repo = colander.SchemaNode(RepoNameType())
67
67
68 schema = TestSchema()
68 schema = TestSchema()
69 cleaned_data = schema.deserialize({
69 cleaned_data = schema.deserialize({
70 'field_group': given,
70 'field_group': given,
71 'field_repo': given
71 'field_repo': given
72 })
72 })
73 assert cleaned_data['field_group'] == expected
73 assert cleaned_data['field_group'] == expected
74 assert cleaned_data['field_repo'] == expected
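A note on the r prefixes the changeset adds throughout the parametrize block above (illustrative, not part of the diff): for inputs without backslashes the prefix changes nothing; for unrecognized escapes such as the semicolon and comma sequences it silences the invalid-escape warning without changing the value; for quote escapes it actually changes the value.

# No backslash: raw and plain spellings denote the same string.
assert r'//group1/group2//' == '//group1/group2//'

# Unrecognized escape: same value either way, since Python keeps the
# backslash, but only the raw spelling avoids the invalid-escape warning.
assert r'\;/' == '\\;/'

# Quote escape: the spellings differ. A plain '\'' is just a quote, while
# the raw form keeps the backslash, so raw-stringing an entry such as
# ('\'../"../', '') changes the test input itself, not only its spelling.
assert r'\'' == '\\' + "'"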
74 assert cleaned_data['field_repo'] == expected
75
75
76
76
77 class TestStringBooleanType(object):
77 class TestStringBooleanType(object):
78
78
79 def _get_schema(self):
79 def _get_schema(self):
80 class Schema(colander.MappingSchema):
80 class Schema(colander.MappingSchema):
81 bools = colander.SchemaNode(StringBooleanType())
81 bools = colander.SchemaNode(StringBooleanType())
82 return Schema()
82 return Schema()
83
83
84 @pytest.mark.parametrize('given, expected', [
84 @pytest.mark.parametrize('given, expected', [
85 ('1', True),
85 ('1', True),
86 ('yEs', True),
86 ('yEs', True),
87 ('true', True),
87 ('true', True),
88
88
89 ('0', False),
89 ('0', False),
90 ('NO', False),
90 ('NO', False),
91 ('FALSE', False),
91 ('FALSE', False),
92
92
93 ])
93 ])
94 def test_convert_type(self, given, expected):
94 def test_convert_type(self, given, expected):
95 schema = self._get_schema()
95 schema = self._get_schema()
96 result = schema.deserialize({'bools': given})
96 result = schema.deserialize({'bools': given})
97 assert result['bools'] == expected
97 assert result['bools'] == expected
98
98
99 def test_try_convert_bad_type(self):
99 def test_try_convert_bad_type(self):
100 schema = self._get_schema()
100 schema = self._get_schema()
101 with pytest.raises(colander.Invalid):
101 with pytest.raises(colander.Invalid):
102 result = schema.deserialize({'bools': 'boom'})
102 result = schema.deserialize({'bools': 'boom'})
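For readers skimming the class above: it pins down a string-to-boolean coercion that accepts a few case-insensitive spellings and rejects everything else with colander.Invalid. A hedged sketch of such a type follows; the class name and accepted value sets are illustrative guesses, since the real implementation lives in rhodecode.model.validation_schema.types.StringBooleanType and may differ:

import colander

class StringBooleanSketch(colander.SchemaType):
    true_values = {'1', 'yes', 'true', 'on', 'y', 't'}
    false_values = {'0', 'no', 'false', 'off', 'n', 'f'}

    def serialize(self, node, appstruct):
        if appstruct is colander.null:
            return colander.null
        return 'true' if appstruct else 'false'

    def deserialize(self, node, cstruct):
        if cstruct is colander.null:
            return colander.null
        value = str(cstruct).strip().lower()
        if value in self.true_values:
            return True
        if value in self.false_values:
            return False
        raise colander.Invalid(node, f'{cstruct!r} is not a recognized boolean string')

node = colander.SchemaNode(StringBooleanSketch())
assert node.deserialize('yEs') is True
assert node.deserialize('FALSE') is False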