@@ -1,728 +1,754 @@
# -*- coding: utf-8 -*-

# Copyright (C) 2011-2018 RhodeCode GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License, version 3
# (only), as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This program is dual-licensed. If you wish to learn more about the
# RhodeCode Enterprise Edition, including its added features, Support services,
# and proprietary license terms, please see https://rhodecode.com/licenses/

import logging
import difflib
import string
from itertools import groupby

from pygments import lex
from pygments.formatters.html import _get_ttype_class as pygment_token_class
from pygments.lexers.special import TextLexer, Token

from rhodecode.lib.helpers import (
    get_lexer_for_filenode, html_escape, get_custom_lexer)
from rhodecode.lib.utils2 import AttributeDict, StrictAttributeDict, safe_unicode
from rhodecode.lib.vcs.nodes import FileNode
from rhodecode.lib.vcs.exceptions import VCSError, NodeDoesNotExistError
from rhodecode.lib.diff_match_patch import diff_match_patch
from rhodecode.lib.diffs import LimitedDiffContainer, DEL_FILENODE, BIN_FILENODE
from pygments.lexers import get_lexer_by_name

plain_text_lexer = get_lexer_by_name(
    'text', stripall=False, stripnl=False, ensurenl=False)


log = logging.getLogger(__name__)


def filenode_as_lines_tokens(filenode, lexer=None):
    org_lexer = lexer
    lexer = lexer or get_lexer_for_filenode(filenode)
    log.debug('Generating file node pygment tokens for %s, %s, org_lexer:%s',
              lexer, filenode, org_lexer)
    tokens = tokenize_string(filenode.content, lexer)
    lines = split_token_stream(tokens)
    rv = list(lines)
    return rv
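
# Illustrative sketch (made-up values, not from the original source): for a
# small file, the list returned above has one entry per line, each entry being
# a list of (css_class, text) tuples, roughly:
#
#   [
#       [('k', u'def'), ('', u' '), ('nf', u'foo'), ('p', u'():')],  # line 1
#       ...
#   ]
#
# The short css classes ('k', 'nf', 'c1', ...) are pygments' HTML class names.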


def tokenize_string(content, lexer):
    """
    Use pygments to tokenize some content based on a lexer
    ensuring all original new lines and whitespace is preserved
    """

    lexer.stripall = False
    lexer.stripnl = False
    lexer.ensurenl = False

    if isinstance(lexer, TextLexer):
        lexed = [(Token.Text, content)]
    else:
        lexed = lex(content, lexer)

    for token_type, token_text in lexed:
        yield pygment_token_class(token_type), token_text
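
# Illustrative example (made-up input; the exact classes depend on the
# pygments version): tokenizing one short line with a Python lexer yields
# pairs like
#
#   list(tokenize_string(u'x = 1\n', get_lexer_by_name('python')))
#   # -> [('n', u'x'), ('', u' '), ('o', u'='), ('', u' '), ('mi', u'1'), ('', u'\n')]
#
# i.e. (short_css_class, original_text) with all whitespace preserved.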


def split_token_stream(tokens):
    """
    Take a list of (TokenType, text) tuples and split them by newlines,
    yielding one list of tokens per line:

        split_token_stream([(TEXT, 'some\ntext'), (TEXT, 'more\n')])

        yields: [(TEXT, 'some')],
                [(TEXT, 'text'), (TEXT, 'more')],
                [(TEXT, '')]
    """

    buffer = []
    for token_class, token_text in tokens:
        parts = token_text.split('\n')
        for part in parts[:-1]:
            buffer.append((token_class, part))
            yield buffer
            buffer = []

        buffer.append((token_class, parts[-1]))

    if buffer:
        yield buffer


def filenode_as_annotated_lines_tokens(filenode):
    """
    Take a file node and return a list of annotations => lines, if no annotation
    is found, it will be None.

    eg:

    [
        (annotation1, [
            (1, line1_tokens_list),
            (2, line2_tokens_list),
        ]),
        (annotation2, [
            (3, line1_tokens_list),
        ]),
        (None, [
            (4, line1_tokens_list),
        ]),
        (annotation1, [
            (5, line1_tokens_list),
            (6, line2_tokens_list),
        ])
    ]
    """

    commit_cache = {}  # cache commit_getter lookups

    def _get_annotation(commit_id, commit_getter):
        if commit_id not in commit_cache:
            commit_cache[commit_id] = commit_getter()
        return commit_cache[commit_id]

    annotation_lookup = {
        line_no: _get_annotation(commit_id, commit_getter)
        for line_no, commit_id, commit_getter, line_content
        in filenode.annotate
    }

    annotations_lines = ((annotation_lookup.get(line_no), line_no, tokens)
                         for line_no, tokens
                         in enumerate(filenode_as_lines_tokens(filenode), 1))

    grouped_annotations_lines = groupby(annotations_lines, lambda x: x[0])

    for annotation, group in grouped_annotations_lines:
        yield (
            annotation, [(line_no, tokens)
                         for (_, line_no, tokens) in group]
        )


def render_tokenstream(tokenstream):
    result = []
    for token_class, token_ops_texts in rollup_tokenstream(tokenstream):

        if token_class:
            result.append(u'<span class="%s">' % token_class)
        else:
            result.append(u'<span>')

        for op_tag, token_text in token_ops_texts:

            if op_tag:
                result.append(u'<%s>' % op_tag)

            escaped_text = html_escape(token_text)

            # TODO: dan: investigate showing hidden characters like space/nl/tab
            # escaped_text = escaped_text.replace(' ', '<sp> </sp>')
            # escaped_text = escaped_text.replace('\n', '<nl>\n</nl>')
            # escaped_text = escaped_text.replace('\t', '<tab>\t</tab>')

            result.append(escaped_text)

            if op_tag:
                result.append(u'</%s>' % op_tag)

        result.append(u'</span>')

    html = ''.join(result)
    return html
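
# Worked example (illustrative input): rendering the rolled-up stream
# [('k', '', u'def'), ('', 'ins', u' x')] produces
# u'<span class="k">def</span><span><ins> x</ins></span>', i.e. one <span> per
# class group, with <ins>/<del> wrappers only where a diff op is present.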


def rollup_tokenstream(tokenstream):
    """
    Group a token stream of the format:

        ('class', 'op', 'text')
    or
        ('class', 'text')

    into

        [('class1',
            [('op1', 'text'),
             ('op2', 'text')]),
         ('class2',
            [('op3', 'text')])]

    This is used to get the minimal tags necessary when
    rendering to html eg for a token stream ie.

    <span class="A"><ins>he</ins>llo</span>
    vs
    <span class="A"><ins>he</ins></span><span class="A">llo</span>

    If a 2 tuple is passed in, the output op will be an empty string.

    eg:

    >>> rollup_tokenstream([('classA', '', 'h'),
                            ('classA', 'del', 'ell'),
                            ('classA', '', 'o'),
                            ('classB', '', ' '),
                            ('classA', '', 'the'),
                            ('classA', '', 're'),
                            ])

    [('classA', [('', 'h'), ('del', 'ell'), ('', 'o')]),
     ('classB', [('', ' ')]),
     ('classA', [('', 'there')])]

    """
    if tokenstream and len(tokenstream[0]) == 2:
        tokenstream = ((t[0], '', t[1]) for t in tokenstream)

    result = []
    for token_class, op_list in groupby(tokenstream, lambda t: t[0]):
        ops = []
        for token_op, token_text_list in groupby(op_list, lambda o: o[1]):
            text_buffer = []
            for t_class, t_op, t_text in token_text_list:
                text_buffer.append(t_text)
            ops.append((token_op, ''.join(text_buffer)))
        result.append((token_class, ops))
    return result


def tokens_diff(old_tokens, new_tokens, use_diff_match_patch=True):
    """
    Converts a list of (token_class, token_text) tuples to a list of
    (token_class, token_op, token_text) tuples where token_op is one of
    ('ins', 'del', '')

    :param old_tokens: list of (token_class, token_text) tuples of old line
    :param new_tokens: list of (token_class, token_text) tuples of new line
    :param use_diff_match_patch: boolean, will use google's diff match patch
        library which has options to 'smooth' out the character by character
        differences making nicer ins/del blocks
    """

    old_tokens_result = []
    new_tokens_result = []

    similarity = difflib.SequenceMatcher(None,
        ''.join(token_text for token_class, token_text in old_tokens),
        ''.join(token_text for token_class, token_text in new_tokens)
    ).ratio()

    if similarity < 0.6:  # return, the blocks are too different
        for token_class, token_text in old_tokens:
            old_tokens_result.append((token_class, '', token_text))
        for token_class, token_text in new_tokens:
            new_tokens_result.append((token_class, '', token_text))
        return old_tokens_result, new_tokens_result, similarity

    token_sequence_matcher = difflib.SequenceMatcher(None,
        [x[1] for x in old_tokens],
        [x[1] for x in new_tokens])

    for tag, o1, o2, n1, n2 in token_sequence_matcher.get_opcodes():
        # check the differences by token block types first to give a nicer
        # "block" level replacement vs character diffs

        if tag == 'equal':
            for token_class, token_text in old_tokens[o1:o2]:
                old_tokens_result.append((token_class, '', token_text))
            for token_class, token_text in new_tokens[n1:n2]:
                new_tokens_result.append((token_class, '', token_text))
        elif tag == 'delete':
            for token_class, token_text in old_tokens[o1:o2]:
                old_tokens_result.append((token_class, 'del', token_text))
        elif tag == 'insert':
            for token_class, token_text in new_tokens[n1:n2]:
                new_tokens_result.append((token_class, 'ins', token_text))
        elif tag == 'replace':
            # if same type token blocks must be replaced, do a diff on the
            # characters in the token blocks to show individual changes

            old_char_tokens = []
            new_char_tokens = []
            for token_class, token_text in old_tokens[o1:o2]:
                for char in token_text:
                    old_char_tokens.append((token_class, char))

            for token_class, token_text in new_tokens[n1:n2]:
                for char in token_text:
                    new_char_tokens.append((token_class, char))

            old_string = ''.join([token_text for
                                  token_class, token_text in old_char_tokens])
            new_string = ''.join([token_text for
                                  token_class, token_text in new_char_tokens])

            char_sequence = difflib.SequenceMatcher(
                None, old_string, new_string)
            copcodes = char_sequence.get_opcodes()
            obuffer, nbuffer = [], []

            if use_diff_match_patch:
                dmp = diff_match_patch()
                dmp.Diff_EditCost = 11  # TODO: dan: extract this to a setting
                reps = dmp.diff_main(old_string, new_string)
                dmp.diff_cleanupEfficiency(reps)

                a, b = 0, 0
                for op, rep in reps:
                    l = len(rep)
                    if op == 0:
                        for i, c in enumerate(rep):
                            obuffer.append((old_char_tokens[a+i][0], '', c))
                            nbuffer.append((new_char_tokens[b+i][0], '', c))
                        a += l
                        b += l
                    elif op == -1:
                        for i, c in enumerate(rep):
                            obuffer.append((old_char_tokens[a+i][0], 'del', c))
                        a += l
                    elif op == 1:
                        for i, c in enumerate(rep):
                            nbuffer.append((new_char_tokens[b+i][0], 'ins', c))
                        b += l
            else:
                for ctag, co1, co2, cn1, cn2 in copcodes:
                    if ctag == 'equal':
                        for token_class, token_text in old_char_tokens[co1:co2]:
                            obuffer.append((token_class, '', token_text))
                        for token_class, token_text in new_char_tokens[cn1:cn2]:
                            nbuffer.append((token_class, '', token_text))
                    elif ctag == 'delete':
                        for token_class, token_text in old_char_tokens[co1:co2]:
                            obuffer.append((token_class, 'del', token_text))
                    elif ctag == 'insert':
                        for token_class, token_text in new_char_tokens[cn1:cn2]:
                            nbuffer.append((token_class, 'ins', token_text))
                    elif ctag == 'replace':
                        for token_class, token_text in old_char_tokens[co1:co2]:
                            obuffer.append((token_class, 'del', token_text))
                        for token_class, token_text in new_char_tokens[cn1:cn2]:
                            nbuffer.append((token_class, 'ins', token_text))

            old_tokens_result.extend(obuffer)
            new_tokens_result.extend(nbuffer)

    return old_tokens_result, new_tokens_result, similarity
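
# Usage sketch (illustrative values): for two versions of a single line,
#
#   old, new, sim = tokens_diff([('', u'roses are red')],
#                               [('', u'roses are blue')])
#
# each character of the common prefix comes back with an empty op, the old
# tail is marked 'del' and the new tail 'ins' (token classes are carried
# through), and `sim` is the difflib ratio used above for the 0.6 cut-off.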


def diffset_node_getter(commit):
    def get_node(fname):
        try:
            return commit.get_node(fname)
        except NodeDoesNotExistError:
            return None

    return get_node
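
# Typical wiring (sketch only; variable names are illustrative): the callable
# returned above is what DiffSet below expects for resolving file nodes:
#
#   diffset = DiffSet(
#       repo_name=repo_name,
#       source_node_getter=diffset_node_getter(old_commit),
#       target_node_getter=diffset_node_getter(new_commit),
#   ).render_patchset(patchset)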


class DiffSet(object):
    """
    An object for parsing the diff result from diffs.DiffProcessor and
    adding highlighting, side by side/unified renderings and line diffs
    """

    HL_REAL = 'REAL'  # highlights using original file, slow
    HL_FAST = 'FAST'  # highlights using just the line, fast but not correct
                      # in the case of multiline code
    HL_NONE = 'NONE'  # no highlighting, fastest

    def __init__(self, highlight_mode=HL_REAL, repo_name=None,
                 source_repo_name=None,
                 source_node_getter=lambda filename: None,
                 target_node_getter=lambda filename: None,
                 source_nodes=None, target_nodes=None,
                 # files over this size will use fast highlighting
                 max_file_size_limit=150 * 1024,
                 ):

        self.highlight_mode = highlight_mode
        self.highlighted_filenodes = {}
        self.source_node_getter = source_node_getter
        self.target_node_getter = target_node_getter
        self.source_nodes = source_nodes or {}
        self.target_nodes = target_nodes or {}
        self.repo_name = repo_name
        self.source_repo_name = source_repo_name or repo_name
        self.max_file_size_limit = max_file_size_limit

    def render_patchset(self, patchset, source_ref=None, target_ref=None):
        diffset = AttributeDict(dict(
            lines_added=0,
            lines_deleted=0,
            changed_files=0,
            files=[],
            file_stats={},
            limited_diff=isinstance(patchset, LimitedDiffContainer),
            repo_name=self.repo_name,
            source_repo_name=self.source_repo_name,
            source_ref=source_ref,
            target_ref=target_ref,
        ))
        for patch in patchset:
            diffset.file_stats[patch['filename']] = patch['stats']
            filediff = self.render_patch(patch)
            filediff.diffset = StrictAttributeDict(dict(
                source_ref=diffset.source_ref,
                target_ref=diffset.target_ref,
                repo_name=diffset.repo_name,
                source_repo_name=diffset.source_repo_name,
            ))
            diffset.files.append(filediff)
            diffset.changed_files += 1
            if not patch['stats']['binary']:
                diffset.lines_added += patch['stats']['added']
                diffset.lines_deleted += patch['stats']['deleted']

        return diffset

    _lexer_cache = {}

    def _get_lexer_for_filename(self, filename, filenode=None):
        # cached because we might need to call it twice for source/target
        if filename not in self._lexer_cache:
            if filenode:
                lexer = filenode.lexer
                extension = filenode.extension
            else:
                lexer = FileNode.get_lexer(filename=filename)
                extension = filename.split('.')[-1]

            lexer = get_custom_lexer(extension) or lexer
            self._lexer_cache[filename] = lexer
        return self._lexer_cache[filename]

    def render_patch(self, patch):
        log.debug('rendering diff for %r', patch['filename'])

        source_filename = patch['original_filename']
        target_filename = patch['filename']

        source_lexer = plain_text_lexer
        target_lexer = plain_text_lexer

        if not patch['stats']['binary']:
            if self.highlight_mode == self.HL_REAL:
                if (source_filename and patch['operation'] in ('D', 'M')
                        and source_filename not in self.source_nodes):
                    self.source_nodes[source_filename] = (
                        self.source_node_getter(source_filename))

                if (target_filename and patch['operation'] in ('A', 'M')
                        and target_filename not in self.target_nodes):
                    self.target_nodes[target_filename] = (
                        self.target_node_getter(target_filename))

            elif self.highlight_mode == self.HL_FAST:
                source_lexer = self._get_lexer_for_filename(source_filename)
                target_lexer = self._get_lexer_for_filename(target_filename)

        source_file = self.source_nodes.get(source_filename, source_filename)
        target_file = self.target_nodes.get(target_filename, target_filename)

        source_filenode, target_filenode = None, None

        # TODO: dan: FileNode.lexer works on the content of the file - which
        # can be slow - issue #4289 explains a lexer clean up - which once
        # done can allow caching a lexer for a filenode to avoid the file lookup
        if isinstance(source_file, FileNode):
            source_filenode = source_file
            #source_lexer = source_file.lexer
            source_lexer = self._get_lexer_for_filename(source_filename)
            source_file.lexer = source_lexer

        if isinstance(target_file, FileNode):
            target_filenode = target_file
            #target_lexer = target_file.lexer
            target_lexer = self._get_lexer_for_filename(target_filename)
            target_file.lexer = target_lexer

        source_file_path, target_file_path = None, None

        if source_filename != '/dev/null':
            source_file_path = source_filename
        if target_filename != '/dev/null':
            target_file_path = target_filename

        source_file_type = source_lexer.name
        target_file_type = target_lexer.name

        filediff = AttributeDict({
            'source_file_path': source_file_path,
            'target_file_path': target_file_path,
            'source_filenode': source_filenode,
            'target_filenode': target_filenode,
            'source_file_type': target_file_type,
            'target_file_type': source_file_type,
            'patch': {'filename': patch['filename'], 'stats': patch['stats']},
            'operation': patch['operation'],
            'source_mode': patch['stats']['old_mode'],
            'target_mode': patch['stats']['new_mode'],
            'limited_diff': isinstance(patch, LimitedDiffContainer),
            'hunks': [],
            'hunk_ops': None,
            'diffset': self,
        })

        for hunk in patch['chunks'][1:]:
            hunkbit = self.parse_hunk(hunk, source_file, target_file)
            hunkbit.source_file_path = source_file_path
            hunkbit.target_file_path = target_file_path
            filediff.hunks.append(hunkbit)

        # Simulate hunk on OPS type line which doesn't really contain any diff
        # this allows commenting on those
        actions = []
        for op_id, op_text in filediff.patch['stats']['ops'].items():
            if op_id == DEL_FILENODE:
                actions.append(u'file was deleted')
            elif op_id == BIN_FILENODE:
                actions.append(u'binary diff hidden')
            else:
                actions.append(safe_unicode(op_text))
        action_line = u'FILE WITHOUT CONTENT: ' + \
                      u', '.join(map(string.upper, actions)) or u'UNDEFINED_ACTION'

        hunk_ops = {'source_length': 0, 'source_start': 0,
                    'lines': [
                        {'new_lineno': 0, 'old_lineno': 1,
                         'action': 'unmod', 'line': action_line}
                    ],
                    'section_header': u'', 'target_start': 1, 'target_length': 1}

        hunkbit = self.parse_hunk(hunk_ops, source_file, target_file)
        hunkbit.source_file_path = source_file_path
        hunkbit.target_file_path = target_file_path
        filediff.hunk_ops = hunkbit
        return filediff
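
    # Note on the simulated hunk above (illustrative): for a patch whose stats
    # carry a DEL_FILENODE op, `action_line` becomes roughly
    # u'FILE WITHOUT CONTENT: FILE WAS DELETED', and the single synthetic
    # 'unmod' line stored on `filediff.hunk_ops` gives users something to
    # attach comments to for files that have no real diff hunks (deleted or
    # binary files).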

    def parse_hunk(self, hunk, source_file, target_file):
        result = AttributeDict(dict(
            source_start=hunk['source_start'],
            source_length=hunk['source_length'],
            target_start=hunk['target_start'],
            target_length=hunk['target_length'],
            section_header=hunk['section_header'],
            lines=[],
        ))
        before, after = [], []

        for line in hunk['lines']:

            if line['action'] == 'unmod':
                result.lines.extend(
                    self.parse_lines(before, after, source_file, target_file))
                after.append(line)
                before.append(line)
            elif line['action'] == 'add':
                after.append(line)
            elif line['action'] == 'del':
                before.append(line)
            elif line['action'] == 'old-no-nl':
                before.append(line)
            elif line['action'] == 'new-no-nl':
                after.append(line)

        result.lines.extend(
            self.parse_lines(before, after, source_file, target_file))
        result.unified = list(self.as_unified(result.lines))
        result.sideside = result.lines

        return result

    def parse_lines(self, before_lines, after_lines, source_file, target_file):
        # TODO: dan: investigate doing the diff comparison and fast highlighting
        # on the entire before and after buffered block lines rather than by
        # line, this means we can get better 'fast' highlighting if the context
        # allows it - eg.
        # line 4: """
        # line 5: this gets highlighted as a string
        # line 6: """

        lines = []

        before_newline = AttributeDict()
        after_newline = AttributeDict()
        if before_lines and before_lines[-1]['action'] == 'old-no-nl':
            before_newline_line = before_lines.pop(-1)
            before_newline.content = '\n {}'.format(
                render_tokenstream(
                    [(x[0], '', x[1])
                     for x in [('nonl', before_newline_line['line'])]]))

        if after_lines and after_lines[-1]['action'] == 'new-no-nl':
            after_newline_line = after_lines.pop(-1)
            after_newline.content = '\n {}'.format(
                render_tokenstream(
                    [(x[0], '', x[1])
                     for x in [('nonl', after_newline_line['line'])]]))

        while before_lines or after_lines:
            before, after = None, None
            before_tokens, after_tokens = None, None

            if before_lines:
                before = before_lines.pop(0)
            if after_lines:
                after = after_lines.pop(0)

            original = AttributeDict()
            modified = AttributeDict()

            if before:
                if before['action'] == 'old-no-nl':
                    before_tokens = [('nonl', before['line'])]
                else:
                    before_tokens = self.get_line_tokens(
                        line_text=before['line'],
                        line_number=before['old_lineno'],
                        file=source_file)
                original.lineno = before['old_lineno']
                original.content = before['line']
                original.action = self.action_to_op(before['action'])

                original.get_comment_args = (
                    source_file, 'o', before['old_lineno'])

            if after:
                if after['action'] == 'new-no-nl':
                    after_tokens = [('nonl', after['line'])]
                else:
                    after_tokens = self.get_line_tokens(
                        line_text=after['line'], line_number=after['new_lineno'],
                        file=target_file)
                modified.lineno = after['new_lineno']
                modified.content = after['line']
                modified.action = self.action_to_op(after['action'])

                modified.get_comment_args = (
                    target_file, 'n', after['new_lineno'])

            # diff the lines
            if before_tokens and after_tokens:
                o_tokens, m_tokens, similarity = tokens_diff(
                    before_tokens, after_tokens)
                original.content = render_tokenstream(o_tokens)
                modified.content = render_tokenstream(m_tokens)
            elif before_tokens:
                original.content = render_tokenstream(
                    [(x[0], '', x[1]) for x in before_tokens])
            elif after_tokens:
                modified.content = render_tokenstream(
                    [(x[0], '', x[1]) for x in after_tokens])

            if not before_lines and before_newline:
                original.content += before_newline.content
                before_newline = None
            if not after_lines and after_newline:
                modified.content += after_newline.content
                after_newline = None

            lines.append(AttributeDict({
                'original': original,
                'modified': modified,
            }))

        return lines

    def get_line_tokens(self, line_text, line_number, file=None):
        filenode = None
        filename = None

        if isinstance(file, basestring):
            filename = file
        elif isinstance(file, FileNode):
            filenode = file
            filename = file.unicode_path

        if self.highlight_mode == self.HL_REAL and filenode:
            lexer = self._get_lexer_for_filename(filename)
            file_size_allowed = file.size < self.max_file_size_limit
            if line_number and file_size_allowed:
                return self.get_tokenized_filenode_line(
                    file, line_number, lexer)

        if self.highlight_mode in (self.HL_REAL, self.HL_FAST) and filename:
            lexer = self._get_lexer_for_filename(filename)
            return list(tokenize_string(line_text, lexer))

        return list(tokenize_string(line_text, plain_text_lexer))
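
    # Summary of the fallbacks above: HL_REAL with a FileNode under the size
    # limit tokenizes the whole file once (cached by get_tokenized_filenode_line
    # below); HL_REAL/HL_FAST with only a filename tokenize just this line with
    # the file's lexer; everything else falls back to the plain text lexer.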

    def get_tokenized_filenode_line(self, filenode, line_number, lexer=None):

        if filenode not in self.highlighted_filenodes:
            tokenized_lines = filenode_as_lines_tokens(filenode, lexer)
            self.highlighted_filenodes[filenode] = tokenized_lines
        return self.highlighted_filenodes[filenode][line_number - 1]

    def action_to_op(self, action):
        return {
            'add': '+',
            'del': '-',
            'unmod': ' ',
            'old-no-nl': ' ',
            'new-no-nl': ' ',
        }.get(action, action)

    def as_unified(self, lines):
        """
        Return a generator that yields the lines of a diff in unified order
        """
        def generator():
            buf = []
            for line in lines:

                if buf and not line.original or line.original.action == ' ':
                    for b in buf:
                        yield b
                    buf = []

                if line.original:
                    if line.original.action == ' ':
                        yield (line.original.lineno, line.modified.lineno,
                               line.original.action, line.original.content,
                               line.original.get_comment_args)
                        continue

                    if line.original.action == '-':
                        yield (line.original.lineno, None,
                               line.original.action, line.original.content,
                               line.original.get_comment_args)

                    if line.modified.action == '+':
                        buf.append((
                            None, line.modified.lineno,
                            line.modified.action, line.modified.content,
                            line.modified.get_comment_args))
                        continue

                if line.modified:
                    yield (None, line.modified.lineno,
                           line.modified.action, line.modified.content,
                           line.modified.get_comment_args)

            for b in buf:
                yield b

        return generator()
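
    # Shape of the yielded rows (values illustrative): every item produced by
    # as_unified() is a 5-tuple
    #   (old_lineno, new_lineno, action, rendered_content, get_comment_args)
    # where one of the line numbers is None for pure additions or deletions;
    # added lines are buffered so they appear after the removals of the same
    # block, matching the usual unified diff ordering.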
@@ -1,760 +1,761 b'' | |||||
1 | <%namespace name="commentblock" file="/changeset/changeset_file_comment.mako"/> |
|
1 | <%namespace name="commentblock" file="/changeset/changeset_file_comment.mako"/> | |
2 |
|
2 | |||
3 | <%def name="diff_line_anchor(filename, line, type)"><% |
|
3 | <%def name="diff_line_anchor(filename, line, type)"><% | |
4 | return '%s_%s_%i' % (h.safeid(filename), type, line) |
|
4 | return '%s_%s_%i' % (h.safeid(filename), type, line) | |
5 | %></%def> |
|
5 | %></%def> | |
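
For orientation, a rough Python equivalent (illustration only) of the diff_line_anchor() def above. The safe_id() helper below is an assumption standing in for h.safeid(), whose implementation is not part of this diff.

    # Same '%s_%s_%i' format string as diff_line_anchor() above.
    def safe_id(filename):
        # stand-in for h.safeid(); assumed to make the filename usable as an HTML id
        return ''.join(ch if ch.isalnum() else '-' for ch in filename)

    def diff_line_anchor(filename, line, type_):
        return '%s_%s_%i' % (safe_id(filename), type_, line)

    print(diff_line_anchor('setup.py', 42, 'n'))  # -> setup-py_n_42

These per-line ids ('o' for the old side, 'n' for the new side) are what the <a name=...> anchors in render_hunk_lines_unified and render_hunk_lines_sideside point at.
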
6 |
|
6 | |||
7 | <%def name="action_class(action)"> |
|
7 | <%def name="action_class(action)"> | |
8 | <% |
|
8 | <% | |
9 | return { |
|
9 | return { | |
10 | '-': 'cb-deletion', |
|
10 | '-': 'cb-deletion', | |
11 | '+': 'cb-addition', |
|
11 | '+': 'cb-addition', | |
12 | ' ': 'cb-context', |
|
12 | ' ': 'cb-context', | |
13 | }.get(action, 'cb-empty') |
|
13 | }.get(action, 'cb-empty') | |
14 | %> |
|
14 | %> | |
15 | </%def> |
|
15 | </%def> | |
16 |
|
16 | |||
17 | <%def name="op_class(op_id)"> |
|
17 | <%def name="op_class(op_id)"> | |
18 | <% |
|
18 | <% | |
19 | return { |
|
19 | return { | |
20 | DEL_FILENODE: 'deletion', # file deleted |
|
20 | DEL_FILENODE: 'deletion', # file deleted | |
21 | BIN_FILENODE: 'warning' # binary diff hidden |
|
21 | BIN_FILENODE: 'warning' # binary diff hidden | |
22 | }.get(op_id, 'addition') |
|
22 | }.get(op_id, 'addition') | |
23 | %> |
|
23 | %> | |
24 | </%def> |
|
24 | </%def> | |
25 |
|
25 | |||
26 |
|
26 | |||
27 |
|
27 | |||
28 | <%def name="render_diffset(diffset, commit=None, |
|
28 | <%def name="render_diffset(diffset, commit=None, | |
29 |
|
29 | |||
30 | # collapse all file diff entries when there are more than this amount of files in the diff |
|
30 | # collapse all file diff entries when there are more than this amount of files in the diff | |
31 | collapse_when_files_over=20, |
|
31 | collapse_when_files_over=20, | |
32 |
|
32 | |||
33 | # collapse lines in the diff when more than this amount of lines changed in the file diff |
|
33 | # collapse lines in the diff when more than this amount of lines changed in the file diff | |
34 | lines_changed_limit=500, |
|
34 | lines_changed_limit=500, | |
35 |
|
35 | |||
36 | # add a ruler to the output |
|
36 | # add a ruler to the output | |
37 | ruler_at_chars=0, |
|
37 | ruler_at_chars=0, | |
38 |
|
38 | |||
39 | # show inline comments |
|
39 | # show inline comments | |
40 | use_comments=False, |
|
40 | use_comments=False, | |
41 |
|
41 | |||
42 | # disable new comments |
|
42 | # disable new comments | |
43 | disable_new_comments=False, |
|
43 | disable_new_comments=False, | |
44 |
|
44 | |||
45 | # special file-comments that were deleted in previous versions |
|
45 | # special file-comments that were deleted in previous versions | |
46 | # it's used for showing outdated comments for deleted files in a PR |
|
46 | # it's used for showing outdated comments for deleted files in a PR | |
47 | deleted_files_comments=None, |
|
47 | deleted_files_comments=None, | |
48 |
|
48 | |||
49 | # for cache purposes |
|
49 | # for cache purposes | |
50 | inline_comments=None |
|
50 | inline_comments=None | |
51 |
|
51 | |||
52 | )"> |
|
52 | )"> | |
53 | %if use_comments: |
|
53 | %if use_comments: | |
54 | <div id="cb-comments-inline-container-template" class="js-template"> |
|
54 | <div id="cb-comments-inline-container-template" class="js-template"> | |
55 | ${inline_comments_container([], inline_comments)} |
|
55 | ${inline_comments_container([], inline_comments)} | |
56 | </div> |
|
56 | </div> | |
57 | <div class="js-template" id="cb-comment-inline-form-template"> |
|
57 | <div class="js-template" id="cb-comment-inline-form-template"> | |
58 | <div class="comment-inline-form ac"> |
|
58 | <div class="comment-inline-form ac"> | |
59 |
|
59 | |||
60 | %if c.rhodecode_user.username != h.DEFAULT_USER: |
|
60 | %if c.rhodecode_user.username != h.DEFAULT_USER: | |
61 | ## render template for inline comments |
|
61 | ## render template for inline comments | |
62 | ${commentblock.comment_form(form_type='inline')} |
|
62 | ${commentblock.comment_form(form_type='inline')} | |
63 | %else: |
|
63 | %else: | |
64 | ${h.form('', class_='inline-form comment-form-login', method='get')} |
|
64 | ${h.form('', class_='inline-form comment-form-login', method='get')} | |
65 | <div class="pull-left"> |
|
65 | <div class="pull-left"> | |
66 | <div class="comment-help pull-right"> |
|
66 | <div class="comment-help pull-right"> | |
67 | ${_('You need to be logged in to leave comments.')} <a href="${h.route_path('login', _query={'came_from': h.current_route_path(request)})}">${_('Login now')}</a> |
|
67 | ${_('You need to be logged in to leave comments.')} <a href="${h.route_path('login', _query={'came_from': h.current_route_path(request)})}">${_('Login now')}</a> | |
68 | </div> |
|
68 | </div> | |
69 | </div> |
|
69 | </div> | |
70 | <div class="comment-button pull-right"> |
|
70 | <div class="comment-button pull-right"> | |
71 | <button type="button" class="cb-comment-cancel" onclick="return Rhodecode.comments.cancelComment(this);"> |
|
71 | <button type="button" class="cb-comment-cancel" onclick="return Rhodecode.comments.cancelComment(this);"> | |
72 | ${_('Cancel')} |
|
72 | ${_('Cancel')} | |
73 | </button> |
|
73 | </button> | |
74 | </div> |
|
74 | </div> | |
75 | <div class="clearfix"></div> |
|
75 | <div class="clearfix"></div> | |
76 | ${h.end_form()} |
|
76 | ${h.end_form()} | |
77 | %endif |
|
77 | %endif | |
78 | </div> |
|
78 | </div> | |
79 | </div> |
|
79 | </div> | |
80 |
|
80 | |||
81 | %endif |
|
81 | %endif | |
82 | <% |
|
82 | <% | |
83 | collapse_all = len(diffset.files) > collapse_when_files_over |
|
83 | collapse_all = len(diffset.files) > collapse_when_files_over | |
84 | %> |
|
84 | %> | |
85 |
|
85 | |||
86 | %if c.diffmode == 'sideside': |
|
86 | %if c.diffmode == 'sideside': | |
87 | <style> |
|
87 | <style> | |
88 | .wrapper { |
|
88 | .wrapper { | |
89 | max-width: 1600px !important; |
|
89 | max-width: 1600px !important; | |
90 | } |
|
90 | } | |
91 | </style> |
|
91 | </style> | |
92 | %endif |
|
92 | %endif | |
93 |
|
93 | |||
94 | %if ruler_at_chars: |
|
94 | %if ruler_at_chars: | |
95 | <style> |
|
95 | <style> | |
96 | .diff table.cb .cb-content:after { |
|
96 | .diff table.cb .cb-content:after { | |
97 | content: ""; |
|
97 | content: ""; | |
98 | border-left: 1px solid blue; |
|
98 | border-left: 1px solid blue; | |
99 | position: absolute; |
|
99 | position: absolute; | |
100 | top: 0; |
|
100 | top: 0; | |
101 | height: 18px; |
|
101 | height: 18px; | |
102 | opacity: .2; |
|
102 | opacity: .2; | |
103 | z-index: 10; |
|
103 | z-index: 10; | |
104 | //## +5 to account for diff action (+/-) |
|
104 | //## +5 to account for diff action (+/-) | |
105 | left: ${ruler_at_chars + 5}ch; |
|
105 | left: ${ruler_at_chars + 5}ch; | |
106 | </style> |
|
106 | </style> | |
107 | %endif |
|
107 | %endif | |
108 |
|
108 | |||
109 | <div class="diffset ${disable_new_comments and 'diffset-comments-disabled'}"> |
|
109 | <div class="diffset ${disable_new_comments and 'diffset-comments-disabled'}"> | |
110 | <div class="diffset-heading ${diffset.limited_diff and 'diffset-heading-warning' or ''}"> |
|
110 | <div class="diffset-heading ${diffset.limited_diff and 'diffset-heading-warning' or ''}"> | |
111 | %if commit: |
|
111 | %if commit: | |
112 | <div class="pull-right"> |
|
112 | <div class="pull-right"> | |
113 | <a class="btn tooltip" title="${h.tooltip(_('Browse Files at revision {}').format(commit.raw_id))}" href="${h.route_path('repo_files',repo_name=diffset.repo_name, commit_id=commit.raw_id, f_path='')}"> |
|
113 | <a class="btn tooltip" title="${h.tooltip(_('Browse Files at revision {}').format(commit.raw_id))}" href="${h.route_path('repo_files',repo_name=diffset.repo_name, commit_id=commit.raw_id, f_path='')}"> | |
114 | ${_('Browse Files')} |
|
114 | ${_('Browse Files')} | |
115 | </a> |
|
115 | </a> | |
116 | </div> |
|
116 | </div> | |
117 | %endif |
|
117 | %endif | |
118 | <h2 class="clearinner"> |
|
118 | <h2 class="clearinner"> | |
119 | %if commit: |
|
119 | %if commit: | |
120 | <a class="tooltip revision" title="${h.tooltip(commit.message)}" href="${h.route_path('repo_commit',repo_name=c.repo_name,commit_id=commit.raw_id)}">${'r%s:%s' % (commit.revision,h.short_id(commit.raw_id))}</a> - |
|
120 | <a class="tooltip revision" title="${h.tooltip(commit.message)}" href="${h.route_path('repo_commit',repo_name=c.repo_name,commit_id=commit.raw_id)}">${'r%s:%s' % (commit.revision,h.short_id(commit.raw_id))}</a> - | |
121 | ${h.age_component(commit.date)} - |
|
121 | ${h.age_component(commit.date)} - | |
122 | %endif |
|
122 | %endif | |
123 |
|
123 | |||
124 | %if diffset.limited_diff: |
|
124 | %if diffset.limited_diff: | |
125 | ${_('The requested commit is too big and content was truncated.')} |
|
125 | ${_('The requested commit is too big and content was truncated.')} | |
126 |
|
126 | |||
127 | ${_ungettext('%(num)s file changed.', '%(num)s files changed.', diffset.changed_files) % {'num': diffset.changed_files}} |
|
127 | ${_ungettext('%(num)s file changed.', '%(num)s files changed.', diffset.changed_files) % {'num': diffset.changed_files}} | |
128 | <a href="${h.current_route_path(request, fulldiff=1)}" onclick="return confirm('${_("Showing a big diff might take some time and resources, continue?")}')">${_('Show full diff')}</a> |
|
128 | <a href="${h.current_route_path(request, fulldiff=1)}" onclick="return confirm('${_("Showing a big diff might take some time and resources, continue?")}')">${_('Show full diff')}</a> | |
129 | %else: |
|
129 | %else: | |
130 | ${_ungettext('%(num)s file changed: %(linesadd)s inserted, ''%(linesdel)s deleted', |
|
130 | ${_ungettext('%(num)s file changed: %(linesadd)s inserted, ''%(linesdel)s deleted', | |
131 | '%(num)s files changed: %(linesadd)s inserted, %(linesdel)s deleted', diffset.changed_files) % {'num': diffset.changed_files, 'linesadd': diffset.lines_added, 'linesdel': diffset.lines_deleted}} |
|
131 | '%(num)s files changed: %(linesadd)s inserted, %(linesdel)s deleted', diffset.changed_files) % {'num': diffset.changed_files, 'linesadd': diffset.lines_added, 'linesdel': diffset.lines_deleted}} | |
132 | %endif |
|
132 | %endif | |
133 |
|
133 | |||
134 | </h2> |
|
134 | </h2> | |
135 | </div> |
|
135 | </div> | |
136 |
|
136 | |||
137 | %if diffset.has_hidden_changes: |
|
137 | %if diffset.has_hidden_changes: | |
138 | <p class="empty_data">${_('Some changes may be hidden')}</p> |
|
138 | <p class="empty_data">${_('Some changes may be hidden')}</p> | |
139 | %elif not diffset.files: |
|
139 | %elif not diffset.files: | |
140 | <p class="empty_data">${_('No files')}</p> |
|
140 | <p class="empty_data">${_('No files')}</p> | |
141 | %endif |
|
141 | %endif | |
142 |
|
142 | |||
143 | <div class="filediffs"> |
|
143 | <div class="filediffs"> | |
144 | ## initial value could be marked as False later on |
|
144 | ## initial value could be marked as False later on | |
145 | <% over_lines_changed_limit = False %> |
|
145 | <% over_lines_changed_limit = False %> | |
146 | %for i, filediff in enumerate(diffset.files): |
|
146 | %for i, filediff in enumerate(diffset.files): | |
147 |
|
147 | |||
148 | <% |
|
148 | <% | |
149 | lines_changed = filediff.patch['stats']['added'] + filediff.patch['stats']['deleted'] |
|
149 | lines_changed = filediff.patch['stats']['added'] + filediff.patch['stats']['deleted'] | |
150 | over_lines_changed_limit = lines_changed > lines_changed_limit |
|
150 | over_lines_changed_limit = lines_changed > lines_changed_limit | |
151 | %> |
|
151 | %> | |
152 | <input ${collapse_all and 'checked' or ''} class="filediff-collapse-state" id="filediff-collapse-${id(filediff)}" type="checkbox"> |
|
152 | ||
|
153 | <input ${(collapse_all and 'checked' or '')} class="filediff-collapse-state" id="filediff-collapse-${id(filediff)}" type="checkbox"> | |||
153 | <div |
|
154 | <div | |
154 | class="filediff" |
|
155 | class="filediff" | |
155 | data-f-path="${filediff.patch['filename']}" |
|
156 | data-f-path="${filediff.patch['filename']}" | |
156 |
id="a_${h.FID('', filediff.patch['filename'])}" |
|
157 | id="a_${h.FID('', filediff.patch['filename'])}" | |
|
158 | > | |||
|
159 | ||||
157 |
|
|
160 | <label for="filediff-collapse-${id(filediff)}" class="filediff-heading"> | |
158 |
|
|
161 | <div class="filediff-collapse-indicator"></div> | |
159 |
|
|
162 | ${diff_ops(filediff)} | |
160 |
|
|
163 | </label> | |
161 |
|
|
164 | ${diff_menu(filediff, use_comments=use_comments)} | |
162 |
|
|
165 | <table class="cb cb-diff-${c.diffmode} code-highlight ${(over_lines_changed_limit and 'cb-collapsed' or '')}"> | |
|
166 | ||||
|
167 | ## new/deleted/empty content case | |||
163 | %if not filediff.hunks: |
|
168 | % if not filediff.hunks: | |
164 | %for op_id, op_text in filediff.patch['stats']['ops'].items(): |
|
169 | ## Comment container, on a "fake" hunk that contains all data to render comments | |
165 | <tr> |
|
170 | ${render_hunk_lines(c.diffmode, filediff.hunk_ops, use_comments=use_comments, inline_comments=inline_comments)} | |
166 | <td class="cb-text cb-${op_class(op_id)}" ${c.diffmode == 'unified' and 'colspan=4' or 'colspan=6'}> |
|
|||
167 | %if op_id == DEL_FILENODE: |
|
|||
168 | ${_('File was deleted')} |
|
|||
169 | %elif op_id == BIN_FILENODE: |
|
|||
170 | ${_('Binary file hidden')} |
|
|||
171 | %else: |
|
|||
172 | ${op_text} |
|
|||
173 |
|
|
171 | % endif | |
174 | </td> |
|
172 | ||
175 | </tr> |
|
|||
176 | %endfor |
|
|||
177 | %endif |
|
|||
178 |
|
|
173 | %if filediff.limited_diff: | |
179 | <tr class="cb-warning cb-collapser"> |
|
174 | <tr class="cb-warning cb-collapser"> | |
180 | <td class="cb-text" ${c.diffmode == 'unified' and 'colspan=4' or 'colspan=6'}> |
|
175 | <td class="cb-text" ${(c.diffmode == 'unified' and 'colspan=4' or 'colspan=6')}> | |
181 | ${_('The requested commit is too big and content was truncated.')} <a href="${h.current_route_path(request, fulldiff=1)}" onclick="return confirm('${_("Showing a big diff might take some time and resources, continue?")}')">${_('Show full diff')}</a> |
|
176 | ${_('The requested commit is too big and content was truncated.')} <a href="${h.current_route_path(request, fulldiff=1)}" onclick="return confirm('${_("Showing a big diff might take some time and resources, continue?")}')">${_('Show full diff')}</a> | |
182 | </td> |
|
177 | </td> | |
183 | </tr> |
|
178 | </tr> | |
184 | %else: |
|
179 | %else: | |
185 | %if over_lines_changed_limit: |
|
180 | %if over_lines_changed_limit: | |
186 | <tr class="cb-warning cb-collapser"> |
|
181 | <tr class="cb-warning cb-collapser"> | |
187 | <td class="cb-text" ${c.diffmode == 'unified' and 'colspan=4' or 'colspan=6'}> |
|
182 | <td class="cb-text" ${(c.diffmode == 'unified' and 'colspan=4' or 'colspan=6')}> | |
188 | ${_('This diff has been collapsed as it changes many lines, (%i lines changed)' % lines_changed)} |
|
183 | ${_('This diff has been collapsed as it changes many lines, (%i lines changed)' % lines_changed)} | |
189 | <a href="#" class="cb-expand" |
|
184 | <a href="#" class="cb-expand" | |
190 | onclick="$(this).closest('table').removeClass('cb-collapsed'); return false;">${_('Show them')} |
|
185 | onclick="$(this).closest('table').removeClass('cb-collapsed'); return false;">${_('Show them')} | |
191 | </a> |
|
186 | </a> | |
192 | <a href="#" class="cb-collapse" |
|
187 | <a href="#" class="cb-collapse" | |
193 | onclick="$(this).closest('table').addClass('cb-collapsed'); return false;">${_('Hide them')} |
|
188 | onclick="$(this).closest('table').addClass('cb-collapsed'); return false;">${_('Hide them')} | |
194 | </a> |
|
189 | </a> | |
195 | </td> |
|
190 | </td> | |
196 | </tr> |
|
191 | </tr> | |
197 | %endif |
|
192 | %endif | |
198 | %endif |
|
193 | %endif | |
199 |
|
194 | |||
200 | %for hunk in filediff.hunks: |
|
195 | % for hunk in filediff.hunks: | |
201 |
|
|
196 | <tr class="cb-hunk"> | |
202 |
|
|
197 | <td ${(c.diffmode == 'unified' and 'colspan=3' or '')}> | |
203 |
|
|
198 | ## TODO: dan: add ajax loading of more context here | |
204 |
|
|
199 | ## <a href="#"> | |
205 |
|
|
200 | <i class="icon-more"></i> | |
206 |
|
|
201 | ## </a> | |
207 |
|
|
202 | </td> | |
208 |
|
|
203 | <td ${(c.diffmode == 'sideside' and 'colspan=5' or '')}> | |
209 |
|
|
204 | @@ | |
210 |
|
|
205 | -${hunk.source_start},${hunk.source_length} | |
211 |
|
|
206 | +${hunk.target_start},${hunk.target_length} | |
212 |
|
|
207 | ${hunk.section_header} | |
213 |
|
|
208 | </td> | |
214 |
|
|
209 | </tr> | |
215 | %if c.diffmode == 'unified': |
|
210 | ${render_hunk_lines(c.diffmode, hunk, use_comments=use_comments, inline_comments=inline_comments)} | |
216 | ${render_hunk_lines_unified(hunk, use_comments=use_comments, inline_comments=inline_comments)} |
|
|||
217 | %elif c.diffmode == 'sideside': |
|
|||
218 | ${render_hunk_lines_sideside(hunk, use_comments=use_comments, inline_comments=inline_comments)} |
|
|||
219 | %else: |
|
|||
220 | <tr class="cb-line"> |
|
|||
221 | <td>unknown diff mode</td> |
|
|||
222 | </tr> |
|
|||
223 | %endif |
|
|||
224 |
|
|
211 | % endfor | |
225 |
|
212 | |||
226 | <% unmatched_comments = (inline_comments or {}).get(filediff.patch['filename'], {}) %> |
|
213 | <% unmatched_comments = (inline_comments or {}).get(filediff.patch['filename'], {}) %> | |
227 |
|
214 | |||
228 | ## outdated comments that do not fit into currently displayed lines |
|
215 | ## outdated comments that do not fit into currently displayed lines | |
229 | % for lineno, comments in unmatched_comments.items(): |
|
216 | % for lineno, comments in unmatched_comments.items(): | |
230 |
|
217 | |||
231 | %if c.diffmode == 'unified': |
|
218 | %if c.diffmode == 'unified': | |
232 | % if loop.index == 0: |
|
219 | % if loop.index == 0: | |
233 | <tr class="cb-hunk"> |
|
220 | <tr class="cb-hunk"> | |
234 | <td colspan="3"></td> |
|
221 | <td colspan="3"></td> | |
235 | <td> |
|
222 | <td> | |
236 | <div> |
|
223 | <div> | |
237 | ${_('Unmatched inline comments below')} |
|
224 | ${_('Unmatched inline comments below')} | |
238 | </div> |
|
225 | </div> | |
239 | </td> |
|
226 | </td> | |
240 | </tr> |
|
227 | </tr> | |
241 | % endif |
|
228 | % endif | |
242 | <tr class="cb-line"> |
|
229 | <tr class="cb-line"> | |
243 | <td class="cb-data cb-context"></td> |
|
230 | <td class="cb-data cb-context"></td> | |
244 | <td class="cb-lineno cb-context"></td> |
|
231 | <td class="cb-lineno cb-context"></td> | |
245 | <td class="cb-lineno cb-context"></td> |
|
232 | <td class="cb-lineno cb-context"></td> | |
246 | <td class="cb-content cb-context"> |
|
233 | <td class="cb-content cb-context"> | |
247 | ${inline_comments_container(comments, inline_comments)} |
|
234 | ${inline_comments_container(comments, inline_comments)} | |
248 | </td> |
|
235 | </td> | |
249 | </tr> |
|
236 | </tr> | |
250 | %elif c.diffmode == 'sideside': |
|
237 | %elif c.diffmode == 'sideside': | |
251 | % if loop.index == 0: |
|
238 | % if loop.index == 0: | |
252 | <tr class="cb-hunk"> |
|
239 | <tr class="cb-hunk"> | |
253 | <td colspan="2"></td> |
|
240 | <td colspan="2"></td> | |
254 | <td class="cb-line" colspan="6"> |
|
241 | <td class="cb-line" colspan="6"> | |
255 | <div> |
|
242 | <div> | |
256 | ${_('Unmatched comments below')} |
|
243 | ${_('Unmatched comments below')} | |
257 | </div> |
|
244 | </div> | |
258 | </td> |
|
245 | </td> | |
259 | </tr> |
|
246 | </tr> | |
260 | % endif |
|
247 | % endif | |
261 | <tr class="cb-line"> |
|
248 | <tr class="cb-line"> | |
262 | <td class="cb-data cb-context"></td> |
|
249 | <td class="cb-data cb-context"></td> | |
263 | <td class="cb-lineno cb-context"></td> |
|
250 | <td class="cb-lineno cb-context"></td> | |
264 | <td class="cb-content cb-context"> |
|
251 | <td class="cb-content cb-context"> | |
265 | % if lineno.startswith('o'): |
|
252 | % if lineno.startswith('o'): | |
266 | ${inline_comments_container(comments, inline_comments)} |
|
253 | ${inline_comments_container(comments, inline_comments)} | |
267 | % endif |
|
254 | % endif | |
268 | </td> |
|
255 | </td> | |
269 |
|
256 | |||
270 | <td class="cb-data cb-context"></td> |
|
257 | <td class="cb-data cb-context"></td> | |
271 | <td class="cb-lineno cb-context"></td> |
|
258 | <td class="cb-lineno cb-context"></td> | |
272 | <td class="cb-content cb-context"> |
|
259 | <td class="cb-content cb-context"> | |
273 | % if lineno.startswith('n'): |
|
260 | % if lineno.startswith('n'): | |
274 | ${inline_comments_container(comments, inline_comments)} |
|
261 | ${inline_comments_container(comments, inline_comments)} | |
275 | % endif |
|
262 | % endif | |
276 | </td> |
|
263 | </td> | |
277 | </tr> |
|
264 | </tr> | |
278 | %endif |
|
265 | %endif | |
279 |
|
266 | |||
280 | % endfor |
|
267 | % endfor | |
281 |
|
268 | |||
282 | </table> |
|
269 | </table> | |
283 | </div> |
|
270 | </div> | |
284 | %endfor |
|
271 | %endfor | |
285 |
|
272 | |||
286 | ## outdated comments that are made for a file that has been deleted |
|
273 | ## outdated comments that are made for a file that has been deleted | |
287 | % for filename, comments_dict in (deleted_files_comments or {}).items(): |
|
274 | % for filename, comments_dict in (deleted_files_comments or {}).items(): | |
288 | <% |
|
275 | <% | |
289 | display_state = 'display: none' |
|
276 | display_state = 'display: none' | |
290 | open_comments_in_file = [x for x in comments_dict['comments'] if x.outdated is False] |
|
277 | open_comments_in_file = [x for x in comments_dict['comments'] if x.outdated is False] | |
291 | if open_comments_in_file: |
|
278 | if open_comments_in_file: | |
292 | display_state = '' |
|
279 | display_state = '' | |
293 | %> |
|
280 | %> | |
294 | <div class="filediffs filediff-outdated" style="${display_state}"> |
|
281 | <div class="filediffs filediff-outdated" style="${display_state}"> | |
295 | <input ${collapse_all and 'checked' or ''} class="filediff-collapse-state" id="filediff-collapse-${id(filename)}" type="checkbox"> |
|
282 | <input ${(collapse_all and 'checked' or '')} class="filediff-collapse-state" id="filediff-collapse-${id(filename)}" type="checkbox"> | |
296 | <div class="filediff" data-f-path="${filename}" id="a_${h.FID('', filename)}"> |
|
283 | <div class="filediff" data-f-path="${filename}" id="a_${h.FID('', filename)}"> | |
297 | <label for="filediff-collapse-${id(filename)}" class="filediff-heading"> |
|
284 | <label for="filediff-collapse-${id(filename)}" class="filediff-heading"> | |
298 | <div class="filediff-collapse-indicator"></div> |
|
285 | <div class="filediff-collapse-indicator"></div> | |
299 | <span class="pill"> |
|
286 | <span class="pill"> | |
300 | ## file was deleted |
|
287 | ## file was deleted | |
301 | <strong>${filename}</strong> |
|
288 | <strong>${filename}</strong> | |
302 | </span> |
|
289 | </span> | |
303 | <span class="pill-group" style="float: left"> |
|
290 | <span class="pill-group" style="float: left"> | |
304 | ## file op, doesn't need translation |
|
291 | ## file op, doesn't need translation | |
305 | <span class="pill" op="removed">removed in this version</span> |
|
292 | <span class="pill" op="removed">removed in this version</span> | |
306 | </span> |
|
293 | </span> | |
307 |           <a class="pill filediff-anchor" href="#a_${h.FID('', filename)}">¶</a> |
|
294 |           <a class="pill filediff-anchor" href="#a_${h.FID('', filename)}">¶</a> | |
308 | <span class="pill-group" style="float: right"> |
|
295 | <span class="pill-group" style="float: right"> | |
309 | <span class="pill" op="deleted">-${comments_dict['stats']}</span> |
|
296 | <span class="pill" op="deleted">-${comments_dict['stats']}</span> | |
310 | </span> |
|
297 | </span> | |
311 | </label> |
|
298 | </label> | |
312 |
|
299 | |||
313 | <table class="cb cb-diff-${c.diffmode} code-highlight ${over_lines_changed_limit and 'cb-collapsed' or ''}"> |
|
300 | <table class="cb cb-diff-${c.diffmode} code-highlight ${over_lines_changed_limit and 'cb-collapsed' or ''}"> | |
314 | <tr> |
|
301 | <tr> | |
315 | % if c.diffmode == 'unified': |
|
302 | % if c.diffmode == 'unified': | |
316 | <td></td> |
|
303 | <td></td> | |
317 | %endif |
|
304 | %endif | |
318 |
|
305 | |||
319 | <td></td> |
|
306 | <td></td> | |
320 | <td class="cb-text cb-${op_class(BIN_FILENODE)}" ${c.diffmode == 'unified' and 'colspan=4' or 'colspan=5'}> |
|
307 | <td class="cb-text cb-${op_class(BIN_FILENODE)}" ${c.diffmode == 'unified' and 'colspan=4' or 'colspan=5'}> | |
321 | ${_('File was deleted in this version. There are still outdated/unresolved comments attached to it.')} |
|
308 | ${_('File was deleted in this version. There are still outdated/unresolved comments attached to it.')} | |
322 | </td> |
|
309 | </td> | |
323 | </tr> |
|
310 | </tr> | |
324 | %if c.diffmode == 'unified': |
|
311 | %if c.diffmode == 'unified': | |
325 | <tr class="cb-line"> |
|
312 | <tr class="cb-line"> | |
326 | <td class="cb-data cb-context"></td> |
|
313 | <td class="cb-data cb-context"></td> | |
327 | <td class="cb-lineno cb-context"></td> |
|
314 | <td class="cb-lineno cb-context"></td> | |
328 | <td class="cb-lineno cb-context"></td> |
|
315 | <td class="cb-lineno cb-context"></td> | |
329 | <td class="cb-content cb-context"> |
|
316 | <td class="cb-content cb-context"> | |
330 | ${inline_comments_container(comments_dict['comments'], inline_comments)} |
|
317 | ${inline_comments_container(comments_dict['comments'], inline_comments)} | |
331 | </td> |
|
318 | </td> | |
332 | </tr> |
|
319 | </tr> | |
333 | %elif c.diffmode == 'sideside': |
|
320 | %elif c.diffmode == 'sideside': | |
334 | <tr class="cb-line"> |
|
321 | <tr class="cb-line"> | |
335 | <td class="cb-data cb-context"></td> |
|
322 | <td class="cb-data cb-context"></td> | |
336 | <td class="cb-lineno cb-context"></td> |
|
323 | <td class="cb-lineno cb-context"></td> | |
337 | <td class="cb-content cb-context"></td> |
|
324 | <td class="cb-content cb-context"></td> | |
338 |
|
325 | |||
339 | <td class="cb-data cb-context"></td> |
|
326 | <td class="cb-data cb-context"></td> | |
340 | <td class="cb-lineno cb-context"></td> |
|
327 | <td class="cb-lineno cb-context"></td> | |
341 | <td class="cb-content cb-context"> |
|
328 | <td class="cb-content cb-context"> | |
342 | ${inline_comments_container(comments_dict['comments'], inline_comments)} |
|
329 | ${inline_comments_container(comments_dict['comments'], inline_comments)} | |
343 | </td> |
|
330 | </td> | |
344 | </tr> |
|
331 | </tr> | |
345 | %endif |
|
332 | %endif | |
346 | </table> |
|
333 | </table> | |
347 | </div> |
|
334 | </div> | |
348 | </div> |
|
335 | </div> | |
349 | % endfor |
|
336 | % endfor | |
350 |
|
337 | |||
351 | </div> |
|
338 | </div> | |
352 | </div> |
|
339 | </div> | |
353 | </%def> |
|
340 | </%def> | |
354 |
|
341 | |||
355 | <%def name="diff_ops(filediff)"> |
|
342 | <%def name="diff_ops(filediff)"> | |
356 | <% |
|
343 | <% | |
357 | from rhodecode.lib.diffs import NEW_FILENODE, DEL_FILENODE, \ |
|
344 | from rhodecode.lib.diffs import NEW_FILENODE, DEL_FILENODE, \ | |
358 | MOD_FILENODE, RENAMED_FILENODE, CHMOD_FILENODE, BIN_FILENODE, COPIED_FILENODE |
|
345 | MOD_FILENODE, RENAMED_FILENODE, CHMOD_FILENODE, BIN_FILENODE, COPIED_FILENODE | |
359 | %> |
|
346 | %> | |
360 | <span class="pill"> |
|
347 | <span class="pill"> | |
361 | %if filediff.source_file_path and filediff.target_file_path: |
|
348 | %if filediff.source_file_path and filediff.target_file_path: | |
362 | %if filediff.source_file_path != filediff.target_file_path: |
|
349 | %if filediff.source_file_path != filediff.target_file_path: | |
363 | ## file was renamed, or copied |
|
350 | ## file was renamed, or copied | |
364 | %if RENAMED_FILENODE in filediff.patch['stats']['ops']: |
|
351 | %if RENAMED_FILENODE in filediff.patch['stats']['ops']: | |
365 |           <strong>${filediff.target_file_path}</strong> ⬅ <del>${filediff.source_file_path}</del> |
|
352 |           <strong>${filediff.target_file_path}</strong> ⬅ <del>${filediff.source_file_path}</del> | |
366 | <% final_path = filediff.target_file_path %> |
|
353 | <% final_path = filediff.target_file_path %> | |
367 | %elif COPIED_FILENODE in filediff.patch['stats']['ops']: |
|
354 | %elif COPIED_FILENODE in filediff.patch['stats']['ops']: | |
368 |           <strong>${filediff.target_file_path}</strong> ⬅ ${filediff.source_file_path} |
|
355 |           <strong>${filediff.target_file_path}</strong> ⬅ ${filediff.source_file_path} | |
369 | <% final_path = filediff.target_file_path %> |
|
356 | <% final_path = filediff.target_file_path %> | |
370 | %endif |
|
357 | %endif | |
371 | %else: |
|
358 | %else: | |
372 | ## file was modified |
|
359 | ## file was modified | |
373 | <strong>${filediff.source_file_path}</strong> |
|
360 | <strong>${filediff.source_file_path}</strong> | |
374 | <% final_path = filediff.source_file_path %> |
|
361 | <% final_path = filediff.source_file_path %> | |
375 | %endif |
|
362 | %endif | |
376 | %else: |
|
363 | %else: | |
377 | %if filediff.source_file_path: |
|
364 | %if filediff.source_file_path: | |
378 | ## file was deleted |
|
365 | ## file was deleted | |
379 | <strong>${filediff.source_file_path}</strong> |
|
366 | <strong>${filediff.source_file_path}</strong> | |
380 | <% final_path = filediff.source_file_path %> |
|
367 | <% final_path = filediff.source_file_path %> | |
381 | %else: |
|
368 | %else: | |
382 | ## file was added |
|
369 | ## file was added | |
383 | <strong>${filediff.target_file_path}</strong> |
|
370 | <strong>${filediff.target_file_path}</strong> | |
384 | <% final_path = filediff.target_file_path %> |
|
371 | <% final_path = filediff.target_file_path %> | |
385 | %endif |
|
372 | %endif | |
386 | %endif |
|
373 | %endif | |
387 | <i style="color: #aaa" class="tooltip icon-clipboard clipboard-action" data-clipboard-text="${final_path}" title="${_('Copy the full path')}" onclick="return false;"></i> |
|
374 | <i style="color: #aaa" class="tooltip icon-clipboard clipboard-action" data-clipboard-text="${final_path}" title="${_('Copy the full path')}" onclick="return false;"></i> | |
388 | </span> |
|
375 | </span> | |
389 | <span class="pill-group" style="float: left"> |
|
376 | <span class="pill-group" style="float: left"> | |
390 | %if filediff.limited_diff: |
|
377 | %if filediff.limited_diff: | |
391 | <span class="pill tooltip" op="limited" title="The stats for this diff are not complete">limited diff</span> |
|
378 | <span class="pill tooltip" op="limited" title="The stats for this diff are not complete">limited diff</span> | |
392 | %endif |
|
379 | %endif | |
393 |
|
380 | |||
394 | %if RENAMED_FILENODE in filediff.patch['stats']['ops']: |
|
381 | %if RENAMED_FILENODE in filediff.patch['stats']['ops']: | |
395 | <span class="pill" op="renamed">renamed</span> |
|
382 | <span class="pill" op="renamed">renamed</span> | |
396 | %endif |
|
383 | %endif | |
397 |
|
384 | |||
398 | %if COPIED_FILENODE in filediff.patch['stats']['ops']: |
|
385 | %if COPIED_FILENODE in filediff.patch['stats']['ops']: | |
399 | <span class="pill" op="copied">copied</span> |
|
386 | <span class="pill" op="copied">copied</span> | |
400 | %endif |
|
387 | %endif | |
401 |
|
388 | |||
402 | %if NEW_FILENODE in filediff.patch['stats']['ops']: |
|
389 | %if NEW_FILENODE in filediff.patch['stats']['ops']: | |
403 | <span class="pill" op="created">created</span> |
|
390 | <span class="pill" op="created">created</span> | |
404 | %if filediff['target_mode'].startswith('120'): |
|
391 | %if filediff['target_mode'].startswith('120'): | |
405 | <span class="pill" op="symlink">symlink</span> |
|
392 | <span class="pill" op="symlink">symlink</span> | |
406 | %else: |
|
393 | %else: | |
407 | <span class="pill" op="mode">${nice_mode(filediff['target_mode'])}</span> |
|
394 | <span class="pill" op="mode">${nice_mode(filediff['target_mode'])}</span> | |
408 | %endif |
|
395 | %endif | |
409 | %endif |
|
396 | %endif | |
410 |
|
397 | |||
411 | %if DEL_FILENODE in filediff.patch['stats']['ops']: |
|
398 | %if DEL_FILENODE in filediff.patch['stats']['ops']: | |
412 | <span class="pill" op="removed">removed</span> |
|
399 | <span class="pill" op="removed">removed</span> | |
413 | %endif |
|
400 | %endif | |
414 |
|
401 | |||
415 | %if CHMOD_FILENODE in filediff.patch['stats']['ops']: |
|
402 | %if CHMOD_FILENODE in filediff.patch['stats']['ops']: | |
416 | <span class="pill" op="mode"> |
|
403 | <span class="pill" op="mode"> | |
417 |           ${nice_mode(filediff['source_mode'])} ➡ ${nice_mode(filediff['target_mode'])} |
|
404 |           ${nice_mode(filediff['source_mode'])} ➡ ${nice_mode(filediff['target_mode'])} | |
418 | </span> |
|
405 | </span> | |
419 | %endif |
|
406 | %endif | |
420 | </span> |
|
407 | </span> | |
421 |
|
408 | |||
422 |       <a class="pill filediff-anchor" href="#a_${h.FID('', filediff.patch['filename'])}">¶</a> |
|
409 |       <a class="pill filediff-anchor" href="#a_${h.FID('', filediff.patch['filename'])}">¶</a> | |
423 |
|
410 | |||
424 | <span class="pill-group" style="float: right"> |
|
411 | <span class="pill-group" style="float: right"> | |
425 | %if BIN_FILENODE in filediff.patch['stats']['ops']: |
|
412 | %if BIN_FILENODE in filediff.patch['stats']['ops']: | |
426 | <span class="pill" op="binary">binary</span> |
|
413 | <span class="pill" op="binary">binary</span> | |
427 | %if MOD_FILENODE in filediff.patch['stats']['ops']: |
|
414 | %if MOD_FILENODE in filediff.patch['stats']['ops']: | |
428 | <span class="pill" op="modified">modified</span> |
|
415 | <span class="pill" op="modified">modified</span> | |
429 | %endif |
|
416 | %endif | |
430 | %endif |
|
417 | %endif | |
431 | %if filediff.patch['stats']['added']: |
|
418 | %if filediff.patch['stats']['added']: | |
432 | <span class="pill" op="added">+${filediff.patch['stats']['added']}</span> |
|
419 | <span class="pill" op="added">+${filediff.patch['stats']['added']}</span> | |
433 | %endif |
|
420 | %endif | |
434 | %if filediff.patch['stats']['deleted']: |
|
421 | %if filediff.patch['stats']['deleted']: | |
435 | <span class="pill" op="deleted">-${filediff.patch['stats']['deleted']}</span> |
|
422 | <span class="pill" op="deleted">-${filediff.patch['stats']['deleted']}</span> | |
436 | %endif |
|
423 | %endif | |
437 | </span> |
|
424 | </span> | |
438 |
|
425 | |||
439 | </%def> |
|
426 | </%def> | |
440 |
|
427 | |||
441 | <%def name="nice_mode(filemode)"> |
|
428 | <%def name="nice_mode(filemode)"> | |
442 | ${filemode.startswith('100') and filemode[3:] or filemode} |
|
429 | ${filemode.startswith('100') and filemode[3:] or filemode} | |
443 | </%def> |
|
430 | </%def> | |
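
The nice_mode() def above simply trims the leading '100' from regular-file modes before they are shown in the mode pills. A throwaway Python equivalent, for illustration only:

    # Mirrors the expression in nice_mode() above.
    def nice_mode(filemode):
        return filemode[3:] if filemode.startswith('100') else filemode

    print(nice_mode('100644'))  # 644
    print(nice_mode('120000'))  # 120000 (symlink modes are shown unchanged)
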
444 |
|
431 | |||
445 | <%def name="diff_menu(filediff, use_comments=False)"> |
|
432 | <%def name="diff_menu(filediff, use_comments=False)"> | |
446 | <div class="filediff-menu"> |
|
433 | <div class="filediff-menu"> | |
447 | %if filediff.diffset.source_ref: |
|
434 | %if filediff.diffset.source_ref: | |
448 | %if filediff.operation in ['D', 'M']: |
|
435 | %if filediff.operation in ['D', 'M']: | |
449 | <a |
|
436 | <a | |
450 | class="tooltip" |
|
437 | class="tooltip" | |
451 | href="${h.route_path('repo_files',repo_name=filediff.diffset.repo_name,commit_id=filediff.diffset.source_ref,f_path=filediff.source_file_path)}" |
|
438 | href="${h.route_path('repo_files',repo_name=filediff.diffset.repo_name,commit_id=filediff.diffset.source_ref,f_path=filediff.source_file_path)}" | |
452 | title="${h.tooltip(_('Show file at commit: %(commit_id)s') % {'commit_id': filediff.diffset.source_ref[:12]})}" |
|
439 | title="${h.tooltip(_('Show file at commit: %(commit_id)s') % {'commit_id': filediff.diffset.source_ref[:12]})}" | |
453 | > |
|
440 | > | |
454 | ${_('Show file before')} |
|
441 | ${_('Show file before')} | |
455 | </a> | |
|
442 | </a> | | |
456 | %else: |
|
443 | %else: | |
457 | <span |
|
444 | <span | |
458 | class="tooltip" |
|
445 | class="tooltip" | |
459 | title="${h.tooltip(_('File no longer present at commit: %(commit_id)s') % {'commit_id': filediff.diffset.source_ref[:12]})}" |
|
446 | title="${h.tooltip(_('File no longer present at commit: %(commit_id)s') % {'commit_id': filediff.diffset.source_ref[:12]})}" | |
460 | > |
|
447 | > | |
461 | ${_('Show file before')} |
|
448 | ${_('Show file before')} | |
462 | </span> | |
|
449 | </span> | | |
463 | %endif |
|
450 | %endif | |
464 | %if filediff.operation in ['A', 'M']: |
|
451 | %if filediff.operation in ['A', 'M']: | |
465 | <a |
|
452 | <a | |
466 | class="tooltip" |
|
453 | class="tooltip" | |
467 | href="${h.route_path('repo_files',repo_name=filediff.diffset.source_repo_name,commit_id=filediff.diffset.target_ref,f_path=filediff.target_file_path)}" |
|
454 | href="${h.route_path('repo_files',repo_name=filediff.diffset.source_repo_name,commit_id=filediff.diffset.target_ref,f_path=filediff.target_file_path)}" | |
468 | title="${h.tooltip(_('Show file at commit: %(commit_id)s') % {'commit_id': filediff.diffset.target_ref[:12]})}" |
|
455 | title="${h.tooltip(_('Show file at commit: %(commit_id)s') % {'commit_id': filediff.diffset.target_ref[:12]})}" | |
469 | > |
|
456 | > | |
470 | ${_('Show file after')} |
|
457 | ${_('Show file after')} | |
471 | </a> | |
|
458 | </a> | | |
472 | %else: |
|
459 | %else: | |
473 | <span |
|
460 | <span | |
474 | class="tooltip" |
|
461 | class="tooltip" | |
475 | title="${h.tooltip(_('File no longer present at commit: %(commit_id)s') % {'commit_id': filediff.diffset.target_ref[:12]})}" |
|
462 | title="${h.tooltip(_('File no longer present at commit: %(commit_id)s') % {'commit_id': filediff.diffset.target_ref[:12]})}" | |
476 | > |
|
463 | > | |
477 | ${_('Show file after')} |
|
464 | ${_('Show file after')} | |
478 | </span> | |
|
465 | </span> | | |
479 | %endif |
|
466 | %endif | |
480 | <a |
|
467 | <a | |
481 | class="tooltip" |
|
468 | class="tooltip" | |
482 | title="${h.tooltip(_('Raw diff'))}" |
|
469 | title="${h.tooltip(_('Raw diff'))}" | |
483 | href="${h.route_path('repo_files_diff',repo_name=filediff.diffset.repo_name,f_path=filediff.target_file_path, _query=dict(diff2=filediff.diffset.target_ref,diff1=filediff.diffset.source_ref,diff='raw'))}" |
|
470 | href="${h.route_path('repo_files_diff',repo_name=filediff.diffset.repo_name,f_path=filediff.target_file_path, _query=dict(diff2=filediff.diffset.target_ref,diff1=filediff.diffset.source_ref,diff='raw'))}" | |
484 | > |
|
471 | > | |
485 | ${_('Raw diff')} |
|
472 | ${_('Raw diff')} | |
486 | </a> | |
|
473 | </a> | | |
487 | <a |
|
474 | <a | |
488 | class="tooltip" |
|
475 | class="tooltip" | |
489 | title="${h.tooltip(_('Download diff'))}" |
|
476 | title="${h.tooltip(_('Download diff'))}" | |
490 | href="${h.route_path('repo_files_diff',repo_name=filediff.diffset.repo_name,f_path=filediff.target_file_path, _query=dict(diff2=filediff.diffset.target_ref,diff1=filediff.diffset.source_ref,diff='download'))}" |
|
477 | href="${h.route_path('repo_files_diff',repo_name=filediff.diffset.repo_name,f_path=filediff.target_file_path, _query=dict(diff2=filediff.diffset.target_ref,diff1=filediff.diffset.source_ref,diff='download'))}" | |
491 | > |
|
478 | > | |
492 | ${_('Download diff')} |
|
479 | ${_('Download diff')} | |
493 | </a> |
|
480 | </a> | |
494 | % if use_comments: |
|
481 | % if use_comments: | |
495 | | |
|
482 | | | |
496 | % endif |
|
483 | % endif | |
497 |
|
484 | |||
498 | ## TODO: dan: refactor ignorews_url and context_url into the diff renderer same as diffmode=unified/sideside. Also use ajax to load more context (by clicking hunks) |
|
485 | ## TODO: dan: refactor ignorews_url and context_url into the diff renderer same as diffmode=unified/sideside. Also use ajax to load more context (by clicking hunks) | |
499 | %if hasattr(c, 'ignorews_url'): |
|
486 | %if hasattr(c, 'ignorews_url'): | |
500 | ${c.ignorews_url(request, h.FID('', filediff.patch['filename']))} |
|
487 | ${c.ignorews_url(request, h.FID('', filediff.patch['filename']))} | |
501 | %endif |
|
488 | %endif | |
502 | %if hasattr(c, 'context_url'): |
|
489 | %if hasattr(c, 'context_url'): | |
503 | ${c.context_url(request, h.FID('', filediff.patch['filename']))} |
|
490 | ${c.context_url(request, h.FID('', filediff.patch['filename']))} | |
504 | %endif |
|
491 | %endif | |
505 |
|
492 | |||
506 | %if use_comments: |
|
493 | %if use_comments: | |
507 | <a href="#" onclick="return Rhodecode.comments.toggleComments(this);"> |
|
494 | <a href="#" onclick="return Rhodecode.comments.toggleComments(this);"> | |
508 | <span class="show-comment-button">${_('Show comments')}</span><span class="hide-comment-button">${_('Hide comments')}</span> |
|
495 | <span class="show-comment-button">${_('Show comments')}</span><span class="hide-comment-button">${_('Hide comments')}</span> | |
509 | </a> |
|
496 | </a> | |
510 | %endif |
|
497 | %endif | |
511 | %endif |
|
498 | %endif | |
512 | </div> |
|
499 | </div> | |
513 | </%def> |
|
500 | </%def> | |
514 |
|
501 | |||
515 |
|
502 | |||
516 | <%def name="inline_comments_container(comments, inline_comments)"> |
|
503 | <%def name="inline_comments_container(comments, inline_comments)"> | |
517 | <div class="inline-comments"> |
|
504 | <div class="inline-comments"> | |
518 | %for comment in comments: |
|
505 | %for comment in comments: | |
519 | ${commentblock.comment_block(comment, inline=True)} |
|
506 | ${commentblock.comment_block(comment, inline=True)} | |
520 | %endfor |
|
507 | %endfor | |
521 | % if comments and comments[-1].outdated: |
|
508 | % if comments and comments[-1].outdated: | |
522 | <span class="btn btn-secondary cb-comment-add-button comment-outdated}" |
|
509 | <span class="btn btn-secondary cb-comment-add-button comment-outdated}" | |
523 | style="display: none;}"> |
|
510 | style="display: none;}"> | |
524 | ${_('Add another comment')} |
|
511 | ${_('Add another comment')} | |
525 | </span> |
|
512 | </span> | |
526 | % else: |
|
513 | % else: | |
527 | <span onclick="return Rhodecode.comments.createComment(this)" |
|
514 | <span onclick="return Rhodecode.comments.createComment(this)" | |
528 | class="btn btn-secondary cb-comment-add-button"> |
|
515 | class="btn btn-secondary cb-comment-add-button"> | |
529 | ${_('Add another comment')} |
|
516 | ${_('Add another comment')} | |
530 | </span> |
|
517 | </span> | |
531 | % endif |
|
518 | % endif | |
532 |
|
519 | |||
533 | </div> |
|
520 | </div> | |
534 | </%def> |
|
521 | </%def> | |
535 |
|
522 | |||
536 | <%! |
|
523 | <%! | |
537 | def get_comments_for(diff_type, comments, filename, line_version, line_number): |
|
524 | def get_comments_for(diff_type, comments, filename, line_version, line_number): | |
538 | if hasattr(filename, 'unicode_path'): |
|
525 | if hasattr(filename, 'unicode_path'): | |
539 | filename = filename.unicode_path |
|
526 | filename = filename.unicode_path | |
540 |
|
527 | |||
541 | if not isinstance(filename, basestring): |
|
528 | if not isinstance(filename, basestring): | |
542 | return None |
|
529 | return None | |
543 |
|
530 | |||
544 |     line_key = '{}{}'.format(line_version, line_number) ## e.g. o37, n12 |
|
531 |     line_key = '{}{}'.format(line_version, line_number) ## e.g. o37, n12 | |
545 |
|
532 | |||
546 | if comments and filename in comments: |
|
533 | if comments and filename in comments: | |
547 | file_comments = comments[filename] |
|
534 | file_comments = comments[filename] | |
548 | if line_key in file_comments: |
|
535 | if line_key in file_comments: | |
549 | data = file_comments.pop(line_key) |
|
536 | data = file_comments.pop(line_key) | |
550 | return data |
|
537 | return data | |
551 | %> |
|
538 | %> | |
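
A hedged sketch of the inline_comments structure that get_comments_for() above walks. Comments appear to be keyed first by file path and then by a version-prefixed line key ('o' for the old side, 'n' for the new side, e.g. 'o37', 'n12'), and matched entries are popped so whatever remains is later rendered as unmatched comments. The sample data and plain strings below are made up; real entries are comment objects.

    # Hand-written example data, for illustration only.
    inline_comments = {
        'docs/index.rst': {
            'o37': ['comment on old-side line 37'],
            'n12': ['comment on new-side line 12'],
        },
    }

    def get_comments_for(comments, filename, line_version, line_number):
        # Same lookup/pop behaviour as the Mako block above, minus the
        # unicode_path / basestring handling.
        line_key = '{}{}'.format(line_version, line_number)  # e.g. 'o37', 'n12'
        if comments and filename in comments:
            file_comments = comments[filename]
            if line_key in file_comments:
                return file_comments.pop(line_key)

    print(get_comments_for(inline_comments, 'docs/index.rst', 'n', 12))
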
552 |
|
539 | |||
553 | <%def name="render_hunk_lines_sideside(hunk, use_comments=False, inline_comments=None)"> |
|
540 | <%def name="render_hunk_lines_sideside(hunk, use_comments=False, inline_comments=None)"> | |
554 |
|
541 | |||
555 | %for i, line in enumerate(hunk.sideside): |
|
542 | %for i, line in enumerate(hunk.sideside): | |
556 | <% |
|
543 | <% | |
557 | old_line_anchor, new_line_anchor = None, None |
|
544 | old_line_anchor, new_line_anchor = None, None | |
558 | if line.original.lineno: |
|
545 | if line.original.lineno: | |
559 | old_line_anchor = diff_line_anchor(hunk.source_file_path, line.original.lineno, 'o') |
|
546 | old_line_anchor = diff_line_anchor(hunk.source_file_path, line.original.lineno, 'o') | |
560 | if line.modified.lineno: |
|
547 | if line.modified.lineno: | |
561 | new_line_anchor = diff_line_anchor(hunk.target_file_path, line.modified.lineno, 'n') |
|
548 | new_line_anchor = diff_line_anchor(hunk.target_file_path, line.modified.lineno, 'n') | |
562 | %> |
|
549 | %> | |
563 |
|
550 | |||
564 | <tr class="cb-line"> |
|
551 | <tr class="cb-line"> | |
565 | <td class="cb-data ${action_class(line.original.action)}" |
|
552 | <td class="cb-data ${action_class(line.original.action)}" | |
566 | data-line-no="${line.original.lineno}" |
|
553 | data-line-no="${line.original.lineno}" | |
567 | > |
|
554 | > | |
568 | <div> |
|
555 | <div> | |
569 |
|
556 | |||
570 | <% line_old_comments = None %> |
|
557 | <% line_old_comments = None %> | |
571 | %if line.original.get_comment_args: |
|
558 | %if line.original.get_comment_args: | |
572 | <% line_old_comments = get_comments_for('side-by-side', inline_comments, *line.original.get_comment_args) %> |
|
559 | <% line_old_comments = get_comments_for('side-by-side', inline_comments, *line.original.get_comment_args) %> | |
573 | %endif |
|
560 | %endif | |
574 | %if line_old_comments: |
|
561 | %if line_old_comments: | |
575 | <% has_outdated = any([x.outdated for x in line_old_comments]) %> |
|
562 | <% has_outdated = any([x.outdated for x in line_old_comments]) %> | |
576 | % if has_outdated: |
|
563 | % if has_outdated: | |
577 | <i title="${_('comments including outdated')}:${len(line_old_comments)}" class="icon-comment_toggle" onclick="return Rhodecode.comments.toggleLineComments(this)"></i> |
|
564 | <i title="${_('comments including outdated')}:${len(line_old_comments)}" class="icon-comment_toggle" onclick="return Rhodecode.comments.toggleLineComments(this)"></i> | |
578 | % else: |
|
565 | % else: | |
579 | <i title="${_('comments')}: ${len(line_old_comments)}" class="icon-comment" onclick="return Rhodecode.comments.toggleLineComments(this)"></i> |
|
566 | <i title="${_('comments')}: ${len(line_old_comments)}" class="icon-comment" onclick="return Rhodecode.comments.toggleLineComments(this)"></i> | |
580 | % endif |
|
567 | % endif | |
581 | %endif |
|
568 | %endif | |
582 | </div> |
|
569 | </div> | |
583 | </td> |
|
570 | </td> | |
584 | <td class="cb-lineno ${action_class(line.original.action)}" |
|
571 | <td class="cb-lineno ${action_class(line.original.action)}" | |
585 | data-line-no="${line.original.lineno}" |
|
572 | data-line-no="${line.original.lineno}" | |
586 | %if old_line_anchor: |
|
573 | %if old_line_anchor: | |
587 | id="${old_line_anchor}" |
|
574 | id="${old_line_anchor}" | |
588 | %endif |
|
575 | %endif | |
589 | > |
|
576 | > | |
590 | %if line.original.lineno: |
|
577 | %if line.original.lineno: | |
591 | <a name="${old_line_anchor}" href="#${old_line_anchor}">${line.original.lineno}</a> |
|
578 | <a name="${old_line_anchor}" href="#${old_line_anchor}">${line.original.lineno}</a> | |
592 | %endif |
|
579 | %endif | |
593 | </td> |
|
580 | </td> | |
594 | <td class="cb-content ${action_class(line.original.action)}" |
|
581 | <td class="cb-content ${action_class(line.original.action)}" | |
595 | data-line-no="o${line.original.lineno}" |
|
582 | data-line-no="o${line.original.lineno}" | |
596 | > |
|
583 | > | |
597 | %if use_comments and line.original.lineno: |
|
584 | %if use_comments and line.original.lineno: | |
598 | ${render_add_comment_button()} |
|
585 | ${render_add_comment_button()} | |
599 | %endif |
|
586 | %endif | |
600 | <span class="cb-code">${line.original.action} ${line.original.content or '' | n}</span> |
|
587 | <span class="cb-code">${line.original.action} ${line.original.content or '' | n}</span> | |
601 |
|
588 | |||
602 | %if use_comments and line.original.lineno and line_old_comments: |
|
589 | %if use_comments and line.original.lineno and line_old_comments: | |
603 | ${inline_comments_container(line_old_comments, inline_comments)} |
|
590 | ${inline_comments_container(line_old_comments, inline_comments)} | |
604 | %endif |
|
591 | %endif | |
605 |
|
592 | |||
606 | </td> |
|
593 | </td> | |
607 | <td class="cb-data ${action_class(line.modified.action)}" |
|
594 | <td class="cb-data ${action_class(line.modified.action)}" | |
608 | data-line-no="${line.modified.lineno}" |
|
595 | data-line-no="${line.modified.lineno}" | |
609 | > |
|
596 | > | |
610 | <div> |
|
597 | <div> | |
611 |
|
598 | |||
612 | %if line.modified.get_comment_args: |
|
599 | %if line.modified.get_comment_args: | |
613 | <% line_new_comments = get_comments_for('side-by-side', inline_comments, *line.modified.get_comment_args) %> |
|
600 | <% line_new_comments = get_comments_for('side-by-side', inline_comments, *line.modified.get_comment_args) %> | |
614 | %else: |
|
601 | %else: | |
615 | <% line_new_comments = None%> |
|
602 | <% line_new_comments = None%> | |
616 | %endif |
|
603 | %endif | |
617 | %if line_new_comments: |
|
604 | %if line_new_comments: | |
618 | <% has_outdated = any([x.outdated for x in line_new_comments]) %> |
|
605 | <% has_outdated = any([x.outdated for x in line_new_comments]) %> | |
619 | % if has_outdated: |
|
606 | % if has_outdated: | |
620 | <i title="${_('comments including outdated')}:${len(line_new_comments)}" class="icon-comment_toggle" onclick="return Rhodecode.comments.toggleLineComments(this)"></i> |
|
607 | <i title="${_('comments including outdated')}:${len(line_new_comments)}" class="icon-comment_toggle" onclick="return Rhodecode.comments.toggleLineComments(this)"></i> | |
621 | % else: |
|
608 | % else: | |
622 | <i title="${_('comments')}: ${len(line_new_comments)}" class="icon-comment" onclick="return Rhodecode.comments.toggleLineComments(this)"></i> |
|
609 | <i title="${_('comments')}: ${len(line_new_comments)}" class="icon-comment" onclick="return Rhodecode.comments.toggleLineComments(this)"></i> | |
623 | % endif |
|
610 | % endif | |
624 | %endif |
|
611 | %endif | |
625 | </div> |
|
612 | </div> | |
626 | </td> |
|
613 | </td> | |
627 | <td class="cb-lineno ${action_class(line.modified.action)}" |
|
614 | <td class="cb-lineno ${action_class(line.modified.action)}" | |
628 | data-line-no="${line.modified.lineno}" |
|
615 | data-line-no="${line.modified.lineno}" | |
629 | %if new_line_anchor: |
|
616 | %if new_line_anchor: | |
630 | id="${new_line_anchor}" |
|
617 | id="${new_line_anchor}" | |
631 | %endif |
|
618 | %endif | |
632 | > |
|
619 | > | |
633 | %if line.modified.lineno: |
|
620 | %if line.modified.lineno: | |
634 | <a name="${new_line_anchor}" href="#${new_line_anchor}">${line.modified.lineno}</a> |
|
621 | <a name="${new_line_anchor}" href="#${new_line_anchor}">${line.modified.lineno}</a> | |
635 | %endif |
|
622 | %endif | |
636 | </td> |
|
623 | </td> | |
637 | <td class="cb-content ${action_class(line.modified.action)}" |
|
624 | <td class="cb-content ${action_class(line.modified.action)}" | |
638 | data-line-no="n${line.modified.lineno}" |
|
625 | data-line-no="n${line.modified.lineno}" | |
639 | > |
|
626 | > | |
640 | %if use_comments and line.modified.lineno: |
|
627 | %if use_comments and line.modified.lineno: | |
641 | ${render_add_comment_button()} |
|
628 | ${render_add_comment_button()} | |
642 | %endif |
|
629 | %endif | |
643 | <span class="cb-code">${line.modified.action} ${line.modified.content or '' | n}</span> |
|
630 | <span class="cb-code">${line.modified.action} ${line.modified.content or '' | n}</span> | |
644 | %if use_comments and line.modified.lineno and line_new_comments: |
|
631 | %if use_comments and line.modified.lineno and line_new_comments: | |
645 | ${inline_comments_container(line_new_comments, inline_comments)} |
|
632 | ${inline_comments_container(line_new_comments, inline_comments)} | |
646 | %endif |
|
633 | %endif | |
647 | </td> |
|
634 | </td> | |
648 | </tr> |
|
635 | </tr> | |
649 | %endfor |
|
636 | %endfor | |
650 | </%def> |
|
637 | </%def> | |
651 |
|
638 | |||
652 |
|
639 | |||
653 | <%def name="render_hunk_lines_unified(hunk, use_comments=False, inline_comments=None)"> |
|
640 | <%def name="render_hunk_lines_unified(hunk, use_comments=False, inline_comments=None)"> | |
654 | %for old_line_no, new_line_no, action, content, comments_args in hunk.unified: |
|
641 | %for old_line_no, new_line_no, action, content, comments_args in hunk.unified: | |
655 | <% |
|
642 | <% | |
656 | old_line_anchor, new_line_anchor = None, None |
|
643 | old_line_anchor, new_line_anchor = None, None | |
657 | if old_line_no: |
|
644 | if old_line_no: | |
658 | old_line_anchor = diff_line_anchor(hunk.source_file_path, old_line_no, 'o') |
|
645 | old_line_anchor = diff_line_anchor(hunk.source_file_path, old_line_no, 'o') | |
659 | if new_line_no: |
|
646 | if new_line_no: | |
660 | new_line_anchor = diff_line_anchor(hunk.target_file_path, new_line_no, 'n') |
|
647 | new_line_anchor = diff_line_anchor(hunk.target_file_path, new_line_no, 'n') | |
661 | %> |
|
648 | %> | |
662 | <tr class="cb-line"> |
|
649 | <tr class="cb-line"> | |
663 | <td class="cb-data ${action_class(action)}"> |
|
650 | <td class="cb-data ${action_class(action)}"> | |
664 | <div> |
|
651 | <div> | |
665 |
|
652 | |||
666 | %if comments_args: |
|
653 | %if comments_args: | |
old | new | line ('+' marks lines added in this changeset)
----+-----+---------------------------------------------------------------------------
667 | 654 |   <% comments = get_comments_for('unified', inline_comments, *comments_args) %>
668 | 655 |   %else:
669 | 656 |   <% comments = None%>
670 | 657 |   %endif
671 | 658 |
672 | 659 |   % if comments:
673 | 660 |   <% has_outdated = any([x.outdated for x in comments]) %>
674 | 661 |   % if has_outdated:
675 | 662 |   <i title="${_('comments including outdated')}:${len(comments)}" class="icon-comment_toggle" onclick="return Rhodecode.comments.toggleLineComments(this)"></i>
676 | 663 |   % else:
677 | 664 |   <i title="${_('comments')}: ${len(comments)}" class="icon-comment" onclick="return Rhodecode.comments.toggleLineComments(this)"></i>
678 | 665 |   % endif
679 | 666 |   % endif
680 | 667 |   </div>
681 | 668 |   </td>
682 | 669 |   <td class="cb-lineno ${action_class(action)}"
683 | 670 |   data-line-no="${old_line_no}"
684 | 671 |   %if old_line_anchor:
685 | 672 |   id="${old_line_anchor}"
686 | 673 |   %endif
687 | 674 |   >
688 | 675 |   %if old_line_anchor:
689 | 676 |   <a name="${old_line_anchor}" href="#${old_line_anchor}">${old_line_no}</a>
690 | 677 |   %endif
691 | 678 |   </td>
692 | 679 |   <td class="cb-lineno ${action_class(action)}"
693 | 680 |   data-line-no="${new_line_no}"
694 | 681 |   %if new_line_anchor:
695 | 682 |   id="${new_line_anchor}"
696 | 683 |   %endif
697 | 684 |   >
698 | 685 |   %if new_line_anchor:
699 | 686 |   <a name="${new_line_anchor}" href="#${new_line_anchor}">${new_line_no}</a>
700 | 687 |   %endif
701 | 688 |   </td>
702 | 689 |   <td class="cb-content ${action_class(action)}"
703 | 690 |   data-line-no="${new_line_no and 'n' or 'o'}${new_line_no or old_line_no}"
704 | 691 |   >
705 | 692 |   %if use_comments:
706 | 693 |   ${render_add_comment_button()}
707 | 694 |   %endif
708 | 695 |   <span class="cb-code">${action} ${content or '' | n}</span>
709 | 696 |   %if use_comments and comments:
710 | 697 |   ${inline_comments_container(comments, inline_comments)}
711 | 698 |   %endif
712 | 699 |   </td>
713 | 700 |   </tr>
714 | 701 |   %endfor
715 | 702 |   </%def>
716 | 703 |
    | 704 | +
    | 705 | + <%def name="render_hunk_lines(diff_mode, hunk, use_comments, inline_comments)">
    | 706 | + % if diff_mode == 'unified':
    | 707 | + ${render_hunk_lines_unified(hunk, use_comments=use_comments, inline_comments=inline_comments)}
    | 708 | + % elif diff_mode == 'sideside':
    | 709 | + ${render_hunk_lines_sideside(hunk, use_comments=use_comments, inline_comments=inline_comments)}
    | 710 | + % else:
    | 711 | + <tr class="cb-line">
    | 712 | + <td>unknown diff mode</td>
    | 713 | + </tr>
    | 714 | + % endif
    | 715 | + </%def>
    | 716 | +
    | 717 | +
717 | 718 |   <%def name="render_add_comment_button()">
718 | 719 |   <button class="btn btn-small btn-primary cb-comment-box-opener" onclick="return Rhodecode.comments.createComment(this)">
719 | 720 |   <span><i class="icon-comment"></i></span>
720 | 721 |   </button>
721 | 722 |   </%def>
722 | 723 |
723 | 724 |   <%def name="render_diffset_menu()">
724 | 725 |
725 | 726 |   <div class="diffset-menu clearinner">
726 | 727 |   <div class="pull-right">
727 | 728 |   <div class="btn-group">
728 | 729 |
729 | 730 |   <a
730 | 731 |   class="btn ${c.diffmode == 'sideside' and 'btn-primary'} tooltip"
731 | 732 |   title="${h.tooltip(_('View side by side'))}"
732 | 733 |   href="${h.current_route_path(request, diffmode='sideside')}">
733 | 734 |   <span>${_('Side by Side')}</span>
734 | 735 |   </a>
735 | 736 |   <a
736 | 737 |   class="btn ${c.diffmode == 'unified' and 'btn-primary'} tooltip"
737 | 738 |   title="${h.tooltip(_('View unified'))}" href="${h.current_route_path(request, diffmode='unified')}">
738 | 739 |   <span>${_('Unified')}</span>
739 | 740 |   </a>
740 | 741 |   </div>
741 | 742 |   </div>
742 | 743 |
743 | 744 |   <div class="pull-left">
744 | 745 |   <div class="btn-group">
745 | 746 |   <a
746 | 747 |   class="btn"
747 | 748 |   href="#"
748 | 749 |   onclick="$('input[class=filediff-collapse-state]').prop('checked', false); return false">${_('Expand All Files')}</a>
749 | 750 |   <a
750 | 751 |   class="btn"
751 | 752 |   href="#"
752 | 753 |   onclick="$('input[class=filediff-collapse-state]').prop('checked', true); return false">${_('Collapse All Files')}</a>
753 | 754 |   <a
754 | 755 |   class="btn"
755 | 756 |   href="#"
756 | 757 |   onclick="return Rhodecode.comments.toggleWideMode(this)">${_('Wide Mode Diff')}</a>
757 | 758 |   </div>
758 | 759 |   </div>
759 | 760 |   </div>
760 | 761 |   </%def>
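
Note on the block added at new lines 705-715: render_hunk_lines is a small dispatcher that hands a hunk to either the unified or the side-by-side renderer depending on the active diff mode. A minimal caller sketch follows; the namespace path, the filediff.hunks loop, and the surrounding table markup are illustrative assumptions, not part of this changeset:

    ## hypothetical caller -- assumes diffs.mako is importable as a Mako namespace
    ## and that filediff (exposing .hunks) and inline_comments are already in scope
    <%namespace name="diff_block" file="/codeblocks/diffs.mako"/>

    <table class="cb">
      % for hunk in filediff.hunks:
        ## c.diffmode is the same 'unified' / 'sideside' flag toggled by render_diffset_menu()
        ${diff_block.render_hunk_lines(c.diffmode, hunk, use_comments=True, inline_comments=inline_comments)}
      % endfor
    </table>

Passing anything other than 'unified' or 'sideside' renders the fallback "unknown diff mode" row added at new line 712.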