Diffs: fixed missing limited diff container display on large diffs
dan - r3409:6b48c2d1 stable

The requested changes are too big and content was truncated.
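For context, the change below switches the per-file limited_diff flag in render_patch() from isinstance(patch, LimitedDiffContainer) to patch['is_limited_diff']. Each patch is a plain dict, so the isinstance check is always false, which is presumably why the "limited diff" notice stopped showing for large diffs. A minimal, hypothetical sketch of how such a flag could be attached at parse time (annotate_limited_diff and its placement are assumptions, not RhodeCode's actual implementation):

    from rhodecode.lib.diffs import LimitedDiffContainer

    def annotate_limited_diff(patchset):
        # Record the container-level "limited diff" state on every patch dict,
        # so rendering code can later read patch['is_limited_diff'] directly
        # instead of needing access to the enclosing container.
        is_limited = isinstance(patchset, LimitedDiffContainer)
        for patch in patchset:
            patch['is_limited_diff'] = is_limited
        return patchset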

@@ -1,775 +1,775 b''
1 # -*- coding: utf-8 -*-
1 # -*- coding: utf-8 -*-
2
2
3 # Copyright (C) 2011-2018 RhodeCode GmbH
3 # Copyright (C) 2011-2018 RhodeCode GmbH
4 #
4 #
5 # This program is free software: you can redistribute it and/or modify
5 # This program is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU Affero General Public License, version 3
6 # it under the terms of the GNU Affero General Public License, version 3
7 # (only), as published by the Free Software Foundation.
7 # (only), as published by the Free Software Foundation.
8 #
8 #
9 # This program is distributed in the hope that it will be useful,
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU General Public License for more details.
12 # GNU General Public License for more details.
13 #
13 #
14 # You should have received a copy of the GNU Affero General Public License
14 # You should have received a copy of the GNU Affero General Public License
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16 #
16 #
17 # This program is dual-licensed. If you wish to learn more about the
17 # This program is dual-licensed. If you wish to learn more about the
18 # RhodeCode Enterprise Edition, including its added features, Support services,
18 # RhodeCode Enterprise Edition, including its added features, Support services,
19 # and proprietary license terms, please see https://rhodecode.com/licenses/
19 # and proprietary license terms, please see https://rhodecode.com/licenses/
20
20
21 import logging
21 import logging
22 import difflib
22 import difflib
23 from itertools import groupby
23 from itertools import groupby
24
24
25 from pygments import lex
25 from pygments import lex
26 from pygments.formatters.html import _get_ttype_class as pygment_token_class
26 from pygments.formatters.html import _get_ttype_class as pygment_token_class
27 from pygments.lexers.special import TextLexer, Token
27 from pygments.lexers.special import TextLexer, Token
28 from pygments.lexers import get_lexer_by_name
28 from pygments.lexers import get_lexer_by_name
29
29
30 from rhodecode.lib.helpers import (
30 from rhodecode.lib.helpers import (
31 get_lexer_for_filenode, html_escape, get_custom_lexer)
31 get_lexer_for_filenode, html_escape, get_custom_lexer)
32 from rhodecode.lib.utils2 import AttributeDict, StrictAttributeDict, safe_unicode
32 from rhodecode.lib.utils2 import AttributeDict, StrictAttributeDict, safe_unicode
33 from rhodecode.lib.vcs.nodes import FileNode
33 from rhodecode.lib.vcs.nodes import FileNode
34 from rhodecode.lib.vcs.exceptions import VCSError, NodeDoesNotExistError
34 from rhodecode.lib.vcs.exceptions import VCSError, NodeDoesNotExistError
35 from rhodecode.lib.diff_match_patch import diff_match_patch
35 from rhodecode.lib.diff_match_patch import diff_match_patch
36 from rhodecode.lib.diffs import LimitedDiffContainer, DEL_FILENODE, BIN_FILENODE
36 from rhodecode.lib.diffs import LimitedDiffContainer, DEL_FILENODE, BIN_FILENODE
37
37
38
38
39 plain_text_lexer = get_lexer_by_name(
39 plain_text_lexer = get_lexer_by_name(
40 'text', stripall=False, stripnl=False, ensurenl=False)
40 'text', stripall=False, stripnl=False, ensurenl=False)
41
41
42
42
43 log = logging.getLogger(__name__)
43 log = logging.getLogger(__name__)
44
44
45
45
46 def filenode_as_lines_tokens(filenode, lexer=None):
46 def filenode_as_lines_tokens(filenode, lexer=None):
47 org_lexer = lexer
47 org_lexer = lexer
48 lexer = lexer or get_lexer_for_filenode(filenode)
48 lexer = lexer or get_lexer_for_filenode(filenode)
49 log.debug('Generating file node pygment tokens for %s, %s, org_lexer:%s',
49 log.debug('Generating file node pygment tokens for %s, %s, org_lexer:%s',
50 lexer, filenode, org_lexer)
50 lexer, filenode, org_lexer)
51 tokens = tokenize_string(filenode.content, lexer)
51 tokens = tokenize_string(filenode.content, lexer)
52 lines = split_token_stream(tokens)
52 lines = split_token_stream(tokens)
53 rv = list(lines)
53 rv = list(lines)
54 return rv
54 return rv
55
55
56
56
57 def tokenize_string(content, lexer):
57 def tokenize_string(content, lexer):
58 """
58 """
59 Use pygments to tokenize some content based on a lexer
59 Use pygments to tokenize some content based on a lexer
60 ensuring all original new lines and whitespace is preserved
60 ensuring all original new lines and whitespace is preserved
61 """
61 """
62
62
63 lexer.stripall = False
63 lexer.stripall = False
64 lexer.stripnl = False
64 lexer.stripnl = False
65 lexer.ensurenl = False
65 lexer.ensurenl = False
66
66
67 if isinstance(lexer, TextLexer):
67 if isinstance(lexer, TextLexer):
68 lexed = [(Token.Text, content)]
68 lexed = [(Token.Text, content)]
69 else:
69 else:
70 lexed = lex(content, lexer)
70 lexed = lex(content, lexer)
71
71
72 for token_type, token_text in lexed:
72 for token_type, token_text in lexed:
73 yield pygment_token_class(token_type), token_text
73 yield pygment_token_class(token_type), token_text
74
74
75
75
76 def split_token_stream(tokens):
76 def split_token_stream(tokens):
77 """
77 """
78 Take a list of (TokenType, text) tuples and split them by a string
78 Take a list of (TokenType, text) tuples and split them by a string
79
79
80 split_token_stream([(TEXT, 'some\ntext'), (TEXT, 'more\n')])
80 split_token_stream([(TEXT, 'some\ntext'), (TEXT, 'more\n')])
81 [(TEXT, 'some'), (TEXT, 'text'),
81 [(TEXT, 'some'), (TEXT, 'text'),
82 (TEXT, 'more'), (TEXT, 'text')]
82 (TEXT, 'more'), (TEXT, 'text')]
83 """
83 """
84
84
85 buffer = []
85 buffer = []
86 for token_class, token_text in tokens:
86 for token_class, token_text in tokens:
87 parts = token_text.split('\n')
87 parts = token_text.split('\n')
88 for part in parts[:-1]:
88 for part in parts[:-1]:
89 buffer.append((token_class, part))
89 buffer.append((token_class, part))
90 yield buffer
90 yield buffer
91 buffer = []
91 buffer = []
92
92
93 buffer.append((token_class, parts[-1]))
93 buffer.append((token_class, parts[-1]))
94
94
95 if buffer:
95 if buffer:
96 yield buffer
96 yield buffer
97
97
98
98
99 def filenode_as_annotated_lines_tokens(filenode):
99 def filenode_as_annotated_lines_tokens(filenode):
100 """
100 """
101 Take a file node and return a list of annotations => lines, if no annotation
101 Take a file node and return a list of annotations => lines, if no annotation
102 is found, it will be None.
102 is found, it will be None.
103
103
104 eg:
104 eg:
105
105
106 [
106 [
107 (annotation1, [
107 (annotation1, [
108 (1, line1_tokens_list),
108 (1, line1_tokens_list),
109 (2, line2_tokens_list),
109 (2, line2_tokens_list),
110 ]),
110 ]),
111 (annotation2, [
111 (annotation2, [
112 (3, line1_tokens_list),
112 (3, line1_tokens_list),
113 ]),
113 ]),
114 (None, [
114 (None, [
115 (4, line1_tokens_list),
115 (4, line1_tokens_list),
116 ]),
116 ]),
117 (annotation1, [
117 (annotation1, [
118 (5, line1_tokens_list),
118 (5, line1_tokens_list),
119 (6, line2_tokens_list),
119 (6, line2_tokens_list),
120 ])
120 ])
121 ]
121 ]
122 """
122 """
123
123
124 commit_cache = {} # cache commit_getter lookups
124 commit_cache = {} # cache commit_getter lookups
125
125
126 def _get_annotation(commit_id, commit_getter):
126 def _get_annotation(commit_id, commit_getter):
127 if commit_id not in commit_cache:
127 if commit_id not in commit_cache:
128 commit_cache[commit_id] = commit_getter()
128 commit_cache[commit_id] = commit_getter()
129 return commit_cache[commit_id]
129 return commit_cache[commit_id]
130
130
131 annotation_lookup = {
131 annotation_lookup = {
132 line_no: _get_annotation(commit_id, commit_getter)
132 line_no: _get_annotation(commit_id, commit_getter)
133 for line_no, commit_id, commit_getter, line_content
133 for line_no, commit_id, commit_getter, line_content
134 in filenode.annotate
134 in filenode.annotate
135 }
135 }
136
136
137 annotations_lines = ((annotation_lookup.get(line_no), line_no, tokens)
137 annotations_lines = ((annotation_lookup.get(line_no), line_no, tokens)
138 for line_no, tokens
138 for line_no, tokens
139 in enumerate(filenode_as_lines_tokens(filenode), 1))
139 in enumerate(filenode_as_lines_tokens(filenode), 1))
140
140
141 grouped_annotations_lines = groupby(annotations_lines, lambda x: x[0])
141 grouped_annotations_lines = groupby(annotations_lines, lambda x: x[0])
142
142
143 for annotation, group in grouped_annotations_lines:
143 for annotation, group in grouped_annotations_lines:
144 yield (
144 yield (
145 annotation, [(line_no, tokens)
145 annotation, [(line_no, tokens)
146 for (_, line_no, tokens) in group]
146 for (_, line_no, tokens) in group]
147 )
147 )
148
148
149
149
150 def render_tokenstream(tokenstream):
150 def render_tokenstream(tokenstream):
151 result = []
151 result = []
152 for token_class, token_ops_texts in rollup_tokenstream(tokenstream):
152 for token_class, token_ops_texts in rollup_tokenstream(tokenstream):
153
153
154 if token_class:
154 if token_class:
155 result.append(u'<span class="%s">' % token_class)
155 result.append(u'<span class="%s">' % token_class)
156 else:
156 else:
157 result.append(u'<span>')
157 result.append(u'<span>')
158
158
159 for op_tag, token_text in token_ops_texts:
159 for op_tag, token_text in token_ops_texts:
160
160
161 if op_tag:
161 if op_tag:
162 result.append(u'<%s>' % op_tag)
162 result.append(u'<%s>' % op_tag)
163
163
164 escaped_text = html_escape(token_text)
164 escaped_text = html_escape(token_text)
165
165
166 # TODO: dan: investigate showing hidden characters like space/nl/tab
166 # TODO: dan: investigate showing hidden characters like space/nl/tab
167 # escaped_text = escaped_text.replace(' ', '<sp> </sp>')
167 # escaped_text = escaped_text.replace(' ', '<sp> </sp>')
168 # escaped_text = escaped_text.replace('\n', '<nl>\n</nl>')
168 # escaped_text = escaped_text.replace('\n', '<nl>\n</nl>')
169 # escaped_text = escaped_text.replace('\t', '<tab>\t</tab>')
169 # escaped_text = escaped_text.replace('\t', '<tab>\t</tab>')
170
170
171 result.append(escaped_text)
171 result.append(escaped_text)
172
172
173 if op_tag:
173 if op_tag:
174 result.append(u'</%s>' % op_tag)
174 result.append(u'</%s>' % op_tag)
175
175
176 result.append(u'</span>')
176 result.append(u'</span>')
177
177
178 html = ''.join(result)
178 html = ''.join(result)
179 return html
179 return html
180
180
181
181
182 def rollup_tokenstream(tokenstream):
182 def rollup_tokenstream(tokenstream):
183 """
183 """
184 Group a token stream of the format:
184 Group a token stream of the format:
185
185
186 ('class', 'op', 'text')
186 ('class', 'op', 'text')
187 or
187 or
188 ('class', 'text')
188 ('class', 'text')
189
189
190 into
190 into
191
191
192 [('class1',
192 [('class1',
193 [('op1', 'text'),
193 [('op1', 'text'),
194 ('op2', 'text')]),
194 ('op2', 'text')]),
195 ('class2',
195 ('class2',
196 [('op3', 'text')])]
196 [('op3', 'text')])]
197
197
198 This is used to get the minimal tags necessary when
198 This is used to get the minimal tags necessary when
199 rendering to html eg for a token stream ie.
199 rendering to html eg for a token stream ie.
200
200
201 <span class="A"><ins>he</ins>llo</span>
201 <span class="A"><ins>he</ins>llo</span>
202 vs
202 vs
203 <span class="A"><ins>he</ins></span><span class="A">llo</span>
203 <span class="A"><ins>he</ins></span><span class="A">llo</span>
204
204
205 If a 2 tuple is passed in, the output op will be an empty string.
205 If a 2 tuple is passed in, the output op will be an empty string.
206
206
207 eg:
207 eg:
208
208
209 >>> rollup_tokenstream([('classA', '', 'h'),
209 >>> rollup_tokenstream([('classA', '', 'h'),
210 ('classA', 'del', 'ell'),
210 ('classA', 'del', 'ell'),
211 ('classA', '', 'o'),
211 ('classA', '', 'o'),
212 ('classB', '', ' '),
212 ('classB', '', ' '),
213 ('classA', '', 'the'),
213 ('classA', '', 'the'),
214 ('classA', '', 're'),
214 ('classA', '', 're'),
215 ])
215 ])
216
216
217 [('classA', [('', 'h'), ('del', 'ell'), ('', 'o')],
217 [('classA', [('', 'h'), ('del', 'ell'), ('', 'o')],
218 ('classB', [('', ' ')],
218 ('classB', [('', ' ')],
219 ('classA', [('', 'there')]]
219 ('classA', [('', 'there')]]
220
220
221 """
221 """
222 if tokenstream and len(tokenstream[0]) == 2:
222 if tokenstream and len(tokenstream[0]) == 2:
223 tokenstream = ((t[0], '', t[1]) for t in tokenstream)
223 tokenstream = ((t[0], '', t[1]) for t in tokenstream)
224
224
225 result = []
225 result = []
226 for token_class, op_list in groupby(tokenstream, lambda t: t[0]):
226 for token_class, op_list in groupby(tokenstream, lambda t: t[0]):
227 ops = []
227 ops = []
228 for token_op, token_text_list in groupby(op_list, lambda o: o[1]):
228 for token_op, token_text_list in groupby(op_list, lambda o: o[1]):
229 text_buffer = []
229 text_buffer = []
230 for t_class, t_op, t_text in token_text_list:
230 for t_class, t_op, t_text in token_text_list:
231 text_buffer.append(t_text)
231 text_buffer.append(t_text)
232 ops.append((token_op, ''.join(text_buffer)))
232 ops.append((token_op, ''.join(text_buffer)))
233 result.append((token_class, ops))
233 result.append((token_class, ops))
234 return result
234 return result
235
235
236
236
237 def tokens_diff(old_tokens, new_tokens, use_diff_match_patch=True):
237 def tokens_diff(old_tokens, new_tokens, use_diff_match_patch=True):
238 """
238 """
239 Converts a list of (token_class, token_text) tuples to a list of
239 Converts a list of (token_class, token_text) tuples to a list of
240 (token_class, token_op, token_text) tuples where token_op is one of
240 (token_class, token_op, token_text) tuples where token_op is one of
241 ('ins', 'del', '')
241 ('ins', 'del', '')
242
242
243 :param old_tokens: list of (token_class, token_text) tuples of old line
243 :param old_tokens: list of (token_class, token_text) tuples of old line
244 :param new_tokens: list of (token_class, token_text) tuples of new line
244 :param new_tokens: list of (token_class, token_text) tuples of new line
245 :param use_diff_match_patch: boolean, will use google's diff match patch
245 :param use_diff_match_patch: boolean, will use google's diff match patch
246 library which has options to 'smooth' out the character by character
246 library which has options to 'smooth' out the character by character
247 differences making nicer ins/del blocks
247 differences making nicer ins/del blocks
248 """
248 """
249
249
250 old_tokens_result = []
250 old_tokens_result = []
251 new_tokens_result = []
251 new_tokens_result = []
252
252
253 similarity = difflib.SequenceMatcher(None,
253 similarity = difflib.SequenceMatcher(None,
254 ''.join(token_text for token_class, token_text in old_tokens),
254 ''.join(token_text for token_class, token_text in old_tokens),
255 ''.join(token_text for token_class, token_text in new_tokens)
255 ''.join(token_text for token_class, token_text in new_tokens)
256 ).ratio()
256 ).ratio()
257
257
258 if similarity < 0.6: # return, the blocks are too different
258 if similarity < 0.6: # return, the blocks are too different
259 for token_class, token_text in old_tokens:
259 for token_class, token_text in old_tokens:
260 old_tokens_result.append((token_class, '', token_text))
260 old_tokens_result.append((token_class, '', token_text))
261 for token_class, token_text in new_tokens:
261 for token_class, token_text in new_tokens:
262 new_tokens_result.append((token_class, '', token_text))
262 new_tokens_result.append((token_class, '', token_text))
263 return old_tokens_result, new_tokens_result, similarity
263 return old_tokens_result, new_tokens_result, similarity
264
264
265 token_sequence_matcher = difflib.SequenceMatcher(None,
265 token_sequence_matcher = difflib.SequenceMatcher(None,
266 [x[1] for x in old_tokens],
266 [x[1] for x in old_tokens],
267 [x[1] for x in new_tokens])
267 [x[1] for x in new_tokens])
268
268
269 for tag, o1, o2, n1, n2 in token_sequence_matcher.get_opcodes():
269 for tag, o1, o2, n1, n2 in token_sequence_matcher.get_opcodes():
270 # check the differences by token block types first to give a more
270 # check the differences by token block types first to give a more
271 # nicer "block" level replacement vs character diffs
271 # nicer "block" level replacement vs character diffs
272
272
273 if tag == 'equal':
273 if tag == 'equal':
274 for token_class, token_text in old_tokens[o1:o2]:
274 for token_class, token_text in old_tokens[o1:o2]:
275 old_tokens_result.append((token_class, '', token_text))
275 old_tokens_result.append((token_class, '', token_text))
276 for token_class, token_text in new_tokens[n1:n2]:
276 for token_class, token_text in new_tokens[n1:n2]:
277 new_tokens_result.append((token_class, '', token_text))
277 new_tokens_result.append((token_class, '', token_text))
278 elif tag == 'delete':
278 elif tag == 'delete':
279 for token_class, token_text in old_tokens[o1:o2]:
279 for token_class, token_text in old_tokens[o1:o2]:
280 old_tokens_result.append((token_class, 'del', token_text))
280 old_tokens_result.append((token_class, 'del', token_text))
281 elif tag == 'insert':
281 elif tag == 'insert':
282 for token_class, token_text in new_tokens[n1:n2]:
282 for token_class, token_text in new_tokens[n1:n2]:
283 new_tokens_result.append((token_class, 'ins', token_text))
283 new_tokens_result.append((token_class, 'ins', token_text))
284 elif tag == 'replace':
284 elif tag == 'replace':
285 # if same type token blocks must be replaced, do a diff on the
285 # if same type token blocks must be replaced, do a diff on the
286 # characters in the token blocks to show individual changes
286 # characters in the token blocks to show individual changes
287
287
288 old_char_tokens = []
288 old_char_tokens = []
289 new_char_tokens = []
289 new_char_tokens = []
290 for token_class, token_text in old_tokens[o1:o2]:
290 for token_class, token_text in old_tokens[o1:o2]:
291 for char in token_text:
291 for char in token_text:
292 old_char_tokens.append((token_class, char))
292 old_char_tokens.append((token_class, char))
293
293
294 for token_class, token_text in new_tokens[n1:n2]:
294 for token_class, token_text in new_tokens[n1:n2]:
295 for char in token_text:
295 for char in token_text:
296 new_char_tokens.append((token_class, char))
296 new_char_tokens.append((token_class, char))
297
297
298 old_string = ''.join([token_text for
298 old_string = ''.join([token_text for
299 token_class, token_text in old_char_tokens])
299 token_class, token_text in old_char_tokens])
300 new_string = ''.join([token_text for
300 new_string = ''.join([token_text for
301 token_class, token_text in new_char_tokens])
301 token_class, token_text in new_char_tokens])
302
302
303 char_sequence = difflib.SequenceMatcher(
303 char_sequence = difflib.SequenceMatcher(
304 None, old_string, new_string)
304 None, old_string, new_string)
305 copcodes = char_sequence.get_opcodes()
305 copcodes = char_sequence.get_opcodes()
306 obuffer, nbuffer = [], []
306 obuffer, nbuffer = [], []
307
307
308 if use_diff_match_patch:
308 if use_diff_match_patch:
309 dmp = diff_match_patch()
309 dmp = diff_match_patch()
310 dmp.Diff_EditCost = 11 # TODO: dan: extract this to a setting
310 dmp.Diff_EditCost = 11 # TODO: dan: extract this to a setting
311 reps = dmp.diff_main(old_string, new_string)
311 reps = dmp.diff_main(old_string, new_string)
312 dmp.diff_cleanupEfficiency(reps)
312 dmp.diff_cleanupEfficiency(reps)
313
313
314 a, b = 0, 0
314 a, b = 0, 0
315 for op, rep in reps:
315 for op, rep in reps:
316 l = len(rep)
316 l = len(rep)
317 if op == 0:
317 if op == 0:
318 for i, c in enumerate(rep):
318 for i, c in enumerate(rep):
319 obuffer.append((old_char_tokens[a+i][0], '', c))
319 obuffer.append((old_char_tokens[a+i][0], '', c))
320 nbuffer.append((new_char_tokens[b+i][0], '', c))
320 nbuffer.append((new_char_tokens[b+i][0], '', c))
321 a += l
321 a += l
322 b += l
322 b += l
323 elif op == -1:
323 elif op == -1:
324 for i, c in enumerate(rep):
324 for i, c in enumerate(rep):
325 obuffer.append((old_char_tokens[a+i][0], 'del', c))
325 obuffer.append((old_char_tokens[a+i][0], 'del', c))
326 a += l
326 a += l
327 elif op == 1:
327 elif op == 1:
328 for i, c in enumerate(rep):
328 for i, c in enumerate(rep):
329 nbuffer.append((new_char_tokens[b+i][0], 'ins', c))
329 nbuffer.append((new_char_tokens[b+i][0], 'ins', c))
330 b += l
330 b += l
331 else:
331 else:
332 for ctag, co1, co2, cn1, cn2 in copcodes:
332 for ctag, co1, co2, cn1, cn2 in copcodes:
333 if ctag == 'equal':
333 if ctag == 'equal':
334 for token_class, token_text in old_char_tokens[co1:co2]:
334 for token_class, token_text in old_char_tokens[co1:co2]:
335 obuffer.append((token_class, '', token_text))
335 obuffer.append((token_class, '', token_text))
336 for token_class, token_text in new_char_tokens[cn1:cn2]:
336 for token_class, token_text in new_char_tokens[cn1:cn2]:
337 nbuffer.append((token_class, '', token_text))
337 nbuffer.append((token_class, '', token_text))
338 elif ctag == 'delete':
338 elif ctag == 'delete':
339 for token_class, token_text in old_char_tokens[co1:co2]:
339 for token_class, token_text in old_char_tokens[co1:co2]:
340 obuffer.append((token_class, 'del', token_text))
340 obuffer.append((token_class, 'del', token_text))
341 elif ctag == 'insert':
341 elif ctag == 'insert':
342 for token_class, token_text in new_char_tokens[cn1:cn2]:
342 for token_class, token_text in new_char_tokens[cn1:cn2]:
343 nbuffer.append((token_class, 'ins', token_text))
343 nbuffer.append((token_class, 'ins', token_text))
344 elif ctag == 'replace':
344 elif ctag == 'replace':
345 for token_class, token_text in old_char_tokens[co1:co2]:
345 for token_class, token_text in old_char_tokens[co1:co2]:
346 obuffer.append((token_class, 'del', token_text))
346 obuffer.append((token_class, 'del', token_text))
347 for token_class, token_text in new_char_tokens[cn1:cn2]:
347 for token_class, token_text in new_char_tokens[cn1:cn2]:
348 nbuffer.append((token_class, 'ins', token_text))
348 nbuffer.append((token_class, 'ins', token_text))
349
349
350 old_tokens_result.extend(obuffer)
350 old_tokens_result.extend(obuffer)
351 new_tokens_result.extend(nbuffer)
351 new_tokens_result.extend(nbuffer)
352
352
353 return old_tokens_result, new_tokens_result, similarity
353 return old_tokens_result, new_tokens_result, similarity
354
354
355
355
356 def diffset_node_getter(commit):
356 def diffset_node_getter(commit):
357 def get_node(fname):
357 def get_node(fname):
358 try:
358 try:
359 return commit.get_node(fname)
359 return commit.get_node(fname)
360 except NodeDoesNotExistError:
360 except NodeDoesNotExistError:
361 return None
361 return None
362
362
363 return get_node
363 return get_node
364
364
365
365
366 class DiffSet(object):
366 class DiffSet(object):
367 """
367 """
368 An object for parsing the diff result from diffs.DiffProcessor and
368 An object for parsing the diff result from diffs.DiffProcessor and
369 adding highlighting, side by side/unified renderings and line diffs
369 adding highlighting, side by side/unified renderings and line diffs
370 """
370 """
371
371
372 HL_REAL = 'REAL' # highlights using original file, slow
372 HL_REAL = 'REAL' # highlights using original file, slow
373 HL_FAST = 'FAST' # highlights using just the line, fast but not correct
373 HL_FAST = 'FAST' # highlights using just the line, fast but not correct
374 # in the case of multiline code
374 # in the case of multiline code
375 HL_NONE = 'NONE' # no highlighting, fastest
375 HL_NONE = 'NONE' # no highlighting, fastest
376
376
377 def __init__(self, highlight_mode=HL_REAL, repo_name=None,
377 def __init__(self, highlight_mode=HL_REAL, repo_name=None,
378 source_repo_name=None,
378 source_repo_name=None,
379 source_node_getter=lambda filename: None,
379 source_node_getter=lambda filename: None,
380 target_repo_name=None,
380 target_repo_name=None,
381 target_node_getter=lambda filename: None,
381 target_node_getter=lambda filename: None,
382 source_nodes=None, target_nodes=None,
382 source_nodes=None, target_nodes=None,
383 # files over this size will use fast highlighting
383 # files over this size will use fast highlighting
384 max_file_size_limit=150 * 1024,
384 max_file_size_limit=150 * 1024,
385 ):
385 ):
386
386
387 self.highlight_mode = highlight_mode
387 self.highlight_mode = highlight_mode
388 self.highlighted_filenodes = {}
388 self.highlighted_filenodes = {}
389 self.source_node_getter = source_node_getter
389 self.source_node_getter = source_node_getter
390 self.target_node_getter = target_node_getter
390 self.target_node_getter = target_node_getter
391 self.source_nodes = source_nodes or {}
391 self.source_nodes = source_nodes or {}
392 self.target_nodes = target_nodes or {}
392 self.target_nodes = target_nodes or {}
393 self.repo_name = repo_name
393 self.repo_name = repo_name
394 self.target_repo_name = target_repo_name or repo_name
394 self.target_repo_name = target_repo_name or repo_name
395 self.source_repo_name = source_repo_name or repo_name
395 self.source_repo_name = source_repo_name or repo_name
396 self.max_file_size_limit = max_file_size_limit
396 self.max_file_size_limit = max_file_size_limit
397
397
398 def render_patchset(self, patchset, source_ref=None, target_ref=None):
398 def render_patchset(self, patchset, source_ref=None, target_ref=None):
399 diffset = AttributeDict(dict(
399 diffset = AttributeDict(dict(
400 lines_added=0,
400 lines_added=0,
401 lines_deleted=0,
401 lines_deleted=0,
402 changed_files=0,
402 changed_files=0,
403 files=[],
403 files=[],
404 file_stats={},
404 file_stats={},
405 limited_diff=isinstance(patchset, LimitedDiffContainer),
405 limited_diff=isinstance(patchset, LimitedDiffContainer),
406 repo_name=self.repo_name,
406 repo_name=self.repo_name,
407 target_repo_name=self.target_repo_name,
407 target_repo_name=self.target_repo_name,
408 source_repo_name=self.source_repo_name,
408 source_repo_name=self.source_repo_name,
409 source_ref=source_ref,
409 source_ref=source_ref,
410 target_ref=target_ref,
410 target_ref=target_ref,
411 ))
411 ))
412 for patch in patchset:
412 for patch in patchset:
413 diffset.file_stats[patch['filename']] = patch['stats']
413 diffset.file_stats[patch['filename']] = patch['stats']
414 filediff = self.render_patch(patch)
414 filediff = self.render_patch(patch)
415 filediff.diffset = StrictAttributeDict(dict(
415 filediff.diffset = StrictAttributeDict(dict(
416 source_ref=diffset.source_ref,
416 source_ref=diffset.source_ref,
417 target_ref=diffset.target_ref,
417 target_ref=diffset.target_ref,
418 repo_name=diffset.repo_name,
418 repo_name=diffset.repo_name,
419 source_repo_name=diffset.source_repo_name,
419 source_repo_name=diffset.source_repo_name,
420 target_repo_name=diffset.target_repo_name,
420 target_repo_name=diffset.target_repo_name,
421 ))
421 ))
422 diffset.files.append(filediff)
422 diffset.files.append(filediff)
423 diffset.changed_files += 1
423 diffset.changed_files += 1
424 if not patch['stats']['binary']:
424 if not patch['stats']['binary']:
425 diffset.lines_added += patch['stats']['added']
425 diffset.lines_added += patch['stats']['added']
426 diffset.lines_deleted += patch['stats']['deleted']
426 diffset.lines_deleted += patch['stats']['deleted']
427
427
428 return diffset
428 return diffset
429
429
430 _lexer_cache = {}
430 _lexer_cache = {}
431
431
432 def _get_lexer_for_filename(self, filename, filenode=None):
432 def _get_lexer_for_filename(self, filename, filenode=None):
433 # cached because we might need to call it twice for source/target
433 # cached because we might need to call it twice for source/target
434 if filename not in self._lexer_cache:
434 if filename not in self._lexer_cache:
435 if filenode:
435 if filenode:
436 lexer = filenode.lexer
436 lexer = filenode.lexer
437 extension = filenode.extension
437 extension = filenode.extension
438 else:
438 else:
439 lexer = FileNode.get_lexer(filename=filename)
439 lexer = FileNode.get_lexer(filename=filename)
440 extension = filename.split('.')[-1]
440 extension = filename.split('.')[-1]
441
441
442 lexer = get_custom_lexer(extension) or lexer
442 lexer = get_custom_lexer(extension) or lexer
443 self._lexer_cache[filename] = lexer
443 self._lexer_cache[filename] = lexer
444 return self._lexer_cache[filename]
444 return self._lexer_cache[filename]
445
445
446 def render_patch(self, patch):
446 def render_patch(self, patch):
447 log.debug('rendering diff for %r', patch['filename'])
447 log.debug('rendering diff for %r', patch['filename'])
448
448
449 source_filename = patch['original_filename']
449 source_filename = patch['original_filename']
450 target_filename = patch['filename']
450 target_filename = patch['filename']
451
451
452 source_lexer = plain_text_lexer
452 source_lexer = plain_text_lexer
453 target_lexer = plain_text_lexer
453 target_lexer = plain_text_lexer
454
454
455 if not patch['stats']['binary']:
455 if not patch['stats']['binary']:
456 node_hl_mode = self.HL_NONE if patch['chunks'] == [] else None
456 node_hl_mode = self.HL_NONE if patch['chunks'] == [] else None
457 hl_mode = node_hl_mode or self.highlight_mode
457 hl_mode = node_hl_mode or self.highlight_mode
458
458
459 if hl_mode == self.HL_REAL:
459 if hl_mode == self.HL_REAL:
460 if (source_filename and patch['operation'] in ('D', 'M')
460 if (source_filename and patch['operation'] in ('D', 'M')
461 and source_filename not in self.source_nodes):
461 and source_filename not in self.source_nodes):
462 self.source_nodes[source_filename] = (
462 self.source_nodes[source_filename] = (
463 self.source_node_getter(source_filename))
463 self.source_node_getter(source_filename))
464
464
465 if (target_filename and patch['operation'] in ('A', 'M')
465 if (target_filename and patch['operation'] in ('A', 'M')
466 and target_filename not in self.target_nodes):
466 and target_filename not in self.target_nodes):
467 self.target_nodes[target_filename] = (
467 self.target_nodes[target_filename] = (
468 self.target_node_getter(target_filename))
468 self.target_node_getter(target_filename))
469
469
470 elif hl_mode == self.HL_FAST:
470 elif hl_mode == self.HL_FAST:
471 source_lexer = self._get_lexer_for_filename(source_filename)
471 source_lexer = self._get_lexer_for_filename(source_filename)
472 target_lexer = self._get_lexer_for_filename(target_filename)
472 target_lexer = self._get_lexer_for_filename(target_filename)
473
473
474 source_file = self.source_nodes.get(source_filename, source_filename)
474 source_file = self.source_nodes.get(source_filename, source_filename)
475 target_file = self.target_nodes.get(target_filename, target_filename)
475 target_file = self.target_nodes.get(target_filename, target_filename)
476 raw_id_uid = ''
476 raw_id_uid = ''
477 if self.source_nodes.get(source_filename):
477 if self.source_nodes.get(source_filename):
478 raw_id_uid = self.source_nodes[source_filename].commit.raw_id
478 raw_id_uid = self.source_nodes[source_filename].commit.raw_id
479
479
480 if not raw_id_uid and self.target_nodes.get(target_filename):
480 if not raw_id_uid and self.target_nodes.get(target_filename):
481 # in case this is a new file we only have it in target
481 # in case this is a new file we only have it in target
482 raw_id_uid = self.target_nodes[target_filename].commit.raw_id
482 raw_id_uid = self.target_nodes[target_filename].commit.raw_id
483
483
484 source_filenode, target_filenode = None, None
484 source_filenode, target_filenode = None, None
485
485
486 # TODO: dan: FileNode.lexer works on the content of the file - which
486 # TODO: dan: FileNode.lexer works on the content of the file - which
487 # can be slow - issue #4289 explains a lexer clean up - which once
487 # can be slow - issue #4289 explains a lexer clean up - which once
488 # done can allow caching a lexer for a filenode to avoid the file lookup
488 # done can allow caching a lexer for a filenode to avoid the file lookup
489 if isinstance(source_file, FileNode):
489 if isinstance(source_file, FileNode):
490 source_filenode = source_file
490 source_filenode = source_file
491 #source_lexer = source_file.lexer
491 #source_lexer = source_file.lexer
492 source_lexer = self._get_lexer_for_filename(source_filename)
492 source_lexer = self._get_lexer_for_filename(source_filename)
493 source_file.lexer = source_lexer
493 source_file.lexer = source_lexer
494
494
495 if isinstance(target_file, FileNode):
495 if isinstance(target_file, FileNode):
496 target_filenode = target_file
496 target_filenode = target_file
497 #target_lexer = target_file.lexer
497 #target_lexer = target_file.lexer
498 target_lexer = self._get_lexer_for_filename(target_filename)
498 target_lexer = self._get_lexer_for_filename(target_filename)
499 target_file.lexer = target_lexer
499 target_file.lexer = target_lexer
500
500
501 source_file_path, target_file_path = None, None
501 source_file_path, target_file_path = None, None
502
502
503 if source_filename != '/dev/null':
503 if source_filename != '/dev/null':
504 source_file_path = source_filename
504 source_file_path = source_filename
505 if target_filename != '/dev/null':
505 if target_filename != '/dev/null':
506 target_file_path = target_filename
506 target_file_path = target_filename
507
507
508 source_file_type = source_lexer.name
508 source_file_type = source_lexer.name
509 target_file_type = target_lexer.name
509 target_file_type = target_lexer.name
510
510
511 filediff = AttributeDict({
511 filediff = AttributeDict({
512 'source_file_path': source_file_path,
512 'source_file_path': source_file_path,
513 'target_file_path': target_file_path,
513 'target_file_path': target_file_path,
514 'source_filenode': source_filenode,
514 'source_filenode': source_filenode,
515 'target_filenode': target_filenode,
515 'target_filenode': target_filenode,
516 'source_file_type': target_file_type,
516 'source_file_type': target_file_type,
517 'target_file_type': source_file_type,
517 'target_file_type': source_file_type,
518 'patch': {'filename': patch['filename'], 'stats': patch['stats']},
518 'patch': {'filename': patch['filename'], 'stats': patch['stats']},
519 'operation': patch['operation'],
519 'operation': patch['operation'],
520 'source_mode': patch['stats']['old_mode'],
520 'source_mode': patch['stats']['old_mode'],
521 'target_mode': patch['stats']['new_mode'],
521 'target_mode': patch['stats']['new_mode'],
522 'limited_diff': isinstance(patch, LimitedDiffContainer),
522 'limited_diff': patch['is_limited_diff'],
523 'hunks': [],
523 'hunks': [],
524 'hunk_ops': None,
524 'hunk_ops': None,
525 'diffset': self,
525 'diffset': self,
526 'raw_id': raw_id_uid,
526 'raw_id': raw_id_uid,
527 })
527 })
528
528
529 file_chunks = patch['chunks'][1:]
529 file_chunks = patch['chunks'][1:]
530 for hunk in file_chunks:
530 for hunk in file_chunks:
531 hunkbit = self.parse_hunk(hunk, source_file, target_file)
531 hunkbit = self.parse_hunk(hunk, source_file, target_file)
532 hunkbit.source_file_path = source_file_path
532 hunkbit.source_file_path = source_file_path
533 hunkbit.target_file_path = target_file_path
533 hunkbit.target_file_path = target_file_path
534 filediff.hunks.append(hunkbit)
534 filediff.hunks.append(hunkbit)
535
535
536 # Simulate hunk on OPS type line which doesn't really contain any diff
536 # Simulate hunk on OPS type line which doesn't really contain any diff
537 # this allows commenting on those
537 # this allows commenting on those
538 if not file_chunks:
538 if not file_chunks:
539 actions = []
539 actions = []
540 for op_id, op_text in filediff.patch['stats']['ops'].items():
540 for op_id, op_text in filediff.patch['stats']['ops'].items():
541 if op_id == DEL_FILENODE:
541 if op_id == DEL_FILENODE:
542 actions.append(u'file was removed')
542 actions.append(u'file was removed')
543 elif op_id == BIN_FILENODE:
543 elif op_id == BIN_FILENODE:
544 actions.append(u'binary diff hidden')
544 actions.append(u'binary diff hidden')
545 else:
545 else:
546 actions.append(safe_unicode(op_text))
546 actions.append(safe_unicode(op_text))
547 action_line = u'NO CONTENT: ' + \
547 action_line = u'NO CONTENT: ' + \
548 u', '.join(actions) or u'UNDEFINED_ACTION'
548 u', '.join(actions) or u'UNDEFINED_ACTION'
549
549
550 hunk_ops = {'source_length': 0, 'source_start': 0,
550 hunk_ops = {'source_length': 0, 'source_start': 0,
551 'lines': [
551 'lines': [
552 {'new_lineno': 0, 'old_lineno': 1,
552 {'new_lineno': 0, 'old_lineno': 1,
553 'action': 'unmod-no-hl', 'line': action_line}
553 'action': 'unmod-no-hl', 'line': action_line}
554 ],
554 ],
555 'section_header': u'', 'target_start': 1, 'target_length': 1}
555 'section_header': u'', 'target_start': 1, 'target_length': 1}
556
556
557 hunkbit = self.parse_hunk(hunk_ops, source_file, target_file)
557 hunkbit = self.parse_hunk(hunk_ops, source_file, target_file)
558 hunkbit.source_file_path = source_file_path
558 hunkbit.source_file_path = source_file_path
559 hunkbit.target_file_path = target_file_path
559 hunkbit.target_file_path = target_file_path
560 filediff.hunk_ops = hunkbit
560 filediff.hunk_ops = hunkbit
561 return filediff
561 return filediff
562
562
563 def parse_hunk(self, hunk, source_file, target_file):
563 def parse_hunk(self, hunk, source_file, target_file):
564 result = AttributeDict(dict(
564 result = AttributeDict(dict(
565 source_start=hunk['source_start'],
565 source_start=hunk['source_start'],
566 source_length=hunk['source_length'],
566 source_length=hunk['source_length'],
567 target_start=hunk['target_start'],
567 target_start=hunk['target_start'],
568 target_length=hunk['target_length'],
568 target_length=hunk['target_length'],
569 section_header=hunk['section_header'],
569 section_header=hunk['section_header'],
570 lines=[],
570 lines=[],
571 ))
571 ))
572 before, after = [], []
572 before, after = [], []
573
573
574 for line in hunk['lines']:
574 for line in hunk['lines']:
575 if line['action'] in ['unmod', 'unmod-no-hl']:
575 if line['action'] in ['unmod', 'unmod-no-hl']:
576 no_hl = line['action'] == 'unmod-no-hl'
576 no_hl = line['action'] == 'unmod-no-hl'
577 result.lines.extend(
577 result.lines.extend(
578 self.parse_lines(before, after, source_file, target_file, no_hl=no_hl))
578 self.parse_lines(before, after, source_file, target_file, no_hl=no_hl))
579 after.append(line)
579 after.append(line)
580 before.append(line)
580 before.append(line)
581 elif line['action'] == 'add':
581 elif line['action'] == 'add':
582 after.append(line)
582 after.append(line)
583 elif line['action'] == 'del':
583 elif line['action'] == 'del':
584 before.append(line)
584 before.append(line)
585 elif line['action'] == 'old-no-nl':
585 elif line['action'] == 'old-no-nl':
586 before.append(line)
586 before.append(line)
587 elif line['action'] == 'new-no-nl':
587 elif line['action'] == 'new-no-nl':
588 after.append(line)
588 after.append(line)
589
589
590 all_actions = [x['action'] for x in after] + [x['action'] for x in before]
590 all_actions = [x['action'] for x in after] + [x['action'] for x in before]
591 no_hl = {x for x in all_actions} == {'unmod-no-hl'}
591 no_hl = {x for x in all_actions} == {'unmod-no-hl'}
592 result.lines.extend(
592 result.lines.extend(
593 self.parse_lines(before, after, source_file, target_file, no_hl=no_hl))
593 self.parse_lines(before, after, source_file, target_file, no_hl=no_hl))
594 # NOTE(marcink): we must keep list() call here so we can cache the result...
594 # NOTE(marcink): we must keep list() call here so we can cache the result...
595 result.unified = list(self.as_unified(result.lines))
595 result.unified = list(self.as_unified(result.lines))
596 result.sideside = result.lines
596 result.sideside = result.lines
597
597
598 return result
598 return result
599
599
600 def parse_lines(self, before_lines, after_lines, source_file, target_file,
600 def parse_lines(self, before_lines, after_lines, source_file, target_file,
601 no_hl=False):
601 no_hl=False):
602 # TODO: dan: investigate doing the diff comparison and fast highlighting
602 # TODO: dan: investigate doing the diff comparison and fast highlighting
603 # on the entire before and after buffered block lines rather than by
603 # on the entire before and after buffered block lines rather than by
604 # line, this means we can get better 'fast' highlighting if the context
604 # line, this means we can get better 'fast' highlighting if the context
605 # allows it - eg.
605 # allows it - eg.
606 # line 4: """
606 # line 4: """
607 # line 5: this gets highlighted as a string
607 # line 5: this gets highlighted as a string
608 # line 6: """
608 # line 6: """
609
609
610 lines = []
610 lines = []
611
611
612 before_newline = AttributeDict()
612 before_newline = AttributeDict()
613 after_newline = AttributeDict()
613 after_newline = AttributeDict()
614 if before_lines and before_lines[-1]['action'] == 'old-no-nl':
614 if before_lines and before_lines[-1]['action'] == 'old-no-nl':
615 before_newline_line = before_lines.pop(-1)
615 before_newline_line = before_lines.pop(-1)
616 before_newline.content = '\n {}'.format(
616 before_newline.content = '\n {}'.format(
617 render_tokenstream(
617 render_tokenstream(
618 [(x[0], '', x[1])
618 [(x[0], '', x[1])
619 for x in [('nonl', before_newline_line['line'])]]))
619 for x in [('nonl', before_newline_line['line'])]]))
620
620
621 if after_lines and after_lines[-1]['action'] == 'new-no-nl':
621 if after_lines and after_lines[-1]['action'] == 'new-no-nl':
622 after_newline_line = after_lines.pop(-1)
622 after_newline_line = after_lines.pop(-1)
623 after_newline.content = '\n {}'.format(
623 after_newline.content = '\n {}'.format(
624 render_tokenstream(
624 render_tokenstream(
625 [(x[0], '', x[1])
625 [(x[0], '', x[1])
626 for x in [('nonl', after_newline_line['line'])]]))
626 for x in [('nonl', after_newline_line['line'])]]))
627
627
628 while before_lines or after_lines:
628 while before_lines or after_lines:
629 before, after = None, None
629 before, after = None, None
630 before_tokens, after_tokens = None, None
630 before_tokens, after_tokens = None, None
631
631
632 if before_lines:
632 if before_lines:
633 before = before_lines.pop(0)
633 before = before_lines.pop(0)
634 if after_lines:
634 if after_lines:
635 after = after_lines.pop(0)
635 after = after_lines.pop(0)
636
636
637 original = AttributeDict()
637 original = AttributeDict()
638 modified = AttributeDict()
638 modified = AttributeDict()
639
639
640 if before:
640 if before:
641 if before['action'] == 'old-no-nl':
641 if before['action'] == 'old-no-nl':
642 before_tokens = [('nonl', before['line'])]
642 before_tokens = [('nonl', before['line'])]
643 else:
643 else:
644 before_tokens = self.get_line_tokens(
644 before_tokens = self.get_line_tokens(
645 line_text=before['line'], line_number=before['old_lineno'],
645 line_text=before['line'], line_number=before['old_lineno'],
646 input_file=source_file, no_hl=no_hl)
646 input_file=source_file, no_hl=no_hl)
647 original.lineno = before['old_lineno']
647 original.lineno = before['old_lineno']
648 original.content = before['line']
648 original.content = before['line']
649 original.action = self.action_to_op(before['action'])
649 original.action = self.action_to_op(before['action'])
650
650
651 original.get_comment_args = (
651 original.get_comment_args = (
652 source_file, 'o', before['old_lineno'])
652 source_file, 'o', before['old_lineno'])
653
653
654 if after:
654 if after:
655 if after['action'] == 'new-no-nl':
655 if after['action'] == 'new-no-nl':
656 after_tokens = [('nonl', after['line'])]
656 after_tokens = [('nonl', after['line'])]
657 else:
657 else:
658 after_tokens = self.get_line_tokens(
658 after_tokens = self.get_line_tokens(
659 line_text=after['line'], line_number=after['new_lineno'],
659 line_text=after['line'], line_number=after['new_lineno'],
660 input_file=target_file, no_hl=no_hl)
660 input_file=target_file, no_hl=no_hl)
661 modified.lineno = after['new_lineno']
661 modified.lineno = after['new_lineno']
662 modified.content = after['line']
662 modified.content = after['line']
663 modified.action = self.action_to_op(after['action'])
663 modified.action = self.action_to_op(after['action'])
664
664
665 modified.get_comment_args = (target_file, 'n', after['new_lineno'])
665 modified.get_comment_args = (target_file, 'n', after['new_lineno'])
666
666
667 # diff the lines
667 # diff the lines
668 if before_tokens and after_tokens:
668 if before_tokens and after_tokens:
669 o_tokens, m_tokens, similarity = tokens_diff(
669 o_tokens, m_tokens, similarity = tokens_diff(
670 before_tokens, after_tokens)
670 before_tokens, after_tokens)
671 original.content = render_tokenstream(o_tokens)
671 original.content = render_tokenstream(o_tokens)
672 modified.content = render_tokenstream(m_tokens)
672 modified.content = render_tokenstream(m_tokens)
673 elif before_tokens:
673 elif before_tokens:
674 original.content = render_tokenstream(
674 original.content = render_tokenstream(
675 [(x[0], '', x[1]) for x in before_tokens])
675 [(x[0], '', x[1]) for x in before_tokens])
676 elif after_tokens:
676 elif after_tokens:
677 modified.content = render_tokenstream(
677 modified.content = render_tokenstream(
678 [(x[0], '', x[1]) for x in after_tokens])
678 [(x[0], '', x[1]) for x in after_tokens])
679
679
680 if not before_lines and before_newline:
680 if not before_lines and before_newline:
681 original.content += before_newline.content
681 original.content += before_newline.content
682 before_newline = None
682 before_newline = None
683 if not after_lines and after_newline:
683 if not after_lines and after_newline:
684 modified.content += after_newline.content
684 modified.content += after_newline.content
685 after_newline = None
685 after_newline = None
686
686
687 lines.append(AttributeDict({
687 lines.append(AttributeDict({
688 'original': original,
688 'original': original,
689 'modified': modified,
689 'modified': modified,
690 }))
690 }))
691
691
692 return lines
692 return lines
693
693
694 def get_line_tokens(self, line_text, line_number, input_file=None, no_hl=False):
694 def get_line_tokens(self, line_text, line_number, input_file=None, no_hl=False):
695 filenode = None
695 filenode = None
696 filename = None
696 filename = None
697
697
698 if isinstance(input_file, basestring):
698 if isinstance(input_file, basestring):
699 filename = input_file
699 filename = input_file
700 elif isinstance(input_file, FileNode):
700 elif isinstance(input_file, FileNode):
701 filenode = input_file
701 filenode = input_file
702 filename = input_file.unicode_path
702 filename = input_file.unicode_path
703
703
704 hl_mode = self.HL_NONE if no_hl else self.highlight_mode
704 hl_mode = self.HL_NONE if no_hl else self.highlight_mode
705 if hl_mode == self.HL_REAL and filenode:
705 if hl_mode == self.HL_REAL and filenode:
706 lexer = self._get_lexer_for_filename(filename)
706 lexer = self._get_lexer_for_filename(filename)
707 file_size_allowed = input_file.size < self.max_file_size_limit
707 file_size_allowed = input_file.size < self.max_file_size_limit
708 if line_number and file_size_allowed:
708 if line_number and file_size_allowed:
709 return self.get_tokenized_filenode_line(
709 return self.get_tokenized_filenode_line(
710 input_file, line_number, lexer)
710 input_file, line_number, lexer)
711
711
712 if hl_mode in (self.HL_REAL, self.HL_FAST) and filename:
712 if hl_mode in (self.HL_REAL, self.HL_FAST) and filename:
713 lexer = self._get_lexer_for_filename(filename)
713 lexer = self._get_lexer_for_filename(filename)
714 return list(tokenize_string(line_text, lexer))
714 return list(tokenize_string(line_text, lexer))
715
715
716 return list(tokenize_string(line_text, plain_text_lexer))
716 return list(tokenize_string(line_text, plain_text_lexer))
717
717
718 def get_tokenized_filenode_line(self, filenode, line_number, lexer=None):
718 def get_tokenized_filenode_line(self, filenode, line_number, lexer=None):
719
719
720 if filenode not in self.highlighted_filenodes:
720 if filenode not in self.highlighted_filenodes:
721 tokenized_lines = filenode_as_lines_tokens(filenode, lexer)
721 tokenized_lines = filenode_as_lines_tokens(filenode, lexer)
722 self.highlighted_filenodes[filenode] = tokenized_lines
722 self.highlighted_filenodes[filenode] = tokenized_lines
723 return self.highlighted_filenodes[filenode][line_number - 1]
723 return self.highlighted_filenodes[filenode][line_number - 1]
724
724
725 def action_to_op(self, action):
725 def action_to_op(self, action):
726 return {
726 return {
727 'add': '+',
727 'add': '+',
728 'del': '-',
728 'del': '-',
729 'unmod': ' ',
729 'unmod': ' ',
730 'unmod-no-hl': ' ',
730 'unmod-no-hl': ' ',
731 'old-no-nl': ' ',
731 'old-no-nl': ' ',
732 'new-no-nl': ' ',
732 'new-no-nl': ' ',
733 }.get(action, action)
733 }.get(action, action)
734
734
735 def as_unified(self, lines):
735 def as_unified(self, lines):
736 """
736 """
737 Return a generator that yields the lines of a diff in unified order
737 Return a generator that yields the lines of a diff in unified order
738 """
738 """
739 def generator():
739 def generator():
740 buf = []
740 buf = []
741 for line in lines:
741 for line in lines:
742
742
743 if buf and not line.original or line.original.action == ' ':
743 if buf and not line.original or line.original.action == ' ':
744 for b in buf:
744 for b in buf:
745 yield b
745 yield b
746 buf = []
746 buf = []
747
747
748 if line.original:
748 if line.original:
749 if line.original.action == ' ':
749 if line.original.action == ' ':
750 yield (line.original.lineno, line.modified.lineno,
750 yield (line.original.lineno, line.modified.lineno,
751 line.original.action, line.original.content,
751 line.original.action, line.original.content,
752 line.original.get_comment_args)
752 line.original.get_comment_args)
753 continue
753 continue
754
754
755 if line.original.action == '-':
755 if line.original.action == '-':
756 yield (line.original.lineno, None,
756 yield (line.original.lineno, None,
757 line.original.action, line.original.content,
757 line.original.action, line.original.content,
758 line.original.get_comment_args)
758 line.original.get_comment_args)
759
759
760 if line.modified.action == '+':
760 if line.modified.action == '+':
761 buf.append((
761 buf.append((
762 None, line.modified.lineno,
762 None, line.modified.lineno,
763 line.modified.action, line.modified.content,
763 line.modified.action, line.modified.content,
764 line.modified.get_comment_args))
764 line.modified.get_comment_args))
765 continue
765 continue
766
766
767 if line.modified:
767 if line.modified:
768 yield (None, line.modified.lineno,
768 yield (None, line.modified.lineno,
769 line.modified.action, line.modified.content,
769 line.modified.action, line.modified.content,
770 line.modified.get_comment_args)
770 line.modified.get_comment_args)
771
771
772 for b in buf:
772 for b in buf:
773 yield b
773 yield b
774
774
775 return generator()
775 return generator()
@@ -1,1025 +1,1025 b''
1 <%namespace name="commentblock" file="/changeset/changeset_file_comment.mako"/>
1 <%namespace name="commentblock" file="/changeset/changeset_file_comment.mako"/>
2
2
3 <%def name="diff_line_anchor(commit, filename, line, type)"><%
3 <%def name="diff_line_anchor(commit, filename, line, type)"><%
4 return '%s_%s_%i' % (h.md5_safe(commit+filename), type, line)
4 return '%s_%s_%i' % (h.md5_safe(commit+filename), type, line)
5 %></%def>
5 %></%def>
6
6
7 <%def name="action_class(action)">
7 <%def name="action_class(action)">
8 <%
8 <%
9 return {
9 return {
10 '-': 'cb-deletion',
10 '-': 'cb-deletion',
11 '+': 'cb-addition',
11 '+': 'cb-addition',
12 ' ': 'cb-context',
12 ' ': 'cb-context',
13 }.get(action, 'cb-empty')
13 }.get(action, 'cb-empty')
14 %>
14 %>
15 </%def>
15 </%def>
16
16
17 <%def name="op_class(op_id)">
17 <%def name="op_class(op_id)">
18 <%
18 <%
19 return {
19 return {
20 DEL_FILENODE: 'deletion', # file deleted
20 DEL_FILENODE: 'deletion', # file deleted
21 BIN_FILENODE: 'warning' # binary diff hidden
21 BIN_FILENODE: 'warning' # binary diff hidden
22 }.get(op_id, 'addition')
22 }.get(op_id, 'addition')
23 %>
23 %>
24 </%def>
24 </%def>
25
25
26
26
27
27
28 <%def name="render_diffset(diffset, commit=None,
28 <%def name="render_diffset(diffset, commit=None,
29
29
30 # collapse all file diff entries when there are more than this amount of files in the diff
30 # collapse all file diff entries when there are more than this amount of files in the diff
31 collapse_when_files_over=20,
31 collapse_when_files_over=20,
32
32
33 # collapse lines in the diff when more than this amount of lines changed in the file diff
33 # collapse lines in the diff when more than this amount of lines changed in the file diff
34 lines_changed_limit=500,
34 lines_changed_limit=500,
35
35
36 # add a ruler at to the output
36 # add a ruler at to the output
37 ruler_at_chars=0,
37 ruler_at_chars=0,
38
38
39 # show inline comments
39 # show inline comments
40 use_comments=False,
40 use_comments=False,
41
41
42 # disable new comments
42 # disable new comments
43 disable_new_comments=False,
43 disable_new_comments=False,
44
44
45 # special file-comments that were deleted in previous versions
45 # special file-comments that were deleted in previous versions
46 # it's used for showing outdated comments for deleted files in a PR
46 # it's used for showing outdated comments for deleted files in a PR
47 deleted_files_comments=None,
47 deleted_files_comments=None,
48
48
49 # for cache purpose
49 # for cache purpose
50 inline_comments=None,
50 inline_comments=None,
51
51
52 )">
52 )">
53 %if use_comments:
53 %if use_comments:
54 <div id="cb-comments-inline-container-template" class="js-template">
54 <div id="cb-comments-inline-container-template" class="js-template">
55 ${inline_comments_container([], inline_comments)}
55 ${inline_comments_container([], inline_comments)}
56 </div>
56 </div>
57 <div class="js-template" id="cb-comment-inline-form-template">
57 <div class="js-template" id="cb-comment-inline-form-template">
58 <div class="comment-inline-form ac">
58 <div class="comment-inline-form ac">
59
59
60 %if c.rhodecode_user.username != h.DEFAULT_USER:
60 %if c.rhodecode_user.username != h.DEFAULT_USER:
61 ## render template for inline comments
61 ## render template for inline comments
62 ${commentblock.comment_form(form_type='inline')}
62 ${commentblock.comment_form(form_type='inline')}
63 %else:
63 %else:
64 ${h.form('', class_='inline-form comment-form-login', method='get')}
64 ${h.form('', class_='inline-form comment-form-login', method='get')}
65 <div class="pull-left">
65 <div class="pull-left">
66 <div class="comment-help pull-right">
66 <div class="comment-help pull-right">
67 ${_('You need to be logged in to leave comments.')} <a href="${h.route_path('login', _query={'came_from': h.current_route_path(request)})}">${_('Login now')}</a>
67 ${_('You need to be logged in to leave comments.')} <a href="${h.route_path('login', _query={'came_from': h.current_route_path(request)})}">${_('Login now')}</a>
68 </div>
68 </div>
69 </div>
69 </div>
70 <div class="comment-button pull-right">
70 <div class="comment-button pull-right">
71 <button type="button" class="cb-comment-cancel" onclick="return Rhodecode.comments.cancelComment(this);">
71 <button type="button" class="cb-comment-cancel" onclick="return Rhodecode.comments.cancelComment(this);">
72 ${_('Cancel')}
72 ${_('Cancel')}
73 </button>
73 </button>
74 </div>
74 </div>
75 <div class="clearfix"></div>
75 <div class="clearfix"></div>
76 ${h.end_form()}
76 ${h.end_form()}
77 %endif
77 %endif
78 </div>
78 </div>
79 </div>
79 </div>
80
80
81 %endif
81 %endif
82 <%
82 <%
83 collapse_all = len(diffset.files) > collapse_when_files_over
83 collapse_all = len(diffset.files) > collapse_when_files_over
84 %>
84 %>
85
85
86 %if c.user_session_attrs["diffmode"] == 'sideside':
86 %if c.user_session_attrs["diffmode"] == 'sideside':
87 <style>
87 <style>
88 .wrapper {
88 .wrapper {
89 max-width: 1600px !important;
89 max-width: 1600px !important;
90 }
90 }
91 </style>
91 </style>
92 %endif
92 %endif
93
93
94 %if ruler_at_chars:
94 %if ruler_at_chars:
95 <style>
95 <style>
96 .diff table.cb .cb-content:after {
96 .diff table.cb .cb-content:after {
97 content: "";
97 content: "";
98 border-left: 1px solid blue;
98 border-left: 1px solid blue;
99 position: absolute;
99 position: absolute;
100 top: 0;
100 top: 0;
101 height: 18px;
101 height: 18px;
102 opacity: .2;
102 opacity: .2;
103 z-index: 10;
103 z-index: 10;
104 //## +5 to account for diff action (+/-)
104 //## +5 to account for diff action (+/-)
105 left: ${ruler_at_chars + 5}ch;
105 left: ${ruler_at_chars + 5}ch;
106 </style>
106 </style>
107 %endif
107 %endif
108
108
109 <div class="diffset ${disable_new_comments and 'diffset-comments-disabled'}">
109 <div class="diffset ${disable_new_comments and 'diffset-comments-disabled'}">
110 <div class="diffset-heading ${diffset.limited_diff and 'diffset-heading-warning' or ''}">
110 <div class="diffset-heading ${diffset.limited_diff and 'diffset-heading-warning' or ''}">
111 %if commit:
111 %if commit:
112 <div class="pull-right">
112 <div class="pull-right">
113 <a class="btn tooltip" title="${h.tooltip(_('Browse Files at revision {}').format(commit.raw_id))}" href="${h.route_path('repo_files',repo_name=diffset.repo_name, commit_id=commit.raw_id, f_path='')}">
113 <a class="btn tooltip" title="${h.tooltip(_('Browse Files at revision {}').format(commit.raw_id))}" href="${h.route_path('repo_files',repo_name=diffset.repo_name, commit_id=commit.raw_id, f_path='')}">
114 ${_('Browse Files')}
114 ${_('Browse Files')}
115 </a>
115 </a>
116 </div>
116 </div>
117 %endif
117 %endif
118 <h2 class="clearinner">
118 <h2 class="clearinner">
119         ## individual commit
119         ## individual commit
120 % if commit:
120 % if commit:
121 <a class="tooltip revision" title="${h.tooltip(commit.message)}" href="${h.route_path('repo_commit',repo_name=diffset.repo_name,commit_id=commit.raw_id)}">${('r%s:%s' % (commit.idx,h.short_id(commit.raw_id)))}</a> -
121 <a class="tooltip revision" title="${h.tooltip(commit.message)}" href="${h.route_path('repo_commit',repo_name=diffset.repo_name,commit_id=commit.raw_id)}">${('r%s:%s' % (commit.idx,h.short_id(commit.raw_id)))}</a> -
122 ${h.age_component(commit.date)}
122 ${h.age_component(commit.date)}
123 % if diffset.limited_diff:
123 % if diffset.limited_diff:
124 - ${_('The requested commit is too big and content was truncated.')}
124 - ${_('The requested changes are too big and content was truncated.')}
125 ${_ungettext('%(num)s file changed.', '%(num)s files changed.', diffset.changed_files) % {'num': diffset.changed_files}}
125 ${_ungettext('%(num)s file changed.', '%(num)s files changed.', diffset.changed_files) % {'num': diffset.changed_files}}
126 <a href="${h.current_route_path(request, fulldiff=1)}" onclick="return confirm('${_("Showing a big diff might take some time and resources, continue?")}')">${_('Show full diff')}</a>
126 <a href="${h.current_route_path(request, fulldiff=1)}" onclick="return confirm('${_("Showing a big diff might take some time and resources, continue?")}')">${_('Show full diff')}</a>
127 % elif hasattr(c, 'commit_ranges') and len(c.commit_ranges) > 1:
127 % elif hasattr(c, 'commit_ranges') and len(c.commit_ranges) > 1:
128 ## compare diff, has no file-selector and we want to show stats anyway
128 ## compare diff, has no file-selector and we want to show stats anyway
129 ${_ungettext('{num} file changed: {linesadd} inserted, ''{linesdel} deleted',
129 ${_ungettext('{num} file changed: {linesadd} inserted, ''{linesdel} deleted',
130 '{num} files changed: {linesadd} inserted, {linesdel} deleted', diffset.changed_files) \
130 '{num} files changed: {linesadd} inserted, {linesdel} deleted', diffset.changed_files) \
131 .format(num=diffset.changed_files, linesadd=diffset.lines_added, linesdel=diffset.lines_deleted)}
131 .format(num=diffset.changed_files, linesadd=diffset.lines_added, linesdel=diffset.lines_deleted)}
132 % endif
132 % endif
133 % else:
133 % else:
134 ## pull requests/compare
134 ## pull requests/compare
135 ${_('File Changes')}
135 ${_('File Changes')}
136 % endif
136 % endif
137
137
138 </h2>
138 </h2>
139 </div>
139 </div>
140
140
141 %if diffset.has_hidden_changes:
141 %if diffset.has_hidden_changes:
142 <p class="empty_data">${_('Some changes may be hidden')}</p>
142 <p class="empty_data">${_('Some changes may be hidden')}</p>
143 %elif not diffset.files:
143 %elif not diffset.files:
144 <p class="empty_data">${_('No files')}</p>
144 <p class="empty_data">${_('No files')}</p>
145 %endif
145 %endif
146
146
147 <div class="filediffs">
147 <div class="filediffs">
148
148
149     ## initial value, may be overridden per file in the loop below
149     ## initial value, may be overridden per file in the loop below
150 <% over_lines_changed_limit = False %>
150 <% over_lines_changed_limit = False %>
151 %for i, filediff in enumerate(diffset.files):
151 %for i, filediff in enumerate(diffset.files):
152
152
153 <%
153 <%
154 lines_changed = filediff.patch['stats']['added'] + filediff.patch['stats']['deleted']
154 lines_changed = filediff.patch['stats']['added'] + filediff.patch['stats']['deleted']
155 over_lines_changed_limit = lines_changed > lines_changed_limit
155 over_lines_changed_limit = lines_changed > lines_changed_limit
156 %>
156 %>
157     ## anchor, with support for the sticky header
157     ## anchor, with support for the sticky header
158 <div class="anchor" id="a_${h.FID(filediff.raw_id, filediff.patch['filename'])}"></div>
158 <div class="anchor" id="a_${h.FID(filediff.raw_id, filediff.patch['filename'])}"></div>
159
159
160 <input ${(collapse_all and 'checked' or '')} class="filediff-collapse-state" id="filediff-collapse-${id(filediff)}" type="checkbox" onchange="updateSticky();">
160 <input ${(collapse_all and 'checked' or '')} class="filediff-collapse-state" id="filediff-collapse-${id(filediff)}" type="checkbox" onchange="updateSticky();">
161 <div
161 <div
162 class="filediff"
162 class="filediff"
163 data-f-path="${filediff.patch['filename']}"
163 data-f-path="${filediff.patch['filename']}"
164 data-anchor-id="${h.FID(filediff.raw_id, filediff.patch['filename'])}"
164 data-anchor-id="${h.FID(filediff.raw_id, filediff.patch['filename'])}"
165 >
165 >
166 <label for="filediff-collapse-${id(filediff)}" class="filediff-heading">
166 <label for="filediff-collapse-${id(filediff)}" class="filediff-heading">
167 <div class="filediff-collapse-indicator"></div>
167 <div class="filediff-collapse-indicator"></div>
168 ${diff_ops(filediff)}
168 ${diff_ops(filediff)}
169 </label>
169 </label>
170
170
171 ${diff_menu(filediff, use_comments=use_comments)}
171 ${diff_menu(filediff, use_comments=use_comments)}
172 <table data-f-path="${filediff.patch['filename']}" data-anchor-id="${h.FID(filediff.raw_id, filediff.patch['filename'])}" class="code-visible-block cb cb-diff-${c.user_session_attrs["diffmode"]} code-highlight ${(over_lines_changed_limit and 'cb-collapsed' or '')}">
172 <table data-f-path="${filediff.patch['filename']}" data-anchor-id="${h.FID(filediff.raw_id, filediff.patch['filename'])}" class="code-visible-block cb cb-diff-${c.user_session_attrs["diffmode"]} code-highlight ${(over_lines_changed_limit and 'cb-collapsed' or '')}">
173
173
174 ## new/deleted/empty content case
174 ## new/deleted/empty content case
175 % if not filediff.hunks:
175 % if not filediff.hunks:
176         ## Comment container, rendered on a "fake" hunk that carries all the data needed to render comments
176         ## Comment container, rendered on a "fake" hunk that carries all the data needed to render comments
177 ${render_hunk_lines(filediff, c.user_session_attrs["diffmode"], filediff.hunk_ops, use_comments=use_comments, inline_comments=inline_comments)}
177 ${render_hunk_lines(filediff, c.user_session_attrs["diffmode"], filediff.hunk_ops, use_comments=use_comments, inline_comments=inline_comments)}
178 % endif
178 % endif
179
179
180 %if filediff.limited_diff:
180 %if filediff.limited_diff:
181 <tr class="cb-warning cb-collapser">
181 <tr class="cb-warning cb-collapser">
182 <td class="cb-text" ${(c.user_session_attrs["diffmode"] == 'unified' and 'colspan=4' or 'colspan=6')}>
182 <td class="cb-text" ${(c.user_session_attrs["diffmode"] == 'unified' and 'colspan=4' or 'colspan=6')}>
183 ${_('The requested commit is too big and content was truncated.')} <a href="${h.current_route_path(request, fulldiff=1)}" onclick="return confirm('${_("Showing a big diff might take some time and resources, continue?")}')">${_('Show full diff')}</a>
183 ${_('The requested commit or file is too big and content was truncated.')} <a href="${h.current_route_path(request, fulldiff=1)}" onclick="return confirm('${_("Showing a big diff might take some time and resources, continue?")}')">${_('Show full diff')}</a>
184 </td>
184 </td>
185 </tr>
185 </tr>
186 %else:
186 %else:
187 %if over_lines_changed_limit:
187 %if over_lines_changed_limit:
188 <tr class="cb-warning cb-collapser">
188 <tr class="cb-warning cb-collapser">
189 <td class="cb-text" ${(c.user_session_attrs["diffmode"] == 'unified' and 'colspan=4' or 'colspan=6')}>
189 <td class="cb-text" ${(c.user_session_attrs["diffmode"] == 'unified' and 'colspan=4' or 'colspan=6')}>
190             ${_('This diff has been collapsed as it changes many lines (%i lines changed)' % lines_changed)}
190             ${_('This diff has been collapsed as it changes many lines (%i lines changed)' % lines_changed)}
191 <a href="#" class="cb-expand"
191 <a href="#" class="cb-expand"
192 onclick="$(this).closest('table').removeClass('cb-collapsed'); updateSticky(); return false;">${_('Show them')}
192 onclick="$(this).closest('table').removeClass('cb-collapsed'); updateSticky(); return false;">${_('Show them')}
193 </a>
193 </a>
194 <a href="#" class="cb-collapse"
194 <a href="#" class="cb-collapse"
195 onclick="$(this).closest('table').addClass('cb-collapsed'); updateSticky(); return false;">${_('Hide them')}
195 onclick="$(this).closest('table').addClass('cb-collapsed'); updateSticky(); return false;">${_('Hide them')}
196 </a>
196 </a>
197 </td>
197 </td>
198 </tr>
198 </tr>
199 %endif
199 %endif
200 %endif
200 %endif
201
201
202 % for hunk in filediff.hunks:
202 % for hunk in filediff.hunks:
203 <tr class="cb-hunk">
203 <tr class="cb-hunk">
204 <td ${(c.user_session_attrs["diffmode"] == 'unified' and 'colspan=3' or '')}>
204 <td ${(c.user_session_attrs["diffmode"] == 'unified' and 'colspan=3' or '')}>
205 ## TODO: dan: add ajax loading of more context here
205 ## TODO: dan: add ajax loading of more context here
206 ## <a href="#">
206 ## <a href="#">
207 <i class="icon-more"></i>
207 <i class="icon-more"></i>
208 ## </a>
208 ## </a>
209 </td>
209 </td>
210 <td ${(c.user_session_attrs["diffmode"] == 'sideside' and 'colspan=5' or '')}>
210 <td ${(c.user_session_attrs["diffmode"] == 'sideside' and 'colspan=5' or '')}>
211 @@
211 @@
212 -${hunk.source_start},${hunk.source_length}
212 -${hunk.source_start},${hunk.source_length}
213 +${hunk.target_start},${hunk.target_length}
213 +${hunk.target_start},${hunk.target_length}
214 ${hunk.section_header}
214 ${hunk.section_header}
215 </td>
215 </td>
216 </tr>
216 </tr>
217 ${render_hunk_lines(filediff, c.user_session_attrs["diffmode"], hunk, use_comments=use_comments, inline_comments=inline_comments)}
217 ${render_hunk_lines(filediff, c.user_session_attrs["diffmode"], hunk, use_comments=use_comments, inline_comments=inline_comments)}
218 % endfor
218 % endfor
219
219
220 <% unmatched_comments = (inline_comments or {}).get(filediff.patch['filename'], {}) %>
220 <% unmatched_comments = (inline_comments or {}).get(filediff.patch['filename'], {}) %>
221
221
222 ## outdated comments that do not fit into currently displayed lines
222 ## outdated comments that do not fit into currently displayed lines
223 % for lineno, comments in unmatched_comments.items():
223 % for lineno, comments in unmatched_comments.items():
224
224
225 %if c.user_session_attrs["diffmode"] == 'unified':
225 %if c.user_session_attrs["diffmode"] == 'unified':
226 % if loop.index == 0:
226 % if loop.index == 0:
227 <tr class="cb-hunk">
227 <tr class="cb-hunk">
228 <td colspan="3"></td>
228 <td colspan="3"></td>
229 <td>
229 <td>
230 <div>
230 <div>
231 ${_('Unmatched inline comments below')}
231 ${_('Unmatched inline comments below')}
232 </div>
232 </div>
233 </td>
233 </td>
234 </tr>
234 </tr>
235 % endif
235 % endif
236 <tr class="cb-line">
236 <tr class="cb-line">
237 <td class="cb-data cb-context"></td>
237 <td class="cb-data cb-context"></td>
238 <td class="cb-lineno cb-context"></td>
238 <td class="cb-lineno cb-context"></td>
239 <td class="cb-lineno cb-context"></td>
239 <td class="cb-lineno cb-context"></td>
240 <td class="cb-content cb-context">
240 <td class="cb-content cb-context">
241 ${inline_comments_container(comments, inline_comments)}
241 ${inline_comments_container(comments, inline_comments)}
242 </td>
242 </td>
243 </tr>
243 </tr>
244 %elif c.user_session_attrs["diffmode"] == 'sideside':
244 %elif c.user_session_attrs["diffmode"] == 'sideside':
245 % if loop.index == 0:
245 % if loop.index == 0:
246 <tr class="cb-comment-info">
246 <tr class="cb-comment-info">
247 <td colspan="2"></td>
247 <td colspan="2"></td>
248 <td class="cb-line">
248 <td class="cb-line">
249 <div>
249 <div>
250 ${_('Unmatched inline comments below')}
250 ${_('Unmatched inline comments below')}
251 </div>
251 </div>
252 </td>
252 </td>
253 <td colspan="2"></td>
253 <td colspan="2"></td>
254 <td class="cb-line">
254 <td class="cb-line">
255 <div>
255 <div>
256 ${_('Unmatched comments below')}
256 ${_('Unmatched comments below')}
257 </div>
257 </div>
258 </td>
258 </td>
259 </tr>
259 </tr>
260 % endif
260 % endif
261 <tr class="cb-line">
261 <tr class="cb-line">
262 <td class="cb-data cb-context"></td>
262 <td class="cb-data cb-context"></td>
263 <td class="cb-lineno cb-context"></td>
263 <td class="cb-lineno cb-context"></td>
264 <td class="cb-content cb-context">
264 <td class="cb-content cb-context">
265 % if lineno.startswith('o'):
265 % if lineno.startswith('o'):
266 ${inline_comments_container(comments, inline_comments)}
266 ${inline_comments_container(comments, inline_comments)}
267 % endif
267 % endif
268 </td>
268 </td>
269
269
270 <td class="cb-data cb-context"></td>
270 <td class="cb-data cb-context"></td>
271 <td class="cb-lineno cb-context"></td>
271 <td class="cb-lineno cb-context"></td>
272 <td class="cb-content cb-context">
272 <td class="cb-content cb-context">
273 % if lineno.startswith('n'):
273 % if lineno.startswith('n'):
274 ${inline_comments_container(comments, inline_comments)}
274 ${inline_comments_container(comments, inline_comments)}
275 % endif
275 % endif
276 </td>
276 </td>
277 </tr>
277 </tr>
278 %endif
278 %endif
279
279
280 % endfor
280 % endfor
281
281
282 </table>
282 </table>
283 </div>
283 </div>
284 %endfor
284 %endfor
285
285
286 ## outdated comments that are made for a file that has been deleted
286 ## outdated comments that are made for a file that has been deleted
287 % for filename, comments_dict in (deleted_files_comments or {}).items():
287 % for filename, comments_dict in (deleted_files_comments or {}).items():
288 <%
288 <%
289 display_state = 'display: none'
289 display_state = 'display: none'
290 open_comments_in_file = [x for x in comments_dict['comments'] if x.outdated is False]
290 open_comments_in_file = [x for x in comments_dict['comments'] if x.outdated is False]
291 if open_comments_in_file:
291 if open_comments_in_file:
292 display_state = ''
292 display_state = ''
293 %>
293 %>
294 <div class="filediffs filediff-outdated" style="${display_state}">
294 <div class="filediffs filediff-outdated" style="${display_state}">
295 <input ${(collapse_all and 'checked' or '')} class="filediff-collapse-state" id="filediff-collapse-${id(filename)}" type="checkbox" onchange="updateSticky();">
295 <input ${(collapse_all and 'checked' or '')} class="filediff-collapse-state" id="filediff-collapse-${id(filename)}" type="checkbox" onchange="updateSticky();">
296 <div class="filediff" data-f-path="${filename}" id="a_${h.FID(filediff.raw_id, filename)}">
296 <div class="filediff" data-f-path="${filename}" id="a_${h.FID(filediff.raw_id, filename)}">
297 <label for="filediff-collapse-${id(filename)}" class="filediff-heading">
297 <label for="filediff-collapse-${id(filename)}" class="filediff-heading">
298 <div class="filediff-collapse-indicator"></div>
298 <div class="filediff-collapse-indicator"></div>
299 <span class="pill">
299 <span class="pill">
300 ## file was deleted
300 ## file was deleted
301 <strong>${filename}</strong>
301 <strong>${filename}</strong>
302 </span>
302 </span>
303 <span class="pill-group" style="float: left">
303 <span class="pill-group" style="float: left">
304 ## file op, doesn't need translation
304 ## file op, doesn't need translation
305 <span class="pill" op="removed">removed in this version</span>
305 <span class="pill" op="removed">removed in this version</span>
306 </span>
306 </span>
307 <a class="pill filediff-anchor" href="#a_${h.FID(filediff.raw_id, filename)}"></a>
307 <a class="pill filediff-anchor" href="#a_${h.FID(filediff.raw_id, filename)}"></a>
308 <span class="pill-group" style="float: right">
308 <span class="pill-group" style="float: right">
309 <span class="pill" op="deleted">-${comments_dict['stats']}</span>
309 <span class="pill" op="deleted">-${comments_dict['stats']}</span>
310 </span>
310 </span>
311 </label>
311 </label>
312
312
313 <table class="cb cb-diff-${c.user_session_attrs["diffmode"]} code-highlight ${over_lines_changed_limit and 'cb-collapsed' or ''}">
313 <table class="cb cb-diff-${c.user_session_attrs["diffmode"]} code-highlight ${over_lines_changed_limit and 'cb-collapsed' or ''}">
314 <tr>
314 <tr>
315 % if c.user_session_attrs["diffmode"] == 'unified':
315 % if c.user_session_attrs["diffmode"] == 'unified':
316 <td></td>
316 <td></td>
317 %endif
317 %endif
318
318
319 <td></td>
319 <td></td>
320 <td class="cb-text cb-${op_class(BIN_FILENODE)}" ${(c.user_session_attrs["diffmode"] == 'unified' and 'colspan=4' or 'colspan=5')}>
320 <td class="cb-text cb-${op_class(BIN_FILENODE)}" ${(c.user_session_attrs["diffmode"] == 'unified' and 'colspan=4' or 'colspan=5')}>
321 ${_('File was deleted in this version. There are still outdated/unresolved comments attached to it.')}
321 ${_('File was deleted in this version. There are still outdated/unresolved comments attached to it.')}
322 </td>
322 </td>
323 </tr>
323 </tr>
324 %if c.user_session_attrs["diffmode"] == 'unified':
324 %if c.user_session_attrs["diffmode"] == 'unified':
325 <tr class="cb-line">
325 <tr class="cb-line">
326 <td class="cb-data cb-context"></td>
326 <td class="cb-data cb-context"></td>
327 <td class="cb-lineno cb-context"></td>
327 <td class="cb-lineno cb-context"></td>
328 <td class="cb-lineno cb-context"></td>
328 <td class="cb-lineno cb-context"></td>
329 <td class="cb-content cb-context">
329 <td class="cb-content cb-context">
330 ${inline_comments_container(comments_dict['comments'], inline_comments)}
330 ${inline_comments_container(comments_dict['comments'], inline_comments)}
331 </td>
331 </td>
332 </tr>
332 </tr>
333 %elif c.user_session_attrs["diffmode"] == 'sideside':
333 %elif c.user_session_attrs["diffmode"] == 'sideside':
334 <tr class="cb-line">
334 <tr class="cb-line">
335 <td class="cb-data cb-context"></td>
335 <td class="cb-data cb-context"></td>
336 <td class="cb-lineno cb-context"></td>
336 <td class="cb-lineno cb-context"></td>
337 <td class="cb-content cb-context"></td>
337 <td class="cb-content cb-context"></td>
338
338
339 <td class="cb-data cb-context"></td>
339 <td class="cb-data cb-context"></td>
340 <td class="cb-lineno cb-context"></td>
340 <td class="cb-lineno cb-context"></td>
341 <td class="cb-content cb-context">
341 <td class="cb-content cb-context">
342 ${inline_comments_container(comments_dict['comments'], inline_comments)}
342 ${inline_comments_container(comments_dict['comments'], inline_comments)}
343 </td>
343 </td>
344 </tr>
344 </tr>
345 %endif
345 %endif
346 </table>
346 </table>
347 </div>
347 </div>
348 </div>
348 </div>
349 % endfor
349 % endfor
350
350
351 </div>
351 </div>
352 </div>
352 </div>
353 </%def>
353 </%def>
354
354
355 <%def name="diff_ops(filediff)">
355 <%def name="diff_ops(filediff)">
356 <%
356 <%
357 from rhodecode.lib.diffs import NEW_FILENODE, DEL_FILENODE, \
357 from rhodecode.lib.diffs import NEW_FILENODE, DEL_FILENODE, \
358 MOD_FILENODE, RENAMED_FILENODE, CHMOD_FILENODE, BIN_FILENODE, COPIED_FILENODE
358 MOD_FILENODE, RENAMED_FILENODE, CHMOD_FILENODE, BIN_FILENODE, COPIED_FILENODE
359 %>
359 %>
360 <span class="pill">
360 <span class="pill">
361 %if filediff.source_file_path and filediff.target_file_path:
361 %if filediff.source_file_path and filediff.target_file_path:
362 %if filediff.source_file_path != filediff.target_file_path:
362 %if filediff.source_file_path != filediff.target_file_path:
363 ## file was renamed, or copied
363 ## file was renamed, or copied
364 %if RENAMED_FILENODE in filediff.patch['stats']['ops']:
364 %if RENAMED_FILENODE in filediff.patch['stats']['ops']:
365 <strong>${filediff.target_file_path}</strong><del>${filediff.source_file_path}</del>
365 <strong>${filediff.target_file_path}</strong><del>${filediff.source_file_path}</del>
366 <% final_path = filediff.target_file_path %>
366 <% final_path = filediff.target_file_path %>
367 %elif COPIED_FILENODE in filediff.patch['stats']['ops']:
367 %elif COPIED_FILENODE in filediff.patch['stats']['ops']:
368 <strong>${filediff.target_file_path}</strong>${filediff.source_file_path}
368 <strong>${filediff.target_file_path}</strong>${filediff.source_file_path}
369 <% final_path = filediff.target_file_path %>
369 <% final_path = filediff.target_file_path %>
370 %endif
370 %endif
371 %else:
371 %else:
372 ## file was modified
372 ## file was modified
373 <strong>${filediff.source_file_path}</strong>
373 <strong>${filediff.source_file_path}</strong>
374 <% final_path = filediff.source_file_path %>
374 <% final_path = filediff.source_file_path %>
375 %endif
375 %endif
376 %else:
376 %else:
377 %if filediff.source_file_path:
377 %if filediff.source_file_path:
378 ## file was deleted
378 ## file was deleted
379 <strong>${filediff.source_file_path}</strong>
379 <strong>${filediff.source_file_path}</strong>
380 <% final_path = filediff.source_file_path %>
380 <% final_path = filediff.source_file_path %>
381 %else:
381 %else:
382 ## file was added
382 ## file was added
383 <strong>${filediff.target_file_path}</strong>
383 <strong>${filediff.target_file_path}</strong>
384 <% final_path = filediff.target_file_path %>
384 <% final_path = filediff.target_file_path %>
385 %endif
385 %endif
386 %endif
386 %endif
387 <i style="color: #aaa" class="tooltip icon-clipboard clipboard-action" data-clipboard-text="${final_path}" title="${_('Copy the full path')}" onclick="return false;"></i>
387 <i style="color: #aaa" class="tooltip icon-clipboard clipboard-action" data-clipboard-text="${final_path}" title="${_('Copy the full path')}" onclick="return false;"></i>
388 </span>
388 </span>
389 ## anchor link
389 ## anchor link
390 <a class="pill filediff-anchor" href="#a_${h.FID(filediff.raw_id, filediff.patch['filename'])}"></a>
390 <a class="pill filediff-anchor" href="#a_${h.FID(filediff.raw_id, filediff.patch['filename'])}"></a>
391
391
392 <span class="pill-group" style="float: right">
392 <span class="pill-group" style="float: right">
393
393
394 ## ops pills
394 ## ops pills
395 %if filediff.limited_diff:
395 %if filediff.limited_diff:
396 <span class="pill tooltip" op="limited" title="The stats for this diff are not complete">limited diff</span>
396 <span class="pill tooltip" op="limited" title="The stats for this diff are not complete">limited diff</span>
397 %endif
397 %endif
398
398
399 %if NEW_FILENODE in filediff.patch['stats']['ops']:
399 %if NEW_FILENODE in filediff.patch['stats']['ops']:
400 <span class="pill" op="created">created</span>
400 <span class="pill" op="created">created</span>
401 %if filediff['target_mode'].startswith('120'):
401 %if filediff['target_mode'].startswith('120'):
402 <span class="pill" op="symlink">symlink</span>
402 <span class="pill" op="symlink">symlink</span>
403 %else:
403 %else:
404 <span class="pill" op="mode">${nice_mode(filediff['target_mode'])}</span>
404 <span class="pill" op="mode">${nice_mode(filediff['target_mode'])}</span>
405 %endif
405 %endif
406 %endif
406 %endif
407
407
408 %if RENAMED_FILENODE in filediff.patch['stats']['ops']:
408 %if RENAMED_FILENODE in filediff.patch['stats']['ops']:
409 <span class="pill" op="renamed">renamed</span>
409 <span class="pill" op="renamed">renamed</span>
410 %endif
410 %endif
411
411
412 %if COPIED_FILENODE in filediff.patch['stats']['ops']:
412 %if COPIED_FILENODE in filediff.patch['stats']['ops']:
413 <span class="pill" op="copied">copied</span>
413 <span class="pill" op="copied">copied</span>
414 %endif
414 %endif
415
415
416 %if DEL_FILENODE in filediff.patch['stats']['ops']:
416 %if DEL_FILENODE in filediff.patch['stats']['ops']:
417 <span class="pill" op="removed">removed</span>
417 <span class="pill" op="removed">removed</span>
418 %endif
418 %endif
419
419
420 %if CHMOD_FILENODE in filediff.patch['stats']['ops']:
420 %if CHMOD_FILENODE in filediff.patch['stats']['ops']:
421 <span class="pill" op="mode">
421 <span class="pill" op="mode">
422 ${nice_mode(filediff['source_mode'])}${nice_mode(filediff['target_mode'])}
422 ${nice_mode(filediff['source_mode'])}${nice_mode(filediff['target_mode'])}
423 </span>
423 </span>
424 %endif
424 %endif
425
425
426 %if BIN_FILENODE in filediff.patch['stats']['ops']:
426 %if BIN_FILENODE in filediff.patch['stats']['ops']:
427 <span class="pill" op="binary">binary</span>
427 <span class="pill" op="binary">binary</span>
428 %if MOD_FILENODE in filediff.patch['stats']['ops']:
428 %if MOD_FILENODE in filediff.patch['stats']['ops']:
429 <span class="pill" op="modified">modified</span>
429 <span class="pill" op="modified">modified</span>
430 %endif
430 %endif
431 %endif
431 %endif
432
432
433 <span class="pill" op="added">${('+' if filediff.patch['stats']['added'] else '')}${filediff.patch['stats']['added']}</span>
433 <span class="pill" op="added">${('+' if filediff.patch['stats']['added'] else '')}${filediff.patch['stats']['added']}</span>
434 <span class="pill" op="deleted">${((h.safe_int(filediff.patch['stats']['deleted']) or 0) * -1)}</span>
434 <span class="pill" op="deleted">${((h.safe_int(filediff.patch['stats']['deleted']) or 0) * -1)}</span>
435
435
436 </span>
436 </span>
437
437
438 </%def>
438 </%def>
439
439
440 <%def name="nice_mode(filemode)">
440 <%def name="nice_mode(filemode)">
441 ${(filemode.startswith('100') and filemode[3:] or filemode)}
441 ${(filemode.startswith('100') and filemode[3:] or filemode)}
442 </%def>
442 </%def>
443
443
444 <%def name="diff_menu(filediff, use_comments=False)">
444 <%def name="diff_menu(filediff, use_comments=False)">
445 <div class="filediff-menu">
445 <div class="filediff-menu">
446
446
447 %if filediff.diffset.source_ref:
447 %if filediff.diffset.source_ref:
448
448
449 ## FILE BEFORE CHANGES
449 ## FILE BEFORE CHANGES
450 %if filediff.operation in ['D', 'M']:
450 %if filediff.operation in ['D', 'M']:
451 <a
451 <a
452 class="tooltip"
452 class="tooltip"
453 href="${h.route_path('repo_files',repo_name=filediff.diffset.target_repo_name,commit_id=filediff.diffset.source_ref,f_path=filediff.source_file_path)}"
453 href="${h.route_path('repo_files',repo_name=filediff.diffset.target_repo_name,commit_id=filediff.diffset.source_ref,f_path=filediff.source_file_path)}"
454 title="${h.tooltip(_('Show file at commit: %(commit_id)s') % {'commit_id': filediff.diffset.source_ref[:12]})}"
454 title="${h.tooltip(_('Show file at commit: %(commit_id)s') % {'commit_id': filediff.diffset.source_ref[:12]})}"
455 >
455 >
456 ${_('Show file before')}
456 ${_('Show file before')}
457 </a> |
457 </a> |
458 %else:
458 %else:
459 <span
459 <span
460 class="tooltip"
460 class="tooltip"
461 title="${h.tooltip(_('File not present at commit: %(commit_id)s') % {'commit_id': filediff.diffset.source_ref[:12]})}"
461 title="${h.tooltip(_('File not present at commit: %(commit_id)s') % {'commit_id': filediff.diffset.source_ref[:12]})}"
462 >
462 >
463 ${_('Show file before')}
463 ${_('Show file before')}
464 </span> |
464 </span> |
465 %endif
465 %endif
466
466
467 ## FILE AFTER CHANGES
467 ## FILE AFTER CHANGES
468 %if filediff.operation in ['A', 'M']:
468 %if filediff.operation in ['A', 'M']:
469 <a
469 <a
470 class="tooltip"
470 class="tooltip"
471 href="${h.route_path('repo_files',repo_name=filediff.diffset.source_repo_name,commit_id=filediff.diffset.target_ref,f_path=filediff.target_file_path)}"
471 href="${h.route_path('repo_files',repo_name=filediff.diffset.source_repo_name,commit_id=filediff.diffset.target_ref,f_path=filediff.target_file_path)}"
472 title="${h.tooltip(_('Show file at commit: %(commit_id)s') % {'commit_id': filediff.diffset.target_ref[:12]})}"
472 title="${h.tooltip(_('Show file at commit: %(commit_id)s') % {'commit_id': filediff.diffset.target_ref[:12]})}"
473 >
473 >
474 ${_('Show file after')}
474 ${_('Show file after')}
475 </a>
475 </a>
476 %else:
476 %else:
477 <span
477 <span
478 class="tooltip"
478 class="tooltip"
479 title="${h.tooltip(_('File not present at commit: %(commit_id)s') % {'commit_id': filediff.diffset.target_ref[:12]})}"
479 title="${h.tooltip(_('File not present at commit: %(commit_id)s') % {'commit_id': filediff.diffset.target_ref[:12]})}"
480 >
480 >
481 ${_('Show file after')}
481 ${_('Show file after')}
482 </span>
482 </span>
483 %endif
483 %endif
484
484
485 % if use_comments:
485 % if use_comments:
486 |
486 |
487 <a href="#" onclick="return Rhodecode.comments.toggleComments(this);">
487 <a href="#" onclick="return Rhodecode.comments.toggleComments(this);">
488 <span class="show-comment-button">${_('Show comments')}</span><span class="hide-comment-button">${_('Hide comments')}</span>
488 <span class="show-comment-button">${_('Show comments')}</span><span class="hide-comment-button">${_('Hide comments')}</span>
489 </a>
489 </a>
490 % endif
490 % endif
491
491
492 %endif
492 %endif
493
493
494 </div>
494 </div>
495 </%def>
495 </%def>
496
496
497
497
498 <%def name="inline_comments_container(comments, inline_comments)">
498 <%def name="inline_comments_container(comments, inline_comments)">
499 <div class="inline-comments">
499 <div class="inline-comments">
500 %for comment in comments:
500 %for comment in comments:
501 ${commentblock.comment_block(comment, inline=True)}
501 ${commentblock.comment_block(comment, inline=True)}
502 %endfor
502 %endfor
503 % if comments and comments[-1].outdated:
503 % if comments and comments[-1].outdated:
504         <span class="btn btn-secondary cb-comment-add-button comment-outdated"
504         <span class="btn btn-secondary cb-comment-add-button comment-outdated"
505               style="display: none;">
505               style="display: none;">
506 ${_('Add another comment')}
506 ${_('Add another comment')}
507 </span>
507 </span>
508 % else:
508 % else:
509 <span onclick="return Rhodecode.comments.createComment(this)"
509 <span onclick="return Rhodecode.comments.createComment(this)"
510 class="btn btn-secondary cb-comment-add-button">
510 class="btn btn-secondary cb-comment-add-button">
511 ${_('Add another comment')}
511 ${_('Add another comment')}
512 </span>
512 </span>
513 % endif
513 % endif
514
514
515 </div>
515 </div>
516 </%def>
516 </%def>
517
517
518 <%!
518 <%!
519 def get_comments_for(diff_type, comments, filename, line_version, line_number):
519 def get_comments_for(diff_type, comments, filename, line_version, line_number):
520 if hasattr(filename, 'unicode_path'):
520 if hasattr(filename, 'unicode_path'):
521 filename = filename.unicode_path
521 filename = filename.unicode_path
522
522
523 if not isinstance(filename, basestring):
523 if not isinstance(filename, basestring):
524 return None
524 return None
525
525
526     line_key = '{}{}'.format(line_version, line_number) ## e.g. o37, n12
526     line_key = '{}{}'.format(line_version, line_number) ## e.g. o37, n12
527
527
528 if comments and filename in comments:
528 if comments and filename in comments:
529 file_comments = comments[filename]
529 file_comments = comments[filename]
530 if line_key in file_comments:
530 if line_key in file_comments:
531 data = file_comments.pop(line_key)
531 data = file_comments.pop(line_key)
532 return data
532 return data
533 %>
533 %>
534
534
535 <%def name="render_hunk_lines_sideside(filediff, hunk, use_comments=False, inline_comments=None)">
535 <%def name="render_hunk_lines_sideside(filediff, hunk, use_comments=False, inline_comments=None)">
536 %for i, line in enumerate(hunk.sideside):
536 %for i, line in enumerate(hunk.sideside):
537 <%
537 <%
538 old_line_anchor, new_line_anchor = None, None
538 old_line_anchor, new_line_anchor = None, None
539
539
540 if line.original.lineno:
540 if line.original.lineno:
541 old_line_anchor = diff_line_anchor(filediff.raw_id, hunk.source_file_path, line.original.lineno, 'o')
541 old_line_anchor = diff_line_anchor(filediff.raw_id, hunk.source_file_path, line.original.lineno, 'o')
542 if line.modified.lineno:
542 if line.modified.lineno:
543 new_line_anchor = diff_line_anchor(filediff.raw_id, hunk.target_file_path, line.modified.lineno, 'n')
543 new_line_anchor = diff_line_anchor(filediff.raw_id, hunk.target_file_path, line.modified.lineno, 'n')
544 %>
544 %>
545
545
546 <tr class="cb-line">
546 <tr class="cb-line">
547 <td class="cb-data ${action_class(line.original.action)}"
547 <td class="cb-data ${action_class(line.original.action)}"
548 data-line-no="${line.original.lineno}"
548 data-line-no="${line.original.lineno}"
549 >
549 >
550 <div>
550 <div>
551
551
552 <% line_old_comments = None %>
552 <% line_old_comments = None %>
553 %if line.original.get_comment_args:
553 %if line.original.get_comment_args:
554 <% line_old_comments = get_comments_for('side-by-side', inline_comments, *line.original.get_comment_args) %>
554 <% line_old_comments = get_comments_for('side-by-side', inline_comments, *line.original.get_comment_args) %>
555 %endif
555 %endif
556 %if line_old_comments:
556 %if line_old_comments:
557 <% has_outdated = any([x.outdated for x in line_old_comments]) %>
557 <% has_outdated = any([x.outdated for x in line_old_comments]) %>
558 % if has_outdated:
558 % if has_outdated:
559 <i title="${_('comments including outdated')}:${len(line_old_comments)}" class="icon-comment_toggle" onclick="return Rhodecode.comments.toggleLineComments(this)"></i>
559 <i title="${_('comments including outdated')}:${len(line_old_comments)}" class="icon-comment_toggle" onclick="return Rhodecode.comments.toggleLineComments(this)"></i>
560 % else:
560 % else:
561 <i title="${_('comments')}: ${len(line_old_comments)}" class="icon-comment" onclick="return Rhodecode.comments.toggleLineComments(this)"></i>
561 <i title="${_('comments')}: ${len(line_old_comments)}" class="icon-comment" onclick="return Rhodecode.comments.toggleLineComments(this)"></i>
562 % endif
562 % endif
563 %endif
563 %endif
564 </div>
564 </div>
565 </td>
565 </td>
566 <td class="cb-lineno ${action_class(line.original.action)}"
566 <td class="cb-lineno ${action_class(line.original.action)}"
567 data-line-no="${line.original.lineno}"
567 data-line-no="${line.original.lineno}"
568 %if old_line_anchor:
568 %if old_line_anchor:
569 id="${old_line_anchor}"
569 id="${old_line_anchor}"
570 %endif
570 %endif
571 >
571 >
572 %if line.original.lineno:
572 %if line.original.lineno:
573 <a name="${old_line_anchor}" href="#${old_line_anchor}">${line.original.lineno}</a>
573 <a name="${old_line_anchor}" href="#${old_line_anchor}">${line.original.lineno}</a>
574 %endif
574 %endif
575 </td>
575 </td>
576 <td class="cb-content ${action_class(line.original.action)}"
576 <td class="cb-content ${action_class(line.original.action)}"
577 data-line-no="o${line.original.lineno}"
577 data-line-no="o${line.original.lineno}"
578 >
578 >
579 %if use_comments and line.original.lineno:
579 %if use_comments and line.original.lineno:
580 ${render_add_comment_button()}
580 ${render_add_comment_button()}
581 %endif
581 %endif
582 <span class="cb-code"><span class="cb-action ${action_class(line.original.action)}"></span>${line.original.content or '' | n}</span>
582 <span class="cb-code"><span class="cb-action ${action_class(line.original.action)}"></span>${line.original.content or '' | n}</span>
583
583
584 %if use_comments and line.original.lineno and line_old_comments:
584 %if use_comments and line.original.lineno and line_old_comments:
585 ${inline_comments_container(line_old_comments, inline_comments)}
585 ${inline_comments_container(line_old_comments, inline_comments)}
586 %endif
586 %endif
587
587
588 </td>
588 </td>
589 <td class="cb-data ${action_class(line.modified.action)}"
589 <td class="cb-data ${action_class(line.modified.action)}"
590 data-line-no="${line.modified.lineno}"
590 data-line-no="${line.modified.lineno}"
591 >
591 >
592 <div>
592 <div>
593
593
594 %if line.modified.get_comment_args:
594 %if line.modified.get_comment_args:
595 <% line_new_comments = get_comments_for('side-by-side', inline_comments, *line.modified.get_comment_args) %>
595 <% line_new_comments = get_comments_for('side-by-side', inline_comments, *line.modified.get_comment_args) %>
596 %else:
596 %else:
597 <% line_new_comments = None%>
597 <% line_new_comments = None%>
598 %endif
598 %endif
599 %if line_new_comments:
599 %if line_new_comments:
600 <% has_outdated = any([x.outdated for x in line_new_comments]) %>
600 <% has_outdated = any([x.outdated for x in line_new_comments]) %>
601 % if has_outdated:
601 % if has_outdated:
602 <i title="${_('comments including outdated')}:${len(line_new_comments)}" class="icon-comment_toggle" onclick="return Rhodecode.comments.toggleLineComments(this)"></i>
602 <i title="${_('comments including outdated')}:${len(line_new_comments)}" class="icon-comment_toggle" onclick="return Rhodecode.comments.toggleLineComments(this)"></i>
603 % else:
603 % else:
604 <i title="${_('comments')}: ${len(line_new_comments)}" class="icon-comment" onclick="return Rhodecode.comments.toggleLineComments(this)"></i>
604 <i title="${_('comments')}: ${len(line_new_comments)}" class="icon-comment" onclick="return Rhodecode.comments.toggleLineComments(this)"></i>
605 % endif
605 % endif
606 %endif
606 %endif
607 </div>
607 </div>
608 </td>
608 </td>
609 <td class="cb-lineno ${action_class(line.modified.action)}"
609 <td class="cb-lineno ${action_class(line.modified.action)}"
610 data-line-no="${line.modified.lineno}"
610 data-line-no="${line.modified.lineno}"
611 %if new_line_anchor:
611 %if new_line_anchor:
612 id="${new_line_anchor}"
612 id="${new_line_anchor}"
613 %endif
613 %endif
614 >
614 >
615 %if line.modified.lineno:
615 %if line.modified.lineno:
616 <a name="${new_line_anchor}" href="#${new_line_anchor}">${line.modified.lineno}</a>
616 <a name="${new_line_anchor}" href="#${new_line_anchor}">${line.modified.lineno}</a>
617 %endif
617 %endif
618 </td>
618 </td>
619 <td class="cb-content ${action_class(line.modified.action)}"
619 <td class="cb-content ${action_class(line.modified.action)}"
620 data-line-no="n${line.modified.lineno}"
620 data-line-no="n${line.modified.lineno}"
621 >
621 >
622 %if use_comments and line.modified.lineno:
622 %if use_comments and line.modified.lineno:
623 ${render_add_comment_button()}
623 ${render_add_comment_button()}
624 %endif
624 %endif
625 <span class="cb-code"><span class="cb-action ${action_class(line.modified.action)}"></span>${line.modified.content or '' | n}</span>
625 <span class="cb-code"><span class="cb-action ${action_class(line.modified.action)}"></span>${line.modified.content or '' | n}</span>
626 %if use_comments and line.modified.lineno and line_new_comments:
626 %if use_comments and line.modified.lineno and line_new_comments:
627 ${inline_comments_container(line_new_comments, inline_comments)}
627 ${inline_comments_container(line_new_comments, inline_comments)}
628 %endif
628 %endif
629 </td>
629 </td>
630 </tr>
630 </tr>
631 %endfor
631 %endfor
632 </%def>
632 </%def>
633
633
634
634
635 <%def name="render_hunk_lines_unified(filediff, hunk, use_comments=False, inline_comments=None)">
635 <%def name="render_hunk_lines_unified(filediff, hunk, use_comments=False, inline_comments=None)">
636 %for old_line_no, new_line_no, action, content, comments_args in hunk.unified:
636 %for old_line_no, new_line_no, action, content, comments_args in hunk.unified:
637
637
638 <%
638 <%
639 old_line_anchor, new_line_anchor = None, None
639 old_line_anchor, new_line_anchor = None, None
640 if old_line_no:
640 if old_line_no:
641 old_line_anchor = diff_line_anchor(filediff.raw_id, hunk.source_file_path, old_line_no, 'o')
641 old_line_anchor = diff_line_anchor(filediff.raw_id, hunk.source_file_path, old_line_no, 'o')
642 if new_line_no:
642 if new_line_no:
643 new_line_anchor = diff_line_anchor(filediff.raw_id, hunk.target_file_path, new_line_no, 'n')
643 new_line_anchor = diff_line_anchor(filediff.raw_id, hunk.target_file_path, new_line_no, 'n')
644 %>
644 %>
645 <tr class="cb-line">
645 <tr class="cb-line">
646 <td class="cb-data ${action_class(action)}">
646 <td class="cb-data ${action_class(action)}">
647 <div>
647 <div>
648
648
649 %if comments_args:
649 %if comments_args:
650 <% comments = get_comments_for('unified', inline_comments, *comments_args) %>
650 <% comments = get_comments_for('unified', inline_comments, *comments_args) %>
651 %else:
651 %else:
652 <% comments = None %>
652 <% comments = None %>
653 %endif
653 %endif
654
654
655 % if comments:
655 % if comments:
656 <% has_outdated = any([x.outdated for x in comments]) %>
656 <% has_outdated = any([x.outdated for x in comments]) %>
657 % if has_outdated:
657 % if has_outdated:
658 <i title="${_('comments including outdated')}:${len(comments)}" class="icon-comment_toggle" onclick="return Rhodecode.comments.toggleLineComments(this)"></i>
658 <i title="${_('comments including outdated')}:${len(comments)}" class="icon-comment_toggle" onclick="return Rhodecode.comments.toggleLineComments(this)"></i>
659 % else:
659 % else:
660 <i title="${_('comments')}: ${len(comments)}" class="icon-comment" onclick="return Rhodecode.comments.toggleLineComments(this)"></i>
660 <i title="${_('comments')}: ${len(comments)}" class="icon-comment" onclick="return Rhodecode.comments.toggleLineComments(this)"></i>
661 % endif
661 % endif
662 % endif
662 % endif
663 </div>
663 </div>
664 </td>
664 </td>
665 <td class="cb-lineno ${action_class(action)}"
665 <td class="cb-lineno ${action_class(action)}"
666 data-line-no="${old_line_no}"
666 data-line-no="${old_line_no}"
667 %if old_line_anchor:
667 %if old_line_anchor:
668 id="${old_line_anchor}"
668 id="${old_line_anchor}"
669 %endif
669 %endif
670 >
670 >
671 %if old_line_anchor:
671 %if old_line_anchor:
672 <a name="${old_line_anchor}" href="#${old_line_anchor}">${old_line_no}</a>
672 <a name="${old_line_anchor}" href="#${old_line_anchor}">${old_line_no}</a>
673 %endif
673 %endif
674 </td>
674 </td>
675 <td class="cb-lineno ${action_class(action)}"
675 <td class="cb-lineno ${action_class(action)}"
676 data-line-no="${new_line_no}"
676 data-line-no="${new_line_no}"
677 %if new_line_anchor:
677 %if new_line_anchor:
678 id="${new_line_anchor}"
678 id="${new_line_anchor}"
679 %endif
679 %endif
680 >
680 >
681 %if new_line_anchor:
681 %if new_line_anchor:
682 <a name="${new_line_anchor}" href="#${new_line_anchor}">${new_line_no}</a>
682 <a name="${new_line_anchor}" href="#${new_line_anchor}">${new_line_no}</a>
683 %endif
683 %endif
684 </td>
684 </td>
685 <td class="cb-content ${action_class(action)}"
685 <td class="cb-content ${action_class(action)}"
686 data-line-no="${(new_line_no and 'n' or 'o')}${(new_line_no or old_line_no)}"
686 data-line-no="${(new_line_no and 'n' or 'o')}${(new_line_no or old_line_no)}"
687 >
687 >
688 %if use_comments:
688 %if use_comments:
689 ${render_add_comment_button()}
689 ${render_add_comment_button()}
690 %endif
690 %endif
691 <span class="cb-code"><span class="cb-action ${action_class(action)}"></span> ${content or '' | n}</span>
691 <span class="cb-code"><span class="cb-action ${action_class(action)}"></span> ${content or '' | n}</span>
692 %if use_comments and comments:
692 %if use_comments and comments:
693 ${inline_comments_container(comments, inline_comments)}
693 ${inline_comments_container(comments, inline_comments)}
694 %endif
694 %endif
695 </td>
695 </td>
696 </tr>
696 </tr>
697 %endfor
697 %endfor
698 </%def>
698 </%def>
699
699
700
700
701 <%def name="render_hunk_lines(filediff, diff_mode, hunk, use_comments, inline_comments)">
701 <%def name="render_hunk_lines(filediff, diff_mode, hunk, use_comments, inline_comments)">
702 % if diff_mode == 'unified':
702 % if diff_mode == 'unified':
703 ${render_hunk_lines_unified(filediff, hunk, use_comments=use_comments, inline_comments=inline_comments)}
703 ${render_hunk_lines_unified(filediff, hunk, use_comments=use_comments, inline_comments=inline_comments)}
704 % elif diff_mode == 'sideside':
704 % elif diff_mode == 'sideside':
705 ${render_hunk_lines_sideside(filediff, hunk, use_comments=use_comments, inline_comments=inline_comments)}
705 ${render_hunk_lines_sideside(filediff, hunk, use_comments=use_comments, inline_comments=inline_comments)}
706 % else:
706 % else:
707 <tr class="cb-line">
707 <tr class="cb-line">
708 <td>unknown diff mode</td>
708 <td>unknown diff mode</td>
709 </tr>
709 </tr>
710 % endif
710 % endif
711 </%def>file changes
711 </%def>file changes
712
712
713
713
714 <%def name="render_add_comment_button()">
714 <%def name="render_add_comment_button()">
715 <button class="btn btn-small btn-primary cb-comment-box-opener" onclick="return Rhodecode.comments.createComment(this)">
715 <button class="btn btn-small btn-primary cb-comment-box-opener" onclick="return Rhodecode.comments.createComment(this)">
716 <span><i class="icon-comment"></i></span>
716 <span><i class="icon-comment"></i></span>
717 </button>
717 </button>
718 </%def>
718 </%def>
719
719
720 <%def name="render_diffset_menu(diffset=None, range_diff_on=None)">
720 <%def name="render_diffset_menu(diffset=None, range_diff_on=None)">
721
721
722 <div id="diff-file-sticky" class="diffset-menu clearinner">
722 <div id="diff-file-sticky" class="diffset-menu clearinner">
723 ## auto adjustable
723 ## auto adjustable
724 <div class="sidebar__inner">
724 <div class="sidebar__inner">
725 <div class="sidebar__bar">
725 <div class="sidebar__bar">
726 <div class="pull-right">
726 <div class="pull-right">
727 <div class="btn-group">
727 <div class="btn-group">
728
728
729 ## DIFF OPTIONS via Select2
729 ## DIFF OPTIONS via Select2
730 <div class="pull-left">
730 <div class="pull-left">
731 ${h.hidden('diff_menu')}
731 ${h.hidden('diff_menu')}
732 </div>
732 </div>
733
733
734 <a
734 <a
735 class="btn ${(c.user_session_attrs["diffmode"] == 'sideside' and 'btn-primary')} tooltip"
735 class="btn ${(c.user_session_attrs["diffmode"] == 'sideside' and 'btn-primary')} tooltip"
736 title="${h.tooltip(_('View side by side'))}"
736 title="${h.tooltip(_('View side by side'))}"
737 href="${h.current_route_path(request, diffmode='sideside')}">
737 href="${h.current_route_path(request, diffmode='sideside')}">
738 <span>${_('Side by Side')}</span>
738 <span>${_('Side by Side')}</span>
739 </a>
739 </a>
740
740
741 <a
741 <a
742 class="btn ${(c.user_session_attrs["diffmode"] == 'unified' and 'btn-primary')} tooltip"
742 class="btn ${(c.user_session_attrs["diffmode"] == 'unified' and 'btn-primary')} tooltip"
743 title="${h.tooltip(_('View unified'))}" href="${h.current_route_path(request, diffmode='unified')}">
743 title="${h.tooltip(_('View unified'))}" href="${h.current_route_path(request, diffmode='unified')}">
744 <span>${_('Unified')}</span>
744 <span>${_('Unified')}</span>
745 </a>
745 </a>
746
746
747 % if range_diff_on is True:
747 % if range_diff_on is True:
748 <a
748 <a
749 title="${_('Turn off: Show the diff as commit range')}"
749 title="${_('Turn off: Show the diff as commit range')}"
750 class="btn btn-primary"
750 class="btn btn-primary"
751 href="${h.current_route_path(request, **{"range-diff":"0"})}">
751 href="${h.current_route_path(request, **{"range-diff":"0"})}">
752 <span>${_('Range Diff')}</span>
752 <span>${_('Range Diff')}</span>
753 </a>
753 </a>
754 % elif range_diff_on is False:
754 % elif range_diff_on is False:
755 <a
755 <a
756 title="${_('Show the diff as commit range')}"
756 title="${_('Show the diff as commit range')}"
757 class="btn"
757 class="btn"
758 href="${h.current_route_path(request, **{"range-diff":"1"})}">
758 href="${h.current_route_path(request, **{"range-diff":"1"})}">
759 <span>${_('Range Diff')}</span>
759 <span>${_('Range Diff')}</span>
760 </a>
760 </a>
761 % endif
761 % endif
762 </div>
762 </div>
763 </div>
763 </div>
764 <div class="pull-left">
764 <div class="pull-left">
765 <div class="btn-group">
765 <div class="btn-group">
766 <div class="pull-left">
766 <div class="pull-left">
767 ${h.hidden('file_filter')}
767 ${h.hidden('file_filter')}
768 </div>
768 </div>
769 <a
769 <a
770 class="btn"
770 class="btn"
771 href="#"
771 href="#"
772 onclick="$('input[class=filediff-collapse-state]').prop('checked', false); updateSticky(); return false">${_('Expand All Files')}</a>
772 onclick="$('input[class=filediff-collapse-state]').prop('checked', false); updateSticky(); return false">${_('Expand All Files')}</a>
773 <a
773 <a
774 class="btn"
774 class="btn"
775 href="#"
775 href="#"
776 onclick="$('input[class=filediff-collapse-state]').prop('checked', true); updateSticky(); return false">${_('Collapse All Files')}</a>
776 onclick="$('input[class=filediff-collapse-state]').prop('checked', true); updateSticky(); return false">${_('Collapse All Files')}</a>
777 </div>
777 </div>
778 </div>
778 </div>
779 </div>
779 </div>
780 <div class="fpath-placeholder">
780 <div class="fpath-placeholder">
781 <i class="icon-file-text"></i>
781 <i class="icon-file-text"></i>
782 <strong class="fpath-placeholder-text">
782 <strong class="fpath-placeholder-text">
783 Context file:
783 Context file:
784 </strong>
784 </strong>
785 </div>
785 </div>
786 <div class="sidebar_inner_shadow"></div>
786 <div class="sidebar_inner_shadow"></div>
787 </div>
787 </div>
788 </div>
788 </div>
789
789
790 % if diffset:
790 % if diffset:
791
791
792 %if diffset.limited_diff:
792 %if diffset.limited_diff:
793 <% file_placeholder = _ungettext('%(num)s file changed', '%(num)s files changed', diffset.changed_files) % {'num': diffset.changed_files} %>
793 <% file_placeholder = _ungettext('%(num)s file changed', '%(num)s files changed', diffset.changed_files) % {'num': diffset.changed_files} %>
794 %else:
794 %else:
795 <% file_placeholder = _ungettext('%(num)s file changed: %(linesadd)s inserted, ''%(linesdel)s deleted', '%(num)s files changed: %(linesadd)s inserted, %(linesdel)s deleted', diffset.changed_files) % {'num': diffset.changed_files, 'linesadd': diffset.lines_added, 'linesdel': diffset.lines_deleted}%>
795 <% file_placeholder = _ungettext('%(num)s file changed: %(linesadd)s inserted, ''%(linesdel)s deleted', '%(num)s files changed: %(linesadd)s inserted, %(linesdel)s deleted', diffset.changed_files) % {'num': diffset.changed_files, 'linesadd': diffset.lines_added, 'linesdel': diffset.lines_deleted}%>
796 %endif
796 %endif
797     ## in the range-diff case, the placeholder needs to be updated
797     ## in the range-diff case, the placeholder needs to be updated
798 % if range_diff_on is True:
798 % if range_diff_on is True:
799 <% file_placeholder = _('Disabled on range diff') %>
799 <% file_placeholder = _('Disabled on range diff') %>
800 % endif
800 % endif
801
801
802 <script>
802 <script>
803
803
804 var feedFilesOptions = function (query, initialData) {
804 var feedFilesOptions = function (query, initialData) {
805 var data = {results: []};
805 var data = {results: []};
806 var isQuery = typeof query.term !== 'undefined';
806 var isQuery = typeof query.term !== 'undefined';
807
807
808 var section = _gettext('Changed files');
808 var section = _gettext('Changed files');
809 var filteredData = [];
809 var filteredData = [];
810
810
811 //filter results
811 //filter results
812 $.each(initialData.results, function (idx, value) {
812 $.each(initialData.results, function (idx, value) {
813
813
814 if (!isQuery || query.term.length === 0 || value.text.toUpperCase().indexOf(query.term.toUpperCase()) >= 0) {
814 if (!isQuery || query.term.length === 0 || value.text.toUpperCase().indexOf(query.term.toUpperCase()) >= 0) {
815 filteredData.push({
815 filteredData.push({
816 'id': this.id,
816 'id': this.id,
817 'text': this.text,
817 'text': this.text,
818 "ops": this.ops,
818 "ops": this.ops,
819 })
819 })
820 }
820 }
821
821
822 });
822 });
823
823
824 data.results = filteredData;
824 data.results = filteredData;
825
825
826 query.callback(data);
826 query.callback(data);
827 };
827 };
828
828
829 var formatFileResult = function(result, container, query, escapeMarkup) {
829 var formatFileResult = function(result, container, query, escapeMarkup) {
830 return function(data, escapeMarkup) {
830 return function(data, escapeMarkup) {
831 var container = '<div class="filelist" style="padding-right:100px">{0}</div>';
831 var container = '<div class="filelist" style="padding-right:100px">{0}</div>';
832 var tmpl = '<span style="margin-right:-50px"><strong>{0}</strong></span>'.format(escapeMarkup(data['text']));
832 var tmpl = '<span style="margin-right:-50px"><strong>{0}</strong></span>'.format(escapeMarkup(data['text']));
833 var pill = '<span class="pill-group" style="float: right;margin-right: -100px">' +
833 var pill = '<span class="pill-group" style="float: right;margin-right: -100px">' +
834 '<span class="pill" op="added">{0}</span>' +
834 '<span class="pill" op="added">{0}</span>' +
835 '<span class="pill" op="deleted">{1}</span>' +
835 '<span class="pill" op="deleted">{1}</span>' +
836 '</span>'
836 '</span>'
837 ;
837 ;
838 var added = data['ops']['added'];
838 var added = data['ops']['added'];
839 if (added === 0) {
839 if (added === 0) {
840 // don't show +0
840 // don't show +0
841 added = 0;
841 added = 0;
842 } else {
842 } else {
843 added = '+' + added;
843 added = '+' + added;
844 }
844 }
845
845
846 var deleted = -1*data['ops']['deleted'];
846 var deleted = -1*data['ops']['deleted'];
847
847
848 tmpl += pill.format(added, deleted);
848 tmpl += pill.format(added, deleted);
849 return container.format(tmpl);
849 return container.format(tmpl);
850
850
851 }(result, escapeMarkup);
851 }(result, escapeMarkup);
852 };
852 };
853
853
854 var preloadFileFilterData = {
854 var preloadFileFilterData = {
855 results: [
855 results: [
856 % for filediff in diffset.files:
856 % for filediff in diffset.files:
857 {id:"a_${h.FID(filediff.raw_id, filediff.patch['filename'])}",
857 {id:"a_${h.FID(filediff.raw_id, filediff.patch['filename'])}",
858 text:"${filediff.patch['filename']}",
858 text:"${filediff.patch['filename']}",
859 ops:${h.json.dumps(filediff.patch['stats'])|n}}${('' if loop.last else ',')}
859 ops:${h.json.dumps(filediff.patch['stats'])|n}}${('' if loop.last else ',')}
860 % endfor
860 % endfor
861 ]
861 ]
862 };
862 };
863
863
864 $(document).ready(function () {
864 $(document).ready(function () {
865
865
866 var fileFilter = $("#file_filter").select2({
866 var fileFilter = $("#file_filter").select2({
867 'dropdownAutoWidth': true,
867 'dropdownAutoWidth': true,
868 'width': 'auto',
868 'width': 'auto',
869 'placeholder': "${file_placeholder}",
869 'placeholder': "${file_placeholder}",
870 containerCssClass: "drop-menu",
870 containerCssClass: "drop-menu",
871 dropdownCssClass: "drop-menu-dropdown",
871 dropdownCssClass: "drop-menu-dropdown",
872 data: preloadFileFilterData,
872 data: preloadFileFilterData,
873 query: function(query) {
873 query: function(query) {
874 feedFilesOptions(query, preloadFileFilterData);
874 feedFilesOptions(query, preloadFileFilterData);
875 },
875 },
876 formatResult: formatFileResult
876 formatResult: formatFileResult
877 });
877 });
878
878
879 % if range_diff_on is True:
879 % if range_diff_on is True:
880 fileFilter.select2("enable", false);
880 fileFilter.select2("enable", false);
881 % endif
881 % endif
882
882
883 $("#file_filter").on('click', function (e) {
883 $("#file_filter").on('click', function (e) {
884 e.preventDefault();
884 e.preventDefault();
885 var selected = $('#file_filter').select2('data');
885 var selected = $('#file_filter').select2('data');
886 var idSelector = "#"+selected.id;
886 var idSelector = "#"+selected.id;
887 window.location.hash = idSelector;
887 window.location.hash = idSelector;
888 // expand the container if we quick-select the field
888 // expand the container if we quick-select the field
889 $(idSelector).next().prop('checked', false);
889 $(idSelector).next().prop('checked', false);
890 updateSticky()
890 updateSticky()
891 });
891 });
892
893 var contextPrefix = _gettext('Context file: ');
894 ## sticky sidebar
895 var sidebarElement = document.getElementById('diff-file-sticky');
896 sidebar = new StickySidebar(sidebarElement, {
897 topSpacing: 0,
898 bottomSpacing: 0,
899 innerWrapperSelector: '.sidebar__inner'
900 });
901 sidebarElement.addEventListener('affixed.static.stickySidebar', function () {
902 // reset our file so it's not holding new value
903 $('.fpath-placeholder-text').html(contextPrefix)
904 });
905
906 updateSticky = function () {
907 sidebar.updateSticky();
908 Waypoint.refreshAll();
909 };
910
911 var animateText = $.debounce(100, function(fPath, anchorId) {
912 fPath = Select2.util.escapeMarkup(fPath);
913
914 // animate setting the text
915 var callback = function () {
916 $('.fpath-placeholder-text').animate({'opacity': 1.00}, 200)
917 $('.fpath-placeholder-text').html(contextPrefix + '<a href="#a_' + anchorId + '">' + fPath + '</a>')
918 };
919 $('.fpath-placeholder-text').animate({'opacity': 0.15}, 200, callback);
920 });
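$.debounce(100, fn) takes the delay as its first argument, which matches the delay-first jQuery throttle/debounce helper shipped with RhodeCode's static assets; that is inferred from the call shape, since the plugin itself is not part of this diff. If that helper were missing, a plain stand-in with the same call shape could look like:

    // Sketch of a delay-first debounce matching the $.debounce(100, fn) usage above.
    function debounceSketch(delayMs, fn) {
        var timer = null;
        return function () {
            var args = arguments, self = this;
            clearTimeout(timer);
            timer = setTimeout(function () { fn.apply(self, args); }, delayMs);
        };
    }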
921
922 ## dynamic file waypoints
923 var setFPathInfo = function(fPath, anchorId){
924 animateText(fPath, anchorId)
925 };
926
927 var codeBlock = $('.filediff');
928 // forward waypoint
929 codeBlock.waypoint(
930 function(direction) {
931 if (direction === "down"){
932 setFPathInfo($(this.element).data('fPath'), $(this.element).data('anchorId'))
933 }
934 }, {
935 offset: 70,
936 context: '.fpath-placeholder'
937 }
938 );
939
940 // backward waypoint
941 codeBlock.waypoint(
942 function(direction) {
943 if (direction === "up"){
944 setFPathInfo($(this.element).data('fPath'), $(this.element).data('anchorId'))
945 }
946 }, {
947 offset: function () {
948 return -this.element.clientHeight + 90
949 },
950 context: '.fpath-placeholder'
951 }
952 );
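Both waypoints expect every .filediff element to carry data-f-path and data-anchor-id attributes (rendered earlier in this template, outside the excerpt) so that $(this.element).data('fPath') and .data('anchorId') resolve. A quick check along these lines, offered only as a sketch and not as part of the template, would confirm that wiring:

    // Sketch: warn about .filediff blocks missing the data attributes the waypoints rely on.
    // The attribute names are inferred from the .data() calls above.
    $('.filediff').each(function () {
        var fPath = $(this).data('fPath');        // from data-f-path="..."
        var anchorId = $(this).data('anchorId');  // from data-anchor-id="..."
        if (fPath === undefined || anchorId === undefined) {
            console.warn('filediff block missing waypoint data attributes', this);
        }
    });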
953
954 var preloadDiffMenuData = {
955 results: [
956 ## Wide diff mode
957 {
958 id: 1,
959 text: _gettext('Toggle Wide Mode diff'),
960 action: function () {
961 updateSticky();
962 Rhodecode.comments.toggleWideMode(this);
963 return null;
964 },
965 url: null,
966 },
967
968 ## Whitespace change
969 % if request.GET.get('ignorews', '') == '1':
970 {
971 id: 2,
972 text: _gettext('Show whitespace changes'),
973 action: function () {},
974 url: "${h.current_route_path(request, ignorews=0)|n}"
975 },
976 % else:
977 {
978 id: 2,
979 text: _gettext('Hide whitespace changes'),
980 action: function () {},
981 url: "${h.current_route_path(request, ignorews=1)|n}"
982 },
983 % endif
984
985 ## FULL CONTEXT
986 % if request.GET.get('fullcontext', '') == '1':
987 {
988 id: 3,
989 text: _gettext('Hide full context diff'),
990 action: function () {},
991 url: "${h.current_route_path(request, fullcontext=0)|n}"
992 },
993 % else:
994 {
995 id: 3,
996 text: _gettext('Show full context diff'),
997 action: function () {},
998 url: "${h.current_route_path(request, fullcontext=1)|n}"
999 },
1000 % endif
1001
1002 ]
1003 };
1004
1005 $("#diff_menu").select2({
1006 minimumResultsForSearch: -1,
1007 containerCssClass: "drop-menu",
1008 dropdownCssClass: "drop-menu-dropdown",
1009 dropdownAutoWidth: true,
1010 data: preloadDiffMenuData,
1011 placeholder: "${_('Diff Options')}",
1012 });
1013 $("#diff_menu").on('select2-selecting', function (e) {
1014 e.choice.action();
1015 if (e.choice.url !== null) {
1016 window.location = e.choice.url
1017 }
1018 });
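The select2-selecting handler above drives both kinds of menu entries: items with a client-side action and url: null stay on the page, while items with a non-null url navigate after their (possibly empty) action runs. As a usage illustration, a hypothetical extra entry would follow the same shape; the id, label, and checkbox class below are invented for the example and do not come from this template:

    // Hypothetical menu entry, shown only to illustrate the action/url contract.
    {
        id: 4,
        text: _gettext('Collapse all files'),
        action: function () {
            $('.filediff-collapse-state').prop('checked', true);  // assumed checkbox class
            updateSticky();
            return null;
        },
        url: null
    }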
1019
1020 });
1021
1022 </script>
1023 % endif
1024
1025 </%def>
NO CONTENT: modified file (the requested commit or file is too big and content was truncated)
NO CONTENT: modified file (the requested commit or file is too big and content was truncated)