diffs: don't use highlite on the new ops lines

Author: marcink
Revision: r3082:25ff4b81 (branch: default)
@@ -20,12 +20,12 @@
 
 import logging
 import difflib
-import string
 from itertools import groupby
 
 from pygments import lex
 from pygments.formatters.html import _get_ttype_class as pygment_token_class
 from pygments.lexers.special import TextLexer, Token
+from pygments.lexers import get_lexer_by_name
 
 from rhodecode.lib.helpers import (
     get_lexer_for_filenode, html_escape, get_custom_lexer)
@@ -34,7 +34,7 @@ from rhodecode.lib.vcs.nodes import File
 from rhodecode.lib.vcs.exceptions import VCSError, NodeDoesNotExistError
 from rhodecode.lib.diff_match_patch import diff_match_patch
 from rhodecode.lib.diffs import LimitedDiffContainer, DEL_FILENODE, BIN_FILENODE
-from pygments.lexers import get_lexer_by_name
+
 
 plain_text_lexer = get_lexer_by_name(
     'text', stripall=False, stripnl=False, ensurenl=False)
@@ -307,7 +307,7 @@ def tokens_diff(old_tokens, new_tokens,
 
     if use_diff_match_patch:
         dmp = diff_match_patch()
         dmp.Diff_EditCost = 11  # TODO: dan: extract this to a setting
         reps = dmp.diff_main(old_string, new_string)
         dmp.diff_cleanupEfficiency(reps)
 
@@ -449,7 +449,10 @@ class DiffSet(object):
         target_lexer = plain_text_lexer
 
         if not patch['stats']['binary']:
-            if self.highlight_mode == self.HL_REAL:
+            node_hl_mode = self.HL_NONE if patch['chunks'] == [] else None
+            hl_mode = node_hl_mode or self.highlight_mode
+
+            if hl_mode == self.HL_REAL:
                 if (source_filename and patch['operation'] in ('D', 'M')
                     and source_filename not in self.source_nodes):
                     self.source_nodes[source_filename] = (
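
For readers following the change: the new node_hl_mode/hl_mode pair forces highlighting off for patches that carry no diff chunks (deleted or binary files, which only render the synthetic ops line). A minimal standalone sketch of that selection, using illustrative HL_* values and a hypothetical helper rather than the real DiffSet constants and methods:

# Illustrative sketch only; HL_* values and pick_hl_mode() are stand-ins,
# not part of the RhodeCode API.
HL_REAL, HL_FAST, HL_NONE = 'hl-real', 'hl-fast', 'hl-none'

def pick_hl_mode(patch, configured_mode=HL_REAL):
    # Chunk-less patches (deleted or binary files that only get the
    # synthetic ops line) skip real highlighting entirely.
    if patch['stats']['binary'] or patch['chunks'] == []:
        return HL_NONE
    return configured_mode

# A deleted file arrives with no chunks, so it falls back to HL_NONE:
assert pick_hl_mode({'stats': {'binary': False}, 'chunks': []}) == HL_NONE
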
@@ -460,7 +463,7 @@ class DiffSet(object):
                     self.target_nodes[target_filename] = (
                         self.target_node_getter(target_filename))
 
-            elif self.highlight_mode == self.HL_FAST:
+            elif hl_mode == self.HL_FAST:
                 source_lexer = self._get_lexer_for_filename(source_filename)
                 target_lexer = self._get_lexer_for_filename(target_filename)
 
@@ -510,8 +513,8 @@ class DiffSet(object):
             'hunk_ops': None,
             'diffset': self,
         })
-
-        for hunk in patch['chunks'][1:]:
+        file_chunks = patch['chunks'][1:]
+        for hunk in file_chunks:
             hunkbit = self.parse_hunk(hunk, source_file, target_file)
             hunkbit.source_file_path = source_file_path
             hunkbit.target_file_path = target_file_path
@@ -519,28 +522,29 @@ class DiffSet(object):
 
         # Simulate hunk on OPS type line which doesn't really contain any diff
         # this allows commenting on those
-        actions = []
-        for op_id, op_text in filediff.patch['stats']['ops'].items():
-            if op_id == DEL_FILENODE:
-                actions.append(u'file was deleted')
-            elif op_id == BIN_FILENODE:
-                actions.append(u'binary diff hidden')
-            else:
-                actions.append(safe_unicode(op_text))
-        action_line = u'FILE WITHOUT CONTENT: ' + \
-            u', '.join(map(string.upper, actions)) or u'UNDEFINED_ACTION'
-
-        hunk_ops = {'source_length': 0, 'source_start': 0,
-                    'lines': [
-                        {'new_lineno': 0, 'old_lineno': 1,
-                         'action': 'unmod', 'line': action_line}
-                    ],
-                    'section_header': u'', 'target_start': 1, 'target_length': 1}
-
-        hunkbit = self.parse_hunk(hunk_ops, source_file, target_file)
-        hunkbit.source_file_path = source_file_path
-        hunkbit.target_file_path = target_file_path
-        filediff.hunk_ops = hunkbit
+        if not file_chunks:
+            actions = []
+            for op_id, op_text in filediff.patch['stats']['ops'].items():
+                if op_id == DEL_FILENODE:
+                    actions.append(u'file was deleted')
+                elif op_id == BIN_FILENODE:
+                    actions.append(u'binary diff hidden')
+                else:
+                    actions.append(safe_unicode(op_text))
+            action_line = u'NO CONTENT: ' + \
+                u', '.join(actions) or u'UNDEFINED_ACTION'
+
+            hunk_ops = {'source_length': 0, 'source_start': 0,
+                        'lines': [
+                            {'new_lineno': 0, 'old_lineno': 1,
+                             'action': 'unmod-no-hl', 'line': action_line}
+                        ],
+                        'section_header': u'', 'target_start': 1, 'target_length': 1}
+
+            hunkbit = self.parse_hunk(hunk_ops, source_file, target_file)
+            hunkbit.source_file_path = source_file_path
+            hunkbit.target_file_path = target_file_path
+            filediff.hunk_ops = hunkbit
         return filediff
 
     def parse_hunk(self, hunk, source_file, target_file):
@@ -555,10 +559,10 @@ class DiffSet(object):
         before, after = [], []
 
         for line in hunk['lines']:
-
-            if line['action'] == 'unmod':
+            if line['action'] in ['unmod', 'unmod-no-hl']:
+                no_hl = line['action'] == 'unmod-no-hl'
                 result.lines.extend(
-                    self.parse_lines(before, after, source_file, target_file))
+                    self.parse_lines(before, after, source_file, target_file, no_hl=no_hl))
                 after.append(line)
                 before.append(line)
             elif line['action'] == 'add':
@@ -570,14 +574,18 @@ class DiffSet(object):
             elif line['action'] == 'new-no-nl':
                 after.append(line)
 
+        all_actions = [x['action'] for x in after] + [x['action'] for x in before]
+        no_hl = {x for x in all_actions} == {'unmod-no-hl'}
         result.lines.extend(
-            self.parse_lines(before, after, source_file, target_file))
+            self.parse_lines(before, after, source_file, target_file, no_hl=no_hl))
+        # NOTE(marcink): we must keep list() call here so we can cache the result...
         result.unified = list(self.as_unified(result.lines))
         result.sideside = result.lines
 
         return result
 
-    def parse_lines(self, before_lines, after_lines, source_file, target_file):
+    def parse_lines(self, before_lines, after_lines, source_file, target_file,
+                    no_hl=False):
         # TODO: dan: investigate doing the diff comparison and fast highlighting
         # on the entire before and after buffered block lines rather than by
         # line, this means we can get better 'fast' highlighting if the context
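
For readers following the change: the set comparison introduced above treats a buffered block as highlight-free only when every line carries the new 'unmod-no-hl' action, which in practice is exactly the synthetic ops hunk built earlier in this patch. A tiny illustrative check of that rule (not part of the commit):

# Illustrative only: a block is rendered without highlighting
# iff every buffered line action is 'unmod-no-hl'.
def hunk_is_no_hl(lines):
    return {line['action'] for line in lines} == {'unmod-no-hl'}

assert hunk_is_no_hl([{'action': 'unmod-no-hl'}])
assert not hunk_is_no_hl([{'action': 'unmod-no-hl'}, {'action': 'add'}])
assert not hunk_is_no_hl([])  # an empty block keeps normal highlighting
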
@@ -621,9 +629,8 @@ class DiffSet(object):
                     before_tokens = [('nonl', before['line'])]
                 else:
                     before_tokens = self.get_line_tokens(
-                        line_text=before['line'],
-                        line_number=before['old_lineno'],
-                        file=source_file)
+                        line_text=before['line'], line_number=before['old_lineno'],
+                        input_file=source_file, no_hl=no_hl)
                 original.lineno = before['old_lineno']
                 original.content = before['line']
                 original.action = self.action_to_op(before['action'])
@@ -637,13 +644,12 @@ class DiffSet(object):
                 else:
                     after_tokens = self.get_line_tokens(
                         line_text=after['line'], line_number=after['new_lineno'],
-                        file=target_file)
+                        input_file=target_file, no_hl=no_hl)
                 modified.lineno = after['new_lineno']
                 modified.content = after['line']
                 modified.action = self.action_to_op(after['action'])
 
-                modified.get_comment_args = (
-                    target_file, 'n', after['new_lineno'])
+                modified.get_comment_args = (target_file, 'n', after['new_lineno'])
 
             # diff the lines
             if before_tokens and after_tokens:
@@ -672,24 +678,25 @@ class DiffSet(object):
 
         return lines
 
-    def get_line_tokens(self, line_text, line_number, file=None):
+    def get_line_tokens(self, line_text, line_number, input_file=None, no_hl=False):
         filenode = None
         filename = None
 
-        if isinstance(file, basestring):
-            filename = file
-        elif isinstance(file, FileNode):
-            filenode = file
-            filename = file.unicode_path
+        if isinstance(input_file, basestring):
+            filename = input_file
+        elif isinstance(input_file, FileNode):
+            filenode = input_file
+            filename = input_file.unicode_path
 
-        if self.highlight_mode == self.HL_REAL and filenode:
+        hl_mode = self.HL_NONE if no_hl else self.highlight_mode
+        if hl_mode == self.HL_REAL and filenode:
             lexer = self._get_lexer_for_filename(filename)
-            file_size_allowed = file.size < self.max_file_size_limit
+            file_size_allowed = input_file.size < self.max_file_size_limit
             if line_number and file_size_allowed:
                 return self.get_tokenized_filenode_line(
-                    file, line_number, lexer)
+                    input_file, line_number, lexer)
 
-        if self.highlight_mode in (self.HL_REAL, self.HL_FAST) and filename:
+        if hl_mode in (self.HL_REAL, self.HL_FAST) and filename:
             lexer = self._get_lexer_for_filename(filename)
             return list(tokenize_string(line_text, lexer))
 
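
For readers following the change: with no_hl=True the per-line tokenizer behaves as if HL_NONE were configured, so both highlighting branches above are skipped and the line falls through to the plain-text path. A rough sketch of that fall-through using simplified stand-in names (not the RhodeCode API):

# Rough sketch only; line_tokens() and the HL_* values are stand-ins.
HL_REAL, HL_FAST, HL_NONE = 'hl-real', 'hl-fast', 'hl-none'

def line_tokens(line_text, highlight_mode=HL_REAL, no_hl=False):
    hl_mode = HL_NONE if no_hl else highlight_mode
    if hl_mode in (HL_REAL, HL_FAST):
        # the real code would pick a pygments lexer and tokenize here
        return [('highlighted', line_text)]
    # HL_NONE: one plain token, no lexing work at all
    return [('', line_text)]

# The synthetic ops line is rendered without any highlighting:
assert line_tokens(u'NO CONTENT: file was deleted', no_hl=True) == \
    [('', u'NO CONTENT: file was deleted')]
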
@@ -707,6 +714,7 @@ class DiffSet(object):
             'add': '+',
             'del': '-',
             'unmod': ' ',
+            'unmod-no-hl': ' ',
             'old-no-nl': ' ',
             'new-no-nl': ' ',
         }.get(action, action)
@@ -1136,7 +1136,7 @@ class DiffLimitExceeded(Exception):
 
 # NOTE(marcink): if diffs.mako change, probably this
 # needs a bump to next version
-CURRENT_DIFF_VERSION = 'v1'
+CURRENT_DIFF_VERSION = 'v2'
 
 
 def _cleanup_cache_file(cached_diff_file):
@@ -653,7 +653,7 @@ def get_comments_for(diff_type, comments
 %if comments_args:
     <% comments = get_comments_for('unified', inline_comments, *comments_args) %>
 %else:
-    <% comments = None%>
+    <% comments = None %>
 %endif
 
 % if comments: