@@ -40,8 +40,10 @@ log = logging.getLogger()
 
 
 def filenode_as_lines_tokens(filenode, lexer=None):
+    org_lexer = lexer
     lexer = lexer or get_lexer_for_filenode(filenode)
-    log.debug('Generating file node pygment tokens for %s, %s', lexer, filenode)
+    log.debug('Generating file node pygment tokens for %s, %s, org_lexer:%s',
+              lexer, filenode, org_lexer)
     tokens = tokenize_string(filenode.content, lexer)
     lines = split_token_stream(tokens, split_string='\n')
     rv = list(lines)
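For context on what this function produces: tokenize_string runs the file content through a pygments lexer, and split_token_stream regroups the flat token stream into one token list per line. A minimal self-contained sketch of that regrouping, using pygments directly (lines_of_tokens is a hypothetical stand-in for the tokenize_string + split_token_stream pair, not a RhodeCode helper):

    from pygments import lex
    from pygments.lexers import PythonLexer

    def lines_of_tokens(content, lexer):
        # regroup the flat (token_type, value) stream into per-line lists;
        # a single token value may itself contain newlines, so split each one
        lines, current = [], []
        for token_type, value in lex(content, lexer):
            parts = value.split('\n')
            for part in parts[:-1]:
                current.append((token_type, part))
                lines.append(current)
                current = []
            if parts[-1]:
                current.append((token_type, parts[-1]))
        if current:
            lines.append(current)
        return lines

    # two lines of source -> two lists of (token_type, text) pairs
    tokens_per_line = lines_of_tokens("x = 1\ny = 2\n", PythonLexer())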
@@ -398,10 +400,14 @@ class DiffSet(object):
         return diffset
 
     _lexer_cache = {}
-    def _get_lexer_for_filename(self, filename):
+    def _get_lexer_for_filename(self, filename, filenode=None):
         # cached because we might need to call it twice for source/target
         if filename not in self._lexer_cache:
-            self._lexer_cache[filename] = get_lexer_safe(filepath=filename)
+            if filenode:
+                lexer = filenode.lexer
+            else:
+                lexer = get_lexer_safe(filepath=filename)
+            self._lexer_cache[filename] = lexer
         return self._lexer_cache[filename]
 
     def render_patch(self, patch):
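The pattern here is plain memoization keyed by filename, so the potentially expensive lexer lookup runs at most once even when the same path appears as both source and target of a diff. A rough equivalent using pygments directly (get_lexer_safe is RhodeCode's own wrapper; the plain-text fallback below is an assumption about its behaviour):

    from pygments.lexers import get_lexer_for_filename
    from pygments.lexers.special import TextLexer
    from pygments.util import ClassNotFound

    _lexer_cache = {}

    def lexer_for(filename, filenode=None):
        if filename not in _lexer_cache:
            if filenode is not None:
                lexer = filenode.lexer          # trust the node's own lexer
            else:
                try:
                    lexer = get_lexer_for_filename(filename)
                except ClassNotFound:
                    lexer = TextLexer()         # plain-text fallback
            _lexer_cache[filename] = lexer
        return _lexer_cache[filename]

Note that the cache key stays the filename even when a filenode is passed, so whichever variant is called first for a given path decides the cached lexer.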
@@ -439,10 +445,15 @@ class DiffSet(object):
         # done can allow caching a lexer for a filenode to avoid the file lookup
         if isinstance(source_file, FileNode):
             source_filenode = source_file
-            source_lexer = source_file.lexer
+            #source_lexer = source_file.lexer
+            source_lexer = self._get_lexer_for_filename(source_filename)
+            source_file.lexer = source_lexer
+
         if isinstance(target_file, FileNode):
             target_filenode = target_file
-            target_lexer = target_file.lexer
+            #target_lexer = target_file.lexer
+            target_lexer = self._get_lexer_for_filename(target_filename)
+            target_file.lexer = target_lexer
 
         source_file_path, target_file_path = None, None
 
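Assigning the cached lexer back onto source_file.lexer / target_file.lexer works because FileNode.lexer is computed lazily; pre-seeding the attribute means any later read skips the content sniffing entirely. A sketch of that shape (a hypothetical FileNode for illustration, not the real class from rhodecode.lib.vcs):

    from pygments.lexers import guess_lexer
    from pygments.lexers.special import TextLexer
    from pygments.util import ClassNotFound

    class FileNode(object):
        def __init__(self, content):
            self.content = content
            self._lexer = None

        @property
        def lexer(self):
            if self._lexer is None:
                try:
                    # expensive: guess the language from the file content
                    self._lexer = guess_lexer(self.content)
                except ClassNotFound:
                    self._lexer = TextLexer()
            return self._lexer

        @lexer.setter
        def lexer(self, value):
            # lets DiffSet inject a filename-based lexer and skip guessing
            self._lexer = value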
@@ -619,8 +630,11 @@ class DiffSet(object):
         filename = file.unicode_path
 
         if self.highlight_mode == self.HL_REAL and filenode:
-            if line_number and file.size < self.max_file_size_limit:
-                return self.get_tokenized_filenode_line(file, line_number)
+            lexer = self._get_lexer_for_filename(filename)
+            file_size_allowed = file.size < self.max_file_size_limit
+            if line_number and file_size_allowed:
+                return self.get_tokenized_filenode_line(
+                    file, line_number, lexer)
 
         if self.highlight_mode in (self.HL_REAL, self.HL_FAST) and filename:
             lexer = self._get_lexer_for_filename(filename)
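The restructured branch makes the gate explicit: full-file highlighting is attempted only in HL_REAL mode, when the diff line number is known, and when the file is below max_file_size_limit; anything else falls through to the per-line fast path below. The decision, isolated as a sketch (the mode string and the size cap here are illustrative, not the real DiffSet constants):

    def use_full_file_highlight(mode, filenode, line_number,
                                max_size=512 * 1024):  # assumed cap
        return (mode == 'HL_REAL'
                and filenode is not None
                and bool(line_number)
                and filenode.size < max_size)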
@@ -628,10 +642,10 @@ class DiffSet(object):
 
         return list(tokenize_string(line_text, plain_text_lexer))
 
-    def get_tokenized_filenode_line(self, filenode, line_number):
+    def get_tokenized_filenode_line(self, filenode, line_number, lexer=None):
 
         if filenode not in self.highlighted_filenodes:
-            tokenized_lines = filenode_as_lines_tokens(filenode, filenode.lexer)
+            tokenized_lines = filenode_as_lines_tokens(filenode, lexer)
             self.highlighted_filenodes[filenode] = tokenized_lines
         return self.highlighted_filenodes[filenode][line_number - 1]
 
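get_tokenized_filenode_line memoizes one full tokenization per filenode and then serves individual lines from the cache; note the 1-based diff line number indexing into the 0-based cached list. A standalone sketch reusing lines_of_tokens from the first example (the filenode must be hashable for the dict cache to work):

    _highlighted = {}

    def tokenized_line(filenode, line_number, lexer=None):
        if filenode not in _highlighted:
            # tokenize the whole file once, then reuse for every diff line
            _highlighted[filenode] = lines_of_tokens(
                filenode.content, lexer or filenode.lexer)
        # diff line numbers are 1-based; the cached list is 0-based
        return _highlighted[filenode][line_number - 1]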