@@ -166,9 +166,15 @@ startup.import_repos = false
 ## change this to unique ID for security
 app_instance_uuid = rc-production
 
-## cut off limit for large diffs (size in bytes)
-cut_off_limit_diff = 1024000
-cut_off_limit_file = 256000
+## cut off limit for large diffs (size in bytes). If overall diff size on
+## commit, or pull request exceeds this limit this diff will be displayed
+## partially. E.g 512000 == 512Kb
+cut_off_limit_diff = 512000
+
+## cut off limit for large files inside diffs (size in bytes). Each individual
+## file inside diff which exceeds this limit will be displayed partially.
+## E.g 128000 == 128Kb
+cut_off_limit_file = 128000
 
 ## use cache version of scm repo everywhere
 vcs_full_cache = true
@@ -140,9 +140,15 @@ startup.import_repos = false
 ## change this to unique ID for security
 app_instance_uuid = rc-production
 
-## cut off limit for large diffs (size in bytes)
-cut_off_limit_diff = 1024000
-cut_off_limit_file = 256000
+## cut off limit for large diffs (size in bytes). If overall diff size on
+## commit, or pull request exceeds this limit this diff will be displayed
+## partially. E.g 512000 == 512Kb
+cut_off_limit_diff = 512000
+
+## cut off limit for large files inside diffs (size in bytes). Each individual
+## file inside diff which exceeds this limit will be displayed partially.
+## E.g 128000 == 128Kb
+cut_off_limit_file = 128000
 
 ## use cache version of scm repo everywhere
 vcs_full_cache = true
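Both ini files (the production and the development configs) get the same change: the overall diff cut-off is halved from 1024000 to 512000 bytes, and a separate per-file cut-off of 128000 bytes is documented alongside it. A minimal sketch of how the two limits are meant to interact, assuming a plain list of (path, raw_bytes) pairs; all names below are illustrative, not the real parser input:

# Illustrative only: mirrors the documented semantics of the two ini keys.
# Real enforcement lives in DiffProcessor; this helper is hypothetical.
CUT_OFF_LIMIT_DIFF = 512000   # whole diff, in bytes (512000 == 512Kb)
CUT_OFF_LIMIT_FILE = 128000   # each file inside the diff, in bytes

def partial_render_plan(files):
    """files: list of (path, raw_bytes); returns which parts get truncated."""
    total = 0
    plan = {}
    for path, raw in files:
        total += len(raw)
        # a single oversized file is shown partially...
        plan[path] = 'partial' if len(raw) > CUT_OFF_LIMIT_FILE else 'full'
    # ...and once the whole diff exceeds its budget, everything is cut short
    if total > CUT_OFF_LIMIT_DIFF:
        plan = dict((path, 'partial') for path in plan)
    return plan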
@@ -227,6 +227,7 @@ class DiffProcessor(object):
         self.parsed = False
         self.parsed_diff = []
 
+        log.debug('Initialized DiffProcessor with %s mode', format)
         if format == 'gitdiff':
             self.differ = self._highlight_line_difflib
             self._parser = self._parse_gitdiff
@@ -496,36 +497,26 @@ class DiffProcessor(object):
 
         return diff_container(sorted(_files, key=sorter))
 
-
-    # FIXME: NEWDIFFS: dan: this replaces the old _escaper function
-    def _process_line(self, string):
-        """
-        Process a diff line, checks the diff limit
-
-        :param string:
-        """
-
-        self.cur_diff_size += len(string)
-
+    def _check_large_diff(self):
+        log.debug('Diff exceeds current diff_limit of %s', self.diff_limit)
         if not self.show_full_diff and (self.cur_diff_size > self.diff_limit):
-            raise DiffLimitExceeded('Diff Limit Exceeded')
-
-        return safe_unicode(string)
+            raise DiffLimitExceeded('Diff Limit `%s` Exceeded', self.diff_limit)
 
     # FIXME: NEWDIFFS: dan: this replaces _parse_gitdiff
     def _new_parse_gitdiff(self, inline_diff=True):
         _files = []
+
+        # this can be overriden later to a LimitedDiffContainer type
         diff_container = lambda arg: arg
+
         for chunk in self._diff.chunks():
             head = chunk.header
             log.debug('parsing diff %r' % head)
 
-            diff = imap(self._process_line, chunk.diff.splitlines(1))
             raw_diff = chunk.raw
             limited_diff = False
             exceeds_limit = False
-            # if 'empty_file_to_modify_and_rename' in head['a_path']:
-            #     1/0
+
             op = None
             stats = {
                 'added': 0,
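The refactor replaces per-line accounting (`_process_line` added `len(string)` for every line it touched) with per-chunk accounting plus one reusable check. A stand-alone sketch of that flow, using a stub class rather than the real `DiffProcessor`:

# Stub sketch of the new accounting flow (SizeChecker is hypothetical):
# size is accumulated per file chunk, then checked in a single place.
class DiffLimitExceeded(Exception):
    pass

class SizeChecker(object):
    def __init__(self, diff_limit, show_full_diff=False):
        self.diff_limit = diff_limit
        self.show_full_diff = show_full_diff
        self.cur_diff_size = 0

    def _check_large_diff(self):
        # same shape as the method added in the hunk above
        if not self.show_full_diff and self.cur_diff_size > self.diff_limit:
            raise DiffLimitExceeded('Diff Limit `%s` Exceeded' % self.diff_limit)

checker = SizeChecker(diff_limit=512000)
checker.cur_diff_size += 600000   # one big chunk is enough to trip the check
try:
    checker._check_large_diff()
except DiffLimitExceeded:
    pass  # the caller downgrades to a partial diff here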
@@ -542,19 +533,22 @@ class DiffProcessor(object):
             if head['b_mode']:
                 stats['new_mode'] = head['b_mode']
 
+            # delete file
             if head['deleted_file_mode']:
                 op = OPS.DEL
                 stats['binary'] = True
                 stats['ops'][DEL_FILENODE] = 'deleted file'
 
+            # new file
             elif head['new_file_mode']:
                 op = OPS.ADD
                 stats['binary'] = True
                 stats['old_mode'] = None
                 stats['new_mode'] = head['new_file_mode']
                 stats['ops'][NEW_FILENODE] = 'new file %s' % head['new_file_mode']
-            else:  # modify operation, can be copy, rename or chmod
 
+            # modify operation, can be copy, rename or chmod
+            else:
                 # CHMOD
                 if head['new_mode'] and head['old_mode']:
                     op = OPS.MOD
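With the new comments in place, the branch order reads as a small classifier over the parsed diff header: deleted file first, then new file, then everything else treated as a modification (copy, rename or chmod). A toy version of that dispatch; the `OPS` values below are stand-ins for the real enum:

# Toy classification mirroring the branch order above.
OPS_DEL, OPS_ADD, OPS_MOD = 'DEL', 'ADD', 'MOD'

def classify(head):
    if head.get('deleted_file_mode'):
        return OPS_DEL              # delete file
    elif head.get('new_file_mode'):
        return OPS_ADD              # new file
    else:
        return OPS_MOD              # modify: copy, rename or chmod

assert classify({'new_file_mode': '100644'}) == OPS_ADD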
@@ -602,7 +596,27 @@ class DiffProcessor(object):
 
             # a real non-binary diff
             if head['a_file'] or head['b_file']:
+                diff = iter(chunk.diff.splitlines(1))
+
+                # append each file to the diff size
+                raw_chunk_size = len(raw_diff)
+
+                exceeds_limit = raw_chunk_size > self.file_limit
+                self.cur_diff_size += raw_chunk_size
+
                 try:
+                    # Check each file instead of the whole diff.
+                    # Diff will hide big files but still show small ones.
+                    # From the tests big files are fairly safe to be parsed
+                    # but the browser is the bottleneck.
+                    if not self.show_full_diff and exceeds_limit:
+                        log.debug('File `%s` exceeds current file_limit of %s',
+                                  safe_unicode(head['b_path']), self.file_limit)
+                        raise DiffLimitExceeded(
+                            'File Limit %s Exceeded', self.file_limit)
+
+                    self._check_large_diff()
+
                     raw_diff, chunks, _stats = self._new_parse_lines(diff)
                     stats['binary'] = False
                     stats['added'] = _stats[0]
@@ -610,22 +624,12 @@ class DiffProcessor(object):
                     # explicit mark that it's a modified file
                     if op == OPS.MOD:
                         stats['ops'][MOD_FILENODE] = 'modified file'
-                    exceeds_limit = len(raw_diff) > self.file_limit
-
-                    # changed from _escaper function so we validate size of
-                    # each file instead of the whole diff
-                    # diff will hide big files but still show small ones
-                    # from my tests, big files are fairly safe to be parsed
-                    # but the browser is the bottleneck
-                    if not self.show_full_diff and exceeds_limit:
-                        raise DiffLimitExceeded('File Limit Exceeded')
 
                 except DiffLimitExceeded:
                     diff_container = lambda _diff: \
                         LimitedDiffContainer(
                             self.diff_limit, self.cur_diff_size, _diff)
 
-                    exceeds_limit = len(raw_diff) > self.file_limit
                     limited_diff = True
                     chunks = []
 
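Taken together, these two hunks move all limit enforcement to the top of the `try` block: an oversized file raises before parsing starts, `_check_large_diff` guards the running total, and the `except` branch swaps the identity `diff_container` lambda for a `LimitedDiffContainer` wrapper so templates can tell a truncated result apart. A compressed sketch of that swap; the `LimitedDiffContainer` signature is copied from the hunk above, everything else is made up:

# Stub sketch of the container-swap on overflow.
class DiffLimitExceeded(Exception):
    pass

class LimitedDiffContainer(list):
    def __init__(self, diff_limit, cur_diff_size, diff):
        super(LimitedDiffContainer, self).__init__(diff)
        self.diff_limit = diff_limit
        self.cur_diff_size = cur_diff_size

diff_limit, cur_diff_size = 512000, 0
diff_container = lambda arg: arg            # identity until a limit trips
_files = []
try:
    cur_diff_size += 700000                 # pretend one chunk blew the budget
    if cur_diff_size > diff_limit:
        raise DiffLimitExceeded('Diff Limit `%s` Exceeded' % diff_limit)
except DiffLimitExceeded:
    diff_container = lambda _diff: LimitedDiffContainer(
        diff_limit, cur_diff_size, _diff)   # callers can detect this type

result = diff_container(_files)             # plain list or limited container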
@@ -636,19 +640,20 @@ class DiffProcessor(object):
                 stats['ops'][BIN_FILENODE] = 'binary diff hidden'
                 chunks = []
 
+            # Hide content of deleted node by setting empty chunks
             if chunks and not self.show_full_diff and op == OPS.DEL:
                 # if not full diff mode show deleted file contents
                 # TODO: anderson: if the view is not too big, there is no way
                 # to see the content of the file
                 chunks = []
 
-            chunks.insert(0,
-                          [{'old_lineno': '',
-                            'new_lineno': '',
-                            'action': Action.CONTEXT,
-                            'line': msg,
-                            } for _op, msg in stats['ops'].iteritems()
-                           if _op not in [MOD_FILENODE]])
+            chunks.insert(
+                0, [{'old_lineno': '',
+                     'new_lineno': '',
+                     'action': Action.CONTEXT,
+                     'line': msg,
+                     } for _op, msg in stats['ops'].iteritems()
+                    if _op not in [MOD_FILENODE]])
 
             original_filename = safe_unicode(head['a_path'])
             _files.append({
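The reindented `chunks.insert` prepends one synthetic context line per recorded file operation (new file, deleted file, binary hidden, and so on), skipping plain modifications so ordinary edits do not get a banner. A toy reproduction with stand-in constants (`Action.CONTEXT` and the `*_FILENODE` markers are real names from this module; the values here are guesses):

# Stand-in values; only the shape of the inserted pseudo-lines matters.
MOD_FILENODE, NEW_FILENODE = 1, 2
CONTEXT = 'context'  # stand-in for Action.CONTEXT

stats = {'ops': {NEW_FILENODE: 'new file 100644'}}
chunks = [[{'action': 'add', 'line': '+hello'}]]

chunks.insert(
    0, [{'old_lineno': '',
         'new_lineno': '',
         'action': CONTEXT,
         'line': msg,
         } for _op, msg in stats['ops'].items()
        if _op not in [MOD_FILENODE]])
# chunks[0] now carries one synthetic line: "new file 100644"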
@@ -664,7 +669,6 @@ class DiffProcessor(object):
                 'is_limited_diff': limited_diff,
             })
 
-
         sorter = lambda info: {OPS.ADD: 0, OPS.MOD: 1,
                                OPS.DEL: 2}.get(info['operation'])
 
@@ -766,18 +770,19 @@ class DiffProcessor(object):
         return ''.join(raw_diff), chunks, stats
 
     # FIXME: NEWDIFFS: dan: this replaces _parse_lines
-    def _new_parse_lines(self, diff):
+    def _new_parse_lines(self, diff_iter):
         """
         Parse the diff an return data for the template.
         """
 
-        lineiter = iter(diff)
         stats = [0, 0]
         chunks = []
         raw_diff = []
 
+        diff_iter = imap(lambda s: safe_unicode(s), diff_iter)
+
         try:
-            line = lineiter.next()
+            line = diff_iter.next()
 
             while line:
                 raw_diff.append(line)
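`_new_parse_lines` now takes the raw iterator and decodes lazily: `imap` wraps it so each `.next()` call converts exactly one line via `safe_unicode`, instead of the old approach of mapping `_process_line` over a whole chunk up front. A Python 2 sketch (matching the codebase); `safe_unicode` below is a simplified stand-in for rhodecode's helper of the same name:

# Python 2: imap is lazy, so decoding happens only as lines are consumed.
from itertools import imap

def safe_unicode(s):
    # simplified stand-in for rhodecode's safe_unicode helper
    return s.decode('utf-8', 'replace') if isinstance(s, str) else s

raw_lines = iter(['@@ -1,1 +1,1 @@\n', '-old\n', '+new\n'])
diff_iter = imap(safe_unicode, raw_lines)

line = diff_iter.next()   # each .next() decodes exactly one line
assert isinstance(line, unicode)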
@@ -808,7 +813,7 @@ class DiffProcessor(object):
                     old_end += old_line
                     new_end += new_line
 
-                    line = lineiter.next()
+                    line = diff_iter.next()
 
                     while old_line < old_end or new_line < new_end:
                         command = ' '
@@ -843,7 +848,7 @@ class DiffProcessor(object):
                         })
                         raw_diff.append(line)
 
-                        line = lineiter.next()
+                        line = diff_iter.next()
 
                         if self._newline_marker.match(line):
                             # we need to append to lines, since this is not
@@ -864,6 +869,7 @@ class DiffProcessor(object):
 
         except StopIteration:
             pass
+
         return ''.join(raw_diff), chunks, stats
 
     def _safe_id(self, idstring):
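The final hunk only adds a blank line, but it closes out a parser whose loop terminator is `StopIteration` itself: every `diff_iter.next()` may raise it, and the bare `except StopIteration: pass` treats end-of-input as normal completion. The shape, reduced to a few lines (hypothetical `parse` helper, Python 2):

def parse(diff_iter):
    raw_diff = []
    try:
        line = diff_iter.next()
        while line:
            raw_diff.append(line)
            line = diff_iter.next()   # raises StopIteration at end of input
    except StopIteration:
        pass                          # end of diff is completion, not an error
    return ''.join(raw_diff)

assert parse(iter(['a\n', 'b\n'])) == 'a\nb\n'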