@@ -821,6 +821,7 @@ class _PrevDeltaSearch(_BaseDeltaSearch)
     def _init_group(self):
         self.current_stage = _STAGE_PREV
         self.current_group = [self.target_rev - 1]
+        self.tested.update(self.current_group)

     def next_group(self, good_delta=None):
         self.current_stage = _STAGE_FULL
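Note on the hunk above: _PrevDeltaSearch._init_group now records its single candidate (target_rev - 1) in self.tested as soon as the group is built, so the bookkeeping no longer depends on the caller. Below is a minimal, hypothetical sketch of that pattern; the class name and everything except tested, current_group and target_rev are invented for illustration, this is not the revlog code itself.

class TinyPrevSearch:
    # Sketch: propose the previous revision as the only delta-base
    # candidate and mark it as tested up front, so later stages can
    # skip anything already present in `tested`.

    def __init__(self, target_rev):
        self.target_rev = target_rev
        self.tested = set()  # revisions already considered
        self.current_group = None
        self._init_group()

    def _init_group(self):
        self.current_group = [self.target_rev - 1]
        # record the candidate immediately instead of relying on callers
        self.tested.update(self.current_group)

search = TinyPrevSearch(42)
assert search.current_group == [41]
assert 41 in search.tested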
@@ -837,60 +838,60 @@ class _DeltaSearch(_BaseDeltaSearch):
         # Why search for delta base if we cannot use a delta base ?
         # also see issue6056
         assert self.revlog.delta_config.general_delta
-        self._candidates_iterator = self._candidate_groups()
+        self._candidates_iterator = self._refined_groups()
         self._last_good = None
-        self.current_group = self._candidates_iterator.send(self._last_good)
+        self._next_internal_group()
+
+    def _next_internal_group(self):
+        # self._internal_group can be larger than self.current_group
+        self._internal_idx = 0
+        group = self._candidates_iterator.send(self._last_good)
+        if group is not None:
+            group = self._pre_filter_candidate_revs(group)
+        self._internal_group = group
+        if self._internal_group is None:
+            self.current_group = None
+        elif len(self._internal_group) == 0:
+            self.next_group()
+        else:
+            chunk_size = self.revlog.delta_config.candidate_group_chunk_size
+            if chunk_size > 0:
+                self.current_group = self._internal_group[:chunk_size]
+                self._internal_idx += chunk_size
+            else:
+                self.current_group = self._internal_group
+                self._internal_idx += len(self.current_group)
+
+        self.tested.update(self.current_group)

     def next_group(self, good_delta=None):
+        old_good = self._last_good
         if good_delta is not None:
             self._last_good = good_delta.base
-        self.current_group = self._candidates_iterator.send(self._last_good)
-
-    def _candidate_groups(self):
-        """Provides group of revision to be tested as delta base
-
-        This top level function focus on emitting groups with unique and
-        worthwhile content. See _raw_candidate_groups for details about the
-        group order.
-        """
-        good = None
-
-        group_chunk_size = self.revlog.delta_config.candidate_group_chunk_size
-
-        tested = self.tested  # prefetch for speed and code compactness
-        candidates = self._refined_groups()
-        while True:
-            temptative = candidates.send(good)
-            if temptative is None:
-                break
-            group = self._pre_filter_candidate_revs(temptative)
-            if group:
-                # When the size of the candidate group is big, it can result in
-                # a quite significant performance impact. To reduce this, we
-                # can send them in smaller batches until the new batch does not
-                # provide any improvements.
-                #
-                # This might reduce the overall efficiency of the compression
-                # in some corner cases, but that should also prevent very
-                # pathological cases from being an issue. (eg. 20 000
-                # candidates).
-                #
-                # XXX note that the ordering of the group becomes important as
-                # it now impacts the final result. The current order is
-                # unprocessed and can be improved.
-                if group_chunk_size == 0:
-                    tested.update(group)
-                    good = yield tuple(group)
-                else:
-                    prev_good = good
-                    for start in range(0, len(group), group_chunk_size):
-                        sub_group = group[start : start + group_chunk_size]
-                        tested.update(sub_group)
-                        good = yield tuple(sub_group)
-                        if prev_good == good:
-                            break
-
-        yield None
+        if (self._internal_idx < len(self._internal_group)) and (
+            old_good != good_delta
+        ):
+            # When the size of the candidate group is big, it can result in
+            # a quite significant performance impact. To reduce this, we
+            # can send them in smaller batches until the new batch does not
+            # provide any improvements.
+            #
+            # This might reduce the overall efficiency of the compression
+            # in some corner cases, but that should also prevent very
+            # pathological cases from being an issue. (eg. 20 000
+            # candidates).
+            #
+            # XXX note that the ordering of the group becomes important as
+            # it now impacts the final result. The current order is
+            # unprocessed and can be improved.
+            next_idx = self._internal_idx + self._group_chunk_size
+            self.current_group = self._internal_group[
+                self._internal_idx : next_idx
+            ]
+            self.tested.update(self.current_group)
+            self._internal_idx = next_idx
+        else:
+            self._next_internal_group()

     def _pre_filter_candidate_revs(self, temptative):
         """filter possible candidate before computing a delta
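To make the new control flow easier to review, here is a small, self-contained sketch of the chunked candidate iteration that next_group now performs directly, instead of driving the removed _candidate_groups generator through send(). It is a simplification under stated assumptions: a fixed chunk size, a plain list of candidate groups standing in for _refined_groups(), and the "did the last batch help" test reduced to checking whether a good base was reported. The class name ChunkedGroupSearch and the good_base / chunk_size parameters are invented for illustration and are not part of the revlog code.

class ChunkedGroupSearch:
    # Sketch of the stateful chunking in this patch: candidates from the
    # current group are handed out in fixed-size batches, and we only keep
    # batching while the previous batch produced a better delta; otherwise
    # we move on to the next group.

    def __init__(self, groups, chunk_size=2):
        self._groups = iter(groups)  # stand-in for _refined_groups()
        self._chunk_size = chunk_size
        self._last_good = None  # the real code sends this back into the generator
        self.tested = set()
        self.current_group = None
        self._internal_group = []
        self._internal_idx = 0
        self._next_internal_group()

    def _next_internal_group(self):
        # pull a fresh group and emit its first chunk (None when exhausted)
        self._internal_idx = 0
        self._internal_group = next(self._groups, None)
        if self._internal_group is None:
            self.current_group = None
            return
        self.current_group = self._internal_group[: self._chunk_size]
        self._internal_idx = len(self.current_group)
        self.tested.update(self.current_group)

    def next_group(self, good_base=None):
        if good_base is not None:
            self._last_good = good_base
        remaining = self._internal_group is not None and (
            self._internal_idx < len(self._internal_group)
        )
        if remaining and good_base is not None:
            # the last batch helped: slice the next batch out of the same group
            next_idx = self._internal_idx + self._chunk_size
            self.current_group = self._internal_group[self._internal_idx : next_idx]
            self.tested.update(self.current_group)
            self._internal_idx = next_idx
        else:
            # group exhausted, or no improvement: move to the next group
            self._next_internal_group()


search = ChunkedGroupSearch([[10, 11, 12, 13, 14], [20, 21]], chunk_size=2)
print(search.current_group)      # [10, 11]
search.next_group(good_base=11)  # improvement -> next chunk of the same group
print(search.current_group)      # [12, 13]
search.next_group()              # no improvement -> jump to the next group
print(search.current_group)      # [20, 21]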