@@ -2042,6 +2042,11 @@ coreconfigitem(
 )
 coreconfigitem(
     b'storage',
+    b'revlog.delta-parent-search.candidate-group-chunk-size',
+    default=0,
+)
+coreconfigitem(
+    b'storage',
     b'revlog.issue6528.fix-incoming',
     default=True,
 )
@@ -2281,6 +2281,21 @@ category impact performance and reposito
 To fix affected revisions that already exist within the repository, one can
 use :hg:`debug-repair-issue-6528`.
 
+.. container:: verbose
+
+    ``revlog.delta-parent-search.candidate-group-chunk-size``
+        Tune the number of delta bases the storage will consider in the
+        same "round" of search. In some very rare cases, using a smaller value
+        might result in faster processing at the possible expense of storage
+        space, while using larger values might result in slower processing at the
+        possible benefit of storage space. A value of "0" means no limitation.
+
+        default: no limitation
+
+        It is unlikely that you'll have to tune this configuration. If you think
+        you do, consider talking with the mercurial developer community about your
+        repositories.
+
 ``revlog.optimize-delta-parent-choice``
     When storing a merge revision, both parents will be equally considered as
     a possible delta base. This results in better delta selection and improved
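
As a usage note: the new knob is read from the ``storage`` section of a Mercurial configuration file. A minimal hgrc snippet might look like the following; the value 16 is purely illustrative and is not a recommendation made by the patch.

    [storage]
    revlog.delta-parent-search.candidate-group-chunk-size = 16

Leaving the setting out, or setting it to 0, keeps the unbounded behaviour described in the help text above.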
@@ -1081,6 +1081,11 @@ def resolverevlogstorevfsoptions(ui, req
         b'storage', b'revlog.optimize-delta-parent-choice'
     )
     options[b'deltabothparents'] = deltabothparents
+    dps_cgds = ui.configint(
+        b'storage',
+        b'revlog.delta-parent-search.candidate-group-chunk-size',
+    )
+    options[b'delta-parent-search.candidate-group-chunk-size'] = dps_cgds
     options[b'debug-delta'] = ui.configbool(b'debug', b'revlog.debug-delta')
 
     issue6528 = ui.configbool(b'storage', b'revlog.issue6528.fix-incoming')
@@ -348,6 +348,7 @@ class revlog:
         self._chunkcachesize = 65536
         self._maxchainlen = None
         self._deltabothparents = True
+        self._candidate_group_chunk_size = 0
         self._debug_delta = False
         self.index = None
         self._docket = None
@@ -422,6 +423,9 @@ class revlog:
             self._maxchainlen = opts[b'maxchainlen']
         if b'deltabothparents' in opts:
             self._deltabothparents = opts[b'deltabothparents']
+        dps_cgds = opts.get(b'delta-parent-search.candidate-group-chunk-size')
+        if dps_cgds:
+            self._candidate_group_chunk_size = dps_cgds
         self._lazydelta = bool(opts.get(b'lazydelta', True))
         self._lazydeltabase = False
         if self._lazydelta:
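
One detail worth calling out in the hunk above: the option is applied with a plain truthiness check, so both a missing key and an explicit 0 leave ``_candidate_group_chunk_size`` at the class default of 0. A standalone sketch of that behaviour (plain dicts, not revlog objects):

    # Illustration only (not revlog code): a missing key or an explicit 0
    # both leave chunking disabled; only a positive value takes effect.
    key = b'delta-parent-search.candidate-group-chunk-size'
    for opts in ({}, {key: 0}, {key: 16}):
        chunk_size = 0  # mirrors the class-level default set in __init__
        dps_cgds = opts.get(key)
        if dps_cgds:
            chunk_size = dps_cgds
        print(opts.get(key), '->', chunk_size)
    # Output: None -> 0, 0 -> 0, 16 -> 16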
@@ -680,6 +680,7 @@ def _candidategroups(
     good = None
 
     deltas_limit = textlen * LIMIT_DELTA2TEXT
+    group_chunk_size = revlog._candidate_group_chunk_size
 
     tested = {nullrev}
     candidates = _refinedgroups(
@@ -770,11 +771,30 @@ def _candidategroups(
 
             group.append(rev)
         if group:
-            # XXX: in the sparse revlog case, group can become large,
-            # impacting performances. Some bounding or slicing mecanism
-            # would help to reduce this impact.
-            tested.update(group)
-            good = yield tuple(group)
+            # When the size of the candidate group is big, it can result in a
+            # quite significant performance impact. To reduce this, we can send
+            # them in smaller batches until the new batch does not provide any
+            # improvements.
+            #
+            # This might reduce the overall efficiency of the compression in
+            # some corner cases, but that should also prevent very pathological
+            # cases from being an issue. (eg. 20 000 candidates).
+            #
+            # XXX note that the ordering of the group becomes important as it
+            # now impacts the final result. The current order is unprocessed
+            # and can be improved.
+            if group_chunk_size == 0:
+                tested.update(group)
+                good = yield tuple(group)
+            else:
+                prev_good = good
+                for start in range(0, len(group), group_chunk_size):
+                    sub_group = group[start : start + group_chunk_size]
+                    tested.update(sub_group)
+                    good = yield tuple(sub_group)
+                    if prev_good == good:
+                        break
+
     yield None
 
 
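To make the send/yield protocol in the hunk above easier to follow, here is a self-contained toy sketch. It is a model built on assumptions, not revlog code: "revisions" are plain integers, a lower value stands in for a better delta base, and, unlike the patch (which captures ``prev_good`` once before the loop), it refreshes ``prev_good`` on every batch so the early stop is easy to observe.

    def chunked_candidate_groups(candidates, chunk_size):
        """Yield candidate batches; the caller sends back its best pick so far.

        A chunk_size of 0 mirrors the "no limitation" default: everything is
        offered in a single batch.
        """
        good = None
        if chunk_size == 0:
            good = yield tuple(candidates)
        else:
            for start in range(0, len(candidates), chunk_size):
                prev_good = good
                good = yield tuple(candidates[start : start + chunk_size])
                if prev_good == good:
                    break  # this batch did not improve the caller's pick
        yield None  # sentinel: no more batches to offer


    def pick_best(candidates, chunk_size):
        """Drive the generator, keeping the candidate with the lowest value."""
        groups = chunked_candidate_groups(candidates, chunk_size)
        best = None
        batch = groups.send(None)  # prime the generator
        while batch is not None:
            for rev in batch:
                if best is None or rev < best:
                    best = rev
            batch = groups.send(best)  # report the current best back
        return best


    print(pick_best([5, 3, 1, 9, 7, 4, 0, 2], chunk_size=0))  # -> 0
    print(pick_best([5, 3, 1, 9, 7, 4, 0, 2], chunk_size=3))  # -> 1

With chunking enabled, the second batch brings no improvement, so the search stops before ever offering the globally best candidate (0); that is the "faster processing at the possible expense of storage space" trade-off the new help text describes.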