sparse-read: move from a recursive-based approach to a heap-based one

The previous recursive approach tried to optimise each read slice to have a
good density. It had a tendency to over-optimize smaller slices while leaving
larger holes in others. The new approach focuses on improving the combined
density of all the reads instead of the individual slices. It slices at the
largest gaps first, as they reduce the total amount of read data most
efficiently. Another benefit of this approach is that we iterate over the
delta chain only once, reducing the overhead of slicing long delta chains.

On the repository we use for tests, the new approach shows performance similar
to, or faster than, the current default linear full read. The repository
contains about 450,000 revisions with many concurrent topological branches.
Tests have been run on two versions of the repository: one built with the
current delta constraint, and the other with an unlimited delta span (using
'experimental.maxdeltachainspan=0').

Below are timings for building 1% of all the revisions in the manifest log
using 'hg perfrevlogrevisions -m'. Times are given in seconds and include the
next couple of follow-up changesets in this series.

    delta-span     standard    unlimited
    linear-read        922s         632s
    sparse-read        814s         566s
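As a rough illustration only, here is a minimal sketch of the gap-slicing idea
described above: cut a chain of reads at its largest gaps first so the
remaining slices stay dense. This is not the revlog implementation; the
'segments' list of (start, end) byte offsets, the function name, and the
'max_slices' parameter are all assumptions made for the example.

    import heapq

    def slice_at_largest_gaps(segments, max_slices):
        # 'segments' is a sorted list of (start, end) byte ranges to read,
        # e.g. the on-disk spans of the revisions in a delta chain.
        # Single pass over the chain: record the size of every gap between
        # consecutive segments.
        gaps = []
        for i in range(len(segments) - 1):
            gapsize = segments[i + 1][0] - segments[i][1]
            if gapsize > 0:
                gaps.append((gapsize, i))
        # Cut at the largest gaps first: each cut removes the most unread
        # bytes from the combined read, improving overall density.
        cuts = sorted(i for _, i in heapq.nlargest(max_slices - 1, gaps))
        slices = []
        previous = 0
        for i in cuts:
            slices.append(segments[previous:i + 1])
            previous = i + 1
        slices.append(segments[previous:])
        return slices

    # Example: the 80-byte gap is cut first, giving two dense reads.
    # slice_at_largest_gaps([(0, 10), (15, 20), (100, 120), (121, 130)], 2)
    # -> [[(0, 10), (15, 20)], [(100, 120), (121, 130)]]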

bdiff.py
# bdiff.py - CFFI implementation of bdiff.c
#
# Copyright 2016 Maciej Fijalkowski <fijall@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import struct

from ..pure.bdiff import *
from . import _bdiff

ffi = _bdiff.ffi
lib = _bdiff.lib


def blocks(sa, sb):
    # Return the matching blocks between sa and sb as a list of
    # (a1, a2, b1, b2) tuples, mirroring the pure-Python implementation.
    a = ffi.new("struct bdiff_line**")
    b = ffi.new("struct bdiff_line**")
    ac = ffi.new("char[]", str(sa))
    bc = ffi.new("char[]", str(sb))
    l = ffi.new("struct bdiff_hunk*")
    try:
        # split both inputs into line arrays for the C diff engine
        an = lib.bdiff_splitlines(ac, len(sa), a)
        bn = lib.bdiff_splitlines(bc, len(sb), b)
        if not a[0] or not b[0]:
            raise MemoryError
        count = lib.bdiff_diff(a[0], an, b[0], bn, l)
        if count < 0:
            raise MemoryError
        # copy the C hunk list into a Python list of tuples
        rl = [None] * count
        h = l.next
        i = 0
        while h:
            rl[i] = (h.a1, h.a2, h.b1, h.b2)
            h = h.next
            i += 1
    finally:
        # release the C-side line arrays and hunk list
        lib.free(a[0])
        lib.free(b[0])
        lib.bdiff_freehunks(l.next)
    return rl


def bdiff(sa, sb):
    # Return a binary diff from sa to sb: a series of ">lll"-packed
    # (start, end, length) headers, each followed by replacement data
    # taken from sb.
    a = ffi.new("struct bdiff_line**")
    b = ffi.new("struct bdiff_line**")
    ac = ffi.new("char[]", str(sa))
    bc = ffi.new("char[]", str(sb))
    l = ffi.new("struct bdiff_hunk*")
    try:
        an = lib.bdiff_splitlines(ac, len(sa), a)
        bn = lib.bdiff_splitlines(bc, len(sb), b)
        if not a[0] or not b[0]:
            raise MemoryError
        count = lib.bdiff_diff(a[0], an, b[0], bn, l)
        if count < 0:
            raise MemoryError
        rl = []
        h = l.next
        la = lb = 0
        while h:
            # emit a chunk for the region between the previous matching
            # block and this one
            if h.a1 != la or h.b1 != lb:
                lgt = (b[0] + h.b1).l - (b[0] + lb).l
                rl.append(struct.pack(">lll", (a[0] + la).l - a[0].l,
                                      (a[0] + h.a1).l - a[0].l, lgt))
                rl.append(str(ffi.buffer((b[0] + lb).l, lgt)))
            la = h.a2
            lb = h.b2
            h = h.next
    finally:
        lib.free(a[0])
        lib.free(b[0])
        lib.bdiff_freehunks(l.next)
    return "".join(rl)