changegroup: skip delta when the underlying revlog does not use them

Revlogs can now be configured to store full snapshots only. This is used
for the changelog. However, changegroup packing was still recomputing
deltas to be sent over the wire. We now reuse the full snapshot directly in
this case, skipping delta computation. This provides a large speedup
(about -30%):

    # perfchangegroupchangelog on mercurial
    ! wall 2.010326 comb 2.020000 user 2.000000 sys 0.020000 (best of 5)
    ! wall 1.382039 comb 1.380000 user 1.370000 sys 0.010000 (best of 8)

    # perfchangegroupchangelog on pypy
    ! wall 5.792589 comb 5.780000 user 5.780000 sys 0.000000 (best of 3)
    ! wall 3.911158 comb 3.920000 user 3.900000 sys 0.020000 (best of 3)

    # perfchangegroupchangelog on mozilla central
    ! wall 20.683727 comb 20.680000 user 20.630000 sys 0.050000 (best of 3)
    ! wall 14.190204 comb 14.190000 user 14.150000 sys 0.040000 (best of 3)

Many tests had to be updated because of the change in bundle content. All
these updates have been verified. Because diffing the changelog was not
very valuable, the resulting bundles have a similar size (often a bit
smaller):

    # full bundle of mozilla central
    with delta:    1142740533B
    without delta: 1142173300B

So this is a win across the board.
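
A simplified sketch of the optimization (illustrative, not the actual
changegroup code): `storedeltachains` is the revlog attribute this change
relies on, while the helper name `buildchunk` is hypothetical.

    from mercurial.node import nullrev

    def buildchunk(revlog, rev, prev):
        # Return (deltabase, payload) for one changegroup entry.
        if not revlog.storedeltachains:
            # The revlog stores full snapshots only (the changelog, with
            # this configuration), so computing a delta would be wasted
            # work: send the full text against the null revision instead.
            return nullrev, revlog.revision(rev)
        # Otherwise delta against the previously sent revision, as before.
        return prev, revlog.revdiff(prev, rev)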

File last commit: r29341:0d83ad96 default
Current revision: r30211:6b0741d6 default
similar.py
110 lines | 3.7 KiB | text/x-python
# similar.py - mechanisms for finding similar files
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import hashlib

from .i18n import _
from . import (
    bdiff,
    mdiff,
    util,
)

def _findexactmatches(repo, added, removed):
    '''find renamed files that have no changes

    Takes a list of new filectxs and a list of removed filectxs, and yields
    (before, after) tuples of exact matches.
    '''
    numfiles = len(added) + len(removed)

    # Get hashes of removed files.
    hashes = {}
    for i, fctx in enumerate(removed):
        repo.ui.progress(_('searching for exact renames'), i, total=numfiles,
                         unit=_('files'))
        h = hashlib.sha1(fctx.data()).digest()
        hashes[h] = fctx

    # For each added file, see if it corresponds to a removed file.
    for i, fctx in enumerate(added):
        repo.ui.progress(_('searching for exact renames'), i + len(removed),
                         total=numfiles, unit=_('files'))
        h = hashlib.sha1(fctx.data()).digest()
        if h in hashes:
            yield (hashes[h], fctx)

    # Done
    repo.ui.progress(_('searching for exact renames'), None)

def _findsimilarmatches(repo, added, removed, threshold):
    '''find potentially renamed files based on similar file content

    Takes a list of new filectxs and a list of removed filectxs, and yields
    (before, after, score) tuples of partial matches.
    '''
    copies = {}
    for i, r in enumerate(removed):
        repo.ui.progress(_('searching for similar files'), i,
                         total=len(removed), unit=_('files'))

        # lazily load text
        @util.cachefunc
        def data():
            orig = r.data()
            return orig, mdiff.splitnewlines(orig)

        def score(text):
            orig, lines = data()
            # bdiff.blocks() returns blocks of matching lines
            # count the number of bytes in each
            equal = 0
            matches = bdiff.blocks(text, orig)
            for x1, x2, y1, y2 in matches:
                for line in lines[y1:y2]:
                    equal += len(line)

            lengths = len(text) + len(orig)
            return equal * 2.0 / lengths
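
        # A worked example with illustrative numbers: if the added text is
        # 600 bytes, the removed text is 400 bytes, and the matching lines
        # add up to 300 bytes, the score is 300 * 2.0 / 1000 = 0.6.
        # Identical contents score 1.0; entirely dissimilar contents 0.0.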
        for a in added:
            bestscore = copies.get(a, (None, threshold))[1]
            myscore = score(a.data())
            if myscore >= bestscore:
                copies[a] = (r, myscore)

    repo.ui.progress(_('searching'), None)

    for dest, v in copies.iteritems():
        source, score = v
        yield source, dest, score

def findrenames(repo, added, removed, threshold):
    '''find renamed files -- yields (before, after, score) tuples'''
    parentctx = repo['.']
    workingctx = repo[None]

    # Zero length files will be frequently unrelated to each other, and
    # tracking the deletion/addition of such a file will probably cause more
    # harm than good. We strip them out here to avoid matching them later on.
    addedfiles = set([workingctx[fp] for fp in added
                      if workingctx[fp].size() > 0])
    removedfiles = set([parentctx[fp] for fp in removed
                        if fp in parentctx and parentctx[fp].size() > 0])

    # Find exact matches.
    for (a, b) in _findexactmatches(repo,
                                    sorted(addedfiles), sorted(removedfiles)):
        addedfiles.remove(b)
        yield (a.path(), b.path(), 1.0)

    # If the user requested similar files to be matched, search for them also.
    if threshold < 1.0:
        for (a, b, score) in _findsimilarmatches(repo,
                sorted(addedfiles), sorted(removedfiles), threshold):
            yield (a.path(), b.path(), score)
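
A usage sketch (illustrative, not part of the module): this mirrors how
`hg addremove --similarity` consumes findrenames(), but the helper below
and its output format are hypothetical.

    from mercurial import similar

    def reportrenames(repo, added, removed, similarity=0.75):
        # findrenames() yields (source path, destination path, score);
        # a score of 1.0 means byte-for-byte identical contents.
        for old, new, score in similar.findrenames(repo, added, removed,
                                                   similarity):
            repo.ui.write('%s -> %s (%d%% similar)\n'
                          % (old, new, int(score * 100)))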