bdiff: replace hash algorithm

This patch replaces lyhash with the hash algorithm used by diffutils. The
algorithm has its origins in Git commit 2e9d1410, which is all the way back
from 1992. The license header in the code at that revision is GPL v2.

I have not performed an extensive analysis of the distribution (and therefore
buckets) of hash output. However, `hg perfbdiff` gives some clear wins. I'd
like to think that if it is good enough for diffutils, it is good enough for
us.

From the mozilla-unified repository:

  $ perfbdiff -m 3041e4d59df2
  ! wall 0.053271 comb 0.060000 user 0.060000 sys 0.000000 (best of 100)
  ! wall 0.035827 comb 0.040000 user 0.040000 sys 0.000000 (best of 100)

  $ perfbdiff 0e9928989e9c --alldata --count 100
  ! wall 6.204277 comb 6.200000 user 6.200000 sys 0.000000 (best of 3)
  ! wall 4.309710 comb 4.300000 user 4.300000 sys 0.000000 (best of 3)

From the hg repo:

  $ perfbdiff 35000 --alldata --count 1000
  ! wall 0.660358 comb 0.660000 user 0.660000 sys 0.000000 (best of 15)
  ! wall 0.534092 comb 0.530000 user 0.530000 sys 0.000000 (best of 19)

Looking at the generated assembly and statistical profiler output at the
kernel level, I believe there is room to make this function even faster:
we're still consuming data character by character instead of at the word
level, which translates to more loop iterations and more instructions.

At this juncture, though, the real performance killer is that we're hashing
every line. We should get a significant speedup if we changed the algorithm
to find the longest prefix and longest suffix, treat those as single
"lines", and then only do the line splitting and hashing on the parts that
differ. That will require a lot of C code, however. I'm optimistic this
approach could result in a ~2x speedup.
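As a rough illustration, here is a minimal Python sketch of the
diffutils-style line hash (rotate-left-by-7 and add, truncated to 32 bits
as the C code would be); the function name is illustrative, and the real
implementation lives in mercurial/bdiff.c:

    def diffutils_line_hash(line):
        """Hash a line of bytes: h = c + rol32(h, 7) for each byte c."""
        h = 0
        for c in bytearray(line):
            # rotate the 32-bit state left by 7, then add the next byte
            h = (c + (((h << 7) | (h >> 25)) & 0xffffffff)) & 0xffffffff
        return h

Identical lines always land in the same hash bucket, so the diff only needs
byte-by-byte comparison for lines whose hashes collide.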

File last commit: r29841:d5883fd0 default, r30318:e1d6aa0e default

share.py | 194 lines | 6.4 KiB | text/x-python
# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''share a common history between several working directories

Automatic Pooled Storage for Clones
-----------------------------------

When this extension is active, :hg:`clone` can be configured to
automatically share/pool storage across multiple clones. This
mode effectively converts :hg:`clone` to :hg:`clone` + :hg:`share`.
The benefit of using this mode is the automatic management of
store paths and intelligent pooling of related repositories.

The following ``share.`` config options influence this feature:

``share.pool``
    Filesystem path where shared repository data will be stored. When
    defined, :hg:`clone` will automatically use shared repository
    storage instead of creating a store inside each clone.

``share.poolnaming``
    How directory names in ``share.pool`` are constructed.

    "identity" means the name is derived from the first changeset in the
    repository. In this mode, different remotes share storage if their
    root/initial changeset is identical. In this mode, the local shared
    repository is an aggregate of all encountered remote repositories.

    "remote" means the name is derived from the source repository's
    path or URL. In this mode, storage is only shared if the path or URL
    requested in the :hg:`clone` command matches exactly to a repository
    that was cloned before.

    The default naming mode is "identity".
'''
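
# Example of enabling pooled storage from an hgrc (paths are hypothetical):
#
#   [share]
#   pool = /data/hg-pool
#   poolnaming = identity
#
# With this in place, ``hg clone https://example.org/repo`` keeps revision
# data under /data/hg-pool and creates a clone whose .hg/sharedpath points
# into the pool.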

from __future__ import absolute_import

import errno

from mercurial.i18n import _
from mercurial import (
    bookmarks,
    cmdutil,
    commands,
    error,
    extensions,
    hg,
    util,
)

repository = hg.repository
parseurl = hg.parseurl

cmdtable = {}
command = cmdutil.command(cmdtable)

# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'

@command('share',
    [('U', 'noupdate', None, _('do not create a working directory')),
     ('B', 'bookmarks', None, _('also share bookmarks'))],
    _('[-U] [-B] SOURCE [DEST]'),
    norepo=True)
def share(ui, source, dest=None, noupdate=False, bookmarks=False):
    """create a new shared repository

    Initialize a new repository and working directory that shares its
    history (and optionally bookmarks) with another repository.

    .. note::

       using rollback or extensions that destroy/modify history (mq,
       rebase, etc.) can cause considerable confusion with shared
       clones. In particular, if two shared clones are both updated to
       the same changeset, and one of them destroys that changeset
       with rollback, the other clone will suddenly stop working: all
       operations will fail with "abort: working directory has unknown
       parent". The only known workaround is to use debugsetparents on
       the broken clone to reset it to a changeset that still exists.
    """

    return hg.share(ui, source, dest=dest, update=not noupdate,
                    bookmarks=bookmarks)
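
# Example usage (paths are hypothetical):
#
#   $ hg share -B ~/repos/main ~/repos/feature
#
# creates ~/repos/feature with its own working directory while history and
# bookmarks continue to live in ~/repos/main's store.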

@command('unshare', [], '')
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.
    """

    if not repo.shared():
        raise error.Abort(_("this is not a shared repo"))

    destlock = lock = None
    lock = repo.lock()
    try:
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail

        destlock = hg.copystore(ui, repo, repo.path)

        sharefile = repo.join('sharedpath')
        util.rename(sharefile, sharefile + '.old')

        repo.requirements.discard('sharedpath')
        repo._writerequirements()
    finally:
        destlock and destlock.release()
        lock and lock.release()

    # update store, spath, svfs and sjoin of repo
    repo.unfiltered().__init__(repo.baseui, repo.root)
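
# After ``hg unshare`` the repository stands alone: the store has been
# copied in, 'sharedpath' is dropped from the requirements, and the old
# pointer survives as .hg/sharedpath.old in case manual recovery is needed.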

# Wrap clone command to pass auto share options.
def clone(orig, ui, source, *args, **opts):
    pool = ui.config('share', 'pool', None)
    if pool:
        pool = util.expandpath(pool)

    opts['shareopts'] = dict(
        pool=pool,
        mode=ui.config('share', 'poolnaming', 'identity'),
    )

    return orig(ui, source, *args, **opts)
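
# With the pooled-storage config sketched above, the wrapped command would
# forward (illustrative values):
#
#   opts['shareopts'] = {'pool': '/data/hg-pool', 'mode': 'identity'}
#
# letting the core clone machinery decide whether to share storage.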

def extsetup(ui):
    extensions.wrapfunction(bookmarks, '_getbkfile', getbkfile)
    extensions.wrapfunction(bookmarks.bmstore, 'recordchange', recordchange)
    extensions.wrapfunction(bookmarks.bmstore, '_writerepo', writerepo)
    extensions.wrapcommand(commands.table, 'clone', clone)

def _hassharedbookmarks(repo):
    """Returns whether this repo has shared bookmarks"""
    try:
        shared = repo.vfs.read('shared').splitlines()
    except IOError as inst:
        if inst.errno != errno.ENOENT:
            raise
        return False
    return hg.sharedbookmarks in shared
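
# The .hg/shared file read above is a newline-separated list of features
# shared with the source repository; bookmark sharing is active when it
# contains the hg.sharedbookmarks marker.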

def _getsrcrepo(repo):
    """
    Returns the source repository object for a given shared repository.
    If repo is not a shared repository, return None.
    """
    if repo.sharedpath == repo.path:
        return None

    if util.safehasattr(repo, 'srcrepo') and repo.srcrepo:
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo
    return srcrepo

def getbkfile(orig, repo):
    if _hassharedbookmarks(repo):
        srcrepo = _getsrcrepo(repo)
        if srcrepo is not None:
            repo = srcrepo
    return orig(repo)

def recordchange(orig, self, tr):
    # Continue with write to local bookmarks file as usual
    orig(self, tr)

    if _hassharedbookmarks(self._repo):
        srcrepo = _getsrcrepo(self._repo)
        if srcrepo is not None:
            category = 'share-bookmarks'
            tr.addpostclose(category, lambda tr: self._writerepo(srcrepo))

def writerepo(orig, self, repo):
    # First write local bookmarks file in case we ever unshare
    orig(self, repo)

    if _hassharedbookmarks(self._repo):
        srcrepo = _getsrcrepo(self._repo)
        if srcrepo is not None:
            orig(self, srcrepo)