# __init__.py - asv benchmark suite
#
# Copyright 2016 Logilab SA <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
# "historical portability" policy of contrib/benchmarks:
#
# We have to make this code work correctly with the current Mercurial stable
# branch and, if possible at reasonable cost, with earlier Mercurial versions.
'''ASV (https://asv.readthedocs.io) benchmark suite

Benchmarks are parameterized against reference repositories found in the
directory pointed to by the REPOS_DIR environment variable.

Invocation example:
$ export REPOS_DIR=~/hgperf/repos
# run the suite on a given revision
$ asv --config contrib/asv.conf.json run REV
# run the suite on new changesets found in the stable and default branches
$ asv --config contrib/asv.conf.json run NEW
# display a comparison table of benchmark results between two given revisions
$ asv --config contrib/asv.conf.json compare REV1 REV2
# run regression detection and generate the ASV static website
$ asv --config contrib/asv.conf.json publish
# serve the static website
$ asv --config contrib/asv.conf.json preview
'''
from __future__ import absolute_import
import functools
import os
import re
from mercurial import (
extensions,
hg,
ui as uimod,
util,
)
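
# this file lives in contrib/benchmarks/, so two directories up is the
# Mercurial source root; it is used below to locate contrib/perf.py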
basedir = os.path.abspath(
os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir)
)
reposdir = os.environ['REPOS_DIR']
reposnames = [
name
for name in os.listdir(reposdir)
if os.path.isdir(os.path.join(reposdir, name, ".hg"))
]
if not reposnames:
    raise ValueError("No repositories found in $REPOS_DIR")
outputre = re.compile(
    (
        r'! wall (\d+\.\d+) comb \d+\.\d+ user \d+\.\d+ sys '
        r'\d+\.\d+ \(best of \d+\)'
    )
)
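# For reference, an illustrative line of the shape this regex is meant to
# match (assumed from the pattern itself, not captured from a real perf.py
# run):
#   ! wall 0.015678 comb 0.020000 user 0.020000 sys 0.000000 (best of 182)
# outputre.search() then exposes the wall time, "0.015678", as group(1).
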
def runperfcommand(reponame, command, *args, **kwargs):
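    """Run `command` from contrib/perf.py on the repository named `reponame`
    and return the wall-clock time it reports, in seconds, as a float."""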
os.environ["HGRCPATH"] = os.environ.get("ASVHGRCPATH", "")
# for "historical portability"
# ui.load() has been available since d83ca85
if util.safehasattr(uimod.ui, "load"):
ui = uimod.ui.load()
else:
ui = uimod.ui()
repo = hg.repository(ui, os.path.join(reposdir, reponame))
perfext = extensions.load(
ui, 'perfext', os.path.join(basedir, 'contrib', 'perf.py')
)
cmd = getattr(perfext, command)
ui.pushbuffer()
cmd(ui, repo, *args, **kwargs)
output = ui.popbuffer()
match = outputre.search(output)
if not match:
raise ValueError("Invalid output {0}".format(output))
return float(match.group(1))
def perfbench(repos=reposnames, name=None, params=None):
"""decorator to declare ASV benchmark based on contrib/perf.py extension
An ASV benchmark is a python function with the given attributes:
__name__: should start with track_, time_ or mem_ to be collected by ASV
params and param_name: parameter matrix to display multiple graphs on the
same page.
pretty_name: If defined it's displayed in web-ui instead of __name__
(useful for revsets)
the module name is prepended to the benchmark name and displayed as
"category" in webui.
Benchmarks are automatically parameterized with repositories found in the
REPOS_DIR environment variable.
`params` is the param matrix in the form of a list of tuple
(param_name, [value0, value1])
For example [(x, [a, b]), (y, [c, d])] declare benchmarks for
(a, c), (a, d), (b, c) and (b, d).
"""
params = list(params or [])
params.insert(0, ("repo", repos))
def decorator(func):
@functools.wraps(func)
def wrapped(repo, *args):
def perf(command, *a, **kw):
return runperfcommand(repo, command, *a, **kw)
return func(perf, *args)
wrapped.params = [p[1] for p in params]
wrapped.param_names = [p[0] for p in params]
wrapped.pretty_name = name
return wrapped
return decorator
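

# A minimal usage sketch (hypothetical benchmark module, e.g.
# contrib/benchmarks/example.py; the perf command name below is an assumption
# made for illustration, not something defined in this file):
#
#     from . import perfbench
#
#     @perfbench(params=[("revset", ["all()", "tip"])])
#     def track_revset(perf, revset):
#         # `perf` runs the named contrib/perf.py command against each
#         # repository found in REPOS_DIR and returns the wall time
#         return perf("perfrevset", revset)
#
# ASV would then report one data series per (repo, revset) combination under
# the name "<module>.track_revset".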