##// END OF EJS Templates
perf: add asv benchmarks...
Philippe Pepiot -
r30406:cff0f592 default
parent child Browse files
Show More
@@ -0,0 +1,13 b''
1 {
2 "version": 1,
3 "project": "mercurial",
4 "project_url": "https://mercurial-scm.org/",
5 "repo": "..",
6 "branches": ["default", "stable"],
7 "environment_type": "virtualenv",
8 "show_commit_url": "https://www.mercurial-scm.org/repo/hg/rev/",
9 "benchmark_dir": "benchmarks",
10 "env_dir": "../.asv/env",
11 "results_dir": "../.asv/results",
12 "html_dir": "../.asv/html"
13 }
@@ -0,0 +1,102 b''
1 # __init__.py - asv benchmark suite
2 #
3 # Copyright 2016 Logilab SA <contact@logilab.fr>
4 #
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
7
8 '''ASV (https://asv.readthedocs.io) benchmark suite
9
10 Benchmarks are parameterized against reference repositories found in the
11 directory pointed to by the REPOS_DIR environment variable.
12
13 Invocation example:
14
15 $ export REPOS_DIR=~/hgperf/repos
16 # run suite on given revision
17 $ asv --config contrib/asv.conf.json run REV
18 # run suite on new changesets found in stable and default branch
19 $ asv --config contrib/asv.conf.json run NEW
20 # display a comparative result table of benchmark results between two given
21 # revisions
22 $ asv --config contrib/asv.conf.json compare REV1 REV2
23 # compute regression detection and generate ASV static website
24 $ asv --config contrib/asv.conf.json publish
25 # serve the static website
26 $ asv --config contrib/asv.conf.json preview
27 '''
28
29 from __future__ import absolute_import
30
31 import functools
32 import os
33 import re
34
35 from mercurial import (
36 extensions,
37 hg,
38 ui as uimod,
39 )
40
# Root of the mercurial source tree (two levels above this benchmarks dir).
basedir = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                       os.path.pardir, os.path.pardir))
# Reference repositories live under $REPOS_DIR; a missing variable raises
# KeyError at import time, which is the intended fail-fast behavior.
reposdir = os.environ['REPOS_DIR']
reposnames = [name for name in os.listdir(reposdir)
              if os.path.isdir(os.path.join(reposdir, name, ".hg"))]
if not reposnames:
    # Fixed: the message previously said $REPO_DIR while the code reads
    # $REPOS_DIR.
    raise ValueError("No repositories found in $REPOS_DIR")
# Matches the timing line printed by contrib/perf.py commands, e.g.
# "! wall 0.123456 comb 0.120000 user 0.110000 sys 0.010000 (best of 25)".
# Decimal points are escaped so '.' only matches a literal dot.
outputre = re.compile(r'! wall (\d+\.\d+) comb \d+\.\d+ user \d+\.\d+ sys '
                      r'\d+\.\d+ \(best of \d+\)')
50
def runperfcommand(reponame, command, *args, **kwargs):
    """Run one contrib/perf.py command against a reference repository.

    Loads the perf extension into a fresh ui, invokes ``command`` (e.g.
    "perftags") on the repository named ``reponame`` under REPOS_DIR, and
    returns the reported wall-clock time in seconds as a float.

    Raises ValueError when the command output does not contain the expected
    timing line.
    """
    # Honor ASVHGRCPATH so benchmarks run under a controlled hgrc instead of
    # whatever the developer's environment provides.
    os.environ["HGRCPATH"] = os.environ.get("ASVHGRCPATH", "")
    ui = uimod.ui()
    repo = hg.repository(ui, os.path.join(reposdir, reponame))
    perfext = extensions.load(ui, 'perfext',
                              os.path.join(basedir, 'contrib', 'perf.py'))
    perffn = getattr(perfext, command)
    # Capture the extension's output instead of letting it hit stdout.
    ui.pushbuffer()
    perffn(ui, repo, *args, **kwargs)
    output = ui.popbuffer()
    found = outputre.search(output)
    if found is None:
        raise ValueError("Invalid output {0}".format(output))
    return float(found.group(1))
65
def perfbench(repos=reposnames, name=None, params=None):
    """decorator to declare ASV benchmark based on contrib/perf.py extension

    An ASV benchmark is a python function with the given attributes:

    __name__: should start with track_, time_ or mem_ to be collected by ASV
    params and param_name: parameter matrix to display multiple graphs on the
    same page.
    pretty_name: If defined it's displayed in web-ui instead of __name__
    (useful for revsets)
    the module name is prepended to the benchmark name and displayed as
    "category" in webui.

    Benchmarks are automatically parameterized with repositories found in the
    REPOS_DIR environment variable.

    `params` is the param matrix in the form of a list of tuple
    (param_name, [value0, value1])

    For example [(x, [a, b]), (y, [c, d])] declare benchmarks for
    (a, c), (a, d), (b, c) and (b, d).
    """
    # The repository name is always the first parameter of the matrix.
    matrix = [("repo", repos)] + list(params or [])

    def decorator(func):
        @functools.wraps(func)
        def wrapped(repo, *args):
            # Bind the repository so the benchmark body only needs to name
            # the perf command (and any extra matrix parameters).
            def perf(command, *a, **kw):
                return runperfcommand(repo, command, *a, **kw)
            return func(perf, *args)

        # ASV discovers the parameter matrix through these attributes.
        wrapped.param_names = [pname for pname, _values in matrix]
        wrapped.params = [values for _pname, values in matrix]
        wrapped.pretty_name = name
        return wrapped
    return decorator
@@ -0,0 +1,26 b''
1 # perf.py - asv benchmarks using contrib/perf.py extension
2 #
3 # Copyright 2016 Logilab SA <contact@logilab.fr>
4 #
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
7
8 from __future__ import absolute_import
9
10 from . import perfbench
11
@perfbench()
def track_tags(run):
    """Time the perftags command on each reference repository."""
    return run("perftags")
15
@perfbench()
def track_status(run):
    """Time perfstatus (without listing unknown files)."""
    return run("perfstatus", unknown=False)
19
@perfbench(params=[('rev', ['1000', '10000', 'tip'])])
def track_manifest(run, revision):
    """Time perfmanifest at a few fixed revisions plus tip."""
    return run("perfmanifest", revision)
23
@perfbench()
def track_heads(run):
    """Time the perfheads command on each reference repository."""
    return run("perfheads")
@@ -0,0 +1,53 b''
1 # revset.py - asv revset benchmarks
2 #
3 # Copyright 2016 Logilab SA <contact@logilab.fr>
4 #
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
7
8 '''ASV revset benchmarks generated from contrib/base-revsets.txt
9
10 Each revset benchmark is parameterized with variants (first, last, sort, ...)
11 '''
12
13 from __future__ import absolute_import
14
15 import os
16 import string
17 import sys
18
19 from . import basedir, perfbench
20
def createrevsetbenchmark(baseset, variants=None):
    """Build a track_* benchmark function for the revset ``baseset``.

    ``variants`` lists wrappers applied to the revset: "plain" benchmarks
    the expression as-is, while e.g. "sort+first" benchmarks
    first(sort(baseset)).  The default covers the usual ordering/limiting
    combinations.

    Returns the benchmark function, with __name__ derived from the revset
    text so it is a valid (and ASV-collectable) identifier.
    """
    if variants is None:
        # Default variants
        variants = ["plain", "first", "last", "sort", "sort+first",
                    "sort+last"]
    # Derive an identifier from the revset: keep letters and digits,
    # collapse every other character run into a single underscore.
    # string.ascii_letters replaces the Python 2-only (and locale-dependent)
    # string.letters, keeping the module importable on Python 3 with
    # identical results for ASCII revset text.
    keep = string.digits + string.ascii_letters
    fname = "track_" + "_".join("".join([
        c if c in keep else " "
        for c in baseset
    ]).split())

    def wrap(fname, baseset):
        @perfbench(name=baseset, params=[("variant", variants)])
        def f(perf, variant):
            revset = baseset
            if variant != "plain":
                # Apply wrappers innermost-first: "sort+first" yields
                # first(sort(revset)).
                for var in variant.split("+"):
                    revset = "%s(%s)" % (var, revset)
            return perf("perfrevset", revset)
        # wrap() exists to give each generated function its own closure over
        # fname/baseset; fix up the name ASV will display.
        f.__name__ = fname
        return f
    return wrap(fname, baseset)
42
def initializerevsetbenchmarks():
    """Register one benchmark per revset in contrib/base-revsets.txt.

    Every non-empty, non-comment line of the file becomes a track_*
    function attached to this module so ASV can discover it.
    """
    module = sys.modules[__name__]
    path = os.path.join(basedir, 'contrib', 'base-revsets.txt')
    with open(path, 'rb') as fh:
        for line in fh:
            revset = line.strip()
            # Skip blank lines and '#' comments.
            if not revset or revset.startswith('#'):
                continue
            benchmark = createrevsetbenchmark(revset)
            setattr(module, benchmark.__name__, benchmark)

initializerevsetbenchmarks()
@@ -49,6 +49,7 b' mercurial.egg-info'
49 tags
49 tags
50 cscope.*
50 cscope.*
51 .idea/*
51 .idea/*
52 .asv/*
52 i18n/hg.pot
53 i18n/hg.pot
53 locale/*/LC_MESSAGES/hg.mo
54 locale/*/LC_MESSAGES/hg.mo
54 hgext/__index__.py
55 hgext/__index__.py
General Comments 0
You need to be logged in to leave comments. Login now