##// END OF EJS Templates
benchmarks: restore `output` variable lost in D10884...
Martin von Zweigbergk -
r48251:50cd14db default
parent child Browse files
Show More
@@ -1,125 +1,126 b''
1 # __init__.py - asv benchmark suite
1 # __init__.py - asv benchmark suite
2 #
2 #
3 # Copyright 2016 Logilab SA <contact@logilab.fr>
3 # Copyright 2016 Logilab SA <contact@logilab.fr>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 # "historical portability" policy of contrib/benchmarks:
8 # "historical portability" policy of contrib/benchmarks:
9 #
9 #
10 # We have to make this code work correctly with current mercurial stable branch
10 # We have to make this code work correctly with current mercurial stable branch
11 # and if possible with reasonable cost with early Mercurial versions.
11 # and if possible with reasonable cost with early Mercurial versions.
12
12
13 '''ASV (https://asv.readthedocs.io) benchmark suite
13 '''ASV (https://asv.readthedocs.io) benchmark suite
14
14
15 Benchmarks are parameterized against reference repositories found in the
15 Benchmarks are parameterized against reference repositories found in the
16 directory pointed by the REPOS_DIR environment variable.
16 directory pointed by the REPOS_DIR environment variable.
17
17
18 Invocation example:
18 Invocation example:
19
19
20 $ export REPOS_DIR=~/hgperf/repos
20 $ export REPOS_DIR=~/hgperf/repos
21 # run suite on given revision
21 # run suite on given revision
22 $ asv --config contrib/asv.conf.json run REV
22 $ asv --config contrib/asv.conf.json run REV
23 # run suite on new changesets found in stable and default branch
23 # run suite on new changesets found in stable and default branch
24 $ asv --config contrib/asv.conf.json run NEW
24 $ asv --config contrib/asv.conf.json run NEW
25 # display a comparative result table of benchmark results between two given
25 # display a comparative result table of benchmark results between two given
26 # revisions
26 # revisions
27 $ asv --config contrib/asv.conf.json compare REV1 REV2
27 $ asv --config contrib/asv.conf.json compare REV1 REV2
28 # compute regression detection and generate ASV static website
28 # compute regression detection and generate ASV static website
29 $ asv --config contrib/asv.conf.json publish
29 $ asv --config contrib/asv.conf.json publish
30 # serve the static website
30 # serve the static website
31 $ asv --config contrib/asv.conf.json preview
31 $ asv --config contrib/asv.conf.json preview
32 '''
32 '''
33
33
34 from __future__ import absolute_import
34 from __future__ import absolute_import
35
35
36 import functools
36 import functools
37 import os
37 import os
38 import re
38 import re
39
39
40 from mercurial import (
40 from mercurial import (
41 extensions,
41 extensions,
42 hg,
42 hg,
43 ui as uimod,
43 ui as uimod,
44 util,
44 util,
45 )
45 )
46
46
# Root of the Mercurial checkout (two levels up from contrib/benchmarks/).
basedir = os.path.abspath(
    os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir)
)
# Reference repositories are discovered at import time from the directory
# named by the REPOS_DIR environment variable (KeyError if it is unset).
reposdir = os.environ['REPOS_DIR']
reposnames = [
    name
    for name in os.listdir(reposdir)
    if os.path.isdir(os.path.join(reposdir, name, ".hg"))
]
if not reposnames:
    # Fix: the message previously said $REPO_DIR, but the variable actually
    # consulted above is $REPOS_DIR — keep the message consistent with it.
    raise ValueError("No repositories found in $REPOS_DIR")
# Matches the timing line emitted by contrib/perf.py commands; group(1)
# captures the wall time.
outputre = re.compile(
    (
        r'! wall (\d+.\d+) comb \d+.\d+ user \d+.\d+ sys '
        r'\d+.\d+ \(best of \d+\)'
    )
)
64
64
65
65
def runperfcommand(reponame, command, *args, **kwargs):
    """Run a contrib/perf.py command on one reference repository.

    ``reponame`` is a directory name under ``reposdir``; ``command`` is the
    attribute name of a perf command in contrib/perf.py.  Extra positional
    and keyword arguments are forwarded to the command.

    Returns the wall time parsed from the command's output as a float
    (presumably seconds — as reported by perf.py's "! wall ..." line).
    Raises ValueError when the output does not match ``outputre``.
    """
    os.environ["HGRCPATH"] = os.environ.get("ASVHGRCPATH", "")
    # for "historical portability"
    # ui.load() has been available since d83ca85
    if util.safehasattr(uimod.ui, "load"):
        ui = uimod.ui.load()
    else:
        ui = uimod.ui()
    repo = hg.repository(ui, os.path.join(reposdir, reponame))
    # Load the perf extension from the contrib directory of this checkout.
    perfext = extensions.load(
        ui, 'perfext', os.path.join(basedir, 'contrib', 'perf.py')
    )
    cmd = getattr(perfext, command)
    # Capture the command's output in a buffer so the timing line can be
    # parsed below (this is the `output` plumbing restored by this change).
    ui.pushbuffer()
    cmd(ui, repo, *args, **kwargs)
    output = ui.popbuffer()
    match = outputre.search(output)
    if not match:
        raise ValueError("Invalid output {}".format(output))
    # group(1) of outputre is the wall-time figure.
    return float(match.group(1))
85
86
86
87
def perfbench(repos=reposnames, name=None, params=None):
    """decorator to declare ASV benchmark based on contrib/perf.py extension

    An ASV benchmark is a python function with the given attributes:

    __name__: should start with track_, time_ or mem_ to be collected by ASV
    params and param_name: parameter matrix to display multiple graphs on the
    same page.
    pretty_name: If defined it's displayed in web-ui instead of __name__
    (useful for revsets)
    the module name is prepended to the benchmark name and displayed as
    "category" in webui.

    Benchmarks are automatically parameterized with repositories found in the
    REPOS_DIR environment variable.

    `params` is the param matrix in the form of a list of tuple
    (param_name, [value0, value1])

    For example [(x, [a, b]), (y, [c, d])] declare benchmarks for
    (a, c), (a, d), (b, c) and (b, d).
    """
    # The repository axis always comes first in the parameter matrix.
    matrix = [("repo", repos)] + list(params or [])

    def decorator(func):
        @functools.wraps(func)
        def wrapped(repo, *args):
            # `perf` binds the current repository so the benchmark body only
            # has to name the perf.py command and its arguments.
            def perf(command, *a, **kw):
                return runperfcommand(repo, command, *a, **kw)

            return func(perf, *args)

        # Expose the ASV-required attributes on the wrapper.
        wrapped.params = [values for _, values in matrix]
        wrapped.param_names = [axis for axis, _ in matrix]
        wrapped.pretty_name = name
        return wrapped

    return decorator
General Comments 0
You need to be logged in to leave comments. Login now