perf: add historical support of ui.load()...
Philippe Pepiot
r30588:be0e7af8 default
@@ -1,102 +1,113 @@
 # __init__.py - asv benchmark suite
 #
 # Copyright 2016 Logilab SA <contact@logilab.fr>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

+# "historical portability" policy of contrib/benchmarks:
+#
+# We have to make this code work correctly with the current Mercurial stable
+# branch and, if possible at reasonable cost, with early Mercurial versions.
+
 '''ASV (https://asv.readthedocs.io) benchmark suite

 Benchmarks are parameterized against reference repositories found in the
 directory pointed to by the REPOS_DIR environment variable.

 Invocation example:

     $ export REPOS_DIR=~/hgperf/repos
     # run suite on given revision
     $ asv --config contrib/asv.conf.json run REV
     # run suite on new changesets found in stable and default branch
     $ asv --config contrib/asv.conf.json run NEW
     # display a comparative result table of benchmark results between two
     # given revisions
     $ asv --config contrib/asv.conf.json compare REV1 REV2
     # compute regression detection and generate ASV static website
     $ asv --config contrib/asv.conf.json publish
     # serve the static website
     $ asv --config contrib/asv.conf.json preview
 '''

 from __future__ import absolute_import

 import functools
 import os
 import re

 from mercurial import (
     extensions,
     hg,
     ui as uimod,
+    util,
 )

 basedir = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                        os.path.pardir, os.path.pardir))
 reposdir = os.environ['REPOS_DIR']
 reposnames = [name for name in os.listdir(reposdir)
               if os.path.isdir(os.path.join(reposdir, name, ".hg"))]
 if not reposnames:
     raise ValueError("No repositories found in $REPOS_DIR")
 outputre = re.compile((r'! wall (\d+.\d+) comb \d+.\d+ user \d+.\d+ sys '
                        r'\d+.\d+ \(best of \d+\)'))

 def runperfcommand(reponame, command, *args, **kwargs):
     os.environ["HGRCPATH"] = os.environ.get("ASVHGRCPATH", "")
-    ui = uimod.ui.load()
+    # for "historical portability":
+    # ui.load() has been available since d83ca85
+    if util.safehasattr(uimod.ui, "load"):
+        ui = uimod.ui.load()
+    else:
+        ui = uimod.ui()
     repo = hg.repository(ui, os.path.join(reposdir, reponame))
     perfext = extensions.load(ui, 'perfext',
                               os.path.join(basedir, 'contrib', 'perf.py'))
     cmd = getattr(perfext, command)
     ui.pushbuffer()
     cmd(ui, repo, *args, **kwargs)
     output = ui.popbuffer()
     match = outputre.search(output)
     if not match:
         raise ValueError("Invalid output {0}".format(output))
     return float(match.group(1))

 def perfbench(repos=reposnames, name=None, params=None):
     """decorator to declare an ASV benchmark based on the contrib/perf.py
     extension

     An ASV benchmark is a python function with the following attributes:

     __name__: should start with track_, time_ or mem_ to be collected by ASV
     params and param_names: parameter matrix used to display multiple graphs
     on the same page
     pretty_name: if defined, displayed in the web UI instead of __name__
     (useful for revsets)

     The module name is prepended to the benchmark name and displayed as
     "category" in the web UI.

     Benchmarks are automatically parameterized with the repositories found
     in the REPOS_DIR environment variable.

     `params` is the parameter matrix in the form of a list of tuples
     (param_name, [value0, value1]).

     For example, [(x, [a, b]), (y, [c, d])] declares benchmarks for
     (a, c), (a, d), (b, c) and (b, d).
     """
     params = list(params or [])
     params.insert(0, ("repo", repos))

     def decorator(func):
         @functools.wraps(func)
         def wrapped(repo, *args):
             def perf(command, *a, **kw):
                 return runperfcommand(repo, command, *a, **kw)
             return func(perf, *args)

         wrapped.params = [p[1] for p in params]
         wrapped.param_names = [p[0] for p in params]
         wrapped.pretty_name = name
         return wrapped
     return decorator
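The core of the change is the feature-detection shim around ui.load(). Below is a minimal standalone sketch of the same pattern; the loadui() helper name is illustrative, while uimod.ui.load, uimod.ui() and util.safehasattr are taken from the diff above:

    from mercurial import (
        ui as uimod,
        util,
    )

    def loadui():
        # "historical portability": ui.load() was introduced in changeset
        # d83ca85, so probe for the attribute instead of assuming it exists
        if util.safehasattr(uimod.ui, "load"):
            return uimod.ui.load()
        # older Mercurial versions construct a ui instance directly
        return uimod.ui()

Probing for the attribute rather than comparing version strings is what keeps the suite runnable against the old revisions that asv checks out, which is exactly the "historical portability" policy stated at the top of the file.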
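For context on how perfbench is consumed, here is a sketch of a benchmark module built on the decorator; the track_tags/track_status names and the perftags/perfstatus commands of contrib/perf.py are illustrative choices, not part of this change:

    from . import perfbench

    @perfbench()
    def track_tags(perf):
        # collected by ASV because the name starts with "track_"; the
        # "repo" parameter is injected by the decorator, so the function
        # only receives the perf() runner plus any declared params
        return perf("perftags")

    @perfbench(name="status", params=[("unknown", [False, True])])
    def track_status(perf, unknown):
        # runs once per (repo, unknown) combination of the param matrix
        return perf("perfstatus", unknown=unknown)

Each perf() call goes through runperfcommand(), which loads contrib/perf.py as an extension, invokes the named command against the repository, and parses the "! wall ... (best of ...)" output line into the float that ASV records.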