@@ -1,125 +1,126 @@
 # __init__.py - asv benchmark suite
 #
 # Copyright 2016 Logilab SA <contact@logilab.fr>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 # "historical portability" policy of contrib/benchmarks:
 #
 # We have to make this code work correctly with current mercurial stable branch
 # and if possible with reasonable cost with early Mercurial versions.
 
 '''ASV (https://asv.readthedocs.io) benchmark suite
 
 Benchmark are parameterized against reference repositories found in the
 directory pointed by the REPOS_DIR environment variable.
 
 Invocation example:
 
     $ export REPOS_DIR=~/hgperf/repos
     # run suite on given revision
     $ asv --config contrib/asv.conf.json run REV
     # run suite on new changesets found in stable and default branch
     $ asv --config contrib/asv.conf.json run NEW
     # display a comparative result table of benchmark results between two given
     # revisions
     $ asv --config contrib/asv.conf.json compare REV1 REV2
     # compute regression detection and generate ASV static website
     $ asv --config contrib/asv.conf.json publish
     # serve the static website
     $ asv --config contrib/asv.conf.json preview
 '''
 
 from __future__ import absolute_import
 
 import functools
 import os
 import re
 
 from mercurial import (
     extensions,
     hg,
     ui as uimod,
     util,
 )
 
 basedir = os.path.abspath(
     os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir)
 )
 reposdir = os.environ['REPOS_DIR']
 reposnames = [
     name
     for name in os.listdir(reposdir)
     if os.path.isdir(os.path.join(reposdir, name, ".hg"))
 ]
 if not reposnames:
     raise ValueError("No repositories found in $REPO_DIR")
 outputre = re.compile(
     (
         r'! wall (\d+.\d+) comb \d+.\d+ user \d+.\d+ sys '
         r'\d+.\d+ \(best of \d+\)'
     )
 )
 
 
 def runperfcommand(reponame, command, *args, **kwargs):
     os.environ["HGRCPATH"] = os.environ.get("ASVHGRCPATH", "")
     # for "historical portability"
     # ui.load() has been available since d83ca85
     if util.safehasattr(uimod.ui, "load"):
         ui = uimod.ui.load()
     else:
         ui = uimod.ui()
     repo = hg.repository(ui, os.path.join(reposdir, reponame))
     perfext = extensions.load(
         ui, 'perfext', os.path.join(basedir, 'contrib', 'perf.py')
     )
     cmd = getattr(perfext, command)
-    with ui.silent():
-        output = cmd(ui, repo, *args, **kwargs)
+    ui.pushbuffer()
+    cmd(ui, repo, *args, **kwargs)
+    output = ui.popbuffer()
     match = outputre.search(output)
     if not match:
         raise ValueError("Invalid output {}".format(output))
     return float(match.group(1))
 
 
 def perfbench(repos=reposnames, name=None, params=None):
     """decorator to declare ASV benchmark based on contrib/perf.py extension
 
     An ASV benchmark is a python function with the given attributes:
 
     __name__: should start with track_, time_ or mem_ to be collected by ASV
     params and param_name: parameter matrix to display multiple graphs on the
     same page.
     pretty_name: If defined it's displayed in web-ui instead of __name__
     (useful for revsets)
     the module name is prepended to the benchmark name and displayed as
     "category" in webui.
 
     Benchmarks are automatically parameterized with repositories found in the
     REPOS_DIR environment variable.
 
     `params` is the param matrix in the form of a list of tuple
     (param_name, [value0, value1])
 
     For example [(x, [a, b]), (y, [c, d])] declare benchmarks for
     (a, c), (a, d), (b, c) and (b, d).
     """
     params = list(params or [])
     params.insert(0, ("repo", repos))
 
     def decorator(func):
         @functools.wraps(func)
         def wrapped(repo, *args):
             def perf(command, *a, **kw):
                 return runperfcommand(repo, command, *a, **kw)
 
             return func(perf, *args)
 
         wrapped.params = [p[1] for p in params]
         wrapped.param_names = [p[0] for p in params]
         wrapped.pretty_name = name
         return wrapped
 
     return decorator
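
The timing that runperfcommand() returns is parsed out of the text printed by the perf command, which is why the output is captured with pushbuffer()/popbuffer() rather than merely suppressed. Below is a minimal, self-contained sketch of that capture-and-parse step; the sample timing line is illustrative (the numbers are made up), but it follows the `! wall ... (best of N)` shape that outputre expects:

import re

# Same pattern as in the module above: extract the wall-clock time from
# perf.py's "! wall ... comb ... user ... sys ... (best of N)" report line.
outputre = re.compile(
    r'! wall (\d+.\d+) comb \d+.\d+ user \d+.\d+ sys '
    r'\d+.\d+ \(best of \d+\)'
)

# Illustrative sample only; in the benchmark suite this string comes from
# ui.popbuffer() after the perf command has run.
sample = "! wall 0.003201 comb 0.010000 user 0.010000 sys 0.000000 (best of 921)"
match = outputre.search(sample)
if not match:
    raise ValueError("Invalid output {}".format(sample))
print(float(match.group(1)))  # 0.003201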
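
The perfbench() docstring above describes how benchmarks are declared; a hypothetical benchmark module placed next to this __init__.py might look like the sketch below. The perf command names ("perftags", "perfrevset") and the revset expressions are illustrative assumptions about what contrib/perf.py accepts, not a definitive list:

from . import perfbench


@perfbench()
def track_tags(perf):
    # ASV collects this because the name starts with track_; it runs once per
    # repository found in REPOS_DIR and returns the measured wall time.
    return perf("perftags")


@perfbench(params=[("expr", ["all()", "heads()"])], name="revset")
def track_revset(perf, expr):
    # A second parameter axis: ASV benchmarks every (repo, expr) combination
    # and shows "revset" as the pretty name in the web UI.
    return perf("perfrevset", expr)

Because the decorator inserts ("repo", repos) at the front of params, the repository is supplied by the wrapper and only the extra parameters are passed on to the decorated function, which is why track_revset declares just perf and expr.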