# __init__.py - asv benchmark suite
#
# Copyright 2016 Logilab SA <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

# "historical portability" policy of contrib/benchmarks:
#
# We have to make this code work correctly with the current Mercurial stable
# branch and, if possible at reasonable cost, with earlier Mercurial versions.

'''ASV (https://asv.readthedocs.io) benchmark suite

Benchmarks are parameterized against reference repositories found in the
directory pointed to by the REPOS_DIR environment variable.

Invocation example:

    $ export REPOS_DIR=~/hgperf/repos
    # run suite on given revision
    $ asv --config contrib/asv.conf.json run REV
    # run suite on new changesets found in stable and default branches
    $ asv --config contrib/asv.conf.json run NEW
    # display a comparative result table of benchmark results between two
    # given revisions
    $ asv --config contrib/asv.conf.json compare REV1 REV2
    # compute regression detection and generate the ASV static website
    $ asv --config contrib/asv.conf.json publish
    # serve the static website
    $ asv --config contrib/asv.conf.json preview
'''
import functools
import os
import re

from mercurial import (
    extensions,
    hg,
    ui as uimod,
)

basedir = os.path.abspath(
    os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir)
)
reposdir = os.environ['REPOS_DIR']
reposnames = [
    name
    for name in os.listdir(reposdir)
    if os.path.isdir(os.path.join(reposdir, name, ".hg"))
]
if not reposnames:
    raise ValueError("No repositories found in $REPOS_DIR")
outputre = re.compile(
    r'! wall (\d+\.\d+) comb \d+\.\d+ user \d+\.\d+ sys '
    r'\d+\.\d+ \(best of \d+\)'
)
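# For illustration: contrib/perf.py commands print a timing line that the
# pattern above is built to match (shape inferred from the regex itself), e.g.
#
#   ! wall 0.003456 comb 0.010000 user 0.010000 sys 0.000000 (best of 819)
#
# for which outputre.search(line).group(1) yields '0.003456'.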


def runperfcommand(reponame, command, *args, **kwargs):
    os.environ["HGRCPATH"] = os.environ.get("ASVHGRCPATH", "")
    # for "historical portability"
    # ui.load() has been available since d83ca85
    if hasattr(uimod.ui, "load"):
        ui = uimod.ui.load()
    else:
        ui = uimod.ui()
    repo = hg.repository(ui, os.path.join(reposdir, reponame))
    perfext = extensions.load(
        ui, 'perfext', os.path.join(basedir, 'contrib', 'perf.py')
    )
    cmd = getattr(perfext, command)
    ui.pushbuffer()
    cmd(ui, repo, *args, **kwargs)
    output = ui.popbuffer()
    match = outputre.search(output)
    if not match:
        raise ValueError("Invalid output {}".format(output))
    return float(match.group(1))
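
# Hypothetical call, assuming a repository named "my-repo" under $REPOS_DIR:
#
#   runperfcommand("my-repo", "perftags")
#
# would run contrib/perf.py's perftags command against that repository and
# return its wall time as a float.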


def perfbench(repos=reposnames, name=None, params=None):
"""decorator to declare ASV benchmark based on contrib/perf.py extension
An ASV benchmark is a python function with the given attributes:
__name__: should start with track_, time_ or mem_ to be collected by ASV
params and param_name: parameter matrix to display multiple graphs on the
same page.
pretty_name: If defined it's displayed in web-ui instead of __name__
(useful for revsets)
the module name is prepended to the benchmark name and displayed as
"category" in webui.
Benchmarks are automatically parameterized with repositories found in the
REPOS_DIR environment variable.
`params` is the param matrix in the form of a list of tuple
(param_name, [value0, value1])
For example [(x, [a, b]), (y, [c, d])] declare benchmarks for
(a, c), (a, d), (b, c) and (b, d).
"""
params = list(params or [])
params.insert(0, ("repo", repos))

    def decorator(func):
        @functools.wraps(func)
        def wrapped(repo, *args):
            def perf(command, *a, **kw):
                return runperfcommand(repo, command, *a, **kw)

            return func(perf, *args)

        wrapped.params = [p[1] for p in params]
        wrapped.param_names = [p[0] for p in params]
        wrapped.pretty_name = name
        return wrapped

    return decorator
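
# A minimal usage sketch (hypothetical benchmark module under
# contrib/benchmarks/): the decorated function receives a `perf` callable
# bound to one repository from $REPOS_DIR and returns the measured wall
# time, which ASV collects because the name starts with track_.
#
#   from . import perfbench
#
#   @perfbench()
#   def track_tags(perf):
#       return perf("perftags")
#
#   @perfbench(name="status", params=[("unknown", [False, True])])
#   def track_status(perf, unknown):
#       return perf("perfstatus", unknown=unknown)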