memory-usage: fix `hg log --follow --rev R F` space complexity

When running `hg log --follow --rev REVS FILES`, the log code walks the
history of all FILES, starting from the file revisions that exist in each of
REVS. Before doing so, it checks whether the files actually exist in the
target revisions, by opening the manifest of each revision in REVS and
looking up the associated items from FILES.

Before this changeset, this was done in a way that created a changectx for
each target revision and kept them all in memory while looking into each
file. If the set of REVS is large, this means keeping the manifest of every
entry in REVS in memory. That can be a lot… if REVS is of the form `::X`,
this can quickly become huge and saturate the memory; we have seen usage
allocating 2 GB per second until memory ran out.

So this changeset inverts the two loops, so that only one revision's
manifest is kept in memory during the operation. This solves the memory
explosion issue.
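The fix amounts to inverting the nesting of two loops. A minimal sketch of
the idea, with illustrative names only (this is not the actual patch), and
assuming `repo[rev]` yields a changectx whose manifest stays cached for as
long as the object is referenced:

    # Before: every changectx for REVS stays referenced for the whole scan,
    # so after the first pass over FILES all their manifests sit in memory
    # at once.
    ctxs = [repo[rev] for rev in revs]
    startrevs = {}
    for f in files:
        for ctx in ctxs:
            if f in ctx:  # manifest lookup; the manifest is cached on ctx
                startrevs.setdefault(f, []).append(ctx.rev())

    # After: the loops are inverted, so a single changectx (and manifest)
    # is alive at any point during the scan.
    startrevs = {}
    for rev in revs:
        ctx = repo[rev]
        for f in files:
            if f in ctx:
                startrevs.setdefault(f, []).append(rev)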

# __init__.py - asv benchmark suite
#
# Copyright 2016 Logilab SA <contact@logilab.fr>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

# "historical portability" policy of contrib/benchmarks:
#
# We have to make this code work correctly with the current Mercurial stable
# branch and, if possible at reasonable cost, with early Mercurial versions.
'''ASV (https://asv.readthedocs.io) benchmark suite

Benchmarks are parameterized against reference repositories found in the
directory pointed to by the REPOS_DIR environment variable.

Invocation example:

    $ export REPOS_DIR=~/hgperf/repos
    # run suite on given revision
    $ asv --config contrib/asv.conf.json run REV
    # run suite on new changesets found in stable and default branch
    $ asv --config contrib/asv.conf.json run NEW
    # display a comparative result table of benchmark results between two
    # given revisions
    $ asv --config contrib/asv.conf.json compare REV1 REV2
    # compute regression detection and generate ASV static website
    $ asv --config contrib/asv.conf.json publish
    # serve the static website
    $ asv --config contrib/asv.conf.json preview
'''

import functools
import os
import re

from mercurial import (
extensions,
hg,
ui as uimod,
util,
)

basedir = os.path.abspath(
    os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir)
)

reposdir = os.environ['REPOS_DIR']
reposnames = [
    name
    for name in os.listdir(reposdir)
    if os.path.isdir(os.path.join(reposdir, name, ".hg"))
]

if not reposnames:
    raise ValueError("No repositories found in $REPOS_DIR")

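# perf.py commands print a timing summary line; a representative example
# (values illustrative, format as matched by the regex below):
#   ! wall 0.000012 comb 0.020000 user 0.010000 sys 0.010000 (best of 1234)
# outputre extracts the wall time from that line.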
outputre = re.compile(
(
r'! wall (\d+.\d+) comb \d+.\d+ user \d+.\d+ sys '
r'\d+.\d+ \(best of \d+\)'
)
)


def runperfcommand(reponame, command, *args, **kwargs):
os.environ["HGRCPATH"] = os.environ.get("ASVHGRCPATH", "")
# for "historical portability"
# ui.load() has been available since d83ca85
if util.safehasattr(uimod.ui, "load"):
ui = uimod.ui.load()
else:
ui = uimod.ui()
repo = hg.repository(ui, os.path.join(reposdir, reponame))
perfext = extensions.load(
ui, 'perfext', os.path.join(basedir, 'contrib', 'perf.py')
)
cmd = getattr(perfext, command)
ui.pushbuffer()
cmd(ui, repo, *args, **kwargs)
output = ui.popbuffer()
match = outputre.search(output)
if not match:
raise ValueError("Invalid output {}".format(output))
    return float(match.group(1))


def perfbench(repos=reposnames, name=None, params=None):
    """decorator to declare an ASV benchmark based on the contrib/perf.py
    extension.

    An ASV benchmark is a Python function with the following attributes:

    __name__: should start with track_, time_ or mem_ to be collected by ASV
    params and param_names: parameter matrix used to display multiple graphs
    on the same page.
    pretty_name: if defined, displayed in the web UI instead of __name__
    (useful for revsets).

    The module name is prepended to the benchmark name and displayed as
    "category" in the web UI.

    Benchmarks are automatically parameterized with the repositories found in
    the REPOS_DIR environment variable.

    `params` is the parameter matrix, in the form of a list of tuples
    (param_name, [value0, value1]).

    For example, [(x, [a, b]), (y, [c, d])] declares benchmarks for
    (a, c), (a, d), (b, c) and (b, d).
    """
    params = list(params or [])
    params.insert(0, ("repo", repos))

    def decorator(func):
        @functools.wraps(func)
        def wrapped(repo, *args):
            def perf(command, *a, **kw):
                return runperfcommand(repo, command, *a, **kw)

            return func(perf, *args)

        wrapped.params = [p[1] for p in params]
        wrapped.param_names = [p[0] for p in params]
        wrapped.pretty_name = name
        return wrapped

    return decorator
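
# A minimal usage sketch (hypothetical benchmarks, not part of this file).
# The decorated function receives a `perf` callable bound to one repository
# from REPOS_DIR; values from the extra `params` matrix are passed through
# as positional arguments, and the returned wall time is what ASV records:
#
#   @perfbench()
#   def track_tags(perf):
#       return perf("perftags")
#
#   @perfbench(params=[("rev", ["tip", "0"])])
#   def track_log(perf, rev):
#       # assumes the underlying perf.py command accepts the extra argument
#       return perf("perflog", rev)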