# perf.py - performance test routines
'''helper extension to measure performance

Configurations
==============

``perf``
--------

``all-timing``
  When set, additional statistics will be reported for each benchmark: best,
  worst, median, and average. If not set, only the best timing is reported
  (default: off).

``presleep``
  number of seconds to wait before any group of runs (default: 1)

``pre-run``
  number of runs to perform before starting measurement.

``profile-benchmark``
  Enable profiling for the benchmarked section.
  (The first iteration is benchmarked)

``run-limits``
  Control the number of runs each benchmark will perform. The option value
  should be a list of `<time>-<numberofrun>` pairs. After each run the
  conditions are considered in order with the following logic: if the
  benchmark has been running for <time> seconds and we have performed
  <numberofrun> iterations, stop the benchmark.

  The default value is: `3.0-100, 10.0-3`

``stub``
  When set, benchmarks will only be run once; useful for testing
  (default: off)
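
For example, a configuration enabling full statistics, a short warm-up and
custom run limits could look like this (values are illustrative only)::

  [perf]
  all-timing = yes
  pre-run = 2
  run-limits = 5.0-50, 15.0-5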
'''

# "historical portability" policy of perf.py:
#
# We have to do:
# - make perf.py "loadable" with as wide a range of Mercurial versions as
#   possible
#   This doesn't mean that perf commands work correctly with that Mercurial.
#   BTW, perf.py itself has been available since 1.1 (or eb240755386d).
# - make historical perf commands work correctly with as wide a range of
#   Mercurial versions as possible
#
# We have to do, if possible with reasonable cost:
# - make recent perf commands for historical features work correctly
#   with early Mercurial
#
# We don't have to do:
# - make perf commands for recent features work correctly with early
#   Mercurial

from __future__ import absolute_import
import contextlib
import functools
import gc
import os
import random
import shutil
import struct
import sys
import tempfile
import threading
import time
from mercurial import (
    changegroup,
    cmdutil,
    commands,
    copies,
    error,
    extensions,
    hg,
    mdiff,
    merge,
    revlog,
    util,
)

# for "historical portability":
# try to import modules separately (in dict order), and ignore
# failure, because these aren't available with early Mercurial
try:
    from mercurial import branchmap  # since 2.5 (or bcee63733aad)
except ImportError:
    pass
try:
    from mercurial import obsolete  # since 2.3 (or ad0d6c2b3279)
except ImportError:
    pass
try:
    from mercurial import registrar  # since 3.7 (or 37d50250b696)

    dir(registrar)  # forcibly load it
except ImportError:
    registrar = None
try:
    from mercurial import repoview  # since 2.5 (or 3a6ddacb7198)
except ImportError:
    pass
try:
    from mercurial.utils import repoviewutil  # since 5.0
except ImportError:
    repoviewutil = None
try:
    from mercurial import scmutil  # since 1.9 (or 8b252e826c68)
except ImportError:
    pass
try:
    from mercurial import setdiscovery  # since 1.9 (or cb98fed52495)
except ImportError:
    pass
try:
    from mercurial import profiling
except ImportError:
    profiling = None


def identity(a):
    return a


try:
    from mercurial import pycompat

    getargspec = pycompat.getargspec  # added to module after 4.5
    _byteskwargs = pycompat.byteskwargs  # since 4.1 (or fbc3f73dc802)
    _sysstr = pycompat.sysstr  # since 4.0 (or 2219f4f82ede)
    _bytestr = pycompat.bytestr  # since 4.2 (or b70407bd84d5)
    _xrange = pycompat.xrange  # since 4.8 (or 7eba8f83129b)
    fsencode = pycompat.fsencode  # since 3.9 (or f4a5e0e86a7e)
    if pycompat.ispy3:
        _maxint = sys.maxsize  # per py3 docs for replacing maxint
    else:
        _maxint = sys.maxint
except (NameError, ImportError, AttributeError):
    import inspect

    getargspec = inspect.getargspec
    _byteskwargs = identity
    _bytestr = str
    fsencode = identity  # no py3 support
    _maxint = sys.maxint  # no py3 support
    _sysstr = lambda x: x  # no py3 support
    _xrange = xrange

try:
    # 4.7+
    queue = pycompat.queue.Queue
except (NameError, AttributeError, ImportError):
    # <4.7.
    try:
        queue = pycompat.queue
    except (NameError, AttributeError, ImportError):
        import Queue as queue

try:
    from mercurial import logcmdutil

    makelogtemplater = logcmdutil.maketemplater
except (AttributeError, ImportError):
    try:
        makelogtemplater = cmdutil.makelogtemplater
    except (AttributeError, ImportError):
        makelogtemplater = None

# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()


def safehasattr(thing, attr):
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined


setattr(util, 'safehasattr', safehasattr)

# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == b'nt':
    util.timer = time.clock
else:
    util.timer = time.time

# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(
    cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
)

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(
    cmdutil,
    "debugrevlogopts",
    getattr(
        commands,
        "debugrevlogopts",
        [
            (b'c', b'changelog', False, b'open changelog'),
            (b'm', b'manifest', False, b'open manifest'),
            (b'', b'dir', False, b'open directory manifest'),
        ],
    ),
)

cmdtable = {}


# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    return cmd.split(b"|")


if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if 'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command

        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)

else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func

        return decorator


try:
    import mercurial.registrar
    import mercurial.configitems

    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'pre-run',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
except (ImportError, AttributeError):
    pass
except TypeError:
    # compatibility fix for a11fd395e83f
    # hg version: 5.2
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'pre-run',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
    )


def getlen(ui):
    if ui.configbool(b"perf", b"stub", False):
        return lambda x: 1
    return len


class noop(object):
    """dummy context manager"""

    def __enter__(self):
        pass

    def __exit__(self, *args):
        pass


NOOPCTX = noop()


def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter"""

            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short

            def __nonzero__(self):
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
            continue
        try:
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
    )
    return t, fm


def stub_timer(fm, func, setup=None, title=None):
    if setup is not None:
        setup()
    func()


@contextlib.contextmanager
def timeone():
    r = []
    ostart = os.times()
    cstart = util.timer()
    yield r
    cstop = util.timer()
    ostop = os.times()
    a, b = ostart, ostop
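    # each sample appended to r is a (wall clock, user CPU, system CPU)
    # triple, derived from util.timer() and os.times() deltas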
    r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))


# list of stop conditions (elapsed time, minimal run count)
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)
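# Reading of the pairs above: a benchmark keeps iterating until, for some
# pair, it has both run for at least that many seconds and completed at least
# that many iterations; with the defaults, "3s and 100 runs" or "10s and
# 3 runs", whichever is satisfied first (see _timer below).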


def _timer(
    fm,
    func,
    setup=None,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
):
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    for i in range(prerun):
        if setup is not None:
            setup()
        func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with profiler:
            with timeone() as item:
                r = func()
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    formatone(fm, results, title=title, result=r, displayall=displayall)


def formatone(fm, timings, title=None, result=None, displayall=False):
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def display(role, entry):
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    min_val = timings[0]
    display(b'best', min_val)
    if displayall:
        max_val = timings[-1]
        display(b'max', max_val)
        avg = tuple([sum(x) / count for x in zip(*timings)])
        display(b'avg', avg)
        median = timings[len(timings) // 2]
        display(b'median', median)


# utilities for historical portability


def getint(ui, section, name, default):
    # for "historical portability":
    # ui.configint has been available since 1.9 (or fa2b596db182)
    v = ui.config(section, name, None)
    if v is None:
        return default
    try:
        return int(v)
    except ValueError:
        raise error.ConfigError(
            b"%s.%s is not an integer ('%s')" % (section, name, v)
        )


def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has the 'name' attribute before a subsequent setattr

    This function aborts if 'obj' doesn't have the 'name' attribute at
    runtime. This avoids overlooking a future removal of the attribute,
    which would silently break the assumptions of the performance
    measurement.

    This function returns an object used to (1) assign a new value to the
    attribute and (2) restore its original value.

    If 'ignoremissing' is true, a missing 'name' attribute doesn't cause
    an abort; the function returns None instead. This is useful to examine
    an attribute that isn't present in all Mercurial versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort(
            (
                b"missing attribute %s of %s might break assumption"
                b" of performance measurement"
            )
            % (name, obj)
        )

    origvalue = getattr(obj, _sysstr(name))

    class attrutil(object):
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()


# utilities to examine each internal API change


def getbranchmapsubsettable():
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    # - repoviewutil since 5.0
    for mod in (branchmap, repoview, repoviewutil):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )


def getsvfs(repo):
    """Return appropriate object to access files under .hg/store"""
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf)
    svfs = getattr(repo, 'svfs', None)
    if svfs:
        return svfs
    else:
        return getattr(repo, 'sopener')


def getvfs(repo):
    """Return appropriate object to access files under .hg"""
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf)
    vfs = getattr(repo, 'vfs', None)
    if vfs:
        return vfs
    else:
        return getattr(repo, 'opener')


def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API"""
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if '_tagscache' in vars(repo):
                del repo.__dict__['_tagscache']

        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")


# utilities to clear cache


def clearfilecache(obj, attrname):
    unfiltered = getattr(obj, 'unfiltered', None)
    if unfiltered is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)


def clearchangelog(repo):
    if repo is not repo.unfiltered():
        object.__setattr__(repo, '_clcachekey', None)
        object.__setattr__(repo, '_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')


# perf commands


@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    timer(
        lambda: len(
            list(
                repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
            )
        )
    )
    fm.end()


@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    timer(lambda: len(fc.annotate(True)))
    fm.end()


@command(
    b'perfstatus',
    [
        (b'u', b'unknown', False, b'ask status to look for unknown files'),
        (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
    ]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call

    The repository data are preserved between each call.

    By default, only the status of the tracked files is requested. If
    `--unknown` is passed, the "unknown" files are also tracked.
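
    Example invocations (illustrative; the extension must be enabled)::

        $ hg perfstatus
        $ hg perfstatus --dirstate --unknown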
""" | ||||
Matt Harbison
|
r39850 | opts = _byteskwargs(opts) | ||
Augie Fackler
|
r43346 | # m = match.always(repo.root, repo.getcwd()) | ||
# timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False, | ||||
Brodie Rao
|
r16683 | # False)))) | ||
Matt Mackall
|
r27017 | timer, fm = gettimer(ui, opts) | ||
r43740 | if opts[b'dirstate']: | |||
dirstate = repo.dirstate | ||||
m = scmutil.matchall(repo) | ||||
unknown = opts[b'unknown'] | ||||
def status_dirstate(): | ||||
s = dirstate.status( | ||||
m, subrepos=[], ignored=False, clean=False, unknown=unknown | ||||
) | ||||
Augie Fackler
|
r44052 | sum(map(bool, s)) | ||
r43740 | ||||
timer(status_dirstate) | ||||
else: | ||||
timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown'])))) | ||||
Pierre-Yves David
|
r23171 | fm.end() | ||
Matt Mackall
|
r7366 | |||


@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    try:
        oldquiet = repo.ui.quiet
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        if 'uipathfn' in getargspec(scmutil.addremove).args:
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()


def clearcaches(cl):
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # <= hg-5.2
        from mercurial.node import nullid, nullrev

        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None


@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of the changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def s():
        clearcaches(cl)

    def d():
        len(cl.headrevs())

    timer(d, setup=s)
    fm.end()


@command(
    b'perftags',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perftags(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']

    def s():
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()

    def t():
        return len(repo.tags())

    timer(t, setup=s)
    fm.end()


@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()

    def d():
        for a in repo.changelog.ancestors(heads):
            pass

    timer(d)
    fm.end()


@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()

    def d():
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in s

    timer(d)
    fm.end()


@command(b'perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between the local repo and the peer at the given path"""
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)
    path = ui.expandpath(path)

    def s():
        repos[1] = hg.peer(ui, opts, path)

    def d():
        setdiscovery.findcommonheads(ui, *repos)

    timer(d, setup=s)
    fm.end()


@command(
    b'perfbookmarks',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']

    def s():
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')

    def d():
        repo._bookmarks

    timer(d, setup=s)
    fm.end()


@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    def makebench(fn):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend(
                [
                    (makebench(deltaiter), b'cg1 deltaiter()'),
                    (makebench(iterchunks), b'cg1 getchunks()'),
                    (makereadnbytes(8192), b'cg1 read(8k)'),
                    (makereadnbytes(16384), b'cg1 read(16k)'),
                    (makereadnbytes(32768), b'cg1 read(32k)'),
                    (makereadnbytes(131072), b'cg1 read(128k)'),
                ]
            )
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend(
                [
                    (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                    (makebench(iterparts), b'bundle2 iterparts()'),
                    (
                        makebench(iterpartsseekable),
                        b'bundle2 iterparts() seekable',
                    ),
                    (makebench(seek), b'bundle2 part seek()'),
                    (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                    (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                    (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                    (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
                ]
            )
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()


@command(
    b'perfchangegroupchangelog',
    formatteropts
    + [
        (b'', b'cgversion', b'02', b'changegroup version'),
        (b'r', b'rev', b'', b'revisions to add to changegroup'),
    ],
)
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()


@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate

    def d():
        dirstate.hasdir(b'a')
        del dirstate._map._dirs

    timer(d)
    fm.end()


@command(
    b'perfdirstate',
    [
        (
            b'',
            b'iteration',
            None,
            b'benchmark a full iteration for the dirstate',
        ),
        (
            b'',
            b'contains',
            None,
            b'benchmark a large amount of `nf in dirstate` calls',
        ),
    ]
    + formatteropts,
)
def perfdirstate(ui, repo, **opts):
    """benchmark the time of various dirstate operations

    By default, benchmark the time necessary to load a dirstate from scratch.
    The dirstate is loaded to the point where a "contains" request can be
    answered.
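
    Example invocations (illustrative)::

        $ hg perfdirstate
        $ hg perfdirstate --iteration
        $ hg perfdirstate --contains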
""" | ||||
Matt Harbison
|
r39850 | opts = _byteskwargs(opts) | ||
Pierre-Yves David
|
r25494 | timer, fm = gettimer(ui, opts) | ||
Pulkit Goyal
|
r39398 | b"a" in repo.dirstate | ||
Augie Fackler
|
r43346 | |||
r43468 | if opts[b'iteration'] and opts[b'contains']: | |||
msg = b'only specify one of --iteration or --contains' | ||||
raise error.Abort(msg) | ||||
r43466 | if opts[b'iteration']: | |||
setup = None | ||||
dirstate = repo.dirstate | ||||
r43468 | ||||
r43466 | def d(): | |||
for f in dirstate: | ||||
pass | ||||
r43468 | ||||
elif opts[b'contains']: | ||||
setup = None | ||||
dirstate = repo.dirstate | ||||
allfiles = list(dirstate) | ||||
# also add file path that will be "missing" from the dirstate | ||||
allfiles.extend([f[::-1] for f in allfiles]) | ||||
def d(): | ||||
for f in allfiles: | ||||
f in dirstate | ||||
r43466 | else: | |||
r43468 | ||||
r43466 | def setup(): | |||
repo.dirstate.invalidate() | ||||
def d(): | ||||
b"a" in repo.dirstate | ||||
Augie Fackler
|
r43346 | |||
r43392 | timer(d, setup=setup) | |||
Pierre-Yves David
|
r23171 | fm.end() | ||
Matt Mackall
|
r7366 | |||


@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark a 'dirstate.hasdir' call from an empty `dirs` cache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo.dirstate.hasdir(b"a")

    def setup():
        del repo.dirstate._map._dirs

    def d():
        repo.dirstate.hasdir(b"a")

    timer(d, setup=setup)
    fm.end()


@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.filefoldmap.get()` request

    The dirstate filefoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    dirstate._map.filefoldmap.get(b'a')

    def setup():
        del dirstate._map.filefoldmap

    def d():
        dirstate._map.filefoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()


@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.dirfoldmap.get()` request

    The dirstate dirfoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    dirstate._map.dirfoldmap.get(b'a')

    def setup():
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs

    def d():
        dirstate._map.dirfoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()


@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark the time it takes to write the dirstate to disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    b"a" in ds

    def setup():
        ds._dirty = True

    def d():
        ds.write(repo.currenttransaction())

    timer(d, setup=setup)
    fm.end()


def _getmergerevs(repo, opts):
    """parse command arguments to return the revs involved in a merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        fromrev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[fromrev]
    else:
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)


@command(
    b'perfmergecalculate',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecalculate(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(
            repo,
            wctx,
            rctx,
            [ancestor],
            branchmerge=False,
            force=False,
            acceptremote=True,
            followcopies=True,
        )

    timer(d)
    fm.end()


@command(
    b'perfmergecopies',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        copies.mergecopies(repo, wctx, rctx, ancestor)

    timer(d)
    fm.end()


@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)

    def d():
        copies.pathcopies(ctx1, ctx2)

    timer(d)
    fm.end()


@command(
    b'perfphases',
    [
        (b'', b'full', False, b'include file reading time too'),
    ],
    b"",
)
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')

    def d():
        phases = _phases
        if full:
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)

    timer(d)
    fm.end()


@command(b'perfphasesremote', [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import bin
    from mercurial import (
        exchange,
        hg,
        phases,
    )

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            b'default repository not configured!',
            hint=b"see 'hg help config.paths'",
        )
    dest = path.pushloc or path.loc
    ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(
            b'listkeys', {b'namespace': b'phases'}
        ).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.statusnoi18n(b'publishing: yes\n')
    else:
        ui.statusnoi18n(b'publishing: no\n')

    has_node = getattr(repo.changelog.index, 'has_node', None)
    if has_node is None:
        has_node = repo.changelog.nodemap.__contains__

    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if has_node(node) and int(phase):
            nonpublishroots += 1
    ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
    ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)

    def d():
        phases.remotephasessummary(repo, remotesubset, remotephases)

    timer(d)
    fm.end()
@command( | ||||
b'perfmanifest', | ||||
[ | ||||
(b'm', b'manifest-rev', False, b'Look up a manifest node revision'), | ||||
(b'', b'clear-disk', False, b'clear on-disk caches too'), | ||||
] | ||||
+ formatteropts, | ||||
b'REV|NODE', | ||||
) | ||||
Martijn Pieters
|
r38802 | def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts): | ||
Boris Feld
|
r38715 | """benchmark the time to read a manifest from disk and return a usable | ||
dict-like object | ||||
Manifest caches are cleared before retrieval.""" | ||||
Matt Harbison
|
r39850 | opts = _byteskwargs(opts) | ||
Pierre-Yves David
|
r25494 | timer, fm = gettimer(ui, opts) | ||
Martijn Pieters
|
r38802 | if not manifest_rev: | ||
ctx = scmutil.revsingle(repo, rev, rev) | ||||
t = ctx.manifestnode() | ||||
else: | ||||
Gregory Szorc
|
r39354 | from mercurial.node import bin | ||
if len(rev) == 40: | ||||
t = bin(rev) | ||||
else: | ||||
try: | ||||
rev = int(rev) | ||||
Pulkit Goyal
|
r39398 | if util.safehasattr(repo.manifestlog, b'getstorage'): | ||
Gregory Szorc
|
r39354 | t = repo.manifestlog.getstorage(b'').node(rev) | ||
else: | ||||
t = repo.manifestlog._revlog.lookup(rev) | ||||
except ValueError: | ||||
Augie Fackler
|
r43346 | raise error.Abort( | ||
Martin von Zweigbergk
|
r43387 | b'manifest revision must be integer or full node' | ||
Augie Fackler
|
r43346 | ) | ||
Matt Mackall
|
r7366 | def d(): | ||
Martijn Pieters
|
r38803 | repo.manifestlog.clearcaches(clear_persisted_data=clear_disk) | ||
Durham Goode
|
r30369 | repo.manifestlog[t].read() | ||
Augie Fackler
|
r43346 | |||
Matt Mackall
|
r7366 | timer(d) | ||
Pierre-Yves David
|
r23171 | fm.end() | ||
Matt Mackall
|
r7366 | |||
Augie Fackler
|
r43346 | |||
Pulkit Goyal
|
r39398 | @command(b'perfchangeset', formatteropts) | ||
Pierre-Yves David
|
r25494 | def perfchangeset(ui, repo, rev, **opts): | ||
Matt Harbison
|
r39850 | opts = _byteskwargs(opts) | ||
Pierre-Yves David
|
r25494 | timer, fm = gettimer(ui, opts) | ||
Martin von Zweigbergk
|
r37373 | n = scmutil.revsingle(repo, rev).node() | ||
Augie Fackler
|
r43346 | |||
Matt Mackall
|
r16262 | def d(): | ||
Simon Heimberg
|
r19378 | repo.changelog.read(n) | ||
Augie Fackler
|
r43346 | # repo.changelog._cache = None | ||
Matt Mackall
|
r16262 | timer(d) | ||
Pierre-Yves David
|
r23171 | fm.end() | ||
Matt Mackall
|
r16262 | |||
Augie Fackler
|
r43346 | |||
Boris Feld
|
r40781 | @command(b'perfignore', formatteropts) | ||
def perfignore(ui, repo, **opts): | ||||
"""benchmark operation related to computing ignore""" | ||||
opts = _byteskwargs(opts) | ||||
timer, fm = gettimer(ui, opts) | ||||
dirstate = repo.dirstate | ||||
def setupone(): | ||||
dirstate.invalidate() | ||||
clearfilecache(dirstate, b'_ignore') | ||||
def runone(): | ||||
dirstate._ignore | ||||
timer(runone, setup=setupone, title=b"load") | ||||
fm.end() | ||||
Augie Fackler
|
r43346 | |||
@command( | ||||
b'perfindex', | ||||
[ | ||||
(b'', b'rev', [], b'revision to be looked up (default tip)'), | ||||
(b'', b'no-lookup', None, b'do not perform a revision lookup after creation'),
] | ||||
+ formatteropts, | ||||
) | ||||
Pierre-Yves David
|
r25494 | def perfindex(ui, repo, **opts): | ||
Boris Feld
|
r41482 | """benchmark index creation time followed by a lookup | ||
The default is to look `tip` up. Depending on the index implementation, | ||||
the revision looked up can matter. For example, an implementation
scanning the index will have a faster lookup time for `--rev tip` than for | ||||
Boris Feld
|
r41484 | `--rev 0`. The number of looked up revisions and their order can also | ||
matter.
Examples of useful sets to test:
r44732 | ||||
Boris Feld
|
r41484 | * tip | ||
* 0 | ||||
* -10: | ||||
* :10 | ||||
* -10: + :10 | ||||
* :10: + -10: | ||||
* -10000: | ||||
* -10000: + 0 | ||||
Boris Feld
|
r41482 | |||
Boris Feld
|
r41610 | It is not currently possible to check for lookup of a missing node. For | ||
deeper lookup benchmarking, check out the `perfnodemap` command."""
Matt Mackall
|
r13255 | import mercurial.revlog | ||
Augie Fackler
|
r43346 | |||
Matt Harbison
|
r39850 | opts = _byteskwargs(opts) | ||
Pierre-Yves David
|
r25494 | timer, fm = gettimer(ui, opts) | ||
Augie Fackler
|
r43346 | mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg | ||
Boris Feld
|
r41483 | if opts[b'no_lookup']: | ||
Boris Feld
|
r41484 | if opts['rev']: | ||
raise error.Abort('--no-lookup and --rev are mutually exclusive') | ||||
nodes = [] | ||||
elif not opts[b'rev']: | ||||
nodes = [repo[b"tip"].node()] | ||||
Boris Feld
|
r40820 | else: | ||
Boris Feld
|
r41484 | revs = scmutil.revrange(repo, opts[b'rev']) | ||
cl = repo.changelog | ||||
nodes = [cl.node(r) for r in revs] | ||||
Boris Feld
|
r40819 | |||
unfi = repo.unfiltered() | ||||
# find the filecache func directly | ||||
# This avoid polluting the benchmark with the filecache logic | ||||
makecl = unfi.__class__.changelog.func | ||||
Augie Fackler
|
r43346 | |||
Boris Feld
|
r40819 | def setup(): | ||
# probably not necessary, but for good measure | ||||
clearchangelog(unfi) | ||||
Augie Fackler
|
r43346 | |||
Matt Mackall
|
r7366 | def d(): | ||
Boris Feld
|
r40819 | cl = makecl(unfi) | ||
Boris Feld
|
r41484 | for n in nodes: | ||
Boris Feld
|
r41483 | cl.rev(n) | ||
Augie Fackler
|
r43346 | |||
Boris Feld
|
r40819 | timer(d, setup=setup) | ||
Pierre-Yves David
|
r23171 | fm.end() | ||
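# Illustrative sketch (not part of upstream perf.py): `unfi.__class__.changelog`
# above is a caching descriptor (a filecache), and calling its underlying
# `.func` directly rebuilds the changelog on every call instead of returning
# the cached instance, which is exactly what an index-creation benchmark
# wants. The toy descriptor below uses hypothetical names and shows why the
# `.func` access bypasses the cache; it is not used by any perf command.
def _example_filecache_bypass():
    class cachedattr(object):
        def __init__(self, func):
            self.func = func

        def __get__(self, obj, objtype=None):
            if obj is None:
                return self
            # cache the computed value on the instance on first access
            return obj.__dict__.setdefault(self.func.__name__, self.func(obj))

    class repolike(object):
        @cachedattr
        def changelog(self):
            return object()  # stands in for an expensive changelog build

    r = repolike()
    cached = r.changelog                  # built once, then reused
    rebuilt = repolike.changelog.func(r)  # bypasses the cache entirely
    return cached is r.changelog and cached is not rebuilt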
Matt Mackall
|
r7366 | |||
Augie Fackler
|
r43346 | |||
@command( | ||||
b'perfnodemap', | ||||
[ | ||||
(b'', b'rev', [], b'revision to be looked up (default tip)'), | ||||
(b'', b'clear-caches', True, b'clear revlog cache between calls'), | ||||
] | ||||
+ formatteropts, | ||||
) | ||||
Boris Feld
|
r41610 | def perfnodemap(ui, repo, **opts): | ||
"""benchmark the time necessary to look up revision from a cold nodemap | ||||
Depending on the implementation, the amount and order of revision we look | ||||
up can varies. Example of useful set to test: | ||||
* tip | ||||
* 0 | ||||
* -10: | ||||
* :10 | ||||
* -10: + :10 | ||||
* :10: + -10: | ||||
* -10000: | ||||
* -10000: + 0 | ||||
The command currently focuses on valid binary lookup. Benchmarking for
hexlookup, prefix lookup and missing lookup would also be valuable. | ||||
""" | ||||
import mercurial.revlog | ||||
Augie Fackler
|
r43346 | |||
Boris Feld
|
r41610 | opts = _byteskwargs(opts) | ||
timer, fm = gettimer(ui, opts) | ||||
Augie Fackler
|
r43346 | mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg | ||
Boris Feld
|
r41610 | |||
unfi = repo.unfiltered() | ||||
Boris Feld
|
r41611 | clearcaches = opts['clear_caches'] | ||
Boris Feld
|
r41610 | # find the filecache func directly | ||
# This avoid polluting the benchmark with the filecache logic | ||||
makecl = unfi.__class__.changelog.func | ||||
if not opts[b'rev']: | ||||
raise error.Abort('use --rev to specify revisions to look up') | ||||
revs = scmutil.revrange(repo, opts[b'rev']) | ||||
cl = repo.changelog | ||||
nodes = [cl.node(r) for r in revs] | ||||
# use a list to pass reference to a nodemap from one closure to the next | ||||
nodeget = [None] | ||||
Augie Fackler
|
r43346 | |||
Boris Feld
|
r41610 | def setnodeget(): | ||
# probably not necessary, but for good measure | ||||
clearchangelog(unfi) | ||||
r43971 | cl = makecl(unfi) | |||
if util.safehasattr(cl.index, 'get_rev'): | ||||
nodeget[0] = cl.index.get_rev | ||||
else: | ||||
nodeget[0] = cl.nodemap.get | ||||
Boris Feld
|
r41610 | |||
def d(): | ||||
get = nodeget[0] | ||||
for n in nodes: | ||||
get(n) | ||||
Boris Feld
|
r41611 | setup = None | ||
if clearcaches: | ||||
Augie Fackler
|
r43346 | |||
Boris Feld
|
r41611 | def setup(): | ||
setnodeget() | ||||
Augie Fackler
|
r43346 | |||
Boris Feld
|
r41611 | else: | ||
setnodeget() | ||||
Augie Fackler
|
r43346 | d() # prewarm the data structure | ||
Boris Feld
|
r41610 | timer(d, setup=setup) | ||
fm.end() | ||||
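# Illustrative sketch (not part of upstream perf.py): the `nodeget = [None]`
# one-element list above is a portable stand-in for `nonlocal`, letting the
# setup closure hand a freshly resolved lookup function to the timed closure
# between runs. A minimal standalone version of the same pattern follows,
# with hypothetical names and a plain dictionary lookup instead of a real
# nodemap; it is not used by any perf command.
def _example_shared_slot():
    holder = [None]  # shared mutable cell visible to both closures

    def setup():
        # rebind the slot; the real benchmark installs cl.index.get_rev
        # or cl.nodemap.get here after clearing the changelog
        holder[0] = {'node-a': 0}.get

    def run():
        get = holder[0]  # read whatever setup() installed last
        return get('node-a')

    setup()
    return run()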
Augie Fackler
|
r43346 | |||
Pulkit Goyal
|
r39398 | @command(b'perfstartup', formatteropts) | ||
Pierre-Yves David
|
r25494 | def perfstartup(ui, repo, **opts): | ||
Matt Harbison
|
r39850 | opts = _byteskwargs(opts) | ||
Pierre-Yves David
|
r25494 | timer, fm = gettimer(ui, opts) | ||
Augie Fackler
|
r43346 | |||
Matt Mackall
|
r7366 | def d(): | ||
Augie Fackler
|
r43906 | if os.name != 'nt': | ||
Augie Fackler
|
r43346 | os.system( | ||
b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0]) | ||||
) | ||||
Matt Harbison
|
r27382 | else: | ||
Augie Fackler
|
r43906 | os.environ['HGRCPATH'] = r' ' | ||
Augie Fackler
|
r43809 | os.system("%s version -q > NUL" % sys.argv[0]) | ||
Augie Fackler
|
r43346 | |||
Matt Mackall
|
r7366 | timer(d) | ||
Pierre-Yves David
|
r23171 | fm.end() | ||
Matt Mackall
|
r7366 | |||
Augie Fackler
|
r43346 | |||
Pulkit Goyal
|
r39398 | @command(b'perfparents', formatteropts) | ||
Pierre-Yves David
|
r25494 | def perfparents(ui, repo, **opts): | ||
r42183 | """benchmark the time necessary to fetch one changeset's parents. | |||
Augie Fackler
|
r42188 | The fetch is done using the `node identifier`, traversing all object layers | ||
from the repository object. The first N revisions will be used for this | ||||
r42183 | benchmark. N is controlled by the ``perf.parentscount`` config option | |||
(default: 1000). | ||||
""" | ||||
Matt Harbison
|
r39850 | opts = _byteskwargs(opts) | ||
Pierre-Yves David
|
r25494 | timer, fm = gettimer(ui, opts) | ||
timeless
|
r27305 | # control the number of commits perfparents iterates over | ||
# experimental config: perf.parentscount | ||||
Pulkit Goyal
|
r39398 | count = getint(ui, b"perf", b"parentscount", 1000) | ||
timeless
|
r27305 | if len(repo.changelog) < count: | ||
Pulkit Goyal
|
r39398 | raise error.Abort(b"repo needs %d commits for this test" % count) | ||
timeless
|
r27100 | repo = repo.unfiltered() | ||
Matt Harbison
|
r39847 | nl = [repo.changelog.node(i) for i in _xrange(count)] | ||
Augie Fackler
|
r43346 | |||
Matt Mackall
|
r7366 | def d(): | ||
for n in nl: | ||||
repo.changelog.parents(n) | ||||
Augie Fackler
|
r43346 | |||
Matt Mackall
|
r7366 | timer(d) | ||
Pierre-Yves David
|
r23171 | fm.end() | ||
Matt Mackall
|
r7366 | |||
Augie Fackler
|
r43346 | |||
Pulkit Goyal
|
r39398 | @command(b'perfctxfiles', formatteropts) | ||
timeless
|
r27095 | def perfctxfiles(ui, repo, x, **opts): | ||
Matt Harbison
|
r39850 | opts = _byteskwargs(opts) | ||
Matt Mackall
|
r24349 | x = int(x) | ||
Pierre-Yves David
|
r25494 | timer, fm = gettimer(ui, opts) | ||
Augie Fackler
|
r43346 | |||
Matt Mackall
|
r24349 | def d(): | ||
len(repo[x].files()) | ||||
Augie Fackler
|
r43346 | |||
Matt Mackall
|
r24349 | timer(d) | ||
fm.end() | ||||
Augie Fackler
|
r43346 | |||
Pulkit Goyal
|
r39398 | @command(b'perfrawfiles', formatteropts) | ||
timeless
|
r27095 | def perfrawfiles(ui, repo, x, **opts): | ||
Matt Harbison
|
r39850 | opts = _byteskwargs(opts) | ||
Matt Mackall
|
r24349 | x = int(x) | ||
Pierre-Yves David
|
r25494 | timer, fm = gettimer(ui, opts) | ||
Matt Mackall
|
r24349 | cl = repo.changelog | ||
Augie Fackler
|
r43346 | |||
Matt Mackall
|
r24349 | def d(): | ||
len(cl.read(x)[3]) | ||||
Augie Fackler
|
r43346 | |||
Matt Mackall
|
r24349 | timer(d) | ||
fm.end() | ||||
Augie Fackler
|
r43346 | |||
Pulkit Goyal
|
r39398 | @command(b'perflookup', formatteropts) | ||
Pierre-Yves David
|
r25494 | def perflookup(ui, repo, rev, **opts): | ||
Matt Harbison
|
r39850 | opts = _byteskwargs(opts) | ||
Pierre-Yves David
|
r25494 | timer, fm = gettimer(ui, opts) | ||
Matt Mackall
|
r7366 | timer(lambda: len(repo.lookup(rev))) | ||
Pierre-Yves David
|
r23171 | fm.end() | ||
Matt Mackall
|
r7366 | |||
Augie Fackler
|
r43346 | |||
@command( | ||||
b'perflinelogedits', | ||||
[ | ||||
(b'n', b'edits', 10000, b'number of edits'), | ||||
(b'', b'max-hunk-lines', 10, b'max lines in a hunk'), | ||||
], | ||||
norepo=True, | ||||
) | ||||
Jun Wu
|
r39005 | def perflinelogedits(ui, **opts): | ||
from mercurial import linelog | ||||
Matt Harbison
|
r39850 | opts = _byteskwargs(opts) | ||
Pulkit Goyal
|
r39398 | edits = opts[b'edits'] | ||
maxhunklines = opts[b'max_hunk_lines'] | ||||
Jun Wu
|
r39005 | |||
maxb1 = 100000 | ||||
random.seed(0) | ||||
randint = random.randint | ||||
currentlines = 0 | ||||
arglist = [] | ||||
Matt Harbison
|
r39847 | for rev in _xrange(edits): | ||
Jun Wu
|
r39005 | a1 = randint(0, currentlines) | ||
a2 = randint(a1, min(currentlines, a1 + maxhunklines)) | ||||
b1 = randint(0, maxb1) | ||||
b2 = randint(b1, b1 + maxhunklines) | ||||
currentlines += (b2 - b1) - (a2 - a1) | ||||
arglist.append((rev, a1, a2, b1, b2)) | ||||
def d(): | ||||
ll = linelog.linelog() | ||||
for args in arglist: | ||||
ll.replacelines(*args) | ||||
timer, fm = gettimer(ui, opts) | ||||
timer(d) | ||||
fm.end() | ||||
Augie Fackler
|
r43346 | |||
Pulkit Goyal
|
r39398 | @command(b'perfrevrange', formatteropts) | ||
Pierre-Yves David
|
r25494 | def perfrevrange(ui, repo, *specs, **opts): | ||
Matt Harbison
|
r39850 | opts = _byteskwargs(opts) | ||
Pierre-Yves David
|
r25494 | timer, fm = gettimer(ui, opts) | ||
Bryan O'Sullivan
|
r16858 | revrange = scmutil.revrange | ||
timer(lambda: len(revrange(repo, specs))) | ||||
Pierre-Yves David
|
r23171 | fm.end() | ||
Bryan O'Sullivan
|
r16858 | |||
Augie Fackler
|
r43346 | |||
Pulkit Goyal
|
r39398 | @command(b'perfnodelookup', formatteropts) | ||
Pierre-Yves David
|
r25494 | def perfnodelookup(ui, repo, rev, **opts): | ||
Matt Harbison
|
r39850 | opts = _byteskwargs(opts) | ||
Pierre-Yves David
|
r25494 | timer, fm = gettimer(ui, opts) | ||
Matt Mackall
|
r16309 | import mercurial.revlog | ||
Augie Fackler
|
r43346 | |||
mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg | ||||
Martin von Zweigbergk
|
r37373 | n = scmutil.revsingle(repo, rev).node() | ||
Pulkit Goyal
|
r39398 | cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i") | ||
Augie Fackler
|
r43346 | |||
Bryan O'Sullivan
|
r16414 | def d(): | ||
cl.rev(n) | ||||
Bryan O'Sullivan
|
r16785 | clearcaches(cl) | ||
Augie Fackler
|
r43346 | |||
Bryan O'Sullivan
|
r16414 | timer(d) | ||
Pierre-Yves David
|
r23171 | fm.end() | ||
Bryan O'Sullivan
|
r16414 | |||
Augie Fackler
|
r43346 | |||
@command( | ||||
b'perflog', | ||||
[(b'', b'rename', False, b'ask log to follow renames')] + formatteropts, | ||||
) | ||||
timeless
|
r27306 | def perflog(ui, repo, rev=None, **opts): | ||
Matt Harbison
|
r39850 | opts = _byteskwargs(opts) | ||
timeless
|
r27306 | if rev is None: | ||
Augie Fackler
|
r43346 | rev = [] | ||
Pierre-Yves David
|
r25494 | timer, fm = gettimer(ui, opts) | ||
Alexander Solovyov
|
r7872 | ui.pushbuffer() | ||
Augie Fackler
|
r43346 | timer( | ||
lambda: commands.log( | ||||
ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename') | ||||
) | ||||
) | ||||
Alexander Solovyov
|
r7872 | ui.popbuffer() | ||
Pierre-Yves David
|
r23171 | fm.end() | ||
Alexander Solovyov
|
r7872 | |||
Augie Fackler
|
r43346 | |||
Pulkit Goyal
|
r39398 | @command(b'perfmoonwalk', formatteropts) | ||
Pierre-Yves David
|
r25494 | def perfmoonwalk(ui, repo, **opts): | ||
Brodie Rao
|
r20178 | """benchmark walking the changelog backwards | ||
This also loads the changelog data for each revision in the changelog. | ||||
""" | ||||
Matt Harbison
|
r39850 | opts = _byteskwargs(opts) | ||
Pierre-Yves David
|
r25494 | timer, fm = gettimer(ui, opts) | ||
Augie Fackler
|
r43346 | |||
Brodie Rao
|
r20178 | def moonwalk(): | ||
Martin von Zweigbergk
|
r38801 | for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1): | ||
Brodie Rao
|
r20178 | ctx = repo[i] | ||
Augie Fackler
|
r43346 | ctx.branch() # read changelog data (in addition to the index) | ||
Brodie Rao
|
r20178 | timer(moonwalk) | ||
Pierre-Yves David
|
r23171 | fm.end() | ||
Brodie Rao
|
r20178 | |||
Augie Fackler
|
r43346 | |||
@command( | ||||
b'perftemplating', | ||||
Augie Fackler
|
r46554 | [ | ||
(b'r', b'rev', [], b'revisions to run the template on'), | ||||
] | ||||
+ formatteropts, | ||||
Augie Fackler
|
r43346 | ) | ||
Boris Feld
|
r38277 | def perftemplating(ui, repo, testedtemplate=None, **opts): | ||
"""test the rendering time of a given template""" | ||||
Boris Feld
|
r38276 | if makelogtemplater is None: | ||
Augie Fackler
|
r43346 | raise error.Abort( | ||
b"perftemplating not available with this Mercurial", | ||||
hint=b"use 4.3 or later", | ||||
) | ||||
Boris Feld
|
r38276 | |||
Matt Harbison
|
r39850 | opts = _byteskwargs(opts) | ||
Boris Feld
|
r38274 | nullui = ui.copy() | ||
Augie Fackler
|
r43906 | nullui.fout = open(os.devnull, 'wb') | ||
Boris Feld
|
r38274 | nullui.disablepager() | ||
Pulkit Goyal
|
r39398 | revs = opts.get(b'rev') | ||
Boris Feld
|
r38276 | if not revs: | ||
Pulkit Goyal
|
r39398 | revs = [b'all()'] | ||
Boris Feld
|
r38276 | revs = list(scmutil.revrange(repo, revs)) | ||
Augie Fackler
|
r43346 | defaulttemplate = ( | ||
b'{date|shortdate} [{rev}:{node|short}]' | ||||
b' {author|person}: {desc|firstline}\n' | ||||
) | ||||
Boris Feld
|
r38277 | if testedtemplate is None: | ||
testedtemplate = defaulttemplate | ||||
displayer = makelogtemplater(nullui, repo, testedtemplate) | ||||
Augie Fackler
|
r43346 | |||
Boris Feld
|
r38273 | def format(): | ||
Boris Feld
|
r38276 | for r in revs: | ||
ctx = repo[r] | ||||
displayer.show(ctx) | ||||
displayer.flush(ctx) | ||||
Boris Feld
|
r38273 | |||
Pierre-Yves David
|
r25494 | timer, fm = gettimer(ui, opts) | ||
Boris Feld
|
r38273 | timer(format) | ||
Pierre-Yves David
|
r23171 | fm.end() | ||
Alexander Solovyov
|
r7872 | |||
Augie Fackler
|
r43346 | |||
r43211 | def _displaystats(ui, opts, entries, data): | |||
# use a second formatter because the data are quite different, not sure | ||||
# how it flies with the templater. | ||||
fm = ui.formatter(b'perf-stats', opts) | ||||
for key, title in entries: | ||||
values = data[key] | ||||
nbvalues = len(values)
values.sort() | ||||
stats = { | ||||
'key': key, | ||||
'title': title, | ||||
'nbitems': len(values), | ||||
'min': values[0][0], | ||||
'10%': values[(nbvalues * 10) // 100][0], | ||||
'25%': values[(nbvalues * 25) // 100][0], | ||||
'50%': values[(nbvalues * 50) // 100][0], | ||||
'75%': values[(nbvalues * 75) // 100][0], | ||||
'80%': values[(nbvalues * 80) // 100][0], | ||||
'85%': values[(nbvalues * 85) // 100][0], | ||||
'90%': values[(nbvalues * 90) // 100][0], | ||||
'95%': values[(nbvalues * 95) // 100][0], | ||||
'99%': values[(nbvalues * 99) // 100][0], | ||||
'max': values[-1][0], | ||||
} | ||||
fm.startitem() | ||||
fm.data(**stats) | ||||
# make node pretty for the human output | ||||
fm.plain('### %s (%d items)\n' % (title, len(values))) | ||||
lines = [ | ||||
'min', | ||||
'10%', | ||||
'25%', | ||||
'50%', | ||||
'75%', | ||||
'80%', | ||||
'85%', | ||||
'90%', | ||||
'95%', | ||||
'99%', | ||||
'max', | ||||
] | ||||
for l in lines: | ||||
fm.plain('%s: %s\n' % (l, stats[l])) | ||||
fm.end() | ||||
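# Illustrative sketch (not part of upstream perf.py): how the percentile
# indexing above behaves. The entries of `values` are sorted tuples whose
# first element is the measurement, so with 200 samples the "95%" row reads
# values[(200 * 95) // 100][0] == values[190][0], while with only a handful
# of samples most rows collapse onto the smallest entries. Hypothetical
# helper, not used by any perf command:
def _example_percentile(values, percent):
    """Return the measurement at `percent` (0-100) of a sorted sample list."""
    if not values:
        return None
    idx = min((len(values) * percent) // 100, len(values) - 1)
    return values[idx][0]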
Augie Fackler
|
r43346 | |||
@command( | ||||
b'perfhelper-mergecopies', | ||||
formatteropts | ||||
+ [ | ||||
(b'r', b'revs', [], b'restrict search to these revisions'), | ||||
(b'', b'timing', False, b'provides extra data (costly)'), | ||||
(b'', b'stats', False, b'provides statistics about the measured data'),
], | ||||
) | ||||
r42577 | def perfhelpermergecopies(ui, repo, revs=[], **opts): | |||
"""find statistics about potential parameters for `perfmergecopies` | ||||
This command finds (base, p1, p2) triplets relevant for copytracing
benchmarking in the context of a merge. It reports values for some of the | ||||
parameters that impact merge copy tracing time during merge. | ||||
If `--timing` is set, rename detection is run and the associated timing | ||||
will be reported. The extra details come at the cost of slower command | ||||
execution. | ||||
Since rename detection is only run once, other factors might easily | ||||
affect the precision of the timing. However it should give a good | ||||
approximation of which revision triplets are very costly. | ||||
""" | ||||
opts = _byteskwargs(opts) | ||||
fm = ui.formatter(b'perf', opts) | ||||
dotiming = opts[b'timing'] | ||||
r43211 | dostats = opts[b'stats'] | |||
r42577 | ||||
output_template = [ | ||||
("base", "%(base)12s"), | ||||
("p1", "%(p1.node)12s"), | ||||
("p2", "%(p2.node)12s"), | ||||
("p1.nb-revs", "%(p1.nbrevs)12d"), | ||||
("p1.nb-files", "%(p1.nbmissingfiles)12d"), | ||||
("p1.renames", "%(p1.renamedfiles)12d"), | ||||
("p1.time", "%(p1.time)12.3f"), | ||||
("p2.nb-revs", "%(p2.nbrevs)12d"), | ||||
("p2.nb-files", "%(p2.nbmissingfiles)12d"), | ||||
("p2.renames", "%(p2.renamedfiles)12d"), | ||||
("p2.time", "%(p2.time)12.3f"), | ||||
("renames", "%(nbrenamedfiles)12d"), | ||||
("total.time", "%(time)12.3f"), | ||||
Augie Fackler
|
r43346 | ] | ||
r42577 | if not dotiming: | |||
Augie Fackler
|
r43346 | output_template = [ | ||
i | ||||
for i in output_template | ||||
if not ('time' in i[0] or 'renames' in i[0]) | ||||
] | ||||
r42577 | header_names = [h for (h, v) in output_template] | |||
output = ' '.join([v for (h, v) in output_template]) + '\n' | ||||
header = ' '.join(['%12s'] * len(header_names)) + '\n' | ||||
fm.plain(header % tuple(header_names)) | ||||
if not revs: | ||||
revs = ['all()'] | ||||
revs = scmutil.revrange(repo, revs) | ||||
r43211 | if dostats: | |||
alldata = { | ||||
'nbrevs': [], | ||||
'nbmissingfiles': [], | ||||
} | ||||
if dotiming: | ||||
alldata['parentnbrenames'] = [] | ||||
alldata['totalnbrenames'] = [] | ||||
alldata['parenttime'] = [] | ||||
alldata['totaltime'] = [] | ||||
r42577 | roi = repo.revs('merge() and %ld', revs) | |||
for r in roi: | ||||
ctx = repo[r] | ||||
p1 = ctx.p1() | ||||
p2 = ctx.p2() | ||||
bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev()) | ||||
for b in bases: | ||||
b = repo[b] | ||||
p1missing = copies._computeforwardmissing(b, p1) | ||||
p2missing = copies._computeforwardmissing(b, p2) | ||||
data = { | ||||
b'base': b.hex(), | ||||
b'p1.node': p1.hex(), | ||||
r43431 | b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())), | |||
r42577 | b'p1.nbmissingfiles': len(p1missing), | |||
b'p2.node': p2.hex(), | ||||
r43431 | b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())), | |||
r42577 | b'p2.nbmissingfiles': len(p2missing), | |||
} | ||||
r43211 | if dostats: | |||
if p1missing: | ||||
Augie Fackler
|
r43346 | alldata['nbrevs'].append( | ||
(data['p1.nbrevs'], b.hex(), p1.hex()) | ||||
) | ||||
alldata['nbmissingfiles'].append( | ||||
(data['p1.nbmissingfiles'], b.hex(), p1.hex()) | ||||
) | ||||
r43211 | if p2missing: | |||
Augie Fackler
|
r43346 | alldata['nbrevs'].append( | ||
(data['p2.nbrevs'], b.hex(), p2.hex()) | ||||
) | ||||
alldata['nbmissingfiles'].append( | ||||
(data['p2.nbmissingfiles'], b.hex(), p2.hex()) | ||||
) | ||||
r42577 | if dotiming: | |||
begin = util.timer() | ||||
mergedata = copies.mergecopies(repo, p1, p2, b) | ||||
end = util.timer() | ||||
# not very stable timing since we did only one run | ||||
data['time'] = end - begin | ||||
# mergedata contains five dicts: "copy", "movewithdir", | ||||
# "diverge", "renamedelete" and "dirmove". | ||||
# The first 4 are about renamed files, so let's count them.
renames = len(mergedata[0]) | ||||
renames += len(mergedata[1]) | ||||
renames += len(mergedata[2]) | ||||
renames += len(mergedata[3]) | ||||
data['nbrenamedfiles'] = renames | ||||
begin = util.timer() | ||||
p1renames = copies.pathcopies(b, p1) | ||||
end = util.timer() | ||||
data['p1.time'] = end - begin | ||||
begin = util.timer() | ||||
p2renames = copies.pathcopies(b, p2) | ||||
Matt Harbison
|
r44426 | end = util.timer() | ||
r42577 | data['p2.time'] = end - begin | |||
data['p1.renamedfiles'] = len(p1renames) | ||||
data['p2.renamedfiles'] = len(p2renames) | ||||
r43211 | ||||
if dostats: | ||||
if p1missing: | ||||
Augie Fackler
|
r43346 | alldata['parentnbrenames'].append( | ||
(data['p1.renamedfiles'], b.hex(), p1.hex()) | ||||
) | ||||
alldata['parenttime'].append( | ||||
(data['p1.time'], b.hex(), p1.hex()) | ||||
) | ||||
r43211 | if p2missing: | |||
Augie Fackler
|
r43346 | alldata['parentnbrenames'].append( | ||
(data['p2.renamedfiles'], b.hex(), p2.hex()) | ||||
) | ||||
alldata['parenttime'].append( | ||||
(data['p2.time'], b.hex(), p2.hex()) | ||||
) | ||||
r43211 | if p1missing or p2missing: | |||
Augie Fackler
|
r43346 | alldata['totalnbrenames'].append( | ||
( | ||||
data['nbrenamedfiles'], | ||||
b.hex(), | ||||
p1.hex(), | ||||
p2.hex(), | ||||
) | ||||
) | ||||
alldata['totaltime'].append( | ||||
(data['time'], b.hex(), p1.hex(), p2.hex()) | ||||
) | ||||
r42577 | fm.startitem() | |||
fm.data(**data) | ||||
# make node pretty for the human output | ||||
out = data.copy() | ||||
out['base'] = fm.hexfunc(b.node()) | ||||
out['p1.node'] = fm.hexfunc(p1.node()) | ||||
out['p2.node'] = fm.hexfunc(p2.node()) | ||||
fm.plain(output % out) | ||||
fm.end() | ||||
r43211 | if dostats: | |||
# use a second formatter because the data are quite different, not sure | ||||
# how it flies with the templater. | ||||
entries = [ | ||||
('nbrevs', 'number of revision covered'), | ||||
('nbmissingfiles', 'number of missing files at head'), | ||||
] | ||||
if dotiming: | ||||
Augie Fackler
|
r43346 | entries.append( | ||
('parentnbrenames', 'rename from one parent to base') | ||||
) | ||||
r43211 | entries.append(('totalnbrenames', 'total number of renames')) | |||
entries.append(('parenttime', 'time for one parent')) | ||||
entries.append(('totaltime', 'time for both parents')) | ||||
_displaystats(ui, opts, entries, alldata) | ||||
r42577 | ||||
Augie Fackler
|
r43346 | @command( | ||
b'perfhelper-pathcopies', | ||||
formatteropts | ||||
+ [ | ||||
(b'r', b'revs', [], b'restrict search to these revisions'), | ||||
(b'', b'timing', False, b'provides extra data (costly)'), | ||||
(b'', b'stats', False, b'provides statistics about the measured data'),
], | ||||
) | ||||
Boris Feld
|
r40771 | def perfhelperpathcopies(ui, repo, revs=[], **opts): | ||
Boris Feld
|
r40727 | """find statistic about potential parameters for the `perftracecopies` | ||
This command find source-destination pair relevant for copytracing testing. | ||||
It report value for some of the parameters that impact copy tracing time. | ||||
Boris Feld
|
r40762 | |||
If `--timing` is set, rename detection is run and the associated timing | ||||
will be reported. The extra details come at the cost of a slower command
execution. | ||||
Since the rename detection is only run once, other factors might easily | ||||
affect the precision of the timing. However it should give a good | ||||
approximation of which revision pairs are very costly. | ||||
Boris Feld
|
r40727 | """ | ||
opts = _byteskwargs(opts) | ||||
fm = ui.formatter(b'perf', opts) | ||||
Boris Feld
|
r40762 | dotiming = opts[b'timing'] | ||
r43212 | dostats = opts[b'stats'] | |||
Boris Feld
|
r40762 | |||
if dotiming: | ||||
header = '%12s %12s %12s %12s %12s %12s\n' | ||||
Augie Fackler
|
r43346 | output = ( | ||
"%(source)12s %(destination)12s " | ||||
"%(nbrevs)12d %(nbmissingfiles)12d " | ||||
"%(nbrenamedfiles)12d %(time)18.5f\n" | ||||
) | ||||
header_names = ( | ||||
"source", | ||||
"destination", | ||||
"nb-revs", | ||||
"nb-files", | ||||
"nb-renames", | ||||
"time", | ||||
) | ||||
Boris Feld
|
r40762 | fm.plain(header % header_names) | ||
else: | ||||
header = '%12s %12s %12s %12s\n' | ||||
Augie Fackler
|
r43346 | output = ( | ||
"%(source)12s %(destination)12s " | ||||
"%(nbrevs)12d %(nbmissingfiles)12d\n" | ||||
) | ||||
Boris Feld
|
r40762 | fm.plain(header % ("source", "destination", "nb-revs", "nb-files")) | ||
Boris Feld
|
r40727 | |||
if not revs: | ||||
revs = ['all()'] | ||||
revs = scmutil.revrange(repo, revs) | ||||
r43212 | if dostats: | |||
alldata = { | ||||
'nbrevs': [], | ||||
'nbmissingfiles': [], | ||||
} | ||||
if dotiming: | ||||
alldata['nbrenames'] = [] | ||||
alldata['time'] = [] | ||||
Boris Feld
|
r40727 | roi = repo.revs('merge() and %ld', revs) | ||
for r in roi: | ||||
ctx = repo[r] | ||||
p1 = ctx.p1().rev() | ||||
p2 = ctx.p2().rev() | ||||
bases = repo.changelog._commonancestorsheads(p1, p2) | ||||
for p in (p1, p2): | ||||
for b in bases: | ||||
base = repo[b] | ||||
parent = repo[p] | ||||
missing = copies._computeforwardmissing(base, parent) | ||||
if not missing: | ||||
continue | ||||
data = { | ||||
b'source': base.hex(), | ||||
b'destination': parent.hex(), | ||||
r43430 | b'nbrevs': len(repo.revs('only(%d, %d)', p, b)), | |||
Boris Feld
|
r40727 | b'nbmissingfiles': len(missing), | ||
} | ||||
r43273 | if dostats: | |||
Augie Fackler
|
r43346 | alldata['nbrevs'].append( | ||
Augie Fackler
|
r46554 | ( | ||
data['nbrevs'], | ||||
base.hex(), | ||||
parent.hex(), | ||||
) | ||||
Augie Fackler
|
r43346 | ) | ||
alldata['nbmissingfiles'].append( | ||||
Augie Fackler
|
r46554 | ( | ||
data['nbmissingfiles'], | ||||
base.hex(), | ||||
parent.hex(), | ||||
) | ||||
Augie Fackler
|
r43346 | ) | ||
Boris Feld
|
r40762 | if dotiming: | ||
begin = util.timer() | ||||
renames = copies.pathcopies(base, parent) | ||||
end = util.timer() | ||||
# not very stable timing since we did only one run | ||||
data['time'] = end - begin | ||||
data['nbrenamedfiles'] = len(renames) | ||||
r43273 | if dostats: | |||
Augie Fackler
|
r43346 | alldata['time'].append( | ||
Augie Fackler
|
r46554 | ( | ||
data['time'], | ||||
base.hex(), | ||||
parent.hex(), | ||||
) | ||||
Augie Fackler
|
r43346 | ) | ||
alldata['nbrenames'].append( | ||||
Augie Fackler
|
r46554 | ( | ||
data['nbrenamedfiles'], | ||||
base.hex(), | ||||
parent.hex(), | ||||
) | ||||
Augie Fackler
|
r43346 | ) | ||
Boris Feld
|
r40762 | fm.startitem() | ||
Boris Feld
|
r40727 | fm.data(**data) | ||
out = data.copy() | ||||
out['source'] = fm.hexfunc(base.node()) | ||||
out['destination'] = fm.hexfunc(parent.node()) | ||||
fm.plain(output % out) | ||||
Boris Feld
|
r40762 | |||
Boris Feld
|
r40727 | fm.end() | ||
r43212 | if dostats: | |||
entries = [ | ||||
('nbrevs', 'number of revision covered'), | ||||
('nbmissingfiles', 'number of missing files at head'), | ||||
] | ||||
if dotiming: | ||||
Augie Fackler
|
r43346 | entries.append(('nbrenames', 'renamed files')) | ||
r43212 | entries.append(('time', 'time')) | |||
_displaystats(ui, opts, entries, alldata) | ||||
Boris Feld
|
r40727 | |||
Augie Fackler
|
r43346 | |||
Pulkit Goyal
|
r39398 | @command(b'perfcca', formatteropts) | ||
Pierre-Yves David
|
r25494 | def perfcca(ui, repo, **opts): | ||
Matt Harbison
|
r39850 | opts = _byteskwargs(opts) | ||
Pierre-Yves David
|
r25494 | timer, fm = gettimer(ui, opts) | ||
Joshua Redstone
|
r17216 | timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate)) | ||
Pierre-Yves David
|
r23171 | fm.end() | ||
Matt Mackall
|
r16386 | |||
Augie Fackler
|
r43346 | |||
Pulkit Goyal
|
r39398 | @command(b'perffncacheload', formatteropts) | ||
Pierre-Yves David
|
r25494 | def perffncacheload(ui, repo, **opts): | ||
Matt Harbison
|
r39850 | opts = _byteskwargs(opts) | ||
Pierre-Yves David
|
r25494 | timer, fm = gettimer(ui, opts) | ||
Adrian Buehlmann
|
r17780 | s = repo.store | ||
Augie Fackler
|
r43346 | |||
Bryan O'Sullivan
|
r16403 | def d(): | ||
s.fncache._load() | ||||
Augie Fackler
|
r43346 | |||
Bryan O'Sullivan
|
r16403 | timer(d) | ||
Pierre-Yves David
|
r23171 | fm.end() | ||
Bryan O'Sullivan
|
r16403 | |||
Augie Fackler
|
r43346 | |||
Pulkit Goyal
|
r39398 | @command(b'perffncachewrite', formatteropts) | ||
Pierre-Yves David
|
r25494 | def perffncachewrite(ui, repo, **opts): | ||
Matt Harbison
|
r39850 | opts = _byteskwargs(opts) | ||
Pierre-Yves David
|
r25494 | timer, fm = gettimer(ui, opts) | ||
Adrian Buehlmann
|
r17780 | s = repo.store | ||
Boris Feld
|
r38717 | lock = repo.lock() | ||
Bryan O'Sullivan
|
r16403 | s.fncache._load() | ||
Pulkit Goyal
|
r39398 | tr = repo.transaction(b'perffncachewrite') | ||
tr.addbackup(b'fncache') | ||||
Augie Fackler
|
r43346 | |||
Bryan O'Sullivan
|
r16403 | def d(): | ||
s.fncache._dirty = True | ||||
timeless
|
r27097 | s.fncache.write(tr) | ||
Augie Fackler
|
r43346 | |||
Bryan O'Sullivan
|
r16403 | timer(d) | ||
Pierre-Yves David
|
r30069 | tr.close() | ||
timeless
|
r27097 | lock.release() | ||
Pierre-Yves David
|
r23171 | fm.end() | ||
Bryan O'Sullivan
|
r16403 | |||
Augie Fackler
|
r43346 | |||
Pulkit Goyal
|
r39398 | @command(b'perffncacheencode', formatteropts) | ||
Pierre-Yves David
|
r25494 | def perffncacheencode(ui, repo, **opts): | ||
Matt Harbison
|
r39850 | opts = _byteskwargs(opts) | ||
Pierre-Yves David
|
r25494 | timer, fm = gettimer(ui, opts) | ||
Adrian Buehlmann
|
r17780 | s = repo.store | ||
Adrian Buehlmann
|
r17553 | s.fncache._load() | ||
Augie Fackler
|
r43346 | |||
Adrian Buehlmann
|
r17553 | def d(): | ||
for p in s.fncache.entries: | ||||
s.encode(p) | ||||
Augie Fackler
|
r43346 | |||
Adrian Buehlmann
|
r17553 | timer(d) | ||
Pierre-Yves David
|
r23171 | fm.end() | ||
Adrian Buehlmann
|
r17553 | |||
Augie Fackler
|
r43346 | |||
Gregory Szorc
|
r36784 | def _bdiffworker(q, blocks, xdiff, ready, done): | ||
Boris Feld
|
r35617 | while not done.is_set(): | ||
pair = q.get() | ||||
while pair is not None: | ||||
Gregory Szorc
|
r36784 | if xdiff: | ||
mdiff.bdiff.xdiffblocks(*pair) | ||||
elif blocks: | ||||
mdiff.bdiff.blocks(*pair) | ||||
else: | ||||
mdiff.textdiff(*pair) | ||||
Boris Feld
|
r35617 | q.task_done() | ||
pair = q.get() | ||||
Augie Fackler
|
r43346 | q.task_done() # for the None one | ||
Boris Feld
|
r35617 | with ready: | ||
ready.wait() | ||||
Augie Fackler
|
r43346 | |||
Gregory Szorc
|
r39355 | def _manifestrevision(repo, mnode): | ||
ml = repo.manifestlog | ||||
Pulkit Goyal
|
r39398 | if util.safehasattr(ml, b'getstorage'): | ||
Gregory Szorc
|
r39355 | store = ml.getstorage(b'') | ||
else: | ||||
store = ml._revlog | ||||
return store.revision(mnode) | ||||
Augie Fackler
|
r43346 | |||
@command( | ||||
b'perfbdiff', | ||||
revlogopts | ||||
+ formatteropts | ||||
+ [ | ||||
( | ||||
b'', | ||||
b'count', | ||||
1, | ||||
b'number of revisions to test (when using --startrev)', | ||||
), | ||||
(b'', b'alldata', False, b'test bdiffs for all associated revisions'), | ||||
(b'', b'threads', 0, b'number of threads to use (disable with 0)'),
(b'', b'blocks', False, b'test computing diffs into blocks'), | ||||
(b'', b'xdiff', False, b'use xdiff algorithm'), | ||||
Boris Feld
|
r35617 | ], | ||
Augie Fackler
|
r43346 | b'-c|-m|FILE REV', | ||
) | ||||
Boris Feld
|
r35617 | def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts): | ||
Gregory Szorc
|
r30336 | """benchmark a bdiff between revisions | ||
By default, benchmark a bdiff between its delta parent and itself. | ||||
With ``--count``, benchmark bdiffs between delta parents and self for N | ||||
revisions starting at the specified revision. | ||||
Gregory Szorc
|
r30337 | |||
With ``--alldata``, assume the requested revision is a changeset and | ||||
measure bdiffs for all changes related to that changeset (manifest | ||||
and filelogs). | ||||
Gregory Szorc
|
r30336 | """ | ||
Matt Harbison
|
r39850 | opts = _byteskwargs(opts) | ||
Gregory Szorc
|
r36784 | |||
Pulkit Goyal
|
r39398 | if opts[b'xdiff'] and not opts[b'blocks']: | ||
raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks') | ||||
Gregory Szorc
|
r36784 | |||
Pulkit Goyal
|
r39398 | if opts[b'alldata']: | ||
opts[b'changelog'] = True | ||||
Gregory Szorc
|
r30337 | |||
Pulkit Goyal
|
r39398 | if opts.get(b'changelog') or opts.get(b'manifest'): | ||
Gregory Szorc
|
r30307 | file_, rev = None, file_ | ||
elif rev is None: | ||||
Pulkit Goyal
|
r39398 | raise error.CommandError(b'perfbdiff', b'invalid arguments') | ||
Gregory Szorc
|
r30307 | |||
Pulkit Goyal
|
r39398 | blocks = opts[b'blocks'] | ||
xdiff = opts[b'xdiff'] | ||||
Gregory Szorc
|
r30335 | textpairs = [] | ||
Pulkit Goyal
|
r39398 | r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts) | ||
Gregory Szorc
|
r30307 | |||
Gregory Szorc
|
r30336 | startrev = r.rev(r.lookup(rev)) | ||
for rev in range(startrev, min(startrev + count, len(r) - 1)): | ||||
Pulkit Goyal
|
r39398 | if opts[b'alldata']: | ||
Gregory Szorc
|
r30337 | # Load revisions associated with changeset. | ||
ctx = repo[rev] | ||||
Gregory Szorc
|
r39355 | mtext = _manifestrevision(repo, ctx.manifestnode()) | ||
Gregory Szorc
|
r30337 | for pctx in ctx.parents(): | ||
Gregory Szorc
|
r39355 | pman = _manifestrevision(repo, pctx.manifestnode()) | ||
Gregory Szorc
|
r30337 | textpairs.append((pman, mtext)) | ||
# Load filelog revisions by iterating manifest delta. | ||||
man = ctx.manifest() | ||||
pman = ctx.p1().manifest() | ||||
for filename, change in pman.diff(man).items(): | ||||
fctx = repo.file(filename) | ||||
f1 = fctx.revision(change[0][0] or -1) | ||||
f2 = fctx.revision(change[1][0] or -1) | ||||
textpairs.append((f1, f2)) | ||||
else: | ||||
dp = r.deltaparent(rev) | ||||
textpairs.append((r.revision(dp), r.revision(rev))) | ||||
Gregory Szorc
|
r30307 | |||
Boris Feld
|
r35617 | withthreads = threads > 0 | ||
if not withthreads: | ||||
Augie Fackler
|
r43346 | |||
Boris Feld
|
r35617 | def d(): | ||
for pair in textpairs: | ||||
Gregory Szorc
|
r36784 | if xdiff: | ||
mdiff.bdiff.xdiffblocks(*pair) | ||||
elif blocks: | ||||
mdiff.bdiff.blocks(*pair) | ||||
else: | ||||
mdiff.textdiff(*pair) | ||||
Augie Fackler
|
r43346 | |||
Boris Feld
|
r35617 | else: | ||
Gregory Szorc
|
r37863 | q = queue() | ||
Matt Harbison
|
r39847 | for i in _xrange(threads): | ||
Boris Feld
|
r35617 | q.put(None) | ||
ready = threading.Condition() | ||||
done = threading.Event() | ||||
Matt Harbison
|
r39847 | for i in _xrange(threads): | ||
Augie Fackler
|
r43346 | threading.Thread( | ||
target=_bdiffworker, args=(q, blocks, xdiff, ready, done) | ||||
).start() | ||||
Boris Feld
|
r35617 | q.join() | ||
Augie Fackler
|
r43346 | |||
Boris Feld
|
r35617 | def d(): | ||
for pair in textpairs: | ||||
q.put(pair) | ||||
Matt Harbison
|
r39847 | for i in _xrange(threads): | ||
Boris Feld
|
r35617 | q.put(None) | ||
with ready: | ||||
ready.notify_all() | ||||
q.join() | ||||
Augie Fackler
|
r43346 | |||
Gregory Szorc
|
r30307 | timer, fm = gettimer(ui, opts) | ||
timer(d) | ||||
fm.end() | ||||
Boris Feld
|
r35617 | if withthreads: | ||
done.set() | ||||
Matt Harbison
|
r39847 | for i in _xrange(threads): | ||
Boris Feld
|
r35617 | q.put(None) | ||
with ready: | ||||
ready.notify_all() | ||||
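# Illustrative sketch (not part of upstream perf.py): perfbdiff keeps its
# worker threads alive across timer rounds with a Condition/Event pair and
# per-round None sentinels. The simplified, single-round version below shows
# the underlying producer/consumer shape, reusing the module-level `queue`
# and `threading` names and doing trivial work in place of mdiff.textdiff.
# It is a hypothetical helper, not used by any perf command.
def _example_worker_pool(pairs, nthreads=2):
    q = queue()

    def worker():
        while True:
            item = q.get()
            if item is None:  # sentinel: no more work for this worker
                q.task_done()
                return
            len(item[0]) + len(item[1])  # stand-in for mdiff.textdiff(*item)
            q.task_done()

    workers = [threading.Thread(target=worker) for _ in range(nthreads)]
    for w in workers:
        w.start()
    for pair in pairs:
        q.put(pair)
    for _ in range(nthreads):
        q.put(None)  # one sentinel per worker so every thread exits
    q.join()  # all pairs and sentinels have been processed
    for w in workers:
        w.join()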
Augie Fackler
|
r43346 | |||
@command( | ||||
b'perfunidiff', | ||||
revlogopts | ||||
+ formatteropts | ||||
+ [ | ||||
( | ||||
b'', | ||||
b'count', | ||||
1, | ||||
b'number of revisions to test (when using --startrev)', | ||||
), | ||||
(b'', b'alldata', False, b'test unidiffs for all associated revisions'), | ||||
], | ||||
b'-c|-m|FILE REV', | ||||
) | ||||
Augie Fackler
|
r35879 | def perfunidiff(ui, repo, file_, rev=None, count=None, **opts): | ||
"""benchmark a unified diff between revisions | ||||
This doesn't include any copy tracing - it's just a unified diff | ||||
of the texts. | ||||
By default, benchmark a diff between its delta parent and itself. | ||||
With ``--count``, benchmark diffs between delta parents and self for N | ||||
revisions starting at the specified revision. | ||||
With ``--alldata``, assume the requested revision is a changeset and | ||||
measure diffs for all changes related to that changeset (manifest | ||||
and filelogs). | ||||
""" | ||||
Matt Harbison
|
r39850 | opts = _byteskwargs(opts) | ||
Pulkit Goyal
|
r39398 | if opts[b'alldata']: | ||
opts[b'changelog'] = True | ||||
Augie Fackler
|
r35879 | |||
Pulkit Goyal
|
r39398 | if opts.get(b'changelog') or opts.get(b'manifest'): | ||
Augie Fackler
|
r35879 | file_, rev = None, file_ | ||
elif rev is None: | ||||
Pulkit Goyal
|
r39398 | raise error.CommandError(b'perfunidiff', b'invalid arguments') | ||
Augie Fackler
|
r35879 | |||
textpairs = [] | ||||
Pulkit Goyal
|
r39398 | r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts) | ||
Augie Fackler
|
r35879 | |||
startrev = r.rev(r.lookup(rev)) | ||||
for rev in range(startrev, min(startrev + count, len(r) - 1)): | ||||
Pulkit Goyal
|
r39398 | if opts[b'alldata']: | ||
Augie Fackler
|
r35879 | # Load revisions associated with changeset. | ||
ctx = repo[rev] | ||||
Gregory Szorc
|
r39355 | mtext = _manifestrevision(repo, ctx.manifestnode()) | ||
Augie Fackler
|
r35879 | for pctx in ctx.parents(): | ||
Gregory Szorc
|
r39355 | pman = _manifestrevision(repo, pctx.manifestnode()) | ||
Augie Fackler
|
r35879 | textpairs.append((pman, mtext)) | ||
# Load filelog revisions by iterating manifest delta. | ||||
man = ctx.manifest() | ||||
pman = ctx.p1().manifest() | ||||
for filename, change in pman.diff(man).items(): | ||||
fctx = repo.file(filename) | ||||
f1 = fctx.revision(change[0][0] or -1) | ||||
f2 = fctx.revision(change[1][0] or -1) | ||||
textpairs.append((f1, f2)) | ||||
else: | ||||
dp = r.deltaparent(rev) | ||||
textpairs.append((r.revision(dp), r.revision(rev))) | ||||
def d(): | ||||
for left, right in textpairs: | ||||
# The date strings don't matter, so we pass empty strings. | ||||
headerlines, hunks = mdiff.unidiff( | ||||
Augie Fackler
|
r43346 | left, b'', right, b'', b'left', b'right', binary=False | ||
) | ||||
Augie Fackler
|
r35879 | # consume iterators in roughly the way patch.py does | ||
b'\n'.join(headerlines) | ||||
b''.join(sum((list(hlines) for hrange, hlines in hunks), [])) | ||||
Augie Fackler
|
r43346 | |||
Augie Fackler
|
r35879 | timer, fm = gettimer(ui, opts) | ||
timer(d) | ||||
fm.end() | ||||
Augie Fackler
|
r43346 | |||
Pulkit Goyal
|
r39398 | @command(b'perfdiffwd', formatteropts) | ||
Pierre-Yves David
|
r25494 | def perfdiffwd(ui, repo, **opts): | ||
Patrick Mezard
|
r9826 | """Profile diff of working directory changes""" | ||
Matt Harbison
|
r39850 | opts = _byteskwargs(opts) | ||
Pierre-Yves David
|
r25494 | timer, fm = gettimer(ui, opts) | ||
Patrick Mezard
|
r9826 | options = { | ||
Pulkit Goyal
|
r40250 | 'w': 'ignore_all_space', | ||
'b': 'ignore_space_change', | ||||
'B': 'ignore_blank_lines', | ||||
Augie Fackler
|
r43346 | } | ||
Patrick Mezard
|
r9826 | |||
Pulkit Goyal
|
r40250 | for diffopt in ('', 'w', 'b', 'B', 'wB'): | ||
Augie Fackler
|
r44937 | opts = {options[c]: b'1' for c in diffopt} | ||
Augie Fackler
|
r43346 | |||
Patrick Mezard
|
r9826 | def d(): | ||
ui.pushbuffer() | ||||
commands.diff(ui, repo, **opts) | ||||
ui.popbuffer() | ||||
Augie Fackler
|
r43346 | |||
Pulkit Goyal
|
r40250 | diffopt = diffopt.encode('ascii') | ||
Pulkit Goyal
|
r39398 | title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none') | ||
Boris Feld
|
r40715 | timer(d, title=title) | ||
Pierre-Yves David
|
r23171 | fm.end() | ||
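# Illustrative note (not part of upstream perf.py): the loop above expands
# each flag string into keyword arguments for commands.diff(). For the 'wB'
# round the call is roughly
#     commands.diff(ui, repo, ignore_all_space=b'1', ignore_blank_lines=b'1')
# and the reported title is 'diffopts: -wB' ('diffopts: none' for the empty
# string), so every ignore-whitespace combination gets its own timing row.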
Patrick Mezard
|
r9826 | |||
Augie Fackler
|
r43346 | |||
@command(b'perfrevlogindex', revlogopts + formatteropts, b'-c|-m|FILE') | ||||
Gregory Szorc
|
r32532 | def perfrevlogindex(ui, repo, file_=None, **opts): | ||
"""Benchmark operations against a revlog index. | ||||
This tests constructing a revlog instance, reading index data, | ||||
parsing index data, and performing various operations related to | ||||
index data. | ||||
""" | ||||
Matt Harbison
|
r39850 | opts = _byteskwargs(opts) | ||
Pulkit Goyal
|
r39398 | rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts) | ||
Gregory Szorc
|
r32532 | |||
opener = getattr(rl, 'opener') # trick linter | ||||
indexfile = rl.indexfile | ||||
data = opener.read(indexfile) | ||||
Pulkit Goyal
|
r39398 | header = struct.unpack(b'>I', data[0:4])[0] | ||
Gregory Szorc
|
r32532 | version = header & 0xFFFF | ||
if version == 1: | ||||
revlogio = revlog.revlogio() | ||||
inline = header & (1 << 16) | ||||
else: | ||||
Augie Fackler
|
r43346 | raise error.Abort(b'unsupported revlog version: %d' % version) | ||
Gregory Szorc
|
r32532 | |||
rllen = len(rl) | ||||
node0 = rl.node(0) | ||||
node25 = rl.node(rllen // 4) | ||||
node50 = rl.node(rllen // 2) | ||||
node75 = rl.node(rllen // 4 * 3) | ||||
node100 = rl.node(rllen - 1) | ||||
allrevs = range(rllen) | ||||
allrevsrev = list(reversed(allrevs)) | ||||
allnodes = [rl.node(rev) for rev in range(rllen)] | ||||
allnodesrev = list(reversed(allnodes)) | ||||
def constructor(): | ||||
revlog.revlog(opener, indexfile) | ||||
def read(): | ||||
with opener(indexfile) as fh: | ||||
fh.read() | ||||
def parseindex(): | ||||
revlogio.parseindex(data, inline) | ||||
def getentry(revornode): | ||||
index = revlogio.parseindex(data, inline)[0] | ||||
index[revornode] | ||||
def getentries(revs, count=1): | ||||
index = revlogio.parseindex(data, inline)[0] | ||||
for i in range(count): | ||||
for rev in revs: | ||||
index[rev] | ||||
def resolvenode(node): | ||||
r43972 | index = revlogio.parseindex(data, inline)[0] | |||
rev = getattr(index, 'rev', None) | ||||
if rev is None: | ||||
nodemap = getattr( | ||||
revlogio.parseindex(data, inline)[0], 'nodemap', None | ||||
) | ||||
# This only works for the C code. | ||||
if nodemap is None: | ||||
return | ||||
rev = nodemap.__getitem__ | ||||
Gregory Szorc
|
r32532 | |||
try: | ||||
r43972 | rev(node) | |||
Gregory Szorc
|
r32532 | except error.RevlogError: | ||
pass | ||||
def resolvenodes(nodes, count=1): | ||||
r43972 | index = revlogio.parseindex(data, inline)[0] | |||
rev = getattr(index, 'rev', None) | ||||
if rev is None: | ||||
nodemap = getattr( | ||||
revlogio.parseindex(data, inline)[0], 'nodemap', None | ||||
) | ||||
# This only works for the C code. | ||||
if nodemap is None: | ||||
return | ||||
rev = nodemap.__getitem__ | ||||
Gregory Szorc
|
r32532 | |||
for i in range(count): | ||||
for node in nodes: | ||||
try: | ||||
r43972 | rev(node) | |||
Gregory Szorc
|
r32532 | except error.RevlogError: | ||
pass | ||||
benches = [ | ||||
Pulkit Goyal
|
r39398 | (constructor, b'revlog constructor'), | ||
(read, b'read'), | ||||
(parseindex, b'create index object'), | ||||
(lambda: getentry(0), b'retrieve index entry for rev 0'), | ||||
(lambda: resolvenode(b'a' * 20), b'look up missing node'), | ||||
(lambda: resolvenode(node0), b'look up node at rev 0'), | ||||
(lambda: resolvenode(node25), b'look up node at 1/4 len'), | ||||
(lambda: resolvenode(node50), b'look up node at 1/2 len'), | ||||
(lambda: resolvenode(node75), b'look up node at 3/4 len'), | ||||
(lambda: resolvenode(node100), b'look up node at tip'), | ||||
Gregory Szorc
|
r32532 | # 2x variation is to measure caching impact. | ||
Augie Fackler
|
r43346 | (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'), | ||
(lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'), | ||||
(lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'), | ||||
( | ||||
lambda: resolvenodes(allnodesrev, 2), | ||||
b'look up all nodes 2x (reverse)', | ||||
), | ||||
(lambda: getentries(allrevs), b'retrieve all index entries (forward)'), | ||||
( | ||||
lambda: getentries(allrevs, 2), | ||||
b'retrieve all index entries 2x (forward)', | ||||
), | ||||
( | ||||
lambda: getentries(allrevsrev), | ||||
b'retrieve all index entries (reverse)', | ||||
), | ||||
( | ||||
lambda: getentries(allrevsrev, 2), | ||||
b'retrieve all index entries 2x (reverse)', | ||||
), | ||||
Gregory Szorc
|
r32532 | ] | ||
for fn, title in benches: | ||||
timer, fm = gettimer(ui, opts) | ||||
timer(fn, title=title) | ||||
fm.end() | ||||
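# Illustrative note (not part of upstream perf.py): each entry of `benches`
# above is a (callable, title) pair and gets its own timer/formatter, so a
# new index micro-benchmark only needs one more tuple, for example
#     benches.append(
#         (lambda: getentries(allrevs, 3), b'retrieve all index entries 3x')
#     )
# added just before the timing loop at the end of perfrevlogindex.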
Augie Fackler
|
r43346 | |||
@command( | ||||
b'perfrevlogrevisions', | ||||
revlogopts | ||||
+ formatteropts | ||||
+ [ | ||||
(b'd', b'dist', 100, b'distance between the revisions'), | ||||
(b's', b'startrev', 0, b'revision to start reading at'), | ||||
(b'', b'reverse', False, b'read in reverse'), | ||||
], | ||||
b'-c|-m|FILE', | ||||
) | ||||
def perfrevlogrevisions( | ||||
ui, repo, file_=None, startrev=0, reverse=False, **opts | ||||
): | ||||
Gregory Szorc
|
r27492 | """Benchmark reading a series of revisions from a revlog. | ||
By default, we read every ``-d/--dist`` revision from 0 to tip of | ||||
the specified revlog. | ||||
Gregory Szorc
|
r27493 | |||
The start revision can be defined via ``-s/--startrev``. | ||||
Gregory Szorc
|
r27492 | """ | ||
Matt Harbison
|
r39850 | opts = _byteskwargs(opts) | ||
Pulkit Goyal
|
r39398 | rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts) | ||
Gregory Szorc
|
r32227 | rllen = getlen(ui)(rl) | ||
Gregory Szorc
|
r30017 | |||
Boris Feld
|
r40178 | if startrev < 0: | ||
startrev = rllen + startrev | ||||
Pradeepkumar Gayam
|
r11694 | def d(): | ||
Gregory Szorc
|
r32227 | rl.clearcaches() | ||
Gregory Szorc
|
r30017 | |||
Gregory Szorc
|
r32219 | beginrev = startrev | ||
Gregory Szorc
|
r32227 | endrev = rllen | ||
Pulkit Goyal
|
r39398 | dist = opts[b'dist'] | ||
Gregory Szorc
|
r30017 | |||
if reverse: | ||||
Boris Feld
|
r40573 | beginrev, endrev = endrev - 1, beginrev - 1 | ||
Gregory Szorc
|
r30017 | dist = -1 * dist | ||
Matt Harbison
|
r39847 | for x in _xrange(beginrev, endrev, dist): | ||
Gregory Szorc
|
r32297 | # Old revisions don't support passing int. | ||
n = rl.node(x) | ||||
rl.revision(n) | ||||
Pradeepkumar Gayam
|
r11694 | |||
Gregory Szorc
|
r32220 | timer, fm = gettimer(ui, opts) | ||
Pradeepkumar Gayam
|
r11694 | timer(d) | ||
Pierre-Yves David
|
r23171 | fm.end() | ||
Pradeepkumar Gayam
|
r11694 | |||
Augie Fackler
|
r43346 | |||
@command( | ||||
b'perfrevlogwrite', | ||||
revlogopts | ||||
+ formatteropts | ||||
+ [ | ||||
(b's', b'startrev', 1000, b'revision to start writing at'), | ||||
(b'', b'stoprev', -1, b'last revision to write'), | ||||
(b'', b'count', 3, b'number of passes to perform'), | ||||
(b'', b'details', False, b'print timing for every revision tested'),
(b'', b'source', b'full', b'the kind of data feed in the revlog'), | ||||
(b'', b'lazydeltabase', True, b'try the provided delta first'), | ||||
(b'', b'clear-caches', True, b'clear revlog cache between calls'), | ||||
], | ||||
b'-c|-m|FILE', | ||||
) | ||||
Boris Feld
|
r40583 | def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts): | ||
"""Benchmark writing a series of revisions to a revlog. | ||||
Boris Feld
|
r40586 | |||
Possible source values are: | ||||
* `full`: add from a full text (default). | ||||
Boris Feld
|
r40587 | * `parent-1`: add from a delta to the first parent | ||
Boris Feld
|
r40588 | * `parent-2`: add from a delta to the second parent if it exists | ||
(use a delta from the first parent otherwise) | ||||
Boris Feld
|
r40589 | * `parent-smallest`: add from the smallest delta (either p1 or p2) | ||
Boris Feld
|
r40590 | * `storage`: add from the existing precomputed deltas | ||
r42661 | ||||
Note: This command measures performance in a custom way. As a
result, some of the global configuration of the 'perf' command does not
apply to it: | ||||
* ``pre-run``: disabled | ||||
* ``profile-benchmark``: disabled | ||||
* ``run-limits``: disabled, use --count instead
Boris Feld
|
r40583 | """ | ||
opts = _byteskwargs(opts) | ||||
rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts) | ||||
rllen = getlen(ui)(rl) | ||||
if startrev < 0: | ||||
startrev = rllen + startrev | ||||
if stoprev < 0: | ||||
stoprev = rllen + stoprev | ||||
Boris Feld
|
r40591 | lazydeltabase = opts['lazydeltabase'] | ||
Boris Feld
|
r40586 | source = opts['source'] | ||
Boris Feld
|
r41038 | clearcaches = opts['clear_caches'] | ||
Augie Fackler
|
r43346 | validsource = ( | ||
b'full', | ||||
b'parent-1', | ||||
b'parent-2', | ||||
b'parent-smallest', | ||||
b'storage', | ||||
) | ||||
Boris Feld
|
r40586 | if source not in validsource: | ||
raise error.Abort('invalid source type: %s' % source) | ||||
Boris Feld
|
r40583 | ### actually gather results | ||
count = opts['count'] | ||||
if count <= 0: | ||||
raise error.Abort('invalid run count: %d' % count)
allresults = [] | ||||
for c in range(count): | ||||
Augie Fackler
|
r43346 | timing = _timeonewrite( | ||
ui, | ||||
rl, | ||||
source, | ||||
startrev, | ||||
stoprev, | ||||
c + 1, | ||||
lazydeltabase=lazydeltabase, | ||||
clearcaches=clearcaches, | ||||
) | ||||
Boris Feld
|
r40586 | allresults.append(timing) | ||
Boris Feld
|
r40583 | |||
### consolidate the results in a single list | ||||
results = [] | ||||
for idx, (rev, t) in enumerate(allresults[0]): | ||||
ts = [t] | ||||
for other in allresults[1:]: | ||||
orev, ot = other[idx] | ||||
assert orev == rev | ||||
ts.append(ot) | ||||
results.append((rev, ts)) | ||||
resultcount = len(results) | ||||
### Compute and display relevant statistics | ||||
# get a formatter | ||||
fm = ui.formatter(b'perf', opts) | ||||
displayall = ui.configbool(b"perf", b"all-timing", False) | ||||
Boris Feld
|
r40584 | # print individual details if requested | ||
if opts['details']: | ||||
for idx, item in enumerate(results, 1): | ||||
rev, data = item | ||||
title = 'revision #%d of %d, rev %d' % (idx, resultcount, rev)
formatone(fm, data, title=title, displayall=displayall) | ||||
Boris Feld
|
r40583 | # sorts results by median time | ||
results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2]) | ||||
# list of (name, index) to display) | ||||
relevants = [ | ||||
("min", 0), | ||||
("10%", resultcount * 10 // 100), | ||||
("25%", resultcount * 25 // 100), | ||||
("50%", resultcount * 70 // 100), | ||||
("75%", resultcount * 75 // 100), | ||||
("90%", resultcount * 90 // 100), | ||||
("95%", resultcount * 95 // 100), | ||||
("99%", resultcount * 99 // 100), | ||||
Boris Feld
|
r40992 | ("99.9%", resultcount * 999 // 1000), | ||
("99.99%", resultcount * 9999 // 10000), | ||||
("99.999%", resultcount * 99999 // 100000), | ||||
Boris Feld
|
r40583 | ("max", -1), | ||
] | ||||
Boris Feld
|
r40585 | if not ui.quiet: | ||
for name, idx in relevants: | ||||
data = results[idx] | ||||
title = '%s of %d, rev %d' % (name, resultcount, data[0]) | ||||
formatone(fm, data[1], title=title, displayall=displayall) | ||||
Boris Feld
|
r40583 | |||
# XXX summing that many float will not be very precise, we ignore this fact | ||||
# for now | ||||
totaltime = [] | ||||
for item in allresults: | ||||
Augie Fackler
|
r43346 | totaltime.append( | ||
( | ||||
sum(x[1][0] for x in item), | ||||
sum(x[1][1] for x in item), | ||||
sum(x[1][2] for x in item), | ||||
) | ||||
Boris Feld
|
r40583 | ) | ||
Augie Fackler
|
r43346 | formatone( | ||
fm, | ||||
totaltime, | ||||
title="total time (%d revs)" % resultcount, | ||||
displayall=displayall, | ||||
) | ||||
Boris Feld
|
r40583 | fm.end() | ||
Augie Fackler
|
r43346 | |||
Boris Feld
|
r40583 | class _faketr(object): | ||
def add(s, x, y, z=None): | ||||
return None | ||||
Augie Fackler
|
r43346 | |||
def _timeonewrite( | ||||
ui, | ||||
orig, | ||||
source, | ||||
startrev, | ||||
stoprev, | ||||
runidx=None, | ||||
lazydeltabase=True, | ||||
clearcaches=True, | ||||
): | ||||
Boris Feld
|
r40583 | timings = [] | ||
tr = _faketr() | ||||
with _temprevlog(ui, orig, startrev) as dest: | ||||
Boris Feld
|
r40591 | dest._lazydeltabase = lazydeltabase | ||
Boris Feld
|
r40583 | revs = list(orig.revs(startrev, stoprev)) | ||
total = len(revs) | ||||
topic = 'adding' | ||||
if runidx is not None: | ||||
topic += ' (run #%d)' % runidx | ||||
Augie Fackler
|
r43346 | # Support both old and new progress API | ||
Martin von Zweigbergk
|
r41191 | if util.safehasattr(ui, 'makeprogress'): | ||
progress = ui.makeprogress(topic, unit='revs', total=total) | ||||
Augie Fackler
|
r43346 | |||
Martin von Zweigbergk
|
r41191 | def updateprogress(pos): | ||
progress.update(pos) | ||||
Augie Fackler
|
r43346 | |||
Martin von Zweigbergk
|
r41191 | def completeprogress(): | ||
progress.complete() | ||||
Augie Fackler
|
r43346 | |||
Martin von Zweigbergk
|
r41191 | else: | ||
Augie Fackler
|
r43346 | |||
Martin von Zweigbergk
|
r41191 | def updateprogress(pos): | ||
ui.progress(topic, pos, unit='revs', total=total) | ||||
Augie Fackler
|
r43346 | |||
Martin von Zweigbergk
|
r41191 | def completeprogress(): | ||
ui.progress(topic, None, unit='revs', total=total) | ||||
Boris Feld
|
r40583 | for idx, rev in enumerate(revs): | ||
Martin von Zweigbergk
|
r41191 | updateprogress(idx) | ||
Boris Feld
|
r40586 | addargs, addkwargs = _getrevisionseed(orig, rev, tr, source) | ||
Boris Feld
|
r41013 | if clearcaches: | ||
dest.index.clearcaches() | ||||
dest.clearcaches() | ||||
Boris Feld
|
r40583 | with timeone() as r: | ||
dest.addrawrevision(*addargs, **addkwargs) | ||||
timings.append((rev, r[0])) | ||||
Martin von Zweigbergk
|
r41191 | updateprogress(total) | ||
completeprogress() | ||||
Boris Feld
|
r40583 | return timings | ||
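# Illustrative sketch (not part of upstream perf.py): the progress handling
# in _timeonewrite follows this file's "historical portability" policy:
# probe for the modern ui.makeprogress() API with util.safehasattr() and
# fall back to the legacy ui.progress() calls otherwise. The hypothetical
# helper below factors that shim out; it is not used by any perf command.
def _example_progress_shim(ui, topic, total):
    if util.safehasattr(ui, 'makeprogress'):
        progress = ui.makeprogress(topic, unit='revs', total=total)
        return progress.update, progress.complete

    def updateprogress(pos):
        ui.progress(topic, pos, unit='revs', total=total)

    def completeprogress():
        ui.progress(topic, None, unit='revs', total=total)

    return updateprogress, completeprogress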


def _getrevisionseed(orig, rev, tr, source):
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        baserev = orig.rev(p1)
        cachedelta = (baserev, orig.revdiff(p1, rev))
    elif source == b'parent-2':
        parent = p2
        if p2 == nullid:
            parent = p1
        baserev = orig.rev(parent)
        cachedelta = (baserev, orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        p1diff = orig.revdiff(p1, rev)
        parent = p1
        diff = p1diff
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(p1diff) > len(p2diff):
                parent = p2
                diff = p2diff
        baserev = orig.rev(parent)
        cachedelta = (baserev, diff)
    elif source == b'storage':
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return (
        (text, tr, linkrev, p1, p2),
        {'node': node, 'flags': flags, 'cachedelta': cachedelta},
    )
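

# Illustrative only: the (args, kwargs) pair returned by _getrevisionseed() is
# meant to be splatted straight into revlog.addrawrevision(), exactly as
# _timeonewrite() does above, e.g.:
#
#   addargs, addkwargs = _getrevisionseed(orig, rev, tr, b'parent-smallest')
#   dest.addrawrevision(*addargs, **addkwargs)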


@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    k = 'upperboundcomp'
    if util.safehasattr(orig, k):
        revlogkwargs[k] = getattr(orig, k)

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(
            vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
        )
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)


@command(
    b'perfrevlogchunks',
    revlogopts
    + formatteropts
    + [
        (b'e', b'engines', b'', b'compression engines to use'),
        (b's', b'startrev', 0, b'revision to start at'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression
    performance. For measurements of higher-level operations like resolving
    revisions, see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
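    # Example invocations (the zstd engine below is only an assumption; an
    # engine has to be compiled in to be selectable):
    #
    #   $ hg perfrevlogchunks -c
    #   $ hg perfrevlogchunks -m --startrev 1000
    #   $ hg perfrevlogchunks -c --engines zlib,zstd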
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = {e.strip() for e in engines.split(b',')}
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append(
            (
                functools.partial(docompress, compressor),
                b'compress w/ %s' % engine,
            )
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()


@command(
    b'perfrevlogrevision',
    revlogopts
    + formatteropts
    + [(b'', b'cache', False, b'use caches instead of clearing')],
    b'-c|-m|FILE REV',
)
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend(
        [
            (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
            (lambda: dodecompress(rawchunks), b'decompress'),
            (lambda: dopatch(text, bins), b'patch'),
            (lambda: dohash(text), b'hash'),
        ]
    )

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()


@command(
    b'perfrevset',
    [
        (b'C', b'clear', False, b'clear volatile cache between each call.'),
        (b'', b'contexts', False, b'obtain changectx for each revision'),
    ]
    + formatteropts,
    b"REVSET",
)
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of building the
    volatile revision set cache on revset execution. The volatile cache holds
    filtering and obsolescence related data."""
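    # For instance (any expression accepted by `hg log -r` works here):
    #
    #   $ hg perfrevset 'draft() and head()'
    #   $ hg perfrevset --clear --contexts 'heads(all())'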
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def d():
        if clear:
            repo.invalidatevolatilesets()
        if contexts:
            for ctx in repo.set(expr):
                pass
        else:
            for r in repo.revs(expr):
                pass

    timer(d)
    fm.end()


@command(
    b'perfvolatilesets',
    [
        (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
    ]
    + formatteropts,
)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile sets

    Volatile sets compute elements related to filtering and obsolescence."""
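    # For instance, to time only the "obsolete" set while dropping the
    # obsstore between calls:
    #
    #   $ hg perfvolatilesets --clear-obsstore obsolete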
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def getobs(name):
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            obsolete.getrevs(repo, name)

        return d

    allobs = sorted(obsolete.cachefuncs)
    if names:
        allobs = [n for n in allobs if n in names]

    for name in allobs:
        timer(getobs(name), title=name)

    def getfiltered(name):
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            repoview.filterrevs(repo, name)

        return d

    allfilter = sorted(repoview.filtertable)
    if names:
        allfilter = [n for n in allfilter if n in names]

    for name in allfilter:
        timer(getfiltered(name), title=name)
    fm.end()


@command(
    b'perfbranchmap',
    [
        (b'f', b'full', False, b'Includes build time of subset'),
        (
            b'',
            b'clear-revbranch',
            False,
            b'purge the revbranch cache between computation',
        ),
    ]
    + formatteropts,
)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)

    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches

        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                view._branchcaches.clear()
            else:
                filtered.pop(filtername, None)
            view.branchmap()

        return d

    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()

    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=str(printname))
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()


@command(
    b'perfbranchmapupdate',
    [
        (b'', b'base', [], b'subset of revisions to start from'),
        (b'', b'target', [], b'subset of revisions to end with'),
        (b'', b'clear-caches', False, b'clear caches between each run'),
    ]
    + formatteropts,
)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

       # update for the last revision only
       $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

       # update for a change coming with a new branch
       $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None]  # used to pass data between closures

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset was found
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)


@command(
    b'perfbranchmapload',
    [
        (b'f', b'filter', b'', b'Specify repoview filter'),
        (b'', b'list', False, b'List branchmap filter caches'),
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ]
    + formatteropts,
)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
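    # For instance:
    #
    #   $ hg perfbranchmapload --list
    #   $ hg perfbranchmapload --filter served --clear-revlogs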
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(
                    b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
                )
        return

    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap()  # make sure we have a relevant, up to date branchmap

    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached
    while fromfile(repo) is None:
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(
                b'No branchmap cached for %s repo' % (filter or b'unfiltered')
            )
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)

    def setup():
        if clearrevlogs:
            clearchangelog(repo)

    def bench():
        fromfile(repo)

    timer(bench, setup=setup)
    fm.end()


@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)
    timer(lambda: len(obsolete.obsstore(svfs)))
    fm.end()


@command(
    b'perflrucachedict',
    formatteropts
    + [
        (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
        (b'', b'mincost', 0, b'smallest cost of items in cache'),
        (b'', b'maxcost', 100, b'maximum cost of items in cache'),
        (b'', b'size', 4, b'size of cache'),
        (b'', b'gets', 10000, b'number of key lookups'),
        (b'', b'sets', 10000, b'number of key sets'),
        (b'', b'mixed', 10000, b'number of mixed mode operations'),
        (
            b'',
            b'mixedgetfreq',
            50,
            b'frequency of get vs set ops in mixed mode',
        ),
    ],
    norepo=True,
)
def perflrucache(
    ui,
    mincost=0,
    maxcost=100,
    costlimit=0,
    size=4,
    gets=10000,
    sets=10000,
    mixed=10000,
    mixedgetfreq=50,
    **opts
):
    opts = _byteskwargs(opts)
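
    # For instance, to stress the cost-aware code path of util.lrucachedict
    # with a small cache:
    #
    #   $ hg perflrucachedict --size 4 --costlimit 500 --mixed 100000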

    def doinit():
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value  # silence pyflakes warning

    def dogetscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value  # silence pyflakes warning
            except KeyError:
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0
        else:
            op = 1

        mixedops.append(
            (op, random.randint(0, size * 2), random.choice(costrange))
        )

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    if costlimit:
        benches.extend(
            [
                (dogetscost, b'gets w/ cost limit'),
                (doinsertscost, b'inserts w/ cost limit'),
                (domixedcost, b'mixed w/ cost limit'),
            ]
        )
    else:
        benches.extend(
            [
                (dogets, b'gets'),
                (doinserts, b'inserts'),
                (dosets, b'sets'),
                (domixed, b'mixed'),
            ]
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()


@command(
    b'perfwrite',
    formatteropts
    + [
        (b'', b'write-method', b'write', b'ui write method'),
        (b'', b'nlines', 100, b'number of lines'),
        (b'', b'nitems', 100, b'number of items (per line)'),
        (b'', b'item', b'x', b'item that is written'),
        (b'', b'batch-line', None, b'pass whole line to write method at once'),
        (b'', b'flush-line', None, b'flush after each line'),
    ],
)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write (and others)"""
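    # For instance (ui.status is just one example of an alternate write
    # method):
    #
    #   $ hg perfwrite --nlines 1000 --batch-line
    #   $ hg perfwrite --write-method status --flush-line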
    opts = _byteskwargs(opts)

    write = getattr(ui, _sysstr(opts[b'write_method']))
    nlines = int(opts[b'nlines'])
    nitems = int(opts[b'nitems'])
    item = opts[b'item']
    batch_line = opts.get(b'batch_line')
    flush_line = opts.get(b'flush_line')

    if batch_line:
        line = item * nitems + b'\n'

    def benchmark():
        for i in pycompat.xrange(nlines):
            if batch_line:
                write(line)
            else:
                for i in pycompat.xrange(nitems):
                    write(item)
                write(b'\n')
            if flush_line:
                ui.flush()
        ui.flush()

    timer, fm = gettimer(ui, opts)
    timer(benchmark)
    fm.end()


def uisetup(ui):
    if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
        commands, b'debugrevlogopts'
    ):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(
                    b"This version doesn't support --dir option",
                    hint=b"use 3.5 or later",
                )
            return orig(repo, cmd, file_, opts)

        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)


@command(
    b'perfprogress',
    formatteropts
    + [
        (b'', b'topic', b'topic', b'topic for progress messages'),
        (b'c', b'total', 1000000, b'total value we are progressing to'),
    ],
    norepo=True,
)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def doprogress():
        with ui.makeprogress(topic, total=total) as progress:
            for i in _xrange(total):
                progress.increment()

    timer(doprogress)
    fm.end()