# perf.py - performance test routines
'''helper extension to measure performance'''

# "historical portability" policy of perf.py:
#
# We have to do:
# - make perf.py "loadable" with as wide Mercurial version as possible
#   This doesn't mean that perf commands work correctly with that Mercurial.
#   BTW, perf.py itself has been available since 1.1 (or eb240755386d).
# - make historical perf command work correctly with as wide Mercurial
#   version as possible
#
# We have to do, if possible with reasonable cost:
# - make recent perf command for historical feature work correctly
#   with early Mercurial
#
# We don't have to do:
# - make perf command for recent feature work correctly with early
#   Mercurial
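#
# Example setup (illustrative; adjust the path to your checkout): enable the
# extension from an hgrc and run the perf* commands like any other hg command:
#   [extensions]
#   perf = /path/to/mercurial/contrib/perf.py
# then e.g. `hg perfheads` or `hg perfindex --rev tip`.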

from __future__ import absolute_import
import contextlib
import functools
import gc
import os
import random
import shutil
import struct
import sys
import tempfile
import threading
import time
from mercurial import (
    changegroup,
    cmdutil,
    commands,
    copies,
    error,
    extensions,
    hg,
    mdiff,
    merge,
    revlog,
    util,
)

# for "historical portability":
# try to import modules separately (in dict order), and ignore
# failure, because these aren't available with early Mercurial
try:
    from mercurial import branchmap # since 2.5 (or bcee63733aad)
except ImportError:
    pass
try:
    from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
except ImportError:
    pass
try:
    from mercurial import registrar # since 3.7 (or 37d50250b696)
    dir(registrar) # forcibly load it
except ImportError:
    registrar = None
try:
    from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
except ImportError:
    pass
try:
    from mercurial import scmutil # since 1.9 (or 8b252e826c68)
except ImportError:
    pass
try:
    from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
except ImportError:
    pass

def identity(a):
    return a

try:
    from mercurial import pycompat
    getargspec = pycompat.getargspec # added to module after 4.5
    _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
    _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
    _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
    fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
    if pycompat.ispy3:
        _maxint = sys.maxsize # per py3 docs for replacing maxint
    else:
        _maxint = sys.maxint
except (ImportError, AttributeError):
    import inspect
    getargspec = inspect.getargspec
    _byteskwargs = identity
    fsencode = identity # no py3 support
    _maxint = sys.maxint # no py3 support
    _sysstr = lambda x: x # no py3 support
    _xrange = xrange

try:
    # 4.7+
    queue = pycompat.queue.Queue
except (AttributeError, ImportError):
    # <4.7.
    try:
        queue = pycompat.queue
    except (AttributeError, ImportError):
        queue = util.queue

try:
    from mercurial import logcmdutil
    makelogtemplater = logcmdutil.maketemplater
except (AttributeError, ImportError):
    try:
        makelogtemplater = cmdutil.makelogtemplater
    except (AttributeError, ImportError):
        makelogtemplater = None

# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()
def safehasattr(thing, attr):
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined
setattr(util, 'safehasattr', safehasattr)
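# (note: the sentinel-based getattr above mirrors util.safehasattr and avoids
# the broad exception-swallowing behaviour of a bare hasattr() on Python 2)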

# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == b'nt':
    util.timer = time.clock
else:
    util.timer = time.time

# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(cmdutil, "formatteropts",
                        getattr(commands, "formatteropts", []))

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(cmdutil, "debugrevlogopts",
                     getattr(commands, "debugrevlogopts", [
        (b'c', b'changelog', False, (b'open changelog')),
        (b'm', b'manifest', False, (b'open manifest')),
        (b'', b'dir', False, (b'open directory manifest')),
        ]))

cmdtable = {}

# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    return cmd.split(b"|")

if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command
        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)
else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func
        return decorator

try:
    import mercurial.registrar
    import mercurial.configitems
    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(b'perf', b'presleep',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'stub',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'parentscount',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(b'perf', b'all-timing',
        default=mercurial.configitems.dynamicdefault,
    )
except (ImportError, AttributeError):
    pass

def getlen(ui):
    if ui.configbool(b"perf", b"stub", False):
        return lambda x: 1
    return len

def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node
        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """
            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short
            def __nonzero__(self):
                return False
            __bool__ = __nonzero__
            def startitem(self):
                pass
            def data(self, **data):
                pass
            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)
            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)
            def plain(self, text, **opts):
                self._ui.write(text, **opts)
            def end(self):
                pass
        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)
    return functools.partial(_timer, fm, displayall=displayall), fm

def stub_timer(fm, func, setup=None, title=None):
    if setup is not None:
        setup()
    func()

@contextlib.contextmanager
def timeone():
    r = []
    ostart = os.times()
    cstart = util.timer()
    yield r
    cstop = util.timer()
    ostop = os.times()
    a, b = ostart, ostop
    r.append((cstop - cstart, b[0] - a[0], b[1]-a[1]))
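
# note: each sample appended by timeone() is a (wallclock, user-cpu,
# system-cpu) triple derived from util.timer() and os.times(); the optional
# 'setup' callable accepted by _timer() below runs before every timed
# iteration, outside the timeone() block, so its work (e.g. clearing caches)
# is excluded from the reported numbers.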

def _timer(fm, func, setup=None, title=None, displayall=False):
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    while True:
        if setup is not None:
            setup()
        with timeone() as item:
            r = func()
        count += 1
        results.append(item[0])
        cstop = util.timer()
        if cstop - begin > 3 and count >= 100:
            break
        if cstop - begin > 10 and count >= 3:
            break

    formatone(fm, results, title=title, result=r,
              displayall=displayall)
def formatone(fm, timings, title=None, result=None, displayall=False): | ||||
count = len(timings) | ||||
Pierre-Yves David
|
r23171 | fm.startitem() | ||
Patrick Mezard
|
r9826 | if title: | ||
Pulkit Goyal
|
r39398 | fm.write(b'title', b'! %s\n', title) | ||
Boris Feld
|
r40180 | if result: | ||
fm.write(b'result', b'! result: %s\n', result) | ||||
Boris Feld
|
r38716 | def display(role, entry): | ||
Pulkit Goyal
|
r39398 | prefix = b'' | ||
if role != b'best': | ||||
prefix = b'%s.' % role | ||||
fm.plain(b'!') | ||||
fm.write(prefix + b'wall', b' wall %f', entry[0]) | ||||
fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2]) | ||||
fm.write(prefix + b'user', b' user %f', entry[1]) | ||||
fm.write(prefix + b'sys', b' sys %f', entry[2]) | ||||
Boris Feld
|
r40176 | fm.write(prefix + b'count', b' (%s of %%d)' % role, count) | ||
Pulkit Goyal
|
r39398 | fm.plain(b'\n') | ||
Boris Feld
|
r40180 | timings.sort() | ||
min_val = timings[0] | ||||
Pulkit Goyal
|
r39398 | display(b'best', min_val) | ||
Boris Feld
|
r38716 | if displayall: | ||
Boris Feld
|
r40180 | max_val = timings[-1] | ||
Pulkit Goyal
|
r39398 | display(b'max', max_val) | ||
Boris Feld
|
r40180 | avg = tuple([sum(x) / count for x in zip(*timings)]) | ||
Pulkit Goyal
|
r39398 | display(b'avg', avg) | ||
Boris Feld
|
r40180 | median = timings[len(timings) // 2] | ||
Pulkit Goyal
|
r39398 | display(b'median', median) | ||
Matt Mackall
|
r7366 | |||
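
# For reference, display() above emits lines of the shape
#   ! wall 0.004000 comb 0.010000 user 0.010000 sys 0.000000 (best of 25)
# (values here are made up), with additional max/avg/median lines when
# perf.all-timing is enabled.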

# utilities for historical portability

def getint(ui, section, name, default):
    # for "historical portability":
    # ui.configint has been available since 1.9 (or fa2b596db182)
    v = ui.config(section, name, None)
    if v is None:
        return default
    try:
        return int(v)
    except ValueError:
        raise error.ConfigError((b"%s.%s is not an integer ('%s')")
                                % (section, name, v))

def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has the 'name' attribute before subsequent setattr

    This function aborts if 'obj' doesn't have the 'name' attribute at
    runtime. This avoids overlooking future removal of an attribute, which
    would break the assumptions of performance measurement.

    This function returns an object to (1) assign a new value, and
    (2) restore the original value of the attribute.

    If 'ignoremissing' is true, a missing 'name' attribute doesn't cause
    an abort, and this function returns None. This is useful to examine
    an attribute, which isn't ensured in all Mercurial versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort((b"missing attribute %s of %s might break assumption"
                           b" of performance measurement") % (name, obj))

    origvalue = getattr(obj, _sysstr(name))
    class attrutil(object):
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)
        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
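
# Typical use of safeattrsetter() (as in gettimer() above):
#   saved = safeattrsetter(ui, b'fout', ignoremissing=True)
#   if saved:
#       saved.set(ui.ferr)   # ...and saved.restore() to put the old value back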

# utilities to examine each internal API changes

def getbranchmapsubsettable():
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    for mod in (branchmap, repoview):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort((b"perfbranchmap not available with this Mercurial"),
                      hint=b"use 2.5 or later")

def getsvfs(repo):
    """Return appropriate object to access files under .hg/store
    """
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf)
    svfs = getattr(repo, 'svfs', None)
    if svfs:
        return svfs
    else:
        return getattr(repo, 'sopener')

def getvfs(repo):
    """Return appropriate object to access files under .hg
    """
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf)
    vfs = getattr(repo, 'vfs', None)
    if vfs:
        return vfs
    else:
        return getattr(repo, 'opener')

def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if b'_tagscache' in vars(repo):
                del repo.__dict__[b'_tagscache']
        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags: # since 1.4 (or 5614a628d173)
        return lambda : repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache: # since 0.6 (or d7df759d0e97)
        return lambda : repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort((b"tags API of this hg command is unknown"))

# utilities to clear cache

def clearfilecache(obj, attrname):
    unfiltered = getattr(obj, 'unfiltered', None)
    if unfiltered is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)

def clearchangelog(repo):
    if repo is not repo.unfiltered():
        object.__setattr__(repo, r'_clcachekey', None)
        object.__setattr__(repo, r'_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')
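
# Dropping both the cached attribute and its _filecache entry (as above)
# makes the next access, e.g. repo.changelog, rebuild the structure from disk
# instead of reusing the memoized value.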

# perf commands

@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    timer(lambda: len(list(repo.dirstate.walk(m, subrepos=[], unknown=True,
                                              ignored=False))))
    fm.end()

@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    timer(lambda: len(fc.annotate(True)))
    fm.end()

@command(b'perfstatus',
         [(b'u', b'unknown', False,
           b'ask status to look for unknown files')] + formatteropts)
def perfstatus(ui, repo, **opts):
    opts = _byteskwargs(opts)
    #m = match.always(repo.root, repo.getcwd())
    #timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    #                                                False))))
    timer, fm = gettimer(ui, opts)
    timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()

@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    try:
        oldquiet = repo.ui.quiet
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()

def clearcaches(cl):
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        from mercurial.node import nullid, nullrev
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None

@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def s():
        clearcaches(cl)
    def d():
        len(cl.headrevs())
    timer(d, setup=s)
    fm.end()

@command(b'perftags', formatteropts+
         [
          (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perftags(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']
    def s():
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()
    def t():
        return len(repo.tags())
    timer(t, setup=s)
    fm.end()

@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()
    def d():
        for a in repo.changelog.ancestors(heads):
            pass
    timer(d)
    fm.end()

@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()
    def d():
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in s
    timer(d)
    fm.end()

@command(b'perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path
    """
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)
    path = ui.expandpath(path)

    def s():
        repos[1] = hg.peer(ui, opts, path)
    def d():
        setdiscovery.findcommonheads(ui, *repos)
    timer(d, setup=s)
    fm.end()

@command(b'perfbookmarks', formatteropts +
         [
          (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
         ])
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']
    def s():
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')
    def d():
        repo._bookmarks
    timer(d, setup=s)
    fm.end()

@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    def makebench(fn):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]
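
    # (each benches entry pairs a no-argument callable with the title string
    # later passed to timer(fn, title=title))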

    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend([
                (makebench(deltaiter), b'cg1 deltaiter()'),
                (makebench(iterchunks), b'cg1 getchunks()'),
                (makereadnbytes(8192), b'cg1 read(8k)'),
                (makereadnbytes(16384), b'cg1 read(16k)'),
                (makereadnbytes(32768), b'cg1 read(32k)'),
                (makereadnbytes(131072), b'cg1 read(128k)'),
            ])
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend([
                (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                (makebench(iterparts), b'bundle2 iterparts()'),
                (makebench(iterpartsseekable), b'bundle2 iterparts() seekable'),
                (makebench(seek), b'bundle2 part seek()'),
                (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
            ])
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()

@command(b'perfchangegroupchangelog', formatteropts +
         [(b'', b'cgversion', b'02', b'changegroup version'),
          (b'r', b'rev', b'', b'revisions to add to changegroup')])
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()

@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate
    def d():
        dirstate.hasdir(b'a')
        del dirstate._map._dirs
    timer(d)
    fm.end()

@command(b'perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate
    def d():
        repo.dirstate.invalidate()
        b"a" in repo.dirstate
    timer(d)
    fm.end()

@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate
    def d():
        repo.dirstate.hasdir(b"a")
        del repo.dirstate._map._dirs
    timer(d)
    fm.end()

@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate
    def d():
        dirstate._map.filefoldmap.get(b'a')
        del dirstate._map.filefoldmap
    timer(d)
    fm.end()

@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate
    def d():
        dirstate._map.dirfoldmap.get(b'a')
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs
    timer(d)
    fm.end()

@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    b"a" in ds
    def d():
        ds._dirty = True
        ds.write(repo.currenttransaction())
    timer(d)
    fm.end()

@command(b'perfmergecalculate',
         [(b'r', b'rev', b'.', b'rev to merge against')] + formatteropts)
def perfmergecalculate(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx = repo[None]
    rctx = scmutil.revsingle(repo, rev, rev)
    ancestor = wctx.ancestor(rctx)
    # we don't want working dir files to be stat'd in the benchmark, so prime
    # that cache
    wctx.dirty()
    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(repo, wctx, rctx, [ancestor], False, False,
                               acceptremote=True, followcopies=True)
    timer(d)
    fm.end()

@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)
    def d():
        copies.pathcopies(ctx1, ctx2)
    timer(d)
    fm.end()

@command(b'perfphases',
         [(b'', b'full', False, b'include file reading time too'),
         ], b"")
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')
    def d():
        phases = _phases
        if full:
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)
    timer(d)
    fm.end()

@command(b'perfphasesremote',
         [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import (
        bin,
    )
    from mercurial import (
        exchange,
        hg,
        phases,
    )
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort((b'default repository not configured!'),
                          hint=(b"see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    ui.status((b'analysing phase of %s\n') % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(b'listkeys',
                                     {b'namespace': b'phases'}).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.status((b'publishing: yes\n'))
    else:
        ui.status((b'publishing: no\n'))

    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing': # ignore data related to publish option
            continue
        node = bin(nhex)
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.status((b'number of roots: %d\n') % len(remotephases))
    ui.status((b'number of known non public roots: %d\n') % nonpublishroots)
    def d():
        phases.remotephasessummary(repo,
                                   remotesubset,
                                   remotephases)
    timer(d)
    fm.end()

@command(b'perfmanifest',[
            (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
            (b'', b'clear-disk', False, b'clear on-disk caches too'),
         ] + formatteropts, b'REV|NODE')
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(b'manifest revision must be integer or full '
                                  b'node')
    def d():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()
    timer(d)
    fm.end()

@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    n = scmutil.revsingle(repo, rev).node()
    def d():
        repo.changelog.read(n)
        #repo.changelog._cache = None
    timer(d)
    fm.end()

@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def setupone():
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def runone():
        dirstate._ignore

    timer(runone, setup=setupone, title=b"load")
    fm.end()

@command(b'perfindex', [
            (b'', b'rev', [], b'revision to be looked up (default tip)'),
            (b'', b'no-lookup', None, b'do not revision lookup post creation'),
         ] + formatteropts)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matter. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matter.

    Example of useful sets to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, check out the `perfnodemap` command."""
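    # example invocations (illustrative): `hg perfindex --rev tip` or
    # `hg perfindex --rev '-10000:' --rev 0`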
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    if opts[b'no_lookup']:
        if opts['rev']:
            raise error.Abort('--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoids polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)
    timer(d, setup=setup)
    fm.end()

@command(b'perfnodemap', [
          (b'', b'rev', [], b'revision to be looked up (default tip)'),
          (b'', b'clear-caches', True, b'clear revlog cache between calls'),
         ] + formatteropts)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revisions from a cold nodemap

    Depending on the implementation, the number and order of revisions we
    look up can vary. Example of useful sets to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focuses on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg

    unfi = repo.unfiltered()
    clearcaches = opts['clear_caches']
    # find the filecache func directly
    # This avoids polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort('use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]
    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        nodeget[0] = makecl(unfi).nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:
        def setup():
            setnodeget()
    else:
        setnodeget()
        d() # prewarm the data structure
    timer(d, setup=setup)
    fm.end()

@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def d():
        if os.name != r'nt':
            os.system(b"HGRCPATH= %s version -q > /dev/null" %
                      fsencode(sys.argv[0]))
        else:
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])
    timer(d)
    fm.end()

@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nl = [repo.changelog.node(i) for i in _xrange(count)]
    def d():
        for n in nl:
            repo.changelog.parents(n)
    timer(d)
    fm.end()

@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    def d():
        len(repo[x].files())
    timer(d)
    fm.end()

@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    def d():
        len(cl.read(x)[3])
    timer(d)
    fm.end()

@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: len(repo.lookup(rev)))
    fm.end()

@command(b'perflinelogedits',
         [(b'n', b'edits', 10000, b'number of edits'),
          (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
         ], norepo=True)
def perflinelogedits(ui, **opts):
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revrange = scmutil.revrange
    timer(lambda: len(revrange(repo, specs)))
    fm.end()

@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog
    mercurial.revlog._prereadsize = 2**24 # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
    def d():
        cl.rev(n)
        clearcaches(cl)
    timer(d)
    fm.end()

@command(b'perflog',
         [(b'', b'rename', False, b'ask log to follow renames')
         ] + formatteropts)
def perflog(ui, repo, rev=None, **opts):
    opts = _byteskwargs(opts)
    if rev is None:
        rev=[]
    timer, fm = gettimer(ui, opts)
    ui.pushbuffer()
    timer(lambda: commands.log(ui, repo, rev=rev, date=b'', user=b'',
                               copies=opts.get(b'rename')))
    ui.popbuffer()
    fm.end()

@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    def moonwalk():
        for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            ctx = repo[i]
            ctx.branch() # read changelog data (in addition to the index)
    timer(moonwalk)
    fm.end()
Brodie Rao
|
r20178 | |||
Pulkit Goyal
|
r39398 | @command(b'perftemplating', | ||
[(b'r', b'rev', [], b'revisions to run the template on'), | ||||
] + formatteropts) | ||||
Boris Feld
|
r38277 | def perftemplating(ui, repo, testedtemplate=None, **opts): | ||
"""test the rendering time of a given template""" | ||||
Boris Feld
|
r38276 | if makelogtemplater is None: | ||
Pulkit Goyal
|
r39398 | raise error.Abort((b"perftemplating not available with this Mercurial"), | ||
hint=b"use 4.3 or later") | ||||
Boris Feld
|
r38276 | |||
Matt Harbison
|
r39850 | opts = _byteskwargs(opts) | ||
Boris Feld
|
r38274 | nullui = ui.copy() | ||
Matt Harbison
|
r39849 | nullui.fout = open(os.devnull, r'wb') | ||
Boris Feld
|
r38274 | nullui.disablepager() | ||
Pulkit Goyal
|
r39398 | revs = opts.get(b'rev') | ||
Boris Feld
|
r38276 | if not revs: | ||
Pulkit Goyal
|
r39398 | revs = [b'all()'] | ||
Boris Feld
|
r38276 | revs = list(scmutil.revrange(repo, revs)) | ||
Pulkit Goyal
|
r39398 | defaulttemplate = (b'{date|shortdate} [{rev}:{node|short}]' | ||
b' {author|person}: {desc|firstline}\n') | ||||
Boris Feld
|
r38277 | if testedtemplate is None: | ||
testedtemplate = defaulttemplate | ||||
displayer = makelogtemplater(nullui, repo, testedtemplate) | ||||
Boris Feld
|
r38273 | def format(): | ||
Boris Feld
|
r38276 | for r in revs: | ||
ctx = repo[r] | ||||
displayer.show(ctx) | ||||
displayer.flush(ctx) | ||||
Boris Feld
|
r38273 | |||
Pierre-Yves David
|
r25494 | timer, fm = gettimer(ui, opts) | ||
Boris Feld
|
r38273 | timer(format) | ||
Pierre-Yves David
|
r23171 | fm.end() | ||
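# Illustrative invocation (the revset and template below are examples only,
# not taken from any particular repository):
#
#   $ hg perftemplating -r 'last(1000)' '{rev}:{node|short} {desc|firstline}\n'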
Alexander Solovyov
|
r7872 | |||
Boris Feld
|
r40771 | @command(b'perfhelper-pathcopies', formatteropts + | ||
Boris Feld
|
r40727 | [ | ||
(b'r', b'revs', [], b'restrict search to these revisions'), | ||||
Boris Feld
|
r40762 | (b'', b'timing', False, b'provides extra data (costly)'), | ||
Boris Feld
|
r40727 | ]) | ||
Boris Feld
|
r40771 | def perfhelperpathcopies(ui, repo, revs=[], **opts): | ||
Boris Feld
|
r40727 | """find statistic about potential parameters for the `perftracecopies` | ||
This command find source-destination pair relevant for copytracing testing. | ||||
It report value for some of the parameters that impact copy tracing time. | ||||
Boris Feld
|
r40762 | |||
If `--timing` is set, rename detection is run and the associated timing | ||||
will be reported. The extra details come at the cost of slower command | ||||
execution. | ||||
Since the rename detection is only run once, other factors might easily | ||||
affect the precision of the timing. However, it should give a good | ||||
approximation of which revision pairs are very costly. | ||||
Boris Feld
|
r40727 | """ | ||
opts = _byteskwargs(opts) | ||||
fm = ui.formatter(b'perf', opts) | ||||
Boris Feld
|
r40762 | dotiming = opts[b'timing'] | ||
if dotiming: | ||||
header = '%12s %12s %12s %12s %12s %12s\n' | ||||
output = ("%(source)12s %(destination)12s " | ||||
"%(nbrevs)12d %(nbmissingfiles)12d " | ||||
"%(nbrenamedfiles)12d %(time)18.5f\n") | ||||
header_names = ("source", "destination", "nb-revs", "nb-files", | ||||
"nb-renames", "time") | ||||
fm.plain(header % header_names) | ||||
else: | ||||
header = '%12s %12s %12s %12s\n' | ||||
output = ("%(source)12s %(destination)12s " | ||||
"%(nbrevs)12d %(nbmissingfiles)12d\n") | ||||
fm.plain(header % ("source", "destination", "nb-revs", "nb-files")) | ||||
Boris Feld
|
r40727 | |||
if not revs: | ||||
revs = ['all()'] | ||||
revs = scmutil.revrange(repo, revs) | ||||
roi = repo.revs('merge() and %ld', revs) | ||||
for r in roi: | ||||
ctx = repo[r] | ||||
p1 = ctx.p1().rev() | ||||
p2 = ctx.p2().rev() | ||||
bases = repo.changelog._commonancestorsheads(p1, p2) | ||||
for p in (p1, p2): | ||||
for b in bases: | ||||
base = repo[b] | ||||
parent = repo[p] | ||||
missing = copies._computeforwardmissing(base, parent) | ||||
if not missing: | ||||
continue | ||||
data = { | ||||
b'source': base.hex(), | ||||
b'destination': parent.hex(), | ||||
b'nbrevs': len(repo.revs('%d::%d', b, p)), | ||||
b'nbmissingfiles': len(missing), | ||||
} | ||||
Boris Feld
|
r40762 | if dotiming: | ||
begin = util.timer() | ||||
renames = copies.pathcopies(base, parent) | ||||
end = util.timer() | ||||
# not very stable timing since we did only one run | ||||
data['time'] = end - begin | ||||
data['nbrenamedfiles'] = len(renames) | ||||
fm.startitem() | ||||
Boris Feld
|
r40727 | fm.data(**data) | ||
out = data.copy() | ||||
out['source'] = fm.hexfunc(base.node()) | ||||
out['destination'] = fm.hexfunc(parent.node()) | ||||
fm.plain(output % out) | ||||
Boris Feld
|
r40762 | |||
Boris Feld
|
r40727 | fm.end() | ||
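# Illustrative invocation (the revset is an example only); --timing adds the
# rename-detection columns at the cost of a slower run:
#
#   $ hg perfhelper-pathcopies --revs 'merge()' --timing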
Pulkit Goyal
|
r39398 | @command(b'perfcca', formatteropts) | ||
Pierre-Yves David
|
r25494 | def perfcca(ui, repo, **opts): | ||
Matt Harbison
|
r39850 | opts = _byteskwargs(opts) | ||
Pierre-Yves David
|
r25494 | timer, fm = gettimer(ui, opts) | ||
Joshua Redstone
|
r17216 | timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate)) | ||
Pierre-Yves David
|
r23171 | fm.end() | ||
Matt Mackall
|
r16386 | |||
Pulkit Goyal
|
r39398 | @command(b'perffncacheload', formatteropts) | ||
Pierre-Yves David
|
r25494 | def perffncacheload(ui, repo, **opts): | ||
Matt Harbison
|
r39850 | opts = _byteskwargs(opts) | ||
Pierre-Yves David
|
r25494 | timer, fm = gettimer(ui, opts) | ||
Adrian Buehlmann
|
r17780 | s = repo.store | ||
Bryan O'Sullivan
|
r16403 | def d(): | ||
s.fncache._load() | ||||
timer(d) | ||||
Pierre-Yves David
|
r23171 | fm.end() | ||
Bryan O'Sullivan
|
r16403 | |||
Pulkit Goyal
|
r39398 | @command(b'perffncachewrite', formatteropts) | ||
Pierre-Yves David
|
r25494 | def perffncachewrite(ui, repo, **opts): | ||
Matt Harbison
|
r39850 | opts = _byteskwargs(opts) | ||
Pierre-Yves David
|
r25494 | timer, fm = gettimer(ui, opts) | ||
Adrian Buehlmann
|
r17780 | s = repo.store | ||
Boris Feld
|
r38717 | lock = repo.lock() | ||
Bryan O'Sullivan
|
r16403 | s.fncache._load() | ||
Pulkit Goyal
|
r39398 | tr = repo.transaction(b'perffncachewrite') | ||
tr.addbackup(b'fncache') | ||||
Bryan O'Sullivan
|
r16403 | def d(): | ||
s.fncache._dirty = True | ||||
timeless
|
r27097 | s.fncache.write(tr) | ||
Bryan O'Sullivan
|
r16403 | timer(d) | ||
Pierre-Yves David
|
r30069 | tr.close() | ||
timeless
|
r27097 | lock.release() | ||
Pierre-Yves David
|
r23171 | fm.end() | ||
Bryan O'Sullivan
|
r16403 | |||
Pulkit Goyal
|
r39398 | @command(b'perffncacheencode', formatteropts) | ||
Pierre-Yves David
|
r25494 | def perffncacheencode(ui, repo, **opts): | ||
Matt Harbison
|
r39850 | opts = _byteskwargs(opts) | ||
Pierre-Yves David
|
r25494 | timer, fm = gettimer(ui, opts) | ||
Adrian Buehlmann
|
r17780 | s = repo.store | ||
Adrian Buehlmann
|
r17553 | s.fncache._load() | ||
def d(): | ||||
for p in s.fncache.entries: | ||||
s.encode(p) | ||||
timer(d) | ||||
Pierre-Yves David
|
r23171 | fm.end() | ||
Adrian Buehlmann
|
r17553 | |||
Gregory Szorc
|
r36784 | def _bdiffworker(q, blocks, xdiff, ready, done): | ||
Boris Feld
|
r35617 | while not done.is_set(): | ||
pair = q.get() | ||||
while pair is not None: | ||||
Gregory Szorc
|
r36784 | if xdiff: | ||
mdiff.bdiff.xdiffblocks(*pair) | ||||
elif blocks: | ||||
mdiff.bdiff.blocks(*pair) | ||||
else: | ||||
mdiff.textdiff(*pair) | ||||
Boris Feld
|
r35617 | q.task_done() | ||
pair = q.get() | ||||
q.task_done() # for the None one | ||||
with ready: | ||||
ready.wait() | ||||
Gregory Szorc
|
r39355 | def _manifestrevision(repo, mnode): | ||
ml = repo.manifestlog | ||||
Pulkit Goyal
|
r39398 | if util.safehasattr(ml, b'getstorage'): | ||
Gregory Szorc
|
r39355 | store = ml.getstorage(b'') | ||
else: | ||||
store = ml._revlog | ||||
return store.revision(mnode) | ||||
Pulkit Goyal
|
r39398 | @command(b'perfbdiff', revlogopts + formatteropts + [ | ||
(b'', b'count', 1, b'number of revisions to test (when using --startrev)'), | ||||
(b'', b'alldata', False, b'test bdiffs for all associated revisions'), | ||||
(b'', b'threads', 0, b'number of thread to use (disable with 0)'), | ||||
(b'', b'blocks', False, b'test computing diffs into blocks'), | ||||
(b'', b'xdiff', False, b'use xdiff algorithm'), | ||||
Boris Feld
|
r35617 | ], | ||
Pulkit Goyal
|
r39398 | b'-c|-m|FILE REV') | ||
Boris Feld
|
r35617 | def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts): | ||
Gregory Szorc
|
r30336 | """benchmark a bdiff between revisions | ||
By default, benchmark a bdiff between the requested revision and its delta parent. | ||||
With ``--count``, benchmark bdiffs between delta parents and self for N | ||||
revisions starting at the specified revision. | ||||
Gregory Szorc
|
r30337 | |||
With ``--alldata``, assume the requested revision is a changeset and | ||||
measure bdiffs for all changes related to that changeset (manifest | ||||
and filelogs). | ||||
Gregory Szorc
|
r30336 | """ | ||
Matt Harbison
|
r39850 | opts = _byteskwargs(opts) | ||
Gregory Szorc
|
r36784 | |||
Pulkit Goyal
|
r39398 | if opts[b'xdiff'] and not opts[b'blocks']: | ||
raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks') | ||||
Gregory Szorc
|
r36784 | |||
Pulkit Goyal
|
r39398 | if opts[b'alldata']: | ||
opts[b'changelog'] = True | ||||
Gregory Szorc
|
r30337 | |||
Pulkit Goyal
|
r39398 | if opts.get(b'changelog') or opts.get(b'manifest'): | ||
Gregory Szorc
|
r30307 | file_, rev = None, file_ | ||
elif rev is None: | ||||
Pulkit Goyal
|
r39398 | raise error.CommandError(b'perfbdiff', b'invalid arguments') | ||
Gregory Szorc
|
r30307 | |||
Pulkit Goyal
|
r39398 | blocks = opts[b'blocks'] | ||
xdiff = opts[b'xdiff'] | ||||
Gregory Szorc
|
r30335 | textpairs = [] | ||
Pulkit Goyal
|
r39398 | r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts) | ||
Gregory Szorc
|
r30307 | |||
Gregory Szorc
|
r30336 | startrev = r.rev(r.lookup(rev)) | ||
for rev in range(startrev, min(startrev + count, len(r) - 1)): | ||||
Pulkit Goyal
|
r39398 | if opts[b'alldata']: | ||
Gregory Szorc
|
r30337 | # Load revisions associated with changeset. | ||
ctx = repo[rev] | ||||
Gregory Szorc
|
r39355 | mtext = _manifestrevision(repo, ctx.manifestnode()) | ||
Gregory Szorc
|
r30337 | for pctx in ctx.parents(): | ||
Gregory Szorc
|
r39355 | pman = _manifestrevision(repo, pctx.manifestnode()) | ||
Gregory Szorc
|
r30337 | textpairs.append((pman, mtext)) | ||
# Load filelog revisions by iterating manifest delta. | ||||
man = ctx.manifest() | ||||
pman = ctx.p1().manifest() | ||||
for filename, change in pman.diff(man).items(): | ||||
fctx = repo.file(filename) | ||||
f1 = fctx.revision(change[0][0] or -1) | ||||
f2 = fctx.revision(change[1][0] or -1) | ||||
textpairs.append((f1, f2)) | ||||
else: | ||||
dp = r.deltaparent(rev) | ||||
textpairs.append((r.revision(dp), r.revision(rev))) | ||||
Gregory Szorc
|
r30307 | |||
Boris Feld
|
r35617 | withthreads = threads > 0 | ||
if not withthreads: | ||||
def d(): | ||||
for pair in textpairs: | ||||
Gregory Szorc
|
r36784 | if xdiff: | ||
mdiff.bdiff.xdiffblocks(*pair) | ||||
elif blocks: | ||||
mdiff.bdiff.blocks(*pair) | ||||
else: | ||||
mdiff.textdiff(*pair) | ||||
Boris Feld
|
r35617 | else: | ||
Gregory Szorc
|
r37863 | q = queue() | ||
Matt Harbison
|
r39847 | for i in _xrange(threads): | ||
Boris Feld
|
r35617 | q.put(None) | ||
ready = threading.Condition() | ||||
done = threading.Event() | ||||
Matt Harbison
|
r39847 | for i in _xrange(threads): | ||
Gregory Szorc
|
r36784 | threading.Thread(target=_bdiffworker, | ||
args=(q, blocks, xdiff, ready, done)).start() | ||||
Boris Feld
|
r35617 | q.join() | ||
def d(): | ||||
for pair in textpairs: | ||||
q.put(pair) | ||||
Matt Harbison
|
r39847 | for i in _xrange(threads): | ||
Boris Feld
|
r35617 | q.put(None) | ||
with ready: | ||||
ready.notify_all() | ||||
q.join() | ||||
Gregory Szorc
|
r30307 | timer, fm = gettimer(ui, opts) | ||
timer(d) | ||||
fm.end() | ||||
Boris Feld
|
r35617 | if withthreads: | ||
done.set() | ||||
Matt Harbison
|
r39847 | for i in _xrange(threads): | ||
Boris Feld
|
r35617 | q.put(None) | ||
with ready: | ||||
ready.notify_all() | ||||
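# Illustrative invocations (revision numbers are examples only):
#
#   $ hg perfbdiff -c 1000 --count 100                       # changelog deltas
#   $ hg perfbdiff -m 1000 --count 100 --threads 4 --blocks --xdiff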
Pulkit Goyal
|
r39398 | @command(b'perfunidiff', revlogopts + formatteropts + [ | ||
(b'', b'count', 1, b'number of revisions to test (when using --startrev)'), | ||||
(b'', b'alldata', False, b'test unidiffs for all associated revisions'), | ||||
], b'-c|-m|FILE REV') | ||||
Augie Fackler
|
r35879 | def perfunidiff(ui, repo, file_, rev=None, count=None, **opts): | ||
"""benchmark a unified diff between revisions | ||||
This doesn't include any copy tracing - it's just a unified diff | ||||
of the texts. | ||||
By default, benchmark a diff between the requested revision and its delta parent. | ||||
With ``--count``, benchmark diffs between delta parents and self for N | ||||
revisions starting at the specified revision. | ||||
With ``--alldata``, assume the requested revision is a changeset and | ||||
measure diffs for all changes related to that changeset (manifest | ||||
and filelogs). | ||||
""" | ||||
Matt Harbison
|
r39850 | opts = _byteskwargs(opts) | ||
Pulkit Goyal
|
r39398 | if opts[b'alldata']: | ||
opts[b'changelog'] = True | ||||
Augie Fackler
|
r35879 | |||
Pulkit Goyal
|
r39398 | if opts.get(b'changelog') or opts.get(b'manifest'): | ||
Augie Fackler
|
r35879 | file_, rev = None, file_ | ||
elif rev is None: | ||||
Pulkit Goyal
|
r39398 | raise error.CommandError(b'perfunidiff', b'invalid arguments') | ||
Augie Fackler
|
r35879 | |||
textpairs = [] | ||||
Pulkit Goyal
|
r39398 | r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts) | ||
Augie Fackler
|
r35879 | |||
startrev = r.rev(r.lookup(rev)) | ||||
for rev in range(startrev, min(startrev + count, len(r) - 1)): | ||||
Pulkit Goyal
|
r39398 | if opts[b'alldata']: | ||
Augie Fackler
|
r35879 | # Load revisions associated with changeset. | ||
ctx = repo[rev] | ||||
Gregory Szorc
|
r39355 | mtext = _manifestrevision(repo, ctx.manifestnode()) | ||
Augie Fackler
|
r35879 | for pctx in ctx.parents(): | ||
Gregory Szorc
|
r39355 | pman = _manifestrevision(repo, pctx.manifestnode()) | ||
Augie Fackler
|
r35879 | textpairs.append((pman, mtext)) | ||
# Load filelog revisions by iterating manifest delta. | ||||
man = ctx.manifest() | ||||
pman = ctx.p1().manifest() | ||||
for filename, change in pman.diff(man).items(): | ||||
fctx = repo.file(filename) | ||||
f1 = fctx.revision(change[0][0] or -1) | ||||
f2 = fctx.revision(change[1][0] or -1) | ||||
textpairs.append((f1, f2)) | ||||
else: | ||||
dp = r.deltaparent(rev) | ||||
textpairs.append((r.revision(dp), r.revision(rev))) | ||||
def d(): | ||||
for left, right in textpairs: | ||||
# The date strings don't matter, so we pass empty strings. | ||||
headerlines, hunks = mdiff.unidiff( | ||||
Pulkit Goyal
|
r39398 | left, b'', right, b'', b'left', b'right', binary=False) | ||
Augie Fackler
|
r35879 | # consume iterators in roughly the way patch.py does | ||
b'\n'.join(headerlines) | ||||
b''.join(sum((list(hlines) for hrange, hlines in hunks), [])) | ||||
timer, fm = gettimer(ui, opts) | ||||
timer(d) | ||||
fm.end() | ||||
Pulkit Goyal
|
r39398 | @command(b'perfdiffwd', formatteropts) | ||
Pierre-Yves David
|
r25494 | def perfdiffwd(ui, repo, **opts): | ||
Patrick Mezard
|
r9826 | """Profile diff of working directory changes""" | ||
Matt Harbison
|
r39850 | opts = _byteskwargs(opts) | ||
Pierre-Yves David
|
r25494 | timer, fm = gettimer(ui, opts) | ||
Patrick Mezard
|
r9826 | options = { | ||
Pulkit Goyal
|
r40250 | 'w': 'ignore_all_space', | ||
'b': 'ignore_space_change', | ||||
'B': 'ignore_blank_lines', | ||||
Patrick Mezard
|
r9826 | } | ||
Pulkit Goyal
|
r40250 | for diffopt in ('', 'w', 'b', 'B', 'wB'): | ||
Pulkit Goyal
|
r39398 | opts = dict((options[c], b'1') for c in diffopt) | ||
Patrick Mezard
|
r9826 | def d(): | ||
ui.pushbuffer() | ||||
commands.diff(ui, repo, **opts) | ||||
ui.popbuffer() | ||||
Pulkit Goyal
|
r40250 | diffopt = diffopt.encode('ascii') | ||
Pulkit Goyal
|
r39398 | title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none') | ||
Boris Feld
|
r40715 | timer(d, title=title) | ||
Pierre-Yves David
|
r23171 | fm.end() | ||
Patrick Mezard
|
r9826 | |||
Pulkit Goyal
|
r39398 | @command(b'perfrevlogindex', revlogopts + formatteropts, | ||
b'-c|-m|FILE') | ||||
Gregory Szorc
|
r32532 | def perfrevlogindex(ui, repo, file_=None, **opts): | ||
"""Benchmark operations against a revlog index. | ||||
This tests constructing a revlog instance, reading index data, | ||||
parsing index data, and performing various operations related to | ||||
index data. | ||||
""" | ||||
Matt Harbison
|
r39850 | opts = _byteskwargs(opts) | ||
Pulkit Goyal
|
r39398 | rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts) | ||
Gregory Szorc
|
r32532 | |||
opener = getattr(rl, 'opener') # trick linter | ||||
indexfile = rl.indexfile | ||||
data = opener.read(indexfile) | ||||
Pulkit Goyal
|
r39398 | header = struct.unpack(b'>I', data[0:4])[0] | ||
Gregory Szorc
|
r32532 | version = header & 0xFFFF | ||
if version == 1: | ||||
revlogio = revlog.revlogio() | ||||
inline = header & (1 << 16) | ||||
else: | ||||
Pulkit Goyal
|
r39398 | raise error.Abort((b'unsupported revlog version: %d') % version) | ||
Gregory Szorc
|
r32532 | |||
rllen = len(rl) | ||||
node0 = rl.node(0) | ||||
node25 = rl.node(rllen // 4) | ||||
node50 = rl.node(rllen // 2) | ||||
node75 = rl.node(rllen // 4 * 3) | ||||
node100 = rl.node(rllen - 1) | ||||
allrevs = range(rllen) | ||||
allrevsrev = list(reversed(allrevs)) | ||||
allnodes = [rl.node(rev) for rev in range(rllen)] | ||||
allnodesrev = list(reversed(allnodes)) | ||||
def constructor(): | ||||
revlog.revlog(opener, indexfile) | ||||
def read(): | ||||
with opener(indexfile) as fh: | ||||
fh.read() | ||||
def parseindex(): | ||||
revlogio.parseindex(data, inline) | ||||
def getentry(revornode): | ||||
index = revlogio.parseindex(data, inline)[0] | ||||
index[revornode] | ||||
def getentries(revs, count=1): | ||||
index = revlogio.parseindex(data, inline)[0] | ||||
for i in range(count): | ||||
for rev in revs: | ||||
index[rev] | ||||
def resolvenode(node): | ||||
nodemap = revlogio.parseindex(data, inline)[1] | ||||
# This only works for the C code. | ||||
if nodemap is None: | ||||
return | ||||
try: | ||||
nodemap[node] | ||||
except error.RevlogError: | ||||
pass | ||||
def resolvenodes(nodes, count=1): | ||||
nodemap = revlogio.parseindex(data, inline)[1] | ||||
if nodemap is None: | ||||
return | ||||
for i in range(count): | ||||
for node in nodes: | ||||
try: | ||||
nodemap[node] | ||||
except error.RevlogError: | ||||
pass | ||||
benches = [ | ||||
Pulkit Goyal
|
r39398 | (constructor, b'revlog constructor'), | ||
(read, b'read'), | ||||
(parseindex, b'create index object'), | ||||
(lambda: getentry(0), b'retrieve index entry for rev 0'), | ||||
(lambda: resolvenode(b'a' * 20), b'look up missing node'), | ||||
(lambda: resolvenode(node0), b'look up node at rev 0'), | ||||
(lambda: resolvenode(node25), b'look up node at 1/4 len'), | ||||
(lambda: resolvenode(node50), b'look up node at 1/2 len'), | ||||
(lambda: resolvenode(node75), b'look up node at 3/4 len'), | ||||
(lambda: resolvenode(node100), b'look up node at tip'), | ||||
Gregory Szorc
|
r32532 | # 2x variation is to measure caching impact. | ||
(lambda: resolvenodes(allnodes), | ||||
Pulkit Goyal
|
r39398 | b'look up all nodes (forward)'), | ||
Gregory Szorc
|
r32532 | (lambda: resolvenodes(allnodes, 2), | ||
Pulkit Goyal
|
r39398 | b'look up all nodes 2x (forward)'), | ||
Gregory Szorc
|
r32532 | (lambda: resolvenodes(allnodesrev), | ||
Pulkit Goyal
|
r39398 | b'look up all nodes (reverse)'), | ||
Gregory Szorc
|
r32532 | (lambda: resolvenodes(allnodesrev, 2), | ||
Pulkit Goyal
|
r39398 | b'look up all nodes 2x (reverse)'), | ||
Gregory Szorc
|
r32532 | (lambda: getentries(allrevs), | ||
Pulkit Goyal
|
r39398 | b'retrieve all index entries (forward)'), | ||
Gregory Szorc
|
r32532 | (lambda: getentries(allrevs, 2), | ||
Pulkit Goyal
|
r39398 | b'retrieve all index entries 2x (forward)'), | ||
Gregory Szorc
|
r32532 | (lambda: getentries(allrevsrev), | ||
Pulkit Goyal
|
r39398 | b'retrieve all index entries (reverse)'), | ||
Gregory Szorc
|
r32532 | (lambda: getentries(allrevsrev, 2), | ||
Pulkit Goyal
|
r39398 | b'retrieve all index entries 2x (reverse)'), | ||
Gregory Szorc
|
r32532 | ] | ||
for fn, title in benches: | ||||
timer, fm = gettimer(ui, opts) | ||||
timer(fn, title=title) | ||||
fm.end() | ||||
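# Illustrative invocations:
#
#   $ hg perfrevlogindex -c              # benchmark the changelog index
#   $ hg perfrevlogindex -m              # benchmark the manifest index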
Pulkit Goyal
|
r39398 | @command(b'perfrevlogrevisions', revlogopts + formatteropts + | ||
[(b'd', b'dist', 100, b'distance between the revisions'), | ||||
(b's', b'startrev', 0, b'revision to start reading at'), | ||||
(b'', b'reverse', False, b'read in reverse')], | ||||
b'-c|-m|FILE') | ||||
Gregory Szorc
|
r32531 | def perfrevlogrevisions(ui, repo, file_=None, startrev=0, reverse=False, | ||
**opts): | ||||
Gregory Szorc
|
r27492 | """Benchmark reading a series of revisions from a revlog. | ||
By default, we read every ``-d/--dist`` revision from 0 to tip of | ||||
the specified revlog. | ||||
Gregory Szorc
|
r27493 | |||
The start revision can be defined via ``-s/--startrev``. | ||||
Gregory Szorc
|
r27492 | """ | ||
Matt Harbison
|
r39850 | opts = _byteskwargs(opts) | ||
Pulkit Goyal
|
r39398 | rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts) | ||
Gregory Szorc
|
r32227 | rllen = getlen(ui)(rl) | ||
Gregory Szorc
|
r30017 | |||
Boris Feld
|
r40178 | if startrev < 0: | ||
startrev = rllen + startrev | ||||
Pradeepkumar Gayam
|
r11694 | def d(): | ||
Gregory Szorc
|
r32227 | rl.clearcaches() | ||
Gregory Szorc
|
r30017 | |||
Gregory Szorc
|
r32219 | beginrev = startrev | ||
Gregory Szorc
|
r32227 | endrev = rllen | ||
Pulkit Goyal
|
r39398 | dist = opts[b'dist'] | ||
Gregory Szorc
|
r30017 | |||
if reverse: | ||||
Boris Feld
|
r40573 | beginrev, endrev = endrev - 1, beginrev - 1 | ||
Gregory Szorc
|
r30017 | dist = -1 * dist | ||
Matt Harbison
|
r39847 | for x in _xrange(beginrev, endrev, dist): | ||
Gregory Szorc
|
r32297 | # Old revisions don't support passing int. | ||
n = rl.node(x) | ||||
rl.revision(n) | ||||
Pradeepkumar Gayam
|
r11694 | |||
Gregory Szorc
|
r32220 | timer, fm = gettimer(ui, opts) | ||
Pradeepkumar Gayam
|
r11694 | timer(d) | ||
Pierre-Yves David
|
r23171 | fm.end() | ||
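# Illustrative invocation (the distance and start revision are examples only):
#
#   $ hg perfrevlogrevisions -m --dist 10 --startrev 1000 --reverse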
Pradeepkumar Gayam
|
r11694 | |||
Boris Feld
|
r40583 | @command(b'perfrevlogwrite', revlogopts + formatteropts + | ||
[(b's', b'startrev', 1000, b'revision to start writing at'), | ||||
(b'', b'stoprev', -1, b'last revision to write'), | ||||
(b'', b'count', 3, b'number of runs to perform'), | ||||
Boris Feld
|
r40584 | (b'', b'details', False, b'print timing for every revisions tested'), | ||
Boris Feld
|
r40586 | (b'', b'source', b'full', b'the kind of data feed in the revlog'), | ||
Boris Feld
|
r40591 | (b'', b'lazydeltabase', True, b'try the provided delta first'), | ||
Boris Feld
|
r41013 | (b'', b'clear-caches', True, b'clear revlog cache between calls'), | ||
Boris Feld
|
r40583 | ], | ||
b'-c|-m|FILE') | ||||
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts): | ||||
"""Benchmark writing a series of revisions to a revlog. | ||||
Boris Feld
|
r40586 | |||
Possible source values are: | ||||
* `full`: add from a full text (default). | ||||
Boris Feld
|
r40587 | * `parent-1`: add from a delta to the first parent | ||
Boris Feld
|
r40588 | * `parent-2`: add from a delta to the second parent if it exists | ||
(use a delta from the first parent otherwise) | ||||
Boris Feld
|
r40589 | * `parent-smallest`: add from the smallest delta (either p1 or p2) | ||
Boris Feld
|
r40590 | * `storage`: add from the existing precomputed deltas | ||
Boris Feld
|
r40583 | """ | ||
opts = _byteskwargs(opts) | ||||
rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts) | ||||
rllen = getlen(ui)(rl) | ||||
if startrev < 0: | ||||
startrev = rllen + startrev | ||||
if stoprev < 0: | ||||
stoprev = rllen + stoprev | ||||
Boris Feld
|
r40591 | lazydeltabase = opts['lazydeltabase'] | ||
Boris Feld
|
r40586 | source = opts['source'] | ||
Boris Feld
|
r41038 | clearcaches = opts['clear_caches'] | ||
Boris Feld
|
r40590 | validsource = (b'full', b'parent-1', b'parent-2', b'parent-smallest', | ||
b'storage') | ||||
Boris Feld
|
r40586 | if source not in validsource: | ||
raise error.Abort('invalid source type: %s' % source) | ||||
Boris Feld
|
r40583 | ### actually gather results | ||
count = opts['count'] | ||||
if count <= 0: | ||||
raise error.Abort('invalid run count: %d' % count) | ||||
allresults = [] | ||||
for c in range(count): | ||||
Boris Feld
|
r40591 | timing = _timeonewrite(ui, rl, source, startrev, stoprev, c + 1, | ||
Boris Feld
|
r41013 | lazydeltabase=lazydeltabase, | ||
clearcaches=clearcaches) | ||||
Boris Feld
|
r40586 | allresults.append(timing) | ||
Boris Feld
|
r40583 | |||
### consolidate the results in a single list | ||||
results = [] | ||||
for idx, (rev, t) in enumerate(allresults[0]): | ||||
ts = [t] | ||||
for other in allresults[1:]: | ||||
orev, ot = other[idx] | ||||
assert orev == rev | ||||
ts.append(ot) | ||||
results.append((rev, ts)) | ||||
resultcount = len(results) | ||||
### Compute and display relevant statistics | ||||
# get a formatter | ||||
fm = ui.formatter(b'perf', opts) | ||||
displayall = ui.configbool(b"perf", b"all-timing", False) | ||||
Boris Feld
|
r40584 | # print individual details if requested | ||
if opts['details']: | ||||
for idx, item in enumerate(results, 1): | ||||
rev, data = item | ||||
title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev) | ||||
formatone(fm, data, title=title, displayall=displayall) | ||||
Boris Feld
|
r40583 | # sorts results by median time | ||
results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2]) | ||||
# list of (name, index) to display) | ||||
relevants = [ | ||||
("min", 0), | ||||
("10%", resultcount * 10 // 100), | ||||
("25%", resultcount * 25 // 100), | ||||
("50%", resultcount * 70 // 100), | ||||
("75%", resultcount * 75 // 100), | ||||
("90%", resultcount * 90 // 100), | ||||
("95%", resultcount * 95 // 100), | ||||
("99%", resultcount * 99 // 100), | ||||
Boris Feld
|
r40992 | ("99.9%", resultcount * 999 // 1000), | ||
("99.99%", resultcount * 9999 // 10000), | ||||
("99.999%", resultcount * 99999 // 100000), | ||||
Boris Feld
|
r40583 | ("max", -1), | ||
] | ||||
Boris Feld
|
r40585 | if not ui.quiet: | ||
for name, idx in relevants: | ||||
data = results[idx] | ||||
title = '%s of %d, rev %d' % (name, resultcount, data[0]) | ||||
formatone(fm, data[1], title=title, displayall=displayall) | ||||
Boris Feld
|
r40583 | |||
# XXX summing that many floats will not be very precise, we ignore this fact | ||||
# for now | ||||
totaltime = [] | ||||
for item in allresults: | ||||
totaltime.append((sum(x[1][0] for x in item), | ||||
sum(x[1][1] for x in item), | ||||
sum(x[1][2] for x in item),) | ||||
) | ||||
formatone(fm, totaltime, title="total time (%d revs)" % resultcount, | ||||
displayall=displayall) | ||||
fm.end() | ||||
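# Standalone, illustrative helper (not called by perfrevlogwrite): once the
# per-revision results are sorted by median timing, the percentile rows above
# are plain integer indexes into that list. The helper name is hypothetical.
def _percentileentry(results, percent):
    """Return the (rev, timings) entry at `percent`% of a median-sorted list."""
    idx = min(len(results) - 1, len(results) * percent // 100)
    return results[idx]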
class _faketr(object): | ||||
def add(s, x, y, z=None): | ||||
return None | ||||
Boris Feld
|
r40591 | def _timeonewrite(ui, orig, source, startrev, stoprev, runidx=None, | ||
Boris Feld
|
r41013 | lazydeltabase=True, clearcaches=True): | ||
Boris Feld
|
r40583 | timings = [] | ||
tr = _faketr() | ||||
with _temprevlog(ui, orig, startrev) as dest: | ||||
Boris Feld
|
r40591 | dest._lazydeltabase = lazydeltabase | ||
Boris Feld
|
r40583 | revs = list(orig.revs(startrev, stoprev)) | ||
total = len(revs) | ||||
topic = 'adding' | ||||
if runidx is not None: | ||||
topic += ' (run #%d)' % runidx | ||||
Martin von Zweigbergk
|
r41191 | # Support both old and new progress API | ||
if util.safehasattr(ui, 'makeprogress'): | ||||
progress = ui.makeprogress(topic, unit='revs', total=total) | ||||
def updateprogress(pos): | ||||
progress.update(pos) | ||||
def completeprogress(): | ||||
progress.complete() | ||||
else: | ||||
def updateprogress(pos): | ||||
ui.progress(topic, pos, unit='revs', total=total) | ||||
def completeprogress(): | ||||
ui.progress(topic, None, unit='revs', total=total) | ||||
Boris Feld
|
r40583 | for idx, rev in enumerate(revs): | ||
Martin von Zweigbergk
|
r41191 | updateprogress(idx) | ||
Boris Feld
|
r40586 | addargs, addkwargs = _getrevisionseed(orig, rev, tr, source) | ||
Boris Feld
|
r41013 | if clearcaches: | ||
dest.index.clearcaches() | ||||
dest.clearcaches() | ||||
Boris Feld
|
r40583 | with timeone() as r: | ||
dest.addrawrevision(*addargs, **addkwargs) | ||||
timings.append((rev, r[0])) | ||||
Martin von Zweigbergk
|
r41191 | updateprogress(total) | ||
completeprogress() | ||||
Boris Feld
|
r40583 | return timings | ||
Boris Feld
|
r40586 | def _getrevisionseed(orig, rev, tr, source): | ||
Boris Feld
|
r40588 | from mercurial.node import nullid | ||
Boris Feld
|
r40583 | linkrev = orig.linkrev(rev) | ||
node = orig.node(rev) | ||||
p1, p2 = orig.parents(node) | ||||
flags = orig.flags(rev) | ||||
cachedelta = None | ||||
Boris Feld
|
r40586 | text = None | ||
if source == b'full': | ||||
text = orig.revision(rev) | ||||
Boris Feld
|
r40587 | elif source == b'parent-1': | ||
baserev = orig.rev(p1) | ||||
cachedelta = (baserev, orig.revdiff(p1, rev)) | ||||
Boris Feld
|
r40588 | elif source == b'parent-2': | ||
parent = p2 | ||||
if p2 == nullid: | ||||
parent = p1 | ||||
baserev = orig.rev(parent) | ||||
cachedelta = (baserev, orig.revdiff(parent, rev)) | ||||
Boris Feld
|
r40589 | elif source == b'parent-smallest': | ||
p1diff = orig.revdiff(p1, rev) | ||||
parent = p1 | ||||
diff = p1diff | ||||
if p2 != nullid: | ||||
p2diff = orig.revdiff(p2, rev) | ||||
if len(p1diff) > len(p2diff): | ||||
parent = p2 | ||||
diff = p2diff | ||||
baserev = orig.rev(parent) | ||||
cachedelta = (baserev, diff) | ||||
Boris Feld
|
r40590 | elif source == b'storage': | ||
baserev = orig.deltaparent(rev) | ||||
cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev)) | ||||
Boris Feld
|
r40583 | |||
return ((text, tr, linkrev, p1, p2), | ||||
{'node': node, 'flags': flags, 'cachedelta': cachedelta}) | ||||
@contextlib.contextmanager | ||||
def _temprevlog(ui, orig, truncaterev): | ||||
from mercurial import vfs as vfsmod | ||||
if orig._inline: | ||||
raise error.Abort('not supporting inline revlog (yet)') | ||||
origindexpath = orig.opener.join(orig.indexfile) | ||||
origdatapath = orig.opener.join(orig.datafile) | ||||
indexname = 'revlog.i' | ||||
dataname = 'revlog.d' | ||||
tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-') | ||||
try: | ||||
# copy the data file in a temporary directory | ||||
ui.debug('copying data in %s\n' % tmpdir) | ||||
destindexpath = os.path.join(tmpdir, 'revlog.i') | ||||
destdatapath = os.path.join(tmpdir, 'revlog.d') | ||||
shutil.copyfile(origindexpath, destindexpath) | ||||
shutil.copyfile(origdatapath, destdatapath) | ||||
# remove the data we want to add again | ||||
ui.debug('truncating data to be rewritten\n') | ||||
with open(destindexpath, 'ab') as index: | ||||
index.seek(0) | ||||
index.truncate(truncaterev * orig._io.size) | ||||
with open(destdatapath, 'ab') as data: | ||||
data.seek(0) | ||||
data.truncate(orig.start(truncaterev)) | ||||
# instantiate a new revlog from the temporary copy | ||||
ui.debug('recreating revlog from the truncated copy\n') | ||||
vfs = vfsmod.vfs(tmpdir) | ||||
vfs.options = getattr(orig.opener, 'options', None) | ||||
dest = revlog.revlog(vfs, | ||||
indexfile=indexname, | ||||
datafile=dataname) | ||||
if dest._inline: | ||||
raise error.Abort('not supporting inline revlog (yet)') | ||||
# make sure internals are initialized | ||||
dest.revision(len(dest) - 1) | ||||
yield dest | ||||
del dest, vfs | ||||
finally: | ||||
shutil.rmtree(tmpdir, True) | ||||
Pulkit Goyal
|
r39398 | @command(b'perfrevlogchunks', revlogopts + formatteropts + | ||
[(b'e', b'engines', b'', b'compression engines to use'), | ||||
(b's', b'startrev', 0, b'revision to start at')], | ||||
b'-c|-m|FILE') | ||||
Gregory Szorc
|
r30796 | def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts): | ||
Gregory Szorc
|
r30451 | """Benchmark operations on revlog chunks. | ||
Logically, each revlog is a collection of fulltext revisions. However, | ||||
stored within each revlog are "chunks" of possibly compressed data. This | ||||
data needs to be read and decompressed or compressed and written. | ||||
This command measures the time it takes to read+decompress and recompress | ||||
chunks in a revlog. It effectively isolates I/O and compression performance. | ||||
For measurements of higher-level operations like resolving revisions, | ||||
Gregory Szorc
|
r32531 | see ``perfrevlogrevisions`` and ``perfrevlogrevision``. | ||
Gregory Szorc
|
r30451 | """ | ||
Matt Harbison
|
r39850 | opts = _byteskwargs(opts) | ||
Pulkit Goyal
|
r39398 | rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts) | ||
Gregory Szorc
|
r32224 | |||
# _chunkraw was renamed to _getsegmentforrevs. | ||||
try: | ||||
segmentforrevs = rl._getsegmentforrevs | ||||
except AttributeError: | ||||
segmentforrevs = rl._chunkraw | ||||
Gregory Szorc
|
r30796 | |||
# Verify engines argument. | ||||
if engines: | ||||
Pulkit Goyal
|
r39398 | engines = set(e.strip() for e in engines.split(b',')) | ||
Gregory Szorc
|
r30796 | for engine in engines: | ||
try: | ||||
util.compressionengines[engine] | ||||
except KeyError: | ||||
Pulkit Goyal
|
r39398 | raise error.Abort(b'unknown compression engine: %s' % engine) | ||
Gregory Szorc
|
r30796 | else: | ||
engines = [] | ||||
for e in util.compengines: | ||||
engine = util.compengines[e] | ||||
try: | ||||
if engine.available(): | ||||
Pulkit Goyal
|
r39398 | engine.revlogcompressor().compress(b'dummy') | ||
Gregory Szorc
|
r30796 | engines.append(e) | ||
except NotImplementedError: | ||||
pass | ||||
Gregory Szorc
|
r30451 | revs = list(rl.revs(startrev, len(rl) - 1)) | ||
def rlfh(rl): | ||||
if rl._inline: | ||||
return getsvfs(repo)(rl.indexfile) | ||||
else: | ||||
return getsvfs(repo)(rl.datafile) | ||||
def doread(): | ||||
rl.clearcaches() | ||||
for rev in revs: | ||||
Gregory Szorc
|
r32223 | segmentforrevs(rev, rev) | ||
Gregory Szorc
|
r30451 | |||
def doreadcachedfh(): | ||||
rl.clearcaches() | ||||
fh = rlfh(rl) | ||||
for rev in revs: | ||||
Gregory Szorc
|
r32223 | segmentforrevs(rev, rev, df=fh) | ||
Gregory Szorc
|
r30451 | |||
def doreadbatch(): | ||||
rl.clearcaches() | ||||
Gregory Szorc
|
r32223 | segmentforrevs(revs[0], revs[-1]) | ||
Gregory Szorc
|
r30451 | |||
def doreadbatchcachedfh(): | ||||
rl.clearcaches() | ||||
fh = rlfh(rl) | ||||
Gregory Szorc
|
r32223 | segmentforrevs(revs[0], revs[-1], df=fh) | ||
Gregory Szorc
|
r30451 | |||
def dochunk(): | ||||
rl.clearcaches() | ||||
fh = rlfh(rl) | ||||
for rev in revs: | ||||
rl._chunk(rev, df=fh) | ||||
chunks = [None] | ||||
def dochunkbatch(): | ||||
rl.clearcaches() | ||||
fh = rlfh(rl) | ||||
# Save chunks as a side-effect. | ||||
chunks[0] = rl._chunks(revs, df=fh) | ||||
Gregory Szorc
|
r30796 | def docompress(compressor): | ||
Gregory Szorc
|
r30451 | rl.clearcaches() | ||
Gregory Szorc
|
r30796 | |||
try: | ||||
# Swap in the requested compression engine. | ||||
oldcompressor = rl._compressor | ||||
rl._compressor = compressor | ||||
for chunk in chunks[0]: | ||||
rl.compress(chunk) | ||||
finally: | ||||
rl._compressor = oldcompressor | ||||
Gregory Szorc
|
r30451 | |||
benches = [ | ||||
Pulkit Goyal
|
r39398 | (lambda: doread(), b'read'), | ||
(lambda: doreadcachedfh(), b'read w/ reused fd'), | ||||
(lambda: doreadbatch(), b'read batch'), | ||||
(lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'), | ||||
(lambda: dochunk(), b'chunk'), | ||||
(lambda: dochunkbatch(), b'chunk batch'), | ||||
Gregory Szorc
|
r30451 | ] | ||
Gregory Szorc
|
r30796 | for engine in sorted(engines): | ||
compressor = util.compengines[engine].revlogcompressor() | ||||
benches.append((functools.partial(docompress, compressor), | ||||
Pulkit Goyal
|
r39398 | b'compress w/ %s' % engine)) | ||
Gregory Szorc
|
r30796 | |||
Gregory Szorc
|
r30451 | for fn, title in benches: | ||
timer, fm = gettimer(ui, opts) | ||||
timer(fn, title=title) | ||||
fm.end() | ||||
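# Illustrative invocation (available engine names depend on the Mercurial
# build; zlib is always present):
#
#   $ hg perfrevlogchunks -c --engines zlib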
Pulkit Goyal
|
r39398 | @command(b'perfrevlogrevision', revlogopts + formatteropts + | ||
[(b'', b'cache', False, b'use caches instead of clearing')], | ||||
b'-c|-m|FILE REV') | ||||
Gregory Szorc
|
r27470 | def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts): | ||
"""Benchmark obtaining a revlog revision. | ||||
Obtaining a revlog revision consists of roughly the following steps: | ||||
1. Compute the delta chain | ||||
Boris Feld
|
r40567 | 2. Slice the delta chain if applicable | ||
3. Obtain the raw chunks for that delta chain | ||||
4. Decompress each raw chunk | ||||
5. Apply binary patches to obtain fulltext | ||||
6. Verify hash of fulltext | ||||
Gregory Szorc
|
r27470 | |||
This command measures the time spent in each of these phases. | ||||
""" | ||||
Matt Harbison
|
r39850 | opts = _byteskwargs(opts) | ||
Pulkit Goyal
|
r39398 | if opts.get(b'changelog') or opts.get(b'manifest'): | ||
Gregory Szorc
|
r27470 | file_, rev = None, file_ | ||
elif rev is None: | ||||
Pulkit Goyal
|
r39398 | raise error.CommandError(b'perfrevlogrevision', b'invalid arguments') | ||
Gregory Szorc
|
r27470 | |||
Pulkit Goyal
|
r39398 | r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts) | ||
Gregory Szorc
|
r32224 | |||
# _chunkraw was renamed to _getsegmentforrevs. | ||||
try: | ||||
segmentforrevs = r._getsegmentforrevs | ||||
except AttributeError: | ||||
segmentforrevs = r._chunkraw | ||||
Gregory Szorc
|
r27470 | node = r.lookup(rev) | ||
rev = r.rev(node) | ||||
Gregory Szorc
|
r30882 | def getrawchunks(data, chain): | ||
start = r.start | ||||
length = r.length | ||||
inline = r._inline | ||||
iosize = r._io.size | ||||
buffer = util.buffer | ||||
chunks = [] | ||||
ladd = chunks.append | ||||
Boris Feld
|
r40566 | for idx, item in enumerate(chain): | ||
offset = start(item[0]) | ||||
bits = data[idx] | ||||
for rev in item: | ||||
chunkstart = start(rev) | ||||
if inline: | ||||
chunkstart += (rev + 1) * iosize | ||||
chunklength = length(rev) | ||||
ladd(buffer(bits, chunkstart - offset, chunklength)) | ||||
Gregory Szorc
|
r30882 | |||
return chunks | ||||
Gregory Szorc
|
r27470 | def dodeltachain(rev): | ||
if not cache: | ||||
r.clearcaches() | ||||
r._deltachain(rev) | ||||
def doread(chain): | ||||
if not cache: | ||||
r.clearcaches() | ||||
Boris Feld
|
r40566 | for item in slicedchain: | ||
segmentforrevs(item[0], item[-1]) | ||||
Gregory Szorc
|
r27470 | |||
Boris Feld
|
r40567 | def doslice(r, chain, size): | ||
for s in slicechunk(r, chain, targetsize=size): | ||||
pass | ||||
Gregory Szorc
|
r30882 | def dorawchunks(data, chain): | ||
Gregory Szorc
|
r27470 | if not cache: | ||
r.clearcaches() | ||||
Gregory Szorc
|
r30882 | getrawchunks(data, chain) | ||
Gregory Szorc
|
r27470 | |||
Gregory Szorc
|
r30882 | def dodecompress(chunks): | ||
decomp = r.decompress | ||||
for chunk in chunks: | ||||
decomp(chunk) | ||||
Gregory Szorc
|
r27470 | |||
def dopatch(text, bins): | ||||
if not cache: | ||||
r.clearcaches() | ||||
mdiff.patches(text, bins) | ||||
def dohash(text): | ||||
if not cache: | ||||
r.clearcaches() | ||||
Remi Chaintron
|
r30584 | r.checkhash(text, node, rev=rev) | ||
Gregory Szorc
|
r27470 | |||
def dorevision(): | ||||
if not cache: | ||||
r.clearcaches() | ||||
r.revision(node) | ||||
Boris Feld
|
r40566 | try: | ||
from mercurial.revlogutils.deltas import slicechunk | ||||
except ImportError: | ||||
slicechunk = getattr(revlog, '_slicechunk', None) | ||||
size = r.length(rev) | ||||
Gregory Szorc
|
r27470 | chain = r._deltachain(rev)[0] | ||
Boris Feld
|
r40566 | if not getattr(r, '_withsparseread', False): | ||
slicedchain = (chain,) | ||||
else: | ||||
slicedchain = tuple(slicechunk(r, chain, targetsize=size)) | ||||
data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain] | ||||
rawchunks = getrawchunks(data, slicedchain) | ||||
Gregory Szorc
|
r27470 | bins = r._chunks(chain) | ||
Pulkit Goyal
|
r40250 | text = bytes(bins[0]) | ||
Gregory Szorc
|
r27470 | bins = bins[1:] | ||
text = mdiff.patches(text, bins) | ||||
benches = [ | ||||
Pulkit Goyal
|
r39398 | (lambda: dorevision(), b'full'), | ||
(lambda: dodeltachain(rev), b'deltachain'), | ||||
(lambda: doread(chain), b'read'), | ||||
Boris Feld
|
r40567 | ] | ||
if getattr(r, '_withsparseread', False): | ||||
slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain') | ||||
benches.append(slicing) | ||||
benches.extend([ | ||||
Boris Feld
|
r40566 | (lambda: dorawchunks(data, slicedchain), b'rawchunks'), | ||
Pulkit Goyal
|
r39398 | (lambda: dodecompress(rawchunks), b'decompress'), | ||
(lambda: dopatch(text, bins), b'patch'), | ||||
(lambda: dohash(text), b'hash'), | ||||
Boris Feld
|
r40567 | ]) | ||
Gregory Szorc
|
r27470 | |||
Boris Feld
|
r40565 | timer, fm = gettimer(ui, opts) | ||
Gregory Szorc
|
r27470 | for fn, title in benches: | ||
timer(fn, title=title) | ||||
Boris Feld
|
r40565 | fm.end() | ||
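# Illustrative invocations (the revision number is an example only):
#
#   $ hg perfrevlogrevision -m 10000             # clear caches between runs
#   $ hg perfrevlogrevision -m 10000 --cache     # keep caches warm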
Gregory Szorc
|
r27470 | |||
Pulkit Goyal
|
r39398 | @command(b'perfrevset', | ||
[(b'C', b'clear', False, b'clear volatile cache between each call.'), | ||||
(b'', b'contexts', False, b'obtain changectx for each revision')] | ||||
+ formatteropts, b"REVSET") | ||||
Gregory Szorc
|
r27072 | def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts): | ||
Pierre-Yves David
|
r18239 | """benchmark the execution time of a revset | ||
Mads Kiilerich
|
r18644 | Use the --clear option if you need to evaluate the impact of building the | ||
Pierre-Yves David
|
r18239 | volatile revision set caches on revset execution. Volatile caches hold | ||
filtered and obsolescence related caches.""" | ||||
Matt Harbison
|
r39850 | opts = _byteskwargs(opts) | ||
Pierre-Yves David
|
r25494 | timer, fm = gettimer(ui, opts) | ||
Siddharth Agarwal
|
r18062 | def d(): | ||
Pierre-Yves David
|
r18239 | if clear: | ||
repo.invalidatevolatilesets() | ||||
Gregory Szorc
|
r27072 | if contexts: | ||
for ctx in repo.set(expr): pass | ||||
else: | ||||
for r in repo.revs(expr): pass | ||||
Siddharth Agarwal
|
r18062 | timer(d) | ||
Pierre-Yves David
|
r23171 | fm.end() | ||
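# Illustrative invocations (the revsets are examples only):
#
#   $ hg perfrevset 'ancestors(tip)'
#   $ hg perfrevset --clear --contexts 'draft()'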
Pierre-Yves David
|
r18240 | |||
Pulkit Goyal
|
r39398 | @command(b'perfvolatilesets', | ||
[(b'', b'clear-obsstore', False, b'drop obsstore between each call.'), | ||||
] + formatteropts) | ||||
Pierre-Yves David
|
r25494 | def perfvolatilesets(ui, repo, *names, **opts): | ||
Pierre-Yves David
|
r18240 | """benchmark the computation of various volatile set | ||
Volatile set computes element related to filtering and obsolescence.""" | ||||
Matt Harbison
|
r39850 | opts = _byteskwargs(opts) | ||
Pierre-Yves David
|
r25494 | timer, fm = gettimer(ui, opts) | ||
Pierre-Yves David
|
r18240 | repo = repo.unfiltered() | ||
def getobs(name): | ||||
def d(): | ||||
repo.invalidatevolatilesets() | ||||
Pulkit Goyal
|
r39398 | if opts[b'clear_obsstore']: | ||
clearfilecache(repo, b'obsstore') | ||||
Pierre-Yves David
|
r18240 | obsolete.getrevs(repo, name) | ||
return d | ||||
Pierre-Yves David
|
r18241 | allobs = sorted(obsolete.cachefuncs) | ||
if names: | ||||
allobs = [n for n in allobs if n in names] | ||||
for name in allobs: | ||||
Pierre-Yves David
|
r18240 | timer(getobs(name), title=name) | ||
def getfiltered(name): | ||||
def d(): | ||||
repo.invalidatevolatilesets() | ||||
Pulkit Goyal
|
r39398 | if opts[b'clear_obsstore']: | ||
clearfilecache(repo, b'obsstore') | ||||
Pierre-Yves David
|
r20205 | repoview.filterrevs(repo, name) | ||
Pierre-Yves David
|
r18240 | return d | ||
Pierre-Yves David
|
r18241 | allfilter = sorted(repoview.filtertable) | ||
if names: | ||||
allfilter = [n for n in allfilter if n in names] | ||||
for name in allfilter: | ||||
Pierre-Yves David
|
r18240 | timer(getfiltered(name), title=name) | ||
Pierre-Yves David
|
r23171 | fm.end() | ||
Pierre-Yves David
|
r18304 | |||
Pulkit Goyal
|
r39398 | @command(b'perfbranchmap', | ||
[(b'f', b'full', False, | ||||
b'Includes build time of subset'), | ||||
(b'', b'clear-revbranch', False, | ||||
b'purge the revbranch cache between computation'), | ||||
] + formatteropts) | ||||
Boris Feld
|
r36380 | def perfbranchmap(ui, repo, *filternames, **opts): | ||
Pierre-Yves David
|
r18304 | """benchmark the update of a branchmap | ||
This benchmarks the full repo.branchmap() call with read and write disabled | ||||
""" | ||||
Matt Harbison
|
r39850 | opts = _byteskwargs(opts) | ||
Pulkit Goyal
|
r39398 | full = opts.get(b"full", False) | ||
clear_revbranch = opts.get(b"clear_revbranch", False) | ||||
Pierre-Yves David
|
r25494 | timer, fm = gettimer(ui, opts) | ||
Pierre-Yves David
|
r18304 | def getbranchmap(filtername): | ||
"""generate a benchmark function for the filtername""" | ||||
if filtername is None: | ||||
view = repo | ||||
else: | ||||
view = repo.filtered(filtername) | ||||
def d(): | ||||
r32710 | if clear_revbranch: | |||
repo.revbranchcache()._clear() | ||||
Pierre-Yves David
|
r18304 | if full: | ||
view._branchcaches.clear() | ||||
else: | ||||
view._branchcaches.pop(filtername, None) | ||||
view.branchmap() | ||||
return d | ||||
# add filter in smaller subset to bigger subset | ||||
possiblefilters = set(repoview.filtertable) | ||||
Boris Feld
|
r36380 | if filternames: | ||
possiblefilters &= set(filternames) | ||||
FUJIWARA Katsunori
|
r30144 | subsettable = getbranchmapsubsettable() | ||
Pierre-Yves David
|
r18304 | allfilters = [] | ||
while possiblefilters: | ||||
for name in possiblefilters: | ||||
FUJIWARA Katsunori
|
r30144 | subset = subsettable.get(name) | ||
Pierre-Yves David
|
r18304 | if subset not in possiblefilters: | ||
break | ||||
else: | ||||
Pulkit Goyal
|
r39398 | assert False, b'subset cycle %s!' % possiblefilters | ||
Pierre-Yves David
|
r18304 | allfilters.append(name) | ||
possiblefilters.remove(name) | ||||
# warm the cache | ||||
if not full: | ||||
for name in allfilters: | ||||
repo.filtered(name).branchmap() | ||||
Pulkit Goyal
|
r39398 | if not filternames or b'unfiltered' in filternames: | ||
Boris Feld
|
r36380 | # add unfiltered | ||
allfilters.append(None) | ||||
FUJIWARA Katsunori
|
r30145 | |||
Martijn Pieters
|
r41706 | if util.safehasattr(branchmap.branchcache, 'fromfile'): | ||
branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile') | ||||
branchcacheread.set(classmethod(lambda *args: None)) | ||||
else: | ||||
# older versions | ||||
branchcacheread = safeattrsetter(branchmap, b'read') | ||||
branchcacheread.set(lambda *args: None) | ||||
Pulkit Goyal
|
r39398 | branchcachewrite = safeattrsetter(branchmap.branchcache, b'write') | ||
Martijn Pieters
|
r41706 | branchcachewrite.set(lambda *args: None) | ||
Pierre-Yves David
|
r18304 | try: | ||
for name in allfilters: | ||||
Boris Feld
|
r36379 | printname = name | ||
if name is None: | ||||
Pulkit Goyal
|
r39398 | printname = b'unfiltered' | ||
Boris Feld
|
r36379 | timer(getbranchmap(name), title=str(printname)) | ||
Pierre-Yves David
|
r18304 | finally: | ||
FUJIWARA Katsunori
|
r30145 | branchcacheread.restore() | ||
branchcachewrite.restore() | ||||
Pierre-Yves David
|
r23171 | fm.end() | ||
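# Illustrative invocations (filter names come from repoview.filtertable;
# 'visible' and 'served' are common examples):
#
#   $ hg perfbranchmap visible served
#   $ hg perfbranchmap --full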
Pierre-Yves David
|
r23485 | |||
Boris Feld
|
r40804 | @command(b'perfbranchmapupdate', [ | ||
(b'', b'base', [], b'subset of revision to start from'), | ||||
(b'', b'target', [], b'subset of revision to end with'), | ||||
Boris Feld
|
r40808 | (b'', b'clear-caches', False, b'clear cache between each runs') | ||
Boris Feld
|
r40804 | ] + formatteropts) | ||
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts): | ||||
"""benchmark branchmap update from for <base> revs to <target> revs | ||||
Boris Feld
|
r40808 | If `--clear-caches` is passed, the following items will be reset before | ||
each update: | ||||
* the changelog instance and associated indexes | ||||
* the rev-branch-cache instance | ||||
Boris Feld
|
r40804 | Examples: | ||
# update for the one last revision | ||||
$ hg perfbranchmapupdate --base 'not tip' --target 'tip' | ||||
$ update for change coming with a new branch | ||||
$ hg perfbranchmapupdate --base 'stable' --target 'default' | ||||
""" | ||||
from mercurial import branchmap | ||||
Boris Feld
|
r40806 | from mercurial import repoview | ||
Boris Feld
|
r40804 | opts = _byteskwargs(opts) | ||
timer, fm = gettimer(ui, opts) | ||||
Boris Feld
|
r40808 | clearcaches = opts[b'clear_caches'] | ||
Boris Feld
|
r40806 | unfi = repo.unfiltered() | ||
Boris Feld
|
r40804 | x = [None] # used to pass data between closure | ||
# we use a `list` here to avoid possible side effect from smartset | ||||
baserevs = list(scmutil.revrange(repo, base)) | ||||
targetrevs = list(scmutil.revrange(repo, target)) | ||||
if not baserevs: | ||||
raise error.Abort(b'no revisions selected for --base') | ||||
if not targetrevs: | ||||
raise error.Abort(b'no revisions selected for --target') | ||||
# make sure the target branchmap also contains the one in the base | ||||
targetrevs = list(set(baserevs) | set(targetrevs)) | ||||
targetrevs.sort() | ||||
cl = repo.changelog | ||||
allbaserevs = list(cl.ancestors(baserevs, inclusive=True)) | ||||
allbaserevs.sort() | ||||
alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True)) | ||||
newrevs = list(alltargetrevs.difference(allbaserevs)) | ||||
newrevs.sort() | ||||
Boris Feld
|
r40806 | allrevs = frozenset(unfi.changelog.revs()) | ||
basefilterrevs = frozenset(allrevs.difference(allbaserevs)) | ||||
targetfilterrevs = frozenset(allrevs.difference(alltargetrevs)) | ||||
def basefilter(repo, visibilityexceptions=None): | ||||
return basefilterrevs | ||||
def targetfilter(repo, visibilityexceptions=None): | ||||
return targetfilterrevs | ||||
Boris Feld
|
r40804 | msg = b'benchmark of branchmap with %d revisions with %d new ones\n' | ||
ui.status(msg % (len(allbaserevs), len(newrevs))) | ||||
Boris Feld
|
r40806 | if targetfilterrevs: | ||
msg = b'(%d revisions still filtered)\n' | ||||
ui.status(msg % len(targetfilterrevs)) | ||||
Boris Feld
|
r40804 | |||
Boris Feld
|
r40806 | try: | ||
repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter | ||||
repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter | ||||
baserepo = repo.filtered(b'__perf_branchmap_update_base') | ||||
targetrepo = repo.filtered(b'__perf_branchmap_update_target') | ||||
Boris Feld
|
r40807 | # try to find an existing branchmap to reuse | ||
subsettable = getbranchmapsubsettable() | ||||
candidatefilter = subsettable.get(None) | ||||
while candidatefilter is not None: | ||||
candidatebm = repo.filtered(candidatefilter).branchmap() | ||||
if candidatebm.validfor(baserepo): | ||||
filtered = repoview.filterrevs(repo, candidatefilter) | ||||
missing = [r for r in allbaserevs if r in filtered] | ||||
base = candidatebm.copy() | ||||
base.update(baserepo, missing) | ||||
break | ||||
candidatefilter = subsettable.get(candidatefilter) | ||||
else: | ||||
# no suitable subset was found | ||||
base = branchmap.branchcache() | ||||
base.update(baserepo, allbaserevs) | ||||
Boris Feld
|
r40804 | |||
Boris Feld
|
r40805 | def setup(): | ||
x[0] = base.copy() | ||||
Boris Feld
|
r40808 | if clearcaches: | ||
unfi._revbranchcache = None | ||||
clearchangelog(repo) | ||||
Boris Feld
|
r40804 | |||
Boris Feld
|
r40805 | def bench(): | ||
Boris Feld
|
r40806 | x[0].update(targetrepo, newrevs) | ||
Boris Feld
|
r40805 | |||
timer(bench, setup=setup) | ||||
fm.end() | ||||
Boris Feld
|
r40806 | finally: | ||
repoview.filtertable.pop(b'__perf_branchmap_update_base', None) | ||||
repoview.filtertable.pop(b'__perf_branchmap_update_target', None) | ||||
Boris Feld
|
r40804 | |||
Pulkit Goyal
|
r39398 | @command(b'perfbranchmapload', [ | ||
(b'f', b'filter', b'', b'Specify repoview filter'), | ||||
(b'', b'list', False, b'List branchmap filter caches'), | ||||
Boris Feld
|
r40738 | (b'', b'clear-revlogs', False, b'refresh changelog and manifest'), | ||
Martijn Pieters
|
r39150 | ] + formatteropts) | ||
Boris Feld
|
r40735 | def perfbranchmapload(ui, repo, filter=b'', list=False, **opts): | ||
Martijn Pieters
|
r39150 | """benchmark reading the branchmap""" | ||
Matt Harbison
|
r39850 | opts = _byteskwargs(opts) | ||
Boris Feld
|
r40738 | clearrevlogs = opts[b'clear_revlogs'] | ||
Matt Harbison
|
r39850 | |||
Martijn Pieters
|
r39150 | if list: | ||
for name, kind, st in repo.cachevfs.readdir(stat=True): | ||||
Pulkit Goyal
|
r39398 | if name.startswith(b'branch2'): | ||
filtername = name.partition(b'-')[2] or b'unfiltered' | ||||
ui.status(b'%s - %s\n' | ||||
Martijn Pieters
|
r39150 | % (filtername, util.bytecount(st.st_size))) | ||
return | ||||
Boris Feld
|
r40756 | if not filter: | ||
filter = None | ||||
subsettable = getbranchmapsubsettable() | ||||
if filter is None: | ||||
repo = repo.unfiltered() | ||||
else: | ||||
Martijn Pieters
|
r39150 | repo = repoview.repoview(repo, filter) | ||
Boris Feld
|
r40755 | |||
repo.branchmap() # make sure we have a relevant, up to date branchmap | ||||
Martijn Pieters
|
r41706 | try: | ||
fromfile = branchmap.branchcache.fromfile | ||||
except AttributeError: | ||||
# older versions | ||||
fromfile = branchmap.read | ||||
Boris Feld
|
r40756 | currentfilter = filter | ||
Martijn Pieters
|
r39150 | # try once without timer, the filter may not be cached | ||
Martijn Pieters
|
r41706 | while fromfile(repo) is None: | ||
Boris Feld
|
r40756 | currentfilter = subsettable.get(currentfilter) | ||
if currentfilter is None: | ||||
raise error.Abort(b'No branchmap cached for %s repo' | ||||
% (filter or b'unfiltered')) | ||||
repo = repo.filtered(currentfilter) | ||||
Martijn Pieters
|
r39150 | timer, fm = gettimer(ui, opts) | ||
Boris Feld
|
r40738 | def setup(): | ||
if clearrevlogs: | ||||
clearchangelog(repo) | ||||
Boris Feld
|
r40736 | def bench(): | ||
Martijn Pieters
|
r41706 | fromfile(repo) | ||
Boris Feld
|
r40738 | timer(bench, setup=setup) | ||
Martijn Pieters
|
r39150 | fm.end() | ||
Pulkit Goyal
|
r39398 | @command(b'perfloadmarkers') | ||
Pierre-Yves David
|
r23485 | def perfloadmarkers(ui, repo): | ||
"""benchmark the time to parse the on-disk markers for a repo | ||||
Result is the number of markers in the repo.""" | ||||
timer, fm = gettimer(ui) | ||||
FUJIWARA Katsunori
|
r30146 | svfs = getsvfs(repo) | ||
timer(lambda: len(obsolete.obsstore(svfs))) | ||||
Pierre-Yves David
|
r23485 | fm.end() | ||
Gregory Szorc
|
r27286 | |||
Pulkit Goyal
|
r39398 | @command(b'perflrucachedict', formatteropts + | ||
Gregory Szorc
|
r39604 | [(b'', b'costlimit', 0, b'maximum total cost of items in cache'), | ||
(b'', b'mincost', 0, b'smallest cost of items in cache'), | ||||
(b'', b'maxcost', 100, b'maximum cost of items in cache'), | ||||
(b'', b'size', 4, b'size of cache'), | ||||
Pulkit Goyal
|
r39398 | (b'', b'gets', 10000, b'number of key lookups'), | ||
(b'', b'sets', 10000, b'number of key sets'), | ||||
(b'', b'mixed', 10000, b'number of mixed mode operations'), | ||||
(b'', b'mixedgetfreq', 50, b'frequency of get vs set ops in mixed mode')], | ||||
Gregory Szorc
|
r27286 | norepo=True) | ||
Gregory Szorc
|
r39604 | def perflrucache(ui, mincost=0, maxcost=100, costlimit=0, size=4, | ||
gets=10000, sets=10000, mixed=10000, mixedgetfreq=50, **opts): | ||||
Matt Harbison
|
r39850 | opts = _byteskwargs(opts) | ||
Gregory Szorc
|
r27286 | def doinit(): | ||
Matt Harbison
|
r39847 | for i in _xrange(10000): | ||
Gregory Szorc
|
r27286 | util.lrucachedict(size) | ||
Gregory Szorc
|
r39604 | costrange = list(range(mincost, maxcost + 1)) | ||
Gregory Szorc
|
r27286 | values = [] | ||
Matt Harbison
|
r39847 | for i in _xrange(size): | ||
Matt Harbison
|
r39848 | values.append(random.randint(0, _maxint)) | ||
Gregory Szorc
|
r27286 | |||
# Get mode fills the cache and tests raw lookup performance with no | ||||
# eviction. | ||||
getseq = [] | ||||
Matt Harbison
|
r39847 | for i in _xrange(gets): | ||
Gregory Szorc
|
r27286 | getseq.append(random.choice(values)) | ||
def dogets(): | ||||
d = util.lrucachedict(size) | ||||
for v in values: | ||||
d[v] = v | ||||
for key in getseq: | ||||
value = d[key] | ||||
value # silence pyflakes warning | ||||
Gregory Szorc
|
r39604 | def dogetscost(): | ||
d = util.lrucachedict(size, maxcost=costlimit) | ||||
for i, v in enumerate(values): | ||||
d.insert(v, v, cost=costs[i]) | ||||
for key in getseq: | ||||
try: | ||||
value = d[key] | ||||
value # silence pyflakes warning | ||||
except KeyError: | ||||
pass | ||||
Gregory Szorc
|
r27286 | # Set mode tests insertion speed with cache eviction. | ||
setseq = [] | ||||
Gregory Szorc
|
r39604 | costs = [] | ||
Matt Harbison
|
r39847 | for i in _xrange(sets): | ||
Matt Harbison
|
r39848 | setseq.append(random.randint(0, _maxint)) | ||
Gregory Szorc
|
r39604 | costs.append(random.choice(costrange)) | ||
Gregory Szorc
|
r27286 | |||
Gregory Szorc
|
r39603 | def doinserts(): | ||
d = util.lrucachedict(size) | ||||
for v in setseq: | ||||
d.insert(v, v) | ||||
Gregory Szorc
|
r39604 | def doinsertscost(): | ||
d = util.lrucachedict(size, maxcost=costlimit) | ||||
for i, v in enumerate(setseq): | ||||
d.insert(v, v, cost=costs[i]) | ||||
Gregory Szorc
|
r27286 | def dosets(): | ||
d = util.lrucachedict(size) | ||||
for v in setseq: | ||||
d[v] = v | ||||
# Mixed mode randomly performs gets and sets with eviction. | ||||
mixedops = [] | ||||
Matt Harbison
|
r39847 | for i in _xrange(mixed): | ||
Gregory Szorc
|
r27286 | r = random.randint(0, 100) | ||
if r < mixedgetfreq: | ||||
op = 0 | ||||
else: | ||||
op = 1 | ||||
Gregory Szorc
|
r39604 | mixedops.append((op, | ||
random.randint(0, size * 2), | ||||
random.choice(costrange))) | ||||
Gregory Szorc
|
r27286 | |||
def domixed(): | ||||
d = util.lrucachedict(size) | ||||
Gregory Szorc
|
r39604 | for op, v, cost in mixedops: | ||
Gregory Szorc
|
r27286 | if op == 0: | ||
try: | ||||
d[v] | ||||
except KeyError: | ||||
pass | ||||
else: | ||||
d[v] = v | ||||
Gregory Szorc
|
r39604 | def domixedcost(): | ||
d = util.lrucachedict(size, maxcost=costlimit) | ||||
for op, v, cost in mixedops: | ||||
if op == 0: | ||||
try: | ||||
d[v] | ||||
except KeyError: | ||||
pass | ||||
else: | ||||
d.insert(v, v, cost=cost) | ||||
Gregory Szorc
|
r27286 | benches = [ | ||
Pulkit Goyal
|
r39398 | (doinit, b'init'), | ||
Gregory Szorc
|
r27286 | ] | ||
Gregory Szorc
|
r39604 | if costlimit: | ||
benches.extend([ | ||||
(dogetscost, b'gets w/ cost limit'), | ||||
(doinsertscost, b'inserts w/ cost limit'), | ||||
(domixedcost, b'mixed w/ cost limit'), | ||||
]) | ||||
else: | ||||
benches.extend([ | ||||
(dogets, b'gets'), | ||||
(doinserts, b'inserts'), | ||||
(dosets, b'sets'), | ||||
(domixed, b'mixed') | ||||
]) | ||||
Gregory Szorc
|
r27286 | for fn, title in benches: | ||
timer, fm = gettimer(ui, opts) | ||||
timer(fn, title=title) | ||||
fm.end() | ||||
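# Illustrative invocations (all numbers are examples only):
#
#   $ hg perflrucachedict --size 4 --gets 10000 --sets 10000
#   $ hg perflrucachedict --size 4 --costlimit 100 --mixed 10000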
FUJIWARA Katsunori
|
r29495 | |||
Pulkit Goyal
|
r39398 | @command(b'perfwrite', formatteropts) | ||
Simon Farnsworth
|
r30977 | def perfwrite(ui, repo, **opts): | ||
"""microbenchmark ui.write | ||||
""" | ||||
Matt Harbison
|
r39850 | opts = _byteskwargs(opts) | ||
Simon Farnsworth
|
r30977 | timer, fm = gettimer(ui, opts) | ||
def write(): | ||||
for i in range(100000): | ||||
Pulkit Goyal
|
r39398 | ui.write((b'Testing write performance\n')) | ||
Simon Farnsworth
|
r30977 | timer(write) | ||
fm.end() | ||||
FUJIWARA Katsunori
|
r29495 | def uisetup(ui): | ||
Pulkit Goyal
|
r39398 | if (util.safehasattr(cmdutil, b'openrevlog') and | ||
not util.safehasattr(commands, b'debugrevlogopts')): | ||||
FUJIWARA Katsunori
|
r29495 | # for "historical portability": | ||
# In this case, Mercurial should be 1.9 (or a79fea6b3e77) - | ||||
# 3.7 (or 5606f7d0d063). Therefore, '--dir' option for | ||||
# openrevlog() should cause failure, because it has been | ||||
# available since 3.5 (or 49c583ca48c4). | ||||
def openrevlog(orig, repo, cmd, file_, opts): | ||||
Pulkit Goyal
|
r39398 | if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'): | ||
raise error.Abort(b"This version doesn't support --dir option", | ||||
hint=b"use 3.5 or later") | ||||
FUJIWARA Katsunori
|
r29495 | return orig(repo, cmd, file_, opts) | ||
Pulkit Goyal
|
r39398 | extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog) | ||
Gregory Szorc
|
r40961 | |||
@command(b'perfprogress', formatteropts + [ | ||||
(b'', b'topic', b'topic', b'topic for progress messages'), | ||||
(b'c', b'total', 1000000, b'total value we are progressing to'), | ||||
], norepo=True) | ||||
def perfprogress(ui, topic=None, total=None, **opts): | ||||
"""printing of progress bars""" | ||||
opts = _byteskwargs(opts) | ||||
timer, fm = gettimer(ui, opts) | ||||
def doprogress(): | ||||
with ui.makeprogress(topic, total=total) as progress: | ||||
for i in pycompat.xrange(total): | ||||
progress.increment() | ||||
timer(doprogress) | ||||
fm.end() | ||||