##// END OF EJS Templates
perf: add an option to profile the benchmark section...
marmoute -
r42552:3293086f default
parent child Browse files
Show More
@@ -18,6 +18,10 b' Configurations'
18 18 ``pre-run``
19 19 number of runs to perform before starting measurement.
20 20
21 ``profile-benchmark``
22 Enable profiling for the benchmarked section.
23 (The first iteration is benchmarked)
24
21 25 ``run-limits``
22 26 Control the number of runs each benchmark will perform. The option value
23 27 should be a list of `<time>-<numberofrun>` pairs. After each run the
@@ -109,6 +113,10 b' try:'
109 113 except ImportError:
110 114 pass
111 115
116 try:
117 from mercurial import profiling
118 except ImportError:
119 profiling = None
112 120
113 121 def identity(a):
114 122 return a
@@ -246,6 +254,9 b' try:'
246 254 configitem(b'perf', b'pre-run',
247 255 default=mercurial.configitems.dynamicdefault,
248 256 )
257 configitem(b'perf', b'profile-benchmark',
258 default=mercurial.configitems.dynamicdefault,
259 )
249 260 configitem(b'perf', b'run-limits',
250 261 default=mercurial.configitems.dynamicdefault,
251 262 )
@@ -257,6 +268,13 b' def getlen(ui):'
257 268 return lambda x: 1
258 269 return len
259 270
271 class noop(object):
272 """dummy context manager"""
273 def __enter__(self):
274 pass
275 def __exit__(self, *args):
276 pass
277
260 278 def gettimer(ui, opts=None):
261 279 """return a timer function and formatter: (timer, formatter)
262 280
@@ -347,9 +365,14 b' def gettimer(ui, opts=None):'
347 365 if not limits:
348 366 limits = DEFAULTLIMITS
349 367
368 profiler = None
369 if profiling is not None:
370 if ui.configbool(b"perf", b"profile-benchmark", False):
371 profiler = profiling.profile(ui)
372
350 373 prerun = getint(ui, b"perf", b"pre-run", 0)
351 374 t = functools.partial(_timer, fm, displayall=displayall, limits=limits,
352 prerun=prerun)
375 prerun=prerun, profiler=profiler)
353 376 return t, fm
354 377
355 378 def stub_timer(fm, func, setup=None, title=None):
@@ -376,11 +399,13 b' DEFAULTLIMITS = ('
376 399 )
377 400
378 401 def _timer(fm, func, setup=None, title=None, displayall=False,
379 limits=DEFAULTLIMITS, prerun=0):
402 limits=DEFAULTLIMITS, prerun=0, profiler=None):
380 403 gc.collect()
381 404 results = []
382 405 begin = util.timer()
383 406 count = 0
407 if profiler is None:
408 profiler = noop()
384 409 for i in xrange(prerun):
385 410 if setup is not None:
386 411 setup()
@@ -389,8 +414,9 b' def _timer(fm, func, setup=None, title=N'
389 414 while keepgoing:
390 415 if setup is not None:
391 416 setup()
392 with timeone() as item:
393 r = func()
417 with profiler:
418 with timeone() as item:
419 r = func()
394 420 count += 1
395 421 results.append(item[0])
396 422 cstop = util.timer()
@@ -58,6 +58,10 b' perfstatus'
58 58 "pre-run"
59 59 number of runs to perform before starting measurement.
60 60
61 "profile-benchmark"
62 Enable profiling for the benchmarked section. (The first iteration is
63 benchmarked)
64
61 65 "run-limits"
62 66 Control the number of runs each benchmark will perform. The option value
63 67 should be a list of '<time>-<numberofrun>' pairs. After each run the
@@ -349,6 +353,15 b' Test pre-run feature'
349 353 searching for changes
350 354 searching for changes
351 355
356 test profile-benchmark option
357 ------------------------------
358
359 Function to check that statprof ran
360 $ statprofran () {
361 > egrep 'Sample count:|No samples recorded' > /dev/null
362 > }
363 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.profile-benchmark=yes 2>&1 | statprofran
364
352 365 Check perf.py for historical portability
353 366 ----------------------------------------
354 367
General Comments 0
You need to be logged in to leave comments. Login now