perf: add an option to profile the benchmark section...
marmoute
r42552:3293086f default
@@ -18,6 +18,10 b' Configurations'
 ``pre-run``
   number of run to perform before starting measurement.
 
+``profile-benchmark``
+  Enable profiling for the benchmarked section.
+  (The first iteration is benchmarked)
+
 ``run-limits``
   Control the number of runs each benchmark will perform. The option value
   should be a list of `<time>-<numberofrun>` pairs. After each run the
@@ -109,6 +113,10 b' try:'
 except ImportError:
     pass
 
+try:
+    from mercurial import profiling
+except ImportError:
+    profiling = None
 
 def identity(a):
     return a
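The guarded import above is the usual optional-dependency pattern: the bound name doubles as a feature flag, so the rest of perf.py can test `profiling is not None` once instead of wrapping every use in try/except. A minimal self-contained sketch of the same idea (the module chosen here is illustrative, not from perf.py; `resource` is a stdlib module that is absent on Windows):

    # Optional-dependency pattern: bind the module if available, else None.
    try:
        import resource               # illustrative optional module
    except ImportError:               # e.g. on Windows
        resource = None

    def peak_rss():
        # one branch on the sentinel instead of try/except at every use
        if resource is None:
            return None
        return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss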
@@ -246,6 +254,9 b' try:'
     configitem(b'perf', b'pre-run',
         default=mercurial.configitems.dynamicdefault,
     )
+    configitem(b'perf', b'profile-benchmark',
+        default=mercurial.configitems.dynamicdefault,
+    )
     configitem(b'perf', b'run-limits',
         default=mercurial.configitems.dynamicdefault,
     )
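`mercurial.configitems.dynamicdefault` is a sentinel meaning "the default is supplied at read time": registering the item keeps the config machinery happy, while the effective default (False) comes from the `ui.configbool(..., False)` call further down. A toy model of that mechanism, with every name local to the sketch:

    dynamicdefault = object()          # sentinel: "caller provides default"
    _registry = {}

    def configitem(section, name, default):
        _registry[(section, name)] = default

    def configbool(cfg, section, name, default=None):
        if (section, name) in cfg:
            return cfg[(section, name)]
        registered = _registry[(section, name)]
        # fall back to the call-site default when the item is dynamic
        return default if registered is dynamicdefault else registered

    configitem(b'perf', b'profile-benchmark', default=dynamicdefault)
    print(configbool({}, b'perf', b'profile-benchmark', False))   # -> False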
@@ -257,6 +268,13 b' def getlen(ui):'
         return lambda x: 1
     return len
 
+class noop(object):
+    """dummy context manager"""
+    def __enter__(self):
+        pass
+    def __exit__(self, *args):
+        pass
+
 def gettimer(ui, opts=None):
     """return a timer function and formatter: (timer, formatter)
 
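noop gives the benchmark loop a single code path: when no profiler was requested, _timer still has something usable in a `with` statement. A standalone sketch of the trick (names here are illustrative); on Python 3.7+ the standard library's `contextlib.nullcontext()` plays the same role:

    import time

    class noop(object):
        """dummy context manager, as in the hunk above"""
        def __enter__(self):
            pass
        def __exit__(self, *args):
            pass

    def timed(work, profiler=None):
        # substituting a do-nothing manager avoids branching at the call site
        if profiler is None:
            profiler = noop()
        with profiler:
            start = time.time()
            work()
        return time.time() - start

    print(timed(lambda: sum(range(10 ** 6))))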
@@ -347,9 +365,14 b' def gettimer(ui, opts=None):'
     if not limits:
         limits = DEFAULTLIMITS
 
+    profiler = None
+    if profiling is not None:
+        if ui.configbool(b"perf", b"profile-benchmark", False):
+            profiler = profiling.profile(ui)
+
     prerun = getint(ui, b"perf", b"pre-run", 0)
     t = functools.partial(_timer, fm, displayall=displayall, limits=limits,
-                          prerun=prerun)
+                          prerun=prerun, profiler=profiler)
     return t, fm
 
 def stub_timer(fm, func, setup=None, title=None):
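The new keyword is threaded to _timer the same way pre-run already is: gettimer binds it once with functools.partial and hands callers a ready-made timer. A reduced illustration of that binding (names mimic the hunk; the body is a stand-in, not perf.py's real _timer):

    import functools

    def _timer(func, prerun=0, profiler=None):
        # stand-in body; the real _timer loops, times and profiles func()
        for _ in range(prerun):
            func()
        return func()

    # bind the benchmark options once, as gettimer() does above
    t = functools.partial(_timer, prerun=2, profiler=None)
    print(t(lambda: 42))                                   # -> 42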
@@ -376,11 +399,13 b' DEFAULTLIMITS = ('
 )
 
 def _timer(fm, func, setup=None, title=None, displayall=False,
-           limits=DEFAULTLIMITS, prerun=0):
+           limits=DEFAULTLIMITS, prerun=0, profiler=None):
     gc.collect()
     results = []
     begin = util.timer()
     count = 0
+    if profiler is None:
+        profiler = noop()
     for i in xrange(prerun):
         if setup is not None:
             setup()
@@ -389,8 +414,9 b' def _timer(fm, func, setup=None, title=N'
     while keepgoing:
         if setup is not None:
             setup()
-        with timeone() as item:
-            r = func()
+        with profiler:
+            with timeone() as item:
+                r = func()
         count += 1
         results.append(item[0])
     cstop = util.timer()
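Note the nesting: the profiler context encloses the timing context, so the cost of starting and stopping the profiler presumably falls outside the measured interval, while the profiled region still covers exactly the timed call (and, per the docstring above, Mercurial's profiler only actually engages on the first iteration). A self-contained illustration of the same shape, using cProfile as a stand-in for `profiling.profile(ui)` and manual enable/disable calls to mirror the two `with` levels:

    import cProfile, time

    prof = cProfile.Profile()      # stand-in for profiling.profile(ui)
    prof.enable()                  # outer context: profiler.__enter__
    start = time.time()            # inner context: timeone().__enter__
    sum(range(10 ** 6))            # the benchmarked func()
    elapsed = time.time() - start  # inner exit: stop the clock
    prof.disable()                 # outer exit: stop the profiler
    print('%.6fs' % elapsed)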
@@ -58,6 +58,10 b' perfstatus'
   "pre-run"
     number of run to perform before starting measurement.
 
+  "profile-benchmark"
+    Enable profiling for the benchmarked section. (The first iteration is
+    benchmarked)
+
   "run-limits"
     Control the number of runs each benchmark will perform. The option value
     should be a list of '<time>-<numberofrun>' pairs. After each run the
@@ -349,6 +353,15 b' Test pre-run feature'
   searching for changes
   searching for changes
 
+test profile-benchmark option
+------------------------------
+
+Function to check that statprof ran
+  $ statprofran () {
+  >   egrep 'Sample count:|No samples recorded' > /dev/null
+  > }
+  $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.profile-benchmark=yes 2>&1 | statprofran
+
 Check perf.py for historical portability
 ----------------------------------------
 