perf: add a `pre-run` option...
Author: marmoute
Changeset: r42551:563cd9a7 (default branch)
@@ -15,6 +15,9 @@ Configurations
 ``presleep``
   number of second to wait before any group of runs (default: 1)
 
+``pre-run``
+  number of run to perform before starting measurement.
+
 ``run-limits``
   Control the number of runs each benchmark will perform. The option value
   should be a list of `<time>-<numberofrun>` pairs. After each run the
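
Note: the option is registered under the ``perf`` config section (see the
``configitem`` hunk below), so it can be enabled persistently from a
configuration file or per invocation via ``--config perf.pre-run=N``, as the
new tests at the end of this change do. A minimal hgrc illustration, assuming
the usual configuration-file syntax::

  [perf]
  pre-run = 3
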
@@ -240,6 +243,9 @@ try:
     configitem(b'perf', b'all-timing',
                default=mercurial.configitems.dynamicdefault,
                )
+    configitem(b'perf', b'pre-run',
+               default=mercurial.configitems.dynamicdefault,
+               )
     configitem(b'perf', b'run-limits',
                default=mercurial.configitems.dynamicdefault,
                )
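
Note: ``dynamicdefault`` tells the config registrar that the effective default
is not fixed at registration time but supplied by the reading code, here the
``getint(ui, b"perf", b"pre-run", 0)`` call added to ``gettimer`` below, which
falls back to 0. A small stand-alone sketch of that register-then-read pattern,
using placeholder names rather than the real Mercurial API::

  # Illustrative only: DYNAMIC, REGISTRY, configitem and getint are stand-ins
  # for the registrar machinery, not Mercurial's real helpers.
  DYNAMIC = object()          # sentinel: "default chosen at read time"
  REGISTRY = {}               # (section, name) -> registered default

  def configitem(section, name, default):
      REGISTRY[(section, name)] = default

  def getint(values, section, name, fallback):
      value = values.get((section, name))
      if value is None:
          registered = REGISTRY.get((section, name), DYNAMIC)
          return fallback if registered is DYNAMIC else registered
      return int(value)

  configitem(b'perf', b'pre-run', default=DYNAMIC)
  print(getint({}, b'perf', b'pre-run', 0))                             # 0
  print(getint({(b'perf', b'pre-run'): b'3'}, b'perf', b'pre-run', 0))  # 3
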
@@ -341,7 +347,9 @@ def gettimer(ui, opts=None):
     if not limits:
         limits = DEFAULTLIMITS
 
-    t = functools.partial(_timer, fm, displayall=displayall, limits=limits)
+    prerun = getint(ui, b"perf", b"pre-run", 0)
+    t = functools.partial(_timer, fm, displayall=displayall, limits=limits,
+                          prerun=prerun)
     return t, fm
 
 def stub_timer(fm, func, setup=None, title=None):
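
Note: ``gettimer`` pre-binds the new keyword with ``functools.partial``, so the
many ``perf*`` commands that call the returned timer keep working unchanged;
only ``_timer`` itself needs to learn about ``prerun``. A self-contained sketch
of that pre-binding pattern (the real ``_timer`` takes more arguments than this
trimmed stand-in)::

  import functools

  # Trimmed stand-in for _timer: it just echoes the keywords it received.
  def _timer(func, setup=None, displayall=False, limits=(), prerun=0):
      return {'displayall': displayall, 'limits': limits, 'prerun': prerun}

  # gettimer-style pre-binding: fix the configuration-derived keywords once.
  t = functools.partial(_timer, displayall=True, limits=((3.0, 100),),
                        prerun=2)

  # Call sites keep calling t(func, setup=...) exactly as before; the new
  # prerun value rides along transparently.
  print(t(lambda: None))
  # -> {'displayall': True, 'limits': ((3.0, 100),), 'prerun': 2}
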
@@ -368,11 +376,15 @@ DEFAULTLIMITS = (
 )
 
 def _timer(fm, func, setup=None, title=None, displayall=False,
-           limits=DEFAULTLIMITS):
+           limits=DEFAULTLIMITS, prerun=0):
     gc.collect()
     results = []
     begin = util.timer()
     count = 0
+    for i in xrange(prerun):
+        if setup is not None:
+            setup()
+        func()
     keepgoing = True
     while keepgoing:
         if setup is not None:
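
Note: the warm-up iterations run the full ``setup()``/``func()`` pair but
record nothing, so they only prime caches (disk, interpreter, repository state)
before the measured loop that follows starts appending to ``results``. A
condensed, self-contained illustration of the idea (not the real ``_timer``,
which also honours ``limits`` and writes through the formatter)::

  import time

  def measure(func, setup=None, prerun=0, runs=3):
      for _ in range(prerun):       # warm-up: side effects happen,
          if setup is not None:     # timings are not recorded
              setup()
          func()
      results = []
      for _ in range(runs):         # measured passes
          if setup is not None:
              setup()
          start = time.perf_counter()
          func()
          results.append(time.perf_counter() - start)
      return min(results)

  best = measure(lambda: sum(range(10 ** 5)), prerun=2)
  print('best of 3: %.6f s' % best)
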
@@ -55,6 +55,9 @@ perfstatus
   "presleep"
     number of second to wait before any group of runs (default: 1)
 
+  "pre-run"
+    number of run to perform before starting measurement.
+
   "run-limits"
     Control the number of runs each benchmark will perform. The option value
     should be a list of '<time>-<numberofrun>' pairs. After each run the
@@ -327,6 +330,25 @@ detailed output:
    }
   ]
 
+Test pre-run feature
+--------------------
+
+(perf discovery has some spurious output)
+
+  $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=0
+  ! wall * comb * user * sys * (best of 1) (glob)
+  searching for changes
+  $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=1
+  ! wall * comb * user * sys * (best of 1) (glob)
+  searching for changes
+  searching for changes
+  $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=3
+  ! wall * comb * user * sys * (best of 1) (glob)
+  searching for changes
+  searching for changes
+  searching for changes
+  searching for changes
+
 Check perf.py for historical portability
 ----------------------------------------
 
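Note: with ``run-limits`` forcing a single measured run, each warm-up run
prints its own "searching for changes", so the expected count is
``pre-run + 1``: one line for ``pre-run=0``, two for ``pre-run=1``, and four
for ``pre-run=3``, which is exactly what the three invocations above check.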