Show More
@@ -15,6 +15,9 b' Configurations' | |||
|
15 | 15 | ``presleep`` |
|
16 | 16 | number of second to wait before any group of runs (default: 1) |
|
17 | 17 | |
|
18 | ``pre-run`` | |
|
19 | number of runs to perform before starting measurement. | |
|
20 | ||
|
18 | 21 | ``run-limits`` |
|
19 | 22 | Control the number of runs each benchmark will perform. The option value |
|
20 | 23 | should be a list of `<time>-<numberofrun>` pairs. After each run the |
@@ -240,6 +243,9 b' try:' | |||
|
240 | 243 | configitem(b'perf', b'all-timing', |
|
241 | 244 | default=mercurial.configitems.dynamicdefault, |
|
242 | 245 | ) |
|
246 | configitem(b'perf', b'pre-run', | |
|
247 | default=mercurial.configitems.dynamicdefault, | |
|
248 | ) | |
|
243 | 249 | configitem(b'perf', b'run-limits', |
|
244 | 250 | default=mercurial.configitems.dynamicdefault, |
|
245 | 251 | ) |
@@ -341,7 +347,9 b' def gettimer(ui, opts=None):' | |||
|
341 | 347 | if not limits: |
|
342 | 348 | limits = DEFAULTLIMITS |
|
343 | 349 | |
|
344 | t = functools.partial(_timer, fm, displayall=displayall, limits=limits) | |
|
350 | prerun = getint(ui, b"perf", b"pre-run", 0) | |
|
351 | t = functools.partial(_timer, fm, displayall=displayall, limits=limits, | |
|
352 | prerun=prerun) | |
|
345 | 353 | return t, fm |
|
346 | 354 | |
|
347 | 355 | def stub_timer(fm, func, setup=None, title=None): |
@@ -368,11 +376,15 b' DEFAULTLIMITS = (' | |||
|
368 | 376 | ) |
|
369 | 377 | |
|
370 | 378 | def _timer(fm, func, setup=None, title=None, displayall=False, |
|
371 | limits=DEFAULTLIMITS): | |
|
379 | limits=DEFAULTLIMITS, prerun=0): | |
|
372 | 380 | gc.collect() |
|
373 | 381 | results = [] |
|
374 | 382 | begin = util.timer() |
|
375 | 383 | count = 0 |
|
384 | for i in xrange(prerun): | |
|
385 | if setup is not None: | |
|
386 | setup() | |
|
387 | func() | |
|
376 | 388 | keepgoing = True |
|
377 | 389 | while keepgoing: |
|
378 | 390 | if setup is not None: |
@@ -55,6 +55,9 b' perfstatus' | |||
|
55 | 55 | "presleep" |
|
56 | 56 | number of second to wait before any group of runs (default: 1) |
|
57 | 57 | |
|
58 | "pre-run" | |
|
59 | number of runs to perform before starting measurement. | |
|
60 | ||
|
58 | 61 | "run-limits" |
|
59 | 62 | Control the number of runs each benchmark will perform. The option value |
|
60 | 63 | should be a list of '<time>-<numberofrun>' pairs. After each run the |
@@ -327,6 +330,25 b' detailed output:' | |||
|
327 | 330 | } |
|
328 | 331 | ] |
|
329 | 332 | |
|
333 | Test pre-run feature | |
|
334 | -------------------- | |
|
335 | ||
|
336 | (perf discovery has some spurious output) | |
|
337 | ||
|
338 | $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=0 | |
|
339 | ! wall * comb * user * sys * (best of 1) (glob) | |
|
340 | searching for changes | |
|
341 | $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=1 | |
|
342 | ! wall * comb * user * sys * (best of 1) (glob) | |
|
343 | searching for changes | |
|
344 | searching for changes | |
|
345 | $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=3 | |
|
346 | ! wall * comb * user * sys * (best of 1) (glob) | |
|
347 | searching for changes | |
|
348 | searching for changes | |
|
349 | searching for changes | |
|
350 | searching for changes | |
|
351 | ||
|
330 | 352 | Check perf.py for historical portability |
|
331 | 353 | ---------------------------------------- |
|
332 | 354 |
General Comments 0
You need to be logged in to leave comments.
Login now