##// END OF EJS Templates
command-namespace: use `::` as the command separator...
marmoute -
r47117:d8ad391e default
parent child Browse files
Show More
@@ -1,3915 +1,3919
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance
3 3
4 4 Configurations
5 5 ==============
6 6
7 7 ``perf``
8 8 --------
9 9
10 10 ``all-timing``
11 11 When set, additional statistics will be reported for each benchmark: best,
12 12 worst, median average. If not set only the best timing is reported
13 13 (default: off).
14 14
15 15 ``presleep``
16 16 number of second to wait before any group of runs (default: 1)
17 17
18 18 ``pre-run``
19 19 number of run to perform before starting measurement.
20 20
21 21 ``profile-benchmark``
22 22 Enable profiling for the benchmarked section.
23 23 (The first iteration is benchmarked)
24 24
25 25 ``run-limits``
26 26 Control the number of runs each benchmark will perform. The option value
27 27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 28 conditions are considered in order with the following logic:
29 29
30 30 If benchmark has been running for <time> seconds, and we have performed
31 31 <numberofrun> iterations, stop the benchmark,
32 32
33 33 The default value is: `3.0-100, 10.0-3`
34 34
35 35 ``stub``
36 36 When set, benchmarks will only be run once, useful for testing
37 37 (default: off)
38 38 '''
39 39
40 40 # "historical portability" policy of perf.py:
41 41 #
42 42 # We have to do:
43 43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 46 # - make historical perf command work correctly with as wide Mercurial
47 47 # version as possible
48 48 #
49 49 # We have to do, if possible with reasonable cost:
50 50 # - make recent perf command for historical feature work correctly
51 51 # with early Mercurial
52 52 #
53 53 # We don't have to do:
54 54 # - make perf command for recent feature work correctly with early
55 55 # Mercurial
56 56
57 57 from __future__ import absolute_import
58 58 import contextlib
59 59 import functools
60 60 import gc
61 61 import os
62 62 import random
63 63 import shutil
64 64 import struct
65 65 import sys
66 66 import tempfile
67 67 import threading
68 68 import time
69 69 from mercurial import (
70 70 changegroup,
71 71 cmdutil,
72 72 commands,
73 73 copies,
74 74 error,
75 75 extensions,
76 76 hg,
77 77 mdiff,
78 78 merge,
79 79 revlog,
80 80 util,
81 81 )
82 82
83 83 # for "historical portability":
84 84 # try to import modules separately (in dict order), and ignore
85 85 # failure, because these aren't available with early Mercurial
86 86 try:
87 87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 88 except ImportError:
89 89 pass
90 90 try:
91 91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 92 except ImportError:
93 93 pass
94 94 try:
95 95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96 96
97 97 dir(registrar) # forcibly load it
98 98 except ImportError:
99 99 registrar = None
100 100 try:
101 101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 102 except ImportError:
103 103 pass
104 104 try:
105 105 from mercurial.utils import repoviewutil # since 5.0
106 106 except ImportError:
107 107 repoviewutil = None
108 108 try:
109 109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 110 except ImportError:
111 111 pass
112 112 try:
113 113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 114 except ImportError:
115 115 pass
116 116
117 117 try:
118 118 from mercurial import profiling
119 119 except ImportError:
120 120 profiling = None
121 121
122 122
def identity(a):
    # Pass-through used as a no-op compatibility shim when the real
    # pycompat helpers (byteskwargs, fsencode, ...) are unavailable.
    return a
125 125
126 126
127 127 try:
128 128 from mercurial import pycompat
129 129
130 130 getargspec = pycompat.getargspec # added to module after 4.5
131 131 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
132 132 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
133 133 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
134 134 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
135 135 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
136 136 if pycompat.ispy3:
137 137 _maxint = sys.maxsize # per py3 docs for replacing maxint
138 138 else:
139 139 _maxint = sys.maxint
140 140 except (NameError, ImportError, AttributeError):
141 141 import inspect
142 142
143 143 getargspec = inspect.getargspec
144 144 _byteskwargs = identity
145 145 _bytestr = str
146 146 fsencode = identity # no py3 support
147 147 _maxint = sys.maxint # no py3 support
148 148 _sysstr = lambda x: x # no py3 support
149 149 _xrange = xrange
150 150
151 151 try:
152 152 # 4.7+
153 153 queue = pycompat.queue.Queue
154 154 except (NameError, AttributeError, ImportError):
155 155 # <4.7.
156 156 try:
157 157 queue = pycompat.queue
158 158 except (NameError, AttributeError, ImportError):
159 159 import Queue as queue
160 160
161 161 try:
162 162 from mercurial import logcmdutil
163 163
164 164 makelogtemplater = logcmdutil.maketemplater
165 165 except (AttributeError, ImportError):
166 166 try:
167 167 makelogtemplater = cmdutil.makelogtemplater
168 168 except (AttributeError, ImportError):
169 169 makelogtemplater = None
170 170
171 171 # for "historical portability":
172 172 # define util.safehasattr forcibly, because util.safehasattr has been
173 173 # available since 1.9.3 (or 94b200a11cf7)
# sentinel distinguishing "attribute missing" from an attribute whose
# value happens to be falsy or None
_undefined = object()


def safehasattr(thing, attr):
    # True when `thing` has attribute `attr` (given as bytes); unlike
    # hasattr() this never swallows exceptions other than the lookup miss
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined


# install our implementation so the rest of this file (and old hg code
# paths) can rely on util.safehasattr existing
setattr(util, 'safehasattr', safehasattr)
182 182
183 183 # for "historical portability":
184 184 # define util.timer forcibly, because util.timer has been available
185 185 # since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    # py3.3+: monotonic clock with the best available resolution
    util.timer = time.perf_counter
elif os.name == 'nt':
    # NOTE: this previously compared os.name (a str) against b'nt',
    # which is always False on Python 3; harmless there because
    # perf_counter exists, but fixed for correctness on py2/Windows.
    util.timer = time.clock
else:
    util.timer = time.time
192 192
193 193 # for "historical portability":
194 194 # use locally defined empty option list, if formatteropts isn't
195 195 # available, because commands.formatteropts has been available since
196 196 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
197 197 # available since 2.2 (or ae5f92e154d3)
# prefer cmdutil's option list, fall back to commands', else empty
formatteropts = getattr(
    cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
)

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(
    cmdutil,
    "debugrevlogopts",
    getattr(
        commands,
        "debugrevlogopts",
        [
            (b'c', b'changelog', False, b'open changelog'),
            (b'm', b'manifest', False, b'open manifest'),
            (b'', b'dir', False, b'open directory manifest'),
        ],
    ),
)
220 220
221 221 cmdtable = {}
222 222
223 223 # for "historical portability":
224 224 # define parsealiases locally, because cmdutil.parsealiases has been
225 225 # available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a command declaration like ``b"name|alias1|alias2"`` into
    its list of names."""
    return cmd.split(b"|")
228 228
229 229
# pick a @command decorator implementation matching the running hg
if safehasattr(registrar, 'command'):
    # registrar.command: modern registration API (since 3.7)
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if 'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command

        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)


else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func

        return decorator
261 261
262 262
# register all perf.* config options so hg's devel checks don't warn
# about unregistered config; dynamicdefault because the effective
# defaults are supplied at each read site
try:
    import mercurial.registrar
    import mercurial.configitems

    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'pre-run',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
except (ImportError, AttributeError):
    # registrar/configitems not available at all on very old hg
    pass
except TypeError:
    # compatibility fix for a11fd395e83f
    # hg version: 5.2
    # (configitem() there rejects the `experimental` keyword, so
    # re-register everything without it)
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'pre-run',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
    )
349 349
350 350
def getlen(ui):
    """Return a length function honoring perf.stub.

    With ``perf.stub`` set, every collection is reported as a single
    element so stub runs stay cheap; otherwise the builtin ``len``.
    """
    stubbed = ui.configbool(b"perf", b"stub", False)
    return (lambda x: 1) if stubbed else len
355 355
356 356
class noop(object):
    """Context manager that does nothing on entry or exit.

    Stands in for the profiler so benchmark loops can always write
    ``with profiler:`` unconditionally.
    """

    def __enter__(self):
        return None

    def __exit__(self, *exc_info):
        return None
365 365
366 366
367 367 NOOPCTX = noop()
368 368
369 369
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter"""

            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short

            def __nonzero__(self):
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    # each entry has the form b'<seconds>-<runcount>'; malformed entries
    # are warned about and skipped
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
            continue
        try:
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    # experimental config: perf.profile-benchmark (requires the
    # profiling module, i.e. a recent enough hg)
    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
    )
    return t, fm
492 492
493 493
def stub_timer(fm, func, setup=None, title=None):
    """Run ``func`` exactly once (after the optional ``setup``).

    Drop-in replacement for ``_timer`` used when perf.stub is set; the
    ``fm`` and ``title`` arguments exist only for interface parity and
    are ignored.
    """
    for step in (setup, func):
        if step is not None:
            step()
498 498
499 499
@contextlib.contextmanager
def timeone():
    """Time the enclosed block; yields a list that receives the result.

    On exit the list gets one ``(wallclock, user-cpu, system-cpu)``
    tuple of deltas measured around the ``with`` body.
    """
    r = []
    ostart = os.times()
    cstart = util.timer()
    yield r
    cstop = util.timer()
    ostop = os.times()
    a, b = ostart, ostop
    # os.times()[0]/[1] are user and system CPU time respectively
    r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
510 510
511 511
512 512 # list of stop condition (elapsed time, minimal run count)
513 513 DEFAULTLIMITS = (
514 514 (3.0, 100),
515 515 (10.0, 3),
516 516 )
517 517
518 518
def _timer(
    fm,
    func,
    setup=None,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
):
    """Repeatedly run ``func`` and report timings through formatter ``fm``.

    ``setup`` (if given) runs before every iteration, including the
    ``prerun`` unmeasured warm-up runs.  Only the first measured
    iteration executes under ``profiler``; it is then replaced with
    NOOPCTX.  The loop stops once any ``(time, mincount)`` pair in
    ``limits`` is satisfied by both elapsed time and iteration count.
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    for i in range(prerun):
        # warm-up iterations: executed but neither timed nor profiled
        if setup is not None:
            setup()
        func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with profiler:
            with timeone() as item:
                r = func()
        # only the first iteration is profiled
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    formatone(fm, results, title=title, result=r, displayall=displayall)
558 558
559 559
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's timing summary through formatter ``fm``.

    ``timings`` is a list of ``(wall, user, sys)`` tuples.  The best
    (fastest) run is always shown; with ``displayall`` the max, average
    and median are reported as well.  Note: sorts ``timings`` in place.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def display(role, entry):
        # non-best entries get a namespaced field prefix, e.g. "max.wall"
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    min_val = timings[0]
    display(b'best', min_val)
    if displayall:
        max_val = timings[-1]
        display(b'max', max_val)
        avg = tuple([sum(x) / count for x in zip(*timings)])
        display(b'avg', avg)
        median = timings[len(timings) // 2]
        display(b'median', median)
593 593
594 594
595 595 # utilities for historical portability
596 596
597 597
def getint(ui, section, name, default):
    """Read config ``section.name`` as an int, or ``default`` if unset.

    Raises ``error.ConfigError`` when a value is present but not an
    integer.  (``ui.configint`` itself only exists since hg 1.9 /
    fa2b596db182, hence this portable reimplementation.)
    """
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError(
            b"%s.%s is not an integer ('%s')" % (section, name, raw)
        )
610 610
611 611
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort(
            (
                b"missing attribute %s of %s might break assumption"
                b" of performance measurement"
            )
            % (name, obj)
        )

    # remember the current value so restore() can put it back
    origvalue = getattr(obj, _sysstr(name))

    class attrutil(object):
        # set: assign a new value to obj.<name>
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        # restore: re-install the value captured above
        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
648 648
649 649
650 650 # utilities to examine each internal API changes
651 651
652 652
def getbranchmapsubsettable():
    """Return the branchmap ``subsettable`` mapping wherever it lives.

    Its home module has moved across hg versions, so probe each
    candidate in historical order.
    """
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    # - repoviewutil since 5.0
    for mod in (branchmap, repoview, repoviewutil):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )
671 671
672 672
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store"""
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf);
    # fall back to the pre-2.3 `sopener` attribute otherwise
    return getattr(repo, 'svfs', None) or getattr(repo, 'sopener')
682 682
683 683
def getvfs(repo):
    """Return appropriate object to access files under .hg"""
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf);
    # fall back to the pre-2.3 `opener` attribute otherwise
    return getattr(repo, 'vfs', None) or getattr(repo, 'opener')
693 693
694 694
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API"""
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if '_tagscache' in vars(repo):
                del repo.__dict__['_tagscache']

        return clearcache

    # 1.4 <= hg < 2.0: plain `_tags` attribute
    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    # 0.6 <= hg < 1.4: `tagscache` attribute
    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")
723 723
724 724
725 725 # utilities to clear cache
726 726
727 727
def clearfilecache(obj, attrname):
    """Drop a cached ``@filecache`` property so it gets recomputed.

    When ``obj`` is a filtered repoview, the cached values live on the
    unfiltered repo, so operate there.
    """
    unfilter = getattr(obj, 'unfiltered', None)
    if unfilter is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
735 735
736 736
def clearchangelog(repo):
    # Drop the repoview-level changelog cache (when `repo` is a filtered
    # view) and the filecache entry on the unfiltered repo, so the next
    # access re-reads the changelog from disk.
    if repo is not repo.unfiltered():
        # bypass any property machinery on the view object
        object.__setattr__(repo, '_clcachekey', None)
        object.__setattr__(repo, '_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')
742 742
743 743
744 744 # perf commands
745 745
746 746
@command(b'perf::walk|perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    """benchmark a dirstate walk over the working directory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    timer(
        lambda: len(
            list(
                repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
            )
        )
    )
    fm.end()
760 760
761 761
@command(b'perf::annotate|perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    """benchmark annotating file ``f`` at the working parent revision"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    timer(lambda: len(fc.annotate(True)))
    fm.end()
769 769
770 770
@command(
    b'perf::status|perfstatus',
    [
        (b'u', b'unknown', False, b'ask status to look for unknown files'),
        (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
    ]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call

    The repository data are preserved between each call.

    By default, only the status of the tracked file are requested. If
    `--unknown` is passed, the "unknown" files are also tracked.
    """
    opts = _byteskwargs(opts)
    # m = match.always(repo.root, repo.getcwd())
    # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    #                                                False))))
    timer, fm = gettimer(ui, opts)
    if opts[b'dirstate']:
        # benchmark the low-level dirstate.status() call directly
        dirstate = repo.dirstate
        m = scmutil.matchall(repo)
        unknown = opts[b'unknown']

        def status_dirstate():
            s = dirstate.status(
                m, subrepos=[], ignored=False, clean=False, unknown=unknown
            )
            # consume the result so lazy work isn't skipped
            sum(map(bool, s))

        timer(status_dirstate)
    else:
        timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
807 807
808 808
@command(b'perf::addremove|perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    """benchmark a dry-run addremove over the working copy"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # capture the old value *before* entering the try block: if this
    # read were inside the try and raised, the finally clause would hit
    # a NameError on `oldquiet` and mask the original exception
    oldquiet = repo.ui.quiet
    try:
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        if 'uipathfn' in getargspec(scmutil.addremove).args:
            # hg >= 5.0: addremove() grew a uipathfn argument
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
826 826
827 827
def clearcaches(cl):
    """Clear a changelog/revlog's lookup caches across hg versions."""
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # <= hg-5.2
        from mercurial.node import nullid, nullrev

        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
838 838
839 839
@command(b'perf::heads|perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    # setup: drop changelog caches so each run computes from scratch
    def s():
        clearcaches(cl)

    def d():
        len(cl.headrevs())

    timer(d, setup=s)
    fm.end()
855 855
856 856
@command(
    b'perf::tags|perftags',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perftags(ui, repo, **opts):
    """benchmark computing the repository's tags"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']

    # setup: optionally drop revlog caches; always drop the tags cache
    def s():
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()

    def t():
        return len(repo.tags())

    timer(t, setup=s)
    fm.end()
881 881
882 882
@command(b'perf::ancestors|perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    """benchmark walking all ancestors of the changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()

    def d():
        # exhaust the ancestor iterator; iteration is the benchmark
        for a in repo.changelog.ancestors(heads):
            pass

    timer(d)
    fm.end()
895 895
896 896
@command(b'perf::ancestorset|perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    """benchmark ancestor-membership tests for the revs in ``revset``"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()

    def d():
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            # the membership test itself is the benchmarked operation;
            # the result is intentionally discarded
            rev in s

    timer(d)
    fm.end()
911 911
912 912
@command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path"""
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)
    path = ui.expandpath(path)

    # setup: open a fresh peer for every run so no discovery state is reused
    def s():
        repos[1] = hg.peer(ui, opts, path)

    def d():
        setdiscovery.findcommonheads(ui, *repos)

    timer(d, setup=s)
    fm.end()
928 928
929 929
@command(
    b'perf::bookmarks|perfbookmarks',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']

    # setup: drop the cached bookmark store (and optionally the revlogs)
    def s():
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')

    def d():
        # attribute access triggers the (re)parse being measured
        repo._bookmarks

    timer(d, setup=s)
    fm.end()
954 954
955 955
@command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    # each make* helper returns a zero-argument callable that re-opens
    # the bundle so every timed run sees a fresh stream
    def makebench(fn):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # raw file reads of `size` bytes, no bundle decoding at all
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    # raw-I/O baselines, always measured
    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # sniff the bundle type once to pick the applicable benchmarks
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend(
                [
                    (makebench(deltaiter), b'cg1 deltaiter()'),
                    (makebench(iterchunks), b'cg1 getchunks()'),
                    (makereadnbytes(8192), b'cg1 read(8k)'),
                    (makereadnbytes(16384), b'cg1 read(16k)'),
                    (makereadnbytes(32768), b'cg1 read(32k)'),
                    (makereadnbytes(131072), b'cg1 read(128k)'),
                ]
            )
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend(
                [
                    (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                    (makebench(iterparts), b'bundle2 iterparts()'),
                    (
                        makebench(iterpartsseekable),
                        b'bundle2 iterparts() seekable',
                    ),
                    (makebench(seek), b'bundle2 part seek()'),
                    (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                    (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                    (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                    (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
                ]
            )
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1080 1080
1081 1081
1082 1082 @command(
1083 b'perf--changegroupchangelog',
1083 b'perf::changegroupchangelog|perfchangegroupchangelog',
1084 1084 formatteropts
1085 1085 + [
1086 1086 (b'', b'cgversion', b'02', b'changegroup version'),
1087 1087 (b'r', b'rev', b'', b'revisions to add to changegroup'),
1088 1088 ],
1089 1089 )
1090 1090 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
1091 1091 """Benchmark producing a changelog group for a changegroup.
1092 1092
1093 1093 This measures the time spent processing the changelog during a
1094 1094 bundle operation. This occurs during `hg bundle` and on a server
1095 1095 processing a `getbundle` wire protocol request (handles clones
1096 1096 and pull requests).
1097 1097
1098 1098 By default, all revisions are added to the changegroup.
1099 1099 """
1100 1100 opts = _byteskwargs(opts)
1101 1101 cl = repo.changelog
1102 1102 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
1103 1103 bundler = changegroup.getbundler(cgversion, repo)
1104 1104
1105 1105 def d():
1106 1106 state, chunks = bundler._generatechangelog(cl, nodes)
1107 1107 for chunk in chunks:
1108 1108 pass
1109 1109
1110 1110 timer, fm = gettimer(ui, opts)
1111 1111
1112 1112 # Terminal printing can interfere with timing. So disable it.
1113 1113 with ui.configoverride({(b'progress', b'disable'): True}):
1114 1114 timer(d)
1115 1115
1116 1116 fm.end()
1117 1117
1118 1118
1119 @command(b'perf--dirs', formatteropts)
1119 @command(b'perf::dirs|perfdirs', formatteropts)
1120 1120 def perfdirs(ui, repo, **opts):
1121 1121 opts = _byteskwargs(opts)
1122 1122 timer, fm = gettimer(ui, opts)
1123 1123 dirstate = repo.dirstate
1124 1124 b'a' in dirstate
1125 1125
1126 1126 def d():
1127 1127 dirstate.hasdir(b'a')
1128 1128 del dirstate._map._dirs
1129 1129
1130 1130 timer(d)
1131 1131 fm.end()
1132 1132
1133 1133
1134 1134 @command(
1135 b'perf--dirstate',
1135 b'perf::dirstate|perfdirstate',
1136 1136 [
1137 1137 (
1138 1138 b'',
1139 1139 b'iteration',
1140 1140 None,
1141 1141 b'benchmark a full iteration for the dirstate',
1142 1142 ),
1143 1143 (
1144 1144 b'',
1145 1145 b'contains',
1146 1146 None,
1147 1147 b'benchmark a large amount of `nf in dirstate` calls',
1148 1148 ),
1149 1149 ]
1150 1150 + formatteropts,
1151 1151 )
1152 1152 def perfdirstate(ui, repo, **opts):
1153 1153 """benchmap the time of various distate operations
1154 1154
1155 1155 By default benchmark the time necessary to load a dirstate from scratch.
1156 1156 The dirstate is loaded to the point were a "contains" request can be
1157 1157 answered.
1158 1158 """
1159 1159 opts = _byteskwargs(opts)
1160 1160 timer, fm = gettimer(ui, opts)
1161 1161 b"a" in repo.dirstate
1162 1162
1163 1163 if opts[b'iteration'] and opts[b'contains']:
1164 1164 msg = b'only specify one of --iteration or --contains'
1165 1165 raise error.Abort(msg)
1166 1166
1167 1167 if opts[b'iteration']:
1168 1168 setup = None
1169 1169 dirstate = repo.dirstate
1170 1170
1171 1171 def d():
1172 1172 for f in dirstate:
1173 1173 pass
1174 1174
1175 1175 elif opts[b'contains']:
1176 1176 setup = None
1177 1177 dirstate = repo.dirstate
1178 1178 allfiles = list(dirstate)
1179 1179 # also add file path that will be "missing" from the dirstate
1180 1180 allfiles.extend([f[::-1] for f in allfiles])
1181 1181
1182 1182 def d():
1183 1183 for f in allfiles:
1184 1184 f in dirstate
1185 1185
1186 1186 else:
1187 1187
1188 1188 def setup():
1189 1189 repo.dirstate.invalidate()
1190 1190
1191 1191 def d():
1192 1192 b"a" in repo.dirstate
1193 1193
1194 1194 timer(d, setup=setup)
1195 1195 fm.end()
1196 1196
1197 1197
1198 @command(b'perf--dirstatedirs', formatteropts)
1198 @command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
1199 1199 def perfdirstatedirs(ui, repo, **opts):
1200 1200 """benchmap a 'dirstate.hasdir' call from an empty `dirs` cache"""
1201 1201 opts = _byteskwargs(opts)
1202 1202 timer, fm = gettimer(ui, opts)
1203 1203 repo.dirstate.hasdir(b"a")
1204 1204
1205 1205 def setup():
1206 1206 del repo.dirstate._map._dirs
1207 1207
1208 1208 def d():
1209 1209 repo.dirstate.hasdir(b"a")
1210 1210
1211 1211 timer(d, setup=setup)
1212 1212 fm.end()
1213 1213
1214 1214
1215 @command(b'perf--dirstatefoldmap', formatteropts)
1215 @command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
1216 1216 def perfdirstatefoldmap(ui, repo, **opts):
1217 1217 """benchmap a `dirstate._map.filefoldmap.get()` request
1218 1218
1219 1219 The dirstate filefoldmap cache is dropped between every request.
1220 1220 """
1221 1221 opts = _byteskwargs(opts)
1222 1222 timer, fm = gettimer(ui, opts)
1223 1223 dirstate = repo.dirstate
1224 1224 dirstate._map.filefoldmap.get(b'a')
1225 1225
1226 1226 def setup():
1227 1227 del dirstate._map.filefoldmap
1228 1228
1229 1229 def d():
1230 1230 dirstate._map.filefoldmap.get(b'a')
1231 1231
1232 1232 timer(d, setup=setup)
1233 1233 fm.end()
1234 1234
1235 1235
1236 @command(b'perf--dirfoldmap', formatteropts)
1236 @command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
1237 1237 def perfdirfoldmap(ui, repo, **opts):
1238 1238 """benchmap a `dirstate._map.dirfoldmap.get()` request
1239 1239
1240 1240 The dirstate dirfoldmap cache is dropped between every request.
1241 1241 """
1242 1242 opts = _byteskwargs(opts)
1243 1243 timer, fm = gettimer(ui, opts)
1244 1244 dirstate = repo.dirstate
1245 1245 dirstate._map.dirfoldmap.get(b'a')
1246 1246
1247 1247 def setup():
1248 1248 del dirstate._map.dirfoldmap
1249 1249 del dirstate._map._dirs
1250 1250
1251 1251 def d():
1252 1252 dirstate._map.dirfoldmap.get(b'a')
1253 1253
1254 1254 timer(d, setup=setup)
1255 1255 fm.end()
1256 1256
1257 1257
1258 @command(b'perf--dirstatewrite', formatteropts)
1258 @command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
1259 1259 def perfdirstatewrite(ui, repo, **opts):
1260 1260 """benchmap the time it take to write a dirstate on disk"""
1261 1261 opts = _byteskwargs(opts)
1262 1262 timer, fm = gettimer(ui, opts)
1263 1263 ds = repo.dirstate
1264 1264 b"a" in ds
1265 1265
1266 1266 def setup():
1267 1267 ds._dirty = True
1268 1268
1269 1269 def d():
1270 1270 ds.write(repo.currenttransaction())
1271 1271
1272 1272 timer(d, setup=setup)
1273 1273 fm.end()
1274 1274
1275 1275
1276 1276 def _getmergerevs(repo, opts):
1277 1277 """parse command argument to return rev involved in merge
1278 1278
1279 1279 input: options dictionnary with `rev`, `from` and `bse`
1280 1280 output: (localctx, otherctx, basectx)
1281 1281 """
1282 1282 if opts[b'from']:
1283 1283 fromrev = scmutil.revsingle(repo, opts[b'from'])
1284 1284 wctx = repo[fromrev]
1285 1285 else:
1286 1286 wctx = repo[None]
1287 1287 # we don't want working dir files to be stat'd in the benchmark, so
1288 1288 # prime that cache
1289 1289 wctx.dirty()
1290 1290 rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
1291 1291 if opts[b'base']:
1292 1292 fromrev = scmutil.revsingle(repo, opts[b'base'])
1293 1293 ancestor = repo[fromrev]
1294 1294 else:
1295 1295 ancestor = wctx.ancestor(rctx)
1296 1296 return (wctx, rctx, ancestor)
1297 1297
1298 1298
1299 1299 @command(
1300 b'perf--mergecalculate',
1300 b'perf::mergecalculate|perfmergecalculate',
1301 1301 [
1302 1302 (b'r', b'rev', b'.', b'rev to merge against'),
1303 1303 (b'', b'from', b'', b'rev to merge from'),
1304 1304 (b'', b'base', b'', b'the revision to use as base'),
1305 1305 ]
1306 1306 + formatteropts,
1307 1307 )
1308 1308 def perfmergecalculate(ui, repo, **opts):
1309 1309 opts = _byteskwargs(opts)
1310 1310 timer, fm = gettimer(ui, opts)
1311 1311
1312 1312 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1313 1313
1314 1314 def d():
1315 1315 # acceptremote is True because we don't want prompts in the middle of
1316 1316 # our benchmark
1317 1317 merge.calculateupdates(
1318 1318 repo,
1319 1319 wctx,
1320 1320 rctx,
1321 1321 [ancestor],
1322 1322 branchmerge=False,
1323 1323 force=False,
1324 1324 acceptremote=True,
1325 1325 followcopies=True,
1326 1326 )
1327 1327
1328 1328 timer(d)
1329 1329 fm.end()
1330 1330
1331 1331
1332 1332 @command(
1333 b'perf--mergecopies',
1333 b'perf::mergecopies|perfmergecopies',
1334 1334 [
1335 1335 (b'r', b'rev', b'.', b'rev to merge against'),
1336 1336 (b'', b'from', b'', b'rev to merge from'),
1337 1337 (b'', b'base', b'', b'the revision to use as base'),
1338 1338 ]
1339 1339 + formatteropts,
1340 1340 )
1341 1341 def perfmergecopies(ui, repo, **opts):
1342 1342 """measure runtime of `copies.mergecopies`"""
1343 1343 opts = _byteskwargs(opts)
1344 1344 timer, fm = gettimer(ui, opts)
1345 1345 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1346 1346
1347 1347 def d():
1348 1348 # acceptremote is True because we don't want prompts in the middle of
1349 1349 # our benchmark
1350 1350 copies.mergecopies(repo, wctx, rctx, ancestor)
1351 1351
1352 1352 timer(d)
1353 1353 fm.end()
1354 1354
1355 1355
1356 @command(b'perf--pathcopies', [], b"REV REV")
1356 @command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
1357 1357 def perfpathcopies(ui, repo, rev1, rev2, **opts):
1358 1358 """benchmark the copy tracing logic"""
1359 1359 opts = _byteskwargs(opts)
1360 1360 timer, fm = gettimer(ui, opts)
1361 1361 ctx1 = scmutil.revsingle(repo, rev1, rev1)
1362 1362 ctx2 = scmutil.revsingle(repo, rev2, rev2)
1363 1363
1364 1364 def d():
1365 1365 copies.pathcopies(ctx1, ctx2)
1366 1366
1367 1367 timer(d)
1368 1368 fm.end()
1369 1369
1370 1370
1371 1371 @command(
1372 b'perf--phases',
1372 b'perf::phases|perfphases',
1373 1373 [
1374 1374 (b'', b'full', False, b'include file reading time too'),
1375 1375 ],
1376 1376 b"",
1377 1377 )
1378 1378 def perfphases(ui, repo, **opts):
1379 1379 """benchmark phasesets computation"""
1380 1380 opts = _byteskwargs(opts)
1381 1381 timer, fm = gettimer(ui, opts)
1382 1382 _phases = repo._phasecache
1383 1383 full = opts.get(b'full')
1384 1384
1385 1385 def d():
1386 1386 phases = _phases
1387 1387 if full:
1388 1388 clearfilecache(repo, b'_phasecache')
1389 1389 phases = repo._phasecache
1390 1390 phases.invalidate()
1391 1391 phases.loadphaserevs(repo)
1392 1392
1393 1393 timer(d)
1394 1394 fm.end()
1395 1395
1396 1396
1397 @command(b'perf--phasesremote', [], b"[DEST]")
1397 @command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
1398 1398 def perfphasesremote(ui, repo, dest=None, **opts):
1399 1399 """benchmark time needed to analyse phases of the remote server"""
1400 1400 from mercurial.node import bin
1401 1401 from mercurial import (
1402 1402 exchange,
1403 1403 hg,
1404 1404 phases,
1405 1405 )
1406 1406
1407 1407 opts = _byteskwargs(opts)
1408 1408 timer, fm = gettimer(ui, opts)
1409 1409
1410 1410 path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
1411 1411 if not path:
1412 1412 raise error.Abort(
1413 1413 b'default repository not configured!',
1414 1414 hint=b"see 'hg help config.paths'",
1415 1415 )
1416 1416 dest = path.pushloc or path.loc
1417 1417 ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
1418 1418 other = hg.peer(repo, opts, dest)
1419 1419
1420 1420 # easier to perform discovery through the operation
1421 1421 op = exchange.pushoperation(repo, other)
1422 1422 exchange._pushdiscoverychangeset(op)
1423 1423
1424 1424 remotesubset = op.fallbackheads
1425 1425
1426 1426 with other.commandexecutor() as e:
1427 1427 remotephases = e.callcommand(
1428 1428 b'listkeys', {b'namespace': b'phases'}
1429 1429 ).result()
1430 1430 del other
1431 1431 publishing = remotephases.get(b'publishing', False)
1432 1432 if publishing:
1433 1433 ui.statusnoi18n(b'publishing: yes\n')
1434 1434 else:
1435 1435 ui.statusnoi18n(b'publishing: no\n')
1436 1436
1437 1437 has_node = getattr(repo.changelog.index, 'has_node', None)
1438 1438 if has_node is None:
1439 1439 has_node = repo.changelog.nodemap.__contains__
1440 1440 nonpublishroots = 0
1441 1441 for nhex, phase in remotephases.iteritems():
1442 1442 if nhex == b'publishing': # ignore data related to publish option
1443 1443 continue
1444 1444 node = bin(nhex)
1445 1445 if has_node(node) and int(phase):
1446 1446 nonpublishroots += 1
1447 1447 ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
1448 1448 ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)
1449 1449
1450 1450 def d():
1451 1451 phases.remotephasessummary(repo, remotesubset, remotephases)
1452 1452
1453 1453 timer(d)
1454 1454 fm.end()
1455 1455
1456 1456
1457 1457 @command(
1458 b'perf--manifest',
1458 b'perf::manifest|perfmanifest',
1459 1459 [
1460 1460 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
1461 1461 (b'', b'clear-disk', False, b'clear on-disk caches too'),
1462 1462 ]
1463 1463 + formatteropts,
1464 1464 b'REV|NODE',
1465 1465 )
1466 1466 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
1467 1467 """benchmark the time to read a manifest from disk and return a usable
1468 1468 dict-like object
1469 1469
1470 1470 Manifest caches are cleared before retrieval."""
1471 1471 opts = _byteskwargs(opts)
1472 1472 timer, fm = gettimer(ui, opts)
1473 1473 if not manifest_rev:
1474 1474 ctx = scmutil.revsingle(repo, rev, rev)
1475 1475 t = ctx.manifestnode()
1476 1476 else:
1477 1477 from mercurial.node import bin
1478 1478
1479 1479 if len(rev) == 40:
1480 1480 t = bin(rev)
1481 1481 else:
1482 1482 try:
1483 1483 rev = int(rev)
1484 1484
1485 1485 if util.safehasattr(repo.manifestlog, b'getstorage'):
1486 1486 t = repo.manifestlog.getstorage(b'').node(rev)
1487 1487 else:
1488 1488 t = repo.manifestlog._revlog.lookup(rev)
1489 1489 except ValueError:
1490 1490 raise error.Abort(
1491 1491 b'manifest revision must be integer or full node'
1492 1492 )
1493 1493
1494 1494 def d():
1495 1495 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
1496 1496 repo.manifestlog[t].read()
1497 1497
1498 1498 timer(d)
1499 1499 fm.end()
1500 1500
1501 1501
1502 @command(b'perf--changeset', formatteropts)
1502 @command(b'perf::changeset|perfchangeset', formatteropts)
1503 1503 def perfchangeset(ui, repo, rev, **opts):
1504 1504 opts = _byteskwargs(opts)
1505 1505 timer, fm = gettimer(ui, opts)
1506 1506 n = scmutil.revsingle(repo, rev).node()
1507 1507
1508 1508 def d():
1509 1509 repo.changelog.read(n)
1510 1510 # repo.changelog._cache = None
1511 1511
1512 1512 timer(d)
1513 1513 fm.end()
1514 1514
1515 1515
1516 @command(b'perf--ignore', formatteropts)
1516 @command(b'perf::ignore|perfignore', formatteropts)
1517 1517 def perfignore(ui, repo, **opts):
1518 1518 """benchmark operation related to computing ignore"""
1519 1519 opts = _byteskwargs(opts)
1520 1520 timer, fm = gettimer(ui, opts)
1521 1521 dirstate = repo.dirstate
1522 1522
1523 1523 def setupone():
1524 1524 dirstate.invalidate()
1525 1525 clearfilecache(dirstate, b'_ignore')
1526 1526
1527 1527 def runone():
1528 1528 dirstate._ignore
1529 1529
1530 1530 timer(runone, setup=setupone, title=b"load")
1531 1531 fm.end()
1532 1532
1533 1533
1534 1534 @command(
1535 b'perf--index',
1535 b'perf::index|perfindex',
1536 1536 [
1537 1537 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1538 1538 (b'', b'no-lookup', None, b'do not revision lookup post creation'),
1539 1539 ]
1540 1540 + formatteropts,
1541 1541 )
1542 1542 def perfindex(ui, repo, **opts):
1543 1543 """benchmark index creation time followed by a lookup
1544 1544
1545 1545 The default is to look `tip` up. Depending on the index implementation,
1546 1546 the revision looked up can matters. For example, an implementation
1547 1547 scanning the index will have a faster lookup time for `--rev tip` than for
1548 1548 `--rev 0`. The number of looked up revisions and their order can also
1549 1549 matters.
1550 1550
1551 1551 Example of useful set to test:
1552 1552
1553 1553 * tip
1554 1554 * 0
1555 1555 * -10:
1556 1556 * :10
1557 1557 * -10: + :10
1558 1558 * :10: + -10:
1559 1559 * -10000:
1560 1560 * -10000: + 0
1561 1561
1562 1562 It is not currently possible to check for lookup of a missing node. For
1563 1563 deeper lookup benchmarking, checkout the `perfnodemap` command."""
1564 1564 import mercurial.revlog
1565 1565
1566 1566 opts = _byteskwargs(opts)
1567 1567 timer, fm = gettimer(ui, opts)
1568 1568 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1569 1569 if opts[b'no_lookup']:
1570 1570 if opts['rev']:
1571 1571 raise error.Abort('--no-lookup and --rev are mutually exclusive')
1572 1572 nodes = []
1573 1573 elif not opts[b'rev']:
1574 1574 nodes = [repo[b"tip"].node()]
1575 1575 else:
1576 1576 revs = scmutil.revrange(repo, opts[b'rev'])
1577 1577 cl = repo.changelog
1578 1578 nodes = [cl.node(r) for r in revs]
1579 1579
1580 1580 unfi = repo.unfiltered()
1581 1581 # find the filecache func directly
1582 1582 # This avoid polluting the benchmark with the filecache logic
1583 1583 makecl = unfi.__class__.changelog.func
1584 1584
1585 1585 def setup():
1586 1586 # probably not necessary, but for good measure
1587 1587 clearchangelog(unfi)
1588 1588
1589 1589 def d():
1590 1590 cl = makecl(unfi)
1591 1591 for n in nodes:
1592 1592 cl.rev(n)
1593 1593
1594 1594 timer(d, setup=setup)
1595 1595 fm.end()
1596 1596
1597 1597
1598 1598 @command(
1599 b'perf--nodemap',
1599 b'perf::nodemap|perfnodemap',
1600 1600 [
1601 1601 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1602 1602 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1603 1603 ]
1604 1604 + formatteropts,
1605 1605 )
1606 1606 def perfnodemap(ui, repo, **opts):
1607 1607 """benchmark the time necessary to look up revision from a cold nodemap
1608 1608
1609 1609 Depending on the implementation, the amount and order of revision we look
1610 1610 up can varies. Example of useful set to test:
1611 1611 * tip
1612 1612 * 0
1613 1613 * -10:
1614 1614 * :10
1615 1615 * -10: + :10
1616 1616 * :10: + -10:
1617 1617 * -10000:
1618 1618 * -10000: + 0
1619 1619
1620 1620 The command currently focus on valid binary lookup. Benchmarking for
1621 1621 hexlookup, prefix lookup and missing lookup would also be valuable.
1622 1622 """
1623 1623 import mercurial.revlog
1624 1624
1625 1625 opts = _byteskwargs(opts)
1626 1626 timer, fm = gettimer(ui, opts)
1627 1627 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1628 1628
1629 1629 unfi = repo.unfiltered()
1630 1630 clearcaches = opts[b'clear_caches']
1631 1631 # find the filecache func directly
1632 1632 # This avoid polluting the benchmark with the filecache logic
1633 1633 makecl = unfi.__class__.changelog.func
1634 1634 if not opts[b'rev']:
1635 1635 raise error.Abort(b'use --rev to specify revisions to look up')
1636 1636 revs = scmutil.revrange(repo, opts[b'rev'])
1637 1637 cl = repo.changelog
1638 1638 nodes = [cl.node(r) for r in revs]
1639 1639
1640 1640 # use a list to pass reference to a nodemap from one closure to the next
1641 1641 nodeget = [None]
1642 1642
1643 1643 def setnodeget():
1644 1644 # probably not necessary, but for good measure
1645 1645 clearchangelog(unfi)
1646 1646 cl = makecl(unfi)
1647 1647 if util.safehasattr(cl.index, 'get_rev'):
1648 1648 nodeget[0] = cl.index.get_rev
1649 1649 else:
1650 1650 nodeget[0] = cl.nodemap.get
1651 1651
1652 1652 def d():
1653 1653 get = nodeget[0]
1654 1654 for n in nodes:
1655 1655 get(n)
1656 1656
1657 1657 setup = None
1658 1658 if clearcaches:
1659 1659
1660 1660 def setup():
1661 1661 setnodeget()
1662 1662
1663 1663 else:
1664 1664 setnodeget()
1665 1665 d() # prewarm the data structure
1666 1666 timer(d, setup=setup)
1667 1667 fm.end()
1668 1668
1669 1669
1670 @command(b'perf--startup', formatteropts)
1670 @command(b'perf::startup|perfstartup', formatteropts)
1671 1671 def perfstartup(ui, repo, **opts):
1672 1672 opts = _byteskwargs(opts)
1673 1673 timer, fm = gettimer(ui, opts)
1674 1674
1675 1675 def d():
1676 1676 if os.name != 'nt':
1677 1677 os.system(
1678 1678 b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
1679 1679 )
1680 1680 else:
1681 1681 os.environ['HGRCPATH'] = r' '
1682 1682 os.system("%s version -q > NUL" % sys.argv[0])
1683 1683
1684 1684 timer(d)
1685 1685 fm.end()
1686 1686
1687 1687
1688 @command(b'perf--parents', formatteropts)
1688 @command(b'perf::parents|perfparents', formatteropts)
1689 1689 def perfparents(ui, repo, **opts):
1690 1690 """benchmark the time necessary to fetch one changeset's parents.
1691 1691
1692 1692 The fetch is done using the `node identifier`, traversing all object layers
1693 1693 from the repository object. The first N revisions will be used for this
1694 1694 benchmark. N is controlled by the ``perf.parentscount`` config option
1695 1695 (default: 1000).
1696 1696 """
1697 1697 opts = _byteskwargs(opts)
1698 1698 timer, fm = gettimer(ui, opts)
1699 1699 # control the number of commits perfparents iterates over
1700 1700 # experimental config: perf.parentscount
1701 1701 count = getint(ui, b"perf", b"parentscount", 1000)
1702 1702 if len(repo.changelog) < count:
1703 1703 raise error.Abort(b"repo needs %d commits for this test" % count)
1704 1704 repo = repo.unfiltered()
1705 1705 nl = [repo.changelog.node(i) for i in _xrange(count)]
1706 1706
1707 1707 def d():
1708 1708 for n in nl:
1709 1709 repo.changelog.parents(n)
1710 1710
1711 1711 timer(d)
1712 1712 fm.end()
1713 1713
1714 1714
1715 @command(b'perf--ctxfiles', formatteropts)
1715 @command(b'perf::ctxfiles|perfctxfiles', formatteropts)
1716 1716 def perfctxfiles(ui, repo, x, **opts):
1717 1717 opts = _byteskwargs(opts)
1718 1718 x = int(x)
1719 1719 timer, fm = gettimer(ui, opts)
1720 1720
1721 1721 def d():
1722 1722 len(repo[x].files())
1723 1723
1724 1724 timer(d)
1725 1725 fm.end()
1726 1726
1727 1727
1728 @command(b'perf--rawfiles', formatteropts)
1728 @command(b'perf::rawfiles|perfrawfiles', formatteropts)
1729 1729 def perfrawfiles(ui, repo, x, **opts):
1730 1730 opts = _byteskwargs(opts)
1731 1731 x = int(x)
1732 1732 timer, fm = gettimer(ui, opts)
1733 1733 cl = repo.changelog
1734 1734
1735 1735 def d():
1736 1736 len(cl.read(x)[3])
1737 1737
1738 1738 timer(d)
1739 1739 fm.end()
1740 1740
1741 1741
1742 @command(b'perf--lookup', formatteropts)
1742 @command(b'perf::lookup|perflookup', formatteropts)
1743 1743 def perflookup(ui, repo, rev, **opts):
1744 1744 opts = _byteskwargs(opts)
1745 1745 timer, fm = gettimer(ui, opts)
1746 1746 timer(lambda: len(repo.lookup(rev)))
1747 1747 fm.end()
1748 1748
1749 1749
1750 1750 @command(
1751 b'perf--linelogedits',
1751 b'perf::linelogedits|perflinelogedits',
1752 1752 [
1753 1753 (b'n', b'edits', 10000, b'number of edits'),
1754 1754 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1755 1755 ],
1756 1756 norepo=True,
1757 1757 )
1758 1758 def perflinelogedits(ui, **opts):
1759 1759 from mercurial import linelog
1760 1760
1761 1761 opts = _byteskwargs(opts)
1762 1762
1763 1763 edits = opts[b'edits']
1764 1764 maxhunklines = opts[b'max_hunk_lines']
1765 1765
1766 1766 maxb1 = 100000
1767 1767 random.seed(0)
1768 1768 randint = random.randint
1769 1769 currentlines = 0
1770 1770 arglist = []
1771 1771 for rev in _xrange(edits):
1772 1772 a1 = randint(0, currentlines)
1773 1773 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1774 1774 b1 = randint(0, maxb1)
1775 1775 b2 = randint(b1, b1 + maxhunklines)
1776 1776 currentlines += (b2 - b1) - (a2 - a1)
1777 1777 arglist.append((rev, a1, a2, b1, b2))
1778 1778
1779 1779 def d():
1780 1780 ll = linelog.linelog()
1781 1781 for args in arglist:
1782 1782 ll.replacelines(*args)
1783 1783
1784 1784 timer, fm = gettimer(ui, opts)
1785 1785 timer(d)
1786 1786 fm.end()
1787 1787
1788 1788
1789 @command(b'perf--revrange', formatteropts)
1789 @command(b'perf::revrange|perfrevrange', formatteropts)
1790 1790 def perfrevrange(ui, repo, *specs, **opts):
1791 1791 opts = _byteskwargs(opts)
1792 1792 timer, fm = gettimer(ui, opts)
1793 1793 revrange = scmutil.revrange
1794 1794 timer(lambda: len(revrange(repo, specs)))
1795 1795 fm.end()
1796 1796
1797 1797
1798 @command(b'perf--nodelookup', formatteropts)
1798 @command(b'perf::nodelookup|perfnodelookup', formatteropts)
1799 1799 def perfnodelookup(ui, repo, rev, **opts):
1800 1800 opts = _byteskwargs(opts)
1801 1801 timer, fm = gettimer(ui, opts)
1802 1802 import mercurial.revlog
1803 1803
1804 1804 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1805 1805 n = scmutil.revsingle(repo, rev).node()
1806 1806 cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")
1807 1807
1808 1808 def d():
1809 1809 cl.rev(n)
1810 1810 clearcaches(cl)
1811 1811
1812 1812 timer(d)
1813 1813 fm.end()
1814 1814
1815 1815
1816 1816 @command(
1817 b'perf--log',
1817 b'perf::log|perflog',
1818 1818 [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
1819 1819 )
1820 1820 def perflog(ui, repo, rev=None, **opts):
1821 1821 opts = _byteskwargs(opts)
1822 1822 if rev is None:
1823 1823 rev = []
1824 1824 timer, fm = gettimer(ui, opts)
1825 1825 ui.pushbuffer()
1826 1826 timer(
1827 1827 lambda: commands.log(
1828 1828 ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
1829 1829 )
1830 1830 )
1831 1831 ui.popbuffer()
1832 1832 fm.end()
1833 1833
1834 1834
1835 @command(b'perf--moonwalk', formatteropts)
1835 @command(b'perf::moonwalk|perfmoonwalk', formatteropts)
1836 1836 def perfmoonwalk(ui, repo, **opts):
1837 1837 """benchmark walking the changelog backwards
1838 1838
1839 1839 This also loads the changelog data for each revision in the changelog.
1840 1840 """
1841 1841 opts = _byteskwargs(opts)
1842 1842 timer, fm = gettimer(ui, opts)
1843 1843
1844 1844 def moonwalk():
1845 1845 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
1846 1846 ctx = repo[i]
1847 1847 ctx.branch() # read changelog data (in addition to the index)
1848 1848
1849 1849 timer(moonwalk)
1850 1850 fm.end()
1851 1851
1852 1852
1853 1853 @command(
1854 b'perf--templating',
1854 b'perf::templating|perftemplating',
1855 1855 [
1856 1856 (b'r', b'rev', [], b'revisions to run the template on'),
1857 1857 ]
1858 1858 + formatteropts,
1859 1859 )
1860 1860 def perftemplating(ui, repo, testedtemplate=None, **opts):
1861 1861 """test the rendering time of a given template"""
1862 1862 if makelogtemplater is None:
1863 1863 raise error.Abort(
1864 1864 b"perftemplating not available with this Mercurial",
1865 1865 hint=b"use 4.3 or later",
1866 1866 )
1867 1867
1868 1868 opts = _byteskwargs(opts)
1869 1869
1870 1870 nullui = ui.copy()
1871 1871 nullui.fout = open(os.devnull, 'wb')
1872 1872 nullui.disablepager()
1873 1873 revs = opts.get(b'rev')
1874 1874 if not revs:
1875 1875 revs = [b'all()']
1876 1876 revs = list(scmutil.revrange(repo, revs))
1877 1877
1878 1878 defaulttemplate = (
1879 1879 b'{date|shortdate} [{rev}:{node|short}]'
1880 1880 b' {author|person}: {desc|firstline}\n'
1881 1881 )
1882 1882 if testedtemplate is None:
1883 1883 testedtemplate = defaulttemplate
1884 1884 displayer = makelogtemplater(nullui, repo, testedtemplate)
1885 1885
1886 1886 def format():
1887 1887 for r in revs:
1888 1888 ctx = repo[r]
1889 1889 displayer.show(ctx)
1890 1890 displayer.flush(ctx)
1891 1891
1892 1892 timer, fm = gettimer(ui, opts)
1893 1893 timer(format)
1894 1894 fm.end()
1895 1895
1896 1896
1897 1897 def _displaystats(ui, opts, entries, data):
1898 1898 # use a second formatter because the data are quite different, not sure
1899 1899 # how it flies with the templater.
1900 1900 fm = ui.formatter(b'perf-stats', opts)
1901 1901 for key, title in entries:
1902 1902 values = data[key]
1903 1903 nbvalues = len(data)
1904 1904 values.sort()
1905 1905 stats = {
1906 1906 'key': key,
1907 1907 'title': title,
1908 1908 'nbitems': len(values),
1909 1909 'min': values[0][0],
1910 1910 '10%': values[(nbvalues * 10) // 100][0],
1911 1911 '25%': values[(nbvalues * 25) // 100][0],
1912 1912 '50%': values[(nbvalues * 50) // 100][0],
1913 1913 '75%': values[(nbvalues * 75) // 100][0],
1914 1914 '80%': values[(nbvalues * 80) // 100][0],
1915 1915 '85%': values[(nbvalues * 85) // 100][0],
1916 1916 '90%': values[(nbvalues * 90) // 100][0],
1917 1917 '95%': values[(nbvalues * 95) // 100][0],
1918 1918 '99%': values[(nbvalues * 99) // 100][0],
1919 1919 'max': values[-1][0],
1920 1920 }
1921 1921 fm.startitem()
1922 1922 fm.data(**stats)
1923 1923 # make node pretty for the human output
1924 1924 fm.plain('### %s (%d items)\n' % (title, len(values)))
1925 1925 lines = [
1926 1926 'min',
1927 1927 '10%',
1928 1928 '25%',
1929 1929 '50%',
1930 1930 '75%',
1931 1931 '80%',
1932 1932 '85%',
1933 1933 '90%',
1934 1934 '95%',
1935 1935 '99%',
1936 1936 'max',
1937 1937 ]
1938 1938 for l in lines:
1939 1939 fm.plain('%s: %s\n' % (l, stats[l]))
1940 1940 fm.end()
1941 1941
1942 1942
# NOTE(review): this file is a diff rendering; each line carries old/new diff
# line numbers. Where an old `b'perf--…'` line and a new `b'perf::…|perf…'`
# line both appear, only the `perf::` (namespaced, with legacy alias) form is
# the live code after this commit.
# Benchmark-helper command: reports (base, p1, p2) triplets of merges together
# with revision/file counts (and, with --timing, copy-tracing timings) so that
# costly inputs for `perfmergecopies` can be identified.
1943 1943 @command(
1944 b'perf--helper-mergecopies',
1944 b'perf::helper-mergecopies|perfhelper-mergecopies',
1945 1945 formatteropts
1946 1946 + [
1947 1947 (b'r', b'revs', [], b'restrict search to these revisions'),
1948 1948 (b'', b'timing', False, b'provides extra data (costly)'),
1949 1949 (b'', b'stats', False, b'provides statistic about the measured data'),
1950 1950 ],
1951 1951 )
# NOTE(review): `revs=[]` is a mutable default, but it is only reassigned
# (`revs = ['all()']`) and never mutated, so it is harmless here.
1952 1952 def perfhelpermergecopies(ui, repo, revs=[], **opts):
1953 1953 """find statistics about potential parameters for `perfmergecopies`
1954 1954
1955 1955 This command find (base, p1, p2) triplet relevant for copytracing
1956 1956 benchmarking in the context of a merge. It reports values for some of the
1957 1957 parameters that impact merge copy tracing time during merge.
1958 1958
1959 1959 If `--timing` is set, rename detection is run and the associated timing
1960 1960 will be reported. The extra details come at the cost of slower command
1961 1961 execution.
1962 1962
1963 1963 Since rename detection is only run once, other factors might easily
1964 1964 affect the precision of the timing. However it should give a good
1965 1965 approximation of which revision triplets are very costly.
1966 1966 """
1967 1967 opts = _byteskwargs(opts)
1968 1968 fm = ui.formatter(b'perf', opts)
1969 1969 dotiming = opts[b'timing']
1970 1970 dostats = opts[b'stats']
1971 1971
# Column layout for the human-readable table; timing/rename columns are
# stripped below when --timing is not requested.
1972 1972 output_template = [
1973 1973 ("base", "%(base)12s"),
1974 1974 ("p1", "%(p1.node)12s"),
1975 1975 ("p2", "%(p2.node)12s"),
1976 1976 ("p1.nb-revs", "%(p1.nbrevs)12d"),
1977 1977 ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
1978 1978 ("p1.renames", "%(p1.renamedfiles)12d"),
1979 1979 ("p1.time", "%(p1.time)12.3f"),
1980 1980 ("p2.nb-revs", "%(p2.nbrevs)12d"),
1981 1981 ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
1982 1982 ("p2.renames", "%(p2.renamedfiles)12d"),
1983 1983 ("p2.time", "%(p2.time)12.3f"),
1984 1984 ("renames", "%(nbrenamedfiles)12d"),
1985 1985 ("total.time", "%(time)12.3f"),
1986 1986 ]
1987 1987 if not dotiming:
1988 1988 output_template = [
1989 1989 i
1990 1990 for i in output_template
1991 1991 if not ('time' in i[0] or 'renames' in i[0])
1992 1992 ]
1993 1993 header_names = [h for (h, v) in output_template]
1994 1994 output = ' '.join([v for (h, v) in output_template]) + '\n'
1995 1995 header = ' '.join(['%12s'] * len(header_names)) + '\n'
1996 1996 fm.plain(header % tuple(header_names))
1997 1997
1998 1998 if not revs:
1999 1999 revs = ['all()']
2000 2000 revs = scmutil.revrange(repo, revs)
2001 2001
2002 2002 if dostats:
2003 2003 alldata = {
2004 2004 'nbrevs': [],
2005 2005 'nbmissingfiles': [],
2006 2006 }
2007 2007 if dotiming:
2008 2008 alldata['parentnbrenames'] = []
2009 2009 alldata['totalnbrenames'] = []
2010 2010 alldata['parenttime'] = []
2011 2011 alldata['totaltime'] = []
2012 2012
# Only merge commits within the requested revset are of interest.
2013 2013 roi = repo.revs('merge() and %ld', revs)
2014 2014 for r in roi:
2015 2015 ctx = repo[r]
2016 2016 p1 = ctx.p1()
2017 2017 p2 = ctx.p2()
2018 2018 bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
2019 2019 for b in bases:
2020 2020 b = repo[b]
2021 2021 p1missing = copies._computeforwardmissing(b, p1)
2022 2022 p2missing = copies._computeforwardmissing(b, p2)
2023 2023 data = {
2024 2024 b'base': b.hex(),
2025 2025 b'p1.node': p1.hex(),
2026 2026 b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
2027 2027 b'p1.nbmissingfiles': len(p1missing),
2028 2028 b'p2.node': p2.hex(),
2029 2029 b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
2030 2030 b'p2.nbmissingfiles': len(p2missing),
2031 2031 }
2032 2032 if dostats:
2033 2033 if p1missing:
2034 2034 alldata['nbrevs'].append(
2035 2035 (data['p1.nbrevs'], b.hex(), p1.hex())
2036 2036 )
2037 2037 alldata['nbmissingfiles'].append(
2038 2038 (data['p1.nbmissingfiles'], b.hex(), p1.hex())
2039 2039 )
2040 2040 if p2missing:
2041 2041 alldata['nbrevs'].append(
2042 2042 (data['p2.nbrevs'], b.hex(), p2.hex())
2043 2043 )
2044 2044 alldata['nbmissingfiles'].append(
2045 2045 (data['p2.nbmissingfiles'], b.hex(), p2.hex())
2046 2046 )
2047 2047 if dotiming:
2048 2048 begin = util.timer()
2049 2049 mergedata = copies.mergecopies(repo, p1, p2, b)
2050 2050 end = util.timer()
2051 2051 # not very stable timing since we did only one run
2052 2052 data['time'] = end - begin
2053 2053 # mergedata contains five dicts: "copy", "movewithdir",
2054 2054 # "diverge", "renamedelete" and "dirmove".
2055 2055 # The first 4 are about renamed file so lets count that.
2056 2056 renames = len(mergedata[0])
2057 2057 renames += len(mergedata[1])
2058 2058 renames += len(mergedata[2])
2059 2059 renames += len(mergedata[3])
2060 2060 data['nbrenamedfiles'] = renames
2061 2061 begin = util.timer()
2062 2062 p1renames = copies.pathcopies(b, p1)
2063 2063 end = util.timer()
2064 2064 data['p1.time'] = end - begin
2065 2065 begin = util.timer()
2066 2066 p2renames = copies.pathcopies(b, p2)
2067 2067 end = util.timer()
2068 2068 data['p2.time'] = end - begin
2069 2069 data['p1.renamedfiles'] = len(p1renames)
2070 2070 data['p2.renamedfiles'] = len(p2renames)
2071 2071
2072 2072 if dostats:
2073 2073 if p1missing:
2074 2074 alldata['parentnbrenames'].append(
2075 2075 (data['p1.renamedfiles'], b.hex(), p1.hex())
2076 2076 )
2077 2077 alldata['parenttime'].append(
2078 2078 (data['p1.time'], b.hex(), p1.hex())
2079 2079 )
2080 2080 if p2missing:
2081 2081 alldata['parentnbrenames'].append(
2082 2082 (data['p2.renamedfiles'], b.hex(), p2.hex())
2083 2083 )
2084 2084 alldata['parenttime'].append(
2085 2085 (data['p2.time'], b.hex(), p2.hex())
2086 2086 )
2087 2087 if p1missing or p2missing:
2088 2088 alldata['totalnbrenames'].append(
2089 2089 (
2090 2090 data['nbrenamedfiles'],
2091 2091 b.hex(),
2092 2092 p1.hex(),
2093 2093 p2.hex(),
2094 2094 )
2095 2095 )
2096 2096 alldata['totaltime'].append(
2097 2097 (data['time'], b.hex(), p1.hex(), p2.hex())
2098 2098 )
2099 2099 fm.startitem()
2100 2100 fm.data(**data)
2101 2101 # make node pretty for the human output
2102 2102 out = data.copy()
2103 2103 out['base'] = fm.hexfunc(b.node())
2104 2104 out['p1.node'] = fm.hexfunc(p1.node())
2105 2105 out['p2.node'] = fm.hexfunc(p2.node())
2106 2106 fm.plain(output % out)
2107 2107
2108 2108 fm.end()
2109 2109 if dostats:
2110 2110 # use a second formatter because the data are quite different, not sure
2111 2111 # how it flies with the templater.
2112 2112 entries = [
2113 2113 ('nbrevs', 'number of revision covered'),
2114 2114 ('nbmissingfiles', 'number of missing files at head'),
2115 2115 ]
2116 2116 if dotiming:
2117 2117 entries.append(
2118 2118 ('parentnbrenames', 'rename from one parent to base')
2119 2119 )
2120 2120 entries.append(('totalnbrenames', 'total number of renames'))
2121 2121 entries.append(('parenttime', 'time for one parent'))
2122 2122 entries.append(('totaltime', 'time for both parents'))
2123 2123 _displaystats(ui, opts, entries, alldata)
2124 2124
2125 2125
# Benchmark-helper command: reports (source, destination) pairs reachable from
# merge revisions, with revision/missing-file counts (and, with --timing, the
# `copies.pathcopies` timing) to find costly inputs for copy-tracing tests.
# NOTE(review): diff rendering — the removed `perf--helper-pathcopies` and the
# added `perf::helper-pathcopies|perfhelper-pathcopies` name lines both appear;
# only the latter is the live code after this commit.
2126 2126 @command(
2127 b'perf--helper-pathcopies',
2127 b'perf::helper-pathcopies|perfhelper-pathcopies',
2128 2128 formatteropts
2129 2129 + [
2130 2130 (b'r', b'revs', [], b'restrict search to these revisions'),
2131 2131 (b'', b'timing', False, b'provides extra data (costly)'),
2132 2132 (b'', b'stats', False, b'provides statistic about the measured data'),
2133 2133 ],
2134 2134 )
2135 2135 def perfhelperpathcopies(ui, repo, revs=[], **opts):
2136 2136 """find statistic about potential parameters for the `perftracecopies`
2137 2137
2138 2138 This command find source-destination pair relevant for copytracing testing.
2139 2139 It report value for some of the parameters that impact copy tracing time.
2140 2140
2141 2141 If `--timing` is set, rename detection is run and the associated timing
2142 2142 will be reported. The extra details comes at the cost of a slower command
2143 2143 execution.
2144 2144
2145 2145 Since the rename detection is only run once, other factors might easily
2146 2146 affect the precision of the timing. However it should give a good
2147 2147 approximation of which revision pairs are very costly.
2148 2148 """
2149 2149 opts = _byteskwargs(opts)
2150 2150 fm = ui.formatter(b'perf', opts)
2151 2151 dotiming = opts[b'timing']
2152 2152 dostats = opts[b'stats']
2153 2153
# Two table layouts: extra nb-renames/time columns only with --timing.
2154 2154 if dotiming:
2155 2155 header = '%12s %12s %12s %12s %12s %12s\n'
2156 2156 output = (
2157 2157 "%(source)12s %(destination)12s "
2158 2158 "%(nbrevs)12d %(nbmissingfiles)12d "
2159 2159 "%(nbrenamedfiles)12d %(time)18.5f\n"
2160 2160 )
2161 2161 header_names = (
2162 2162 "source",
2163 2163 "destination",
2164 2164 "nb-revs",
2165 2165 "nb-files",
2166 2166 "nb-renames",
2167 2167 "time",
2168 2168 )
2169 2169 fm.plain(header % header_names)
2170 2170 else:
2171 2171 header = '%12s %12s %12s %12s\n'
2172 2172 output = (
2173 2173 "%(source)12s %(destination)12s "
2174 2174 "%(nbrevs)12d %(nbmissingfiles)12d\n"
2175 2175 )
2176 2176 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
2177 2177
2178 2178 if not revs:
2179 2179 revs = ['all()']
2180 2180 revs = scmutil.revrange(repo, revs)
2181 2181
2182 2182 if dostats:
2183 2183 alldata = {
2184 2184 'nbrevs': [],
2185 2185 'nbmissingfiles': [],
2186 2186 }
2187 2187 if dotiming:
2188 2188 alldata['nbrenames'] = []
2189 2189 alldata['time'] = []
2190 2190
# Restrict to merges; each (parent, common-ancestor-head) pair is measured.
2191 2191 roi = repo.revs('merge() and %ld', revs)
2192 2192 for r in roi:
2193 2193 ctx = repo[r]
2194 2194 p1 = ctx.p1().rev()
2195 2195 p2 = ctx.p2().rev()
2196 2196 bases = repo.changelog._commonancestorsheads(p1, p2)
2197 2197 for p in (p1, p2):
2198 2198 for b in bases:
2199 2199 base = repo[b]
2200 2200 parent = repo[p]
2201 2201 missing = copies._computeforwardmissing(base, parent)
2202 2202 if not missing:
2203 2203 continue
2204 2204 data = {
2205 2205 b'source': base.hex(),
2206 2206 b'destination': parent.hex(),
2207 2207 b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
2208 2208 b'nbmissingfiles': len(missing),
2209 2209 }
2210 2210 if dostats:
2211 2211 alldata['nbrevs'].append(
2212 2212 (
2213 2213 data['nbrevs'],
2214 2214 base.hex(),
2215 2215 parent.hex(),
2216 2216 )
2217 2217 )
2218 2218 alldata['nbmissingfiles'].append(
2219 2219 (
2220 2220 data['nbmissingfiles'],
2221 2221 base.hex(),
2222 2222 parent.hex(),
2223 2223 )
2224 2224 )
2225 2225 if dotiming:
2226 2226 begin = util.timer()
2227 2227 renames = copies.pathcopies(base, parent)
2228 2228 end = util.timer()
2229 2229 # not very stable timing since we did only one run
2230 2230 data['time'] = end - begin
2231 2231 data['nbrenamedfiles'] = len(renames)
2232 2232 if dostats:
2233 2233 alldata['time'].append(
2234 2234 (
2235 2235 data['time'],
2236 2236 base.hex(),
2237 2237 parent.hex(),
2238 2238 )
2239 2239 )
2240 2240 alldata['nbrenames'].append(
2241 2241 (
2242 2242 data['nbrenamedfiles'],
2243 2243 base.hex(),
2244 2244 parent.hex(),
2245 2245 )
2246 2246 )
2247 2247 fm.startitem()
2248 2248 fm.data(**data)
# make node pretty for the human output
2249 2249 out = data.copy()
2250 2250 out['source'] = fm.hexfunc(base.node())
2251 2251 out['destination'] = fm.hexfunc(parent.node())
2252 2252 fm.plain(output % out)
2253 2253
2254 2254 fm.end()
2255 2255 if dostats:
2256 2256 entries = [
2257 2257 ('nbrevs', 'number of revision covered'),
2258 2258 ('nbmissingfiles', 'number of missing files at head'),
2259 2259 ]
2260 2260 if dotiming:
2261 2261 entries.append(('nbrenames', 'renamed files'))
2262 2262 entries.append(('time', 'time'))
2263 2263 _displaystats(ui, opts, entries, alldata)
2264 2264
2265 2265
# Benchmark the construction of a case-collision auditor for the dirstate.
# NOTE(review): diff rendering — old name `perf--cca` removed, new
# `perf::cca|perfcca` added; only the latter is live.
2266 @command(b'perf--cca', formatteropts)
2266 @command(b'perf::cca|perfcca', formatteropts)
2267 2267 def perfcca(ui, repo, **opts):
2268 2268 opts = _byteskwargs(opts)
2269 2269 timer, fm = gettimer(ui, opts)
2270 2270 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
2271 2271 fm.end()
2272 2272
2273 2273
# Benchmark loading the fncache file from the store.
2274 @command(b'perf--fncacheload', formatteropts)
2274 @command(b'perf::fncacheload|perffncacheload', formatteropts)
2275 2275 def perffncacheload(ui, repo, **opts):
2276 2276 opts = _byteskwargs(opts)
2277 2277 timer, fm = gettimer(ui, opts)
2278 2278 s = repo.store
2279 2279
# Benchmarked body: (re)load the fncache from disk each run.
2280 2280 def d():
2281 2281 s.fncache._load()
2282 2282
2283 2283 timer(d)
2284 2284 fm.end()
2285 2285
2286 2286
# Benchmark writing the fncache inside a transaction; the fncache is backed up
# via `tr.addbackup` so repeated writes are safe, and `_dirty` is forced so
# every run actually writes.
2287 @command(b'perf--fncachewrite', formatteropts)
2287 @command(b'perf::fncachewrite|perffncachewrite', formatteropts)
2288 2288 def perffncachewrite(ui, repo, **opts):
2289 2289 opts = _byteskwargs(opts)
2290 2290 timer, fm = gettimer(ui, opts)
2291 2291 s = repo.store
2292 2292 lock = repo.lock()
2293 2293 s.fncache._load()
2294 2294 tr = repo.transaction(b'perffncachewrite')
2295 2295 tr.addbackup(b'fncache')
2296 2296
2297 2297 def d():
2298 2298 s.fncache._dirty = True
2299 2299 s.fncache.write(tr)
2300 2300
2301 2301 timer(d)
2302 2302 tr.close()
2303 2303 lock.release()
2304 2304 fm.end()
2305 2305
2306 2306
# Benchmark path-encoding every entry currently stored in the fncache.
2307 @command(b'perf--fncacheencode', formatteropts)
2307 @command(b'perf::fncacheencode|perffncacheencode', formatteropts)
2308 2308 def perffncacheencode(ui, repo, **opts):
2309 2309 opts = _byteskwargs(opts)
2310 2310 timer, fm = gettimer(ui, opts)
2311 2311 s = repo.store
2312 2312 s.fncache._load()
2313 2313
2314 2314 def d():
2315 2315 for p in s.fncache.entries:
2316 2316 s.encode(p)
2317 2317
2318 2318 timer(d)
2319 2319 fm.end()
2320 2320
2321 2321
# Worker loop for threaded perfbdiff runs: consume text pairs from queue `q`
# and diff them until a `None` sentinel arrives, then park on the `ready`
# condition until the main thread wakes the workers for the next pass (or for
# shutdown once `done` is set).
2322 2322 def _bdiffworker(q, blocks, xdiff, ready, done):
2323 2323 while not done.is_set():
2324 2324 pair = q.get()
2325 2325 while pair is not None:
2326 2326 if xdiff:
2327 2327 mdiff.bdiff.xdiffblocks(*pair)
2328 2328 elif blocks:
2329 2329 mdiff.bdiff.blocks(*pair)
2330 2330 else:
2331 2331 mdiff.textdiff(*pair)
2332 2332 q.task_done()
2333 2333 pair = q.get()
2334 2334 q.task_done() # for the None one
2335 2335 with ready:
2336 2336 ready.wait()
2337 2337
2338 2338
# Return the raw manifest revision for node `mnode`, handling both the modern
# manifestlog API (`getstorage`) and older Mercurial versions that expose the
# revlog directly as `_revlog` (per this file's "historical portability"
# policy).
2339 2339 def _manifestrevision(repo, mnode):
2340 2340 ml = repo.manifestlog
2341 2341
2342 2342 if util.safehasattr(ml, b'getstorage'):
2343 2343 store = ml.getstorage(b'')
2344 2344 else:
2345 2345 store = ml._revlog
2346 2346
2347 2347 return store.revision(mnode)
2348 2348
2349 2349
# NOTE(review): diff rendering — old command name `perf--bdiff` removed, new
# `perf::bdiff|perfbdiff` added; only the latter is live after this commit.
2350 2350 @command(
2351 b'perf--bdiff',
2351 b'perf::bdiff|perfbdiff',
2352 2352 revlogopts
2353 2353 + formatteropts
2354 2354 + [
2355 2355 (
2356 2356 b'',
2357 2357 b'count',
2358 2358 1,
2359 2359 b'number of revisions to test (when using --startrev)',
2360 2360 ),
2361 2361 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
2362 2362 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
2363 2363 (b'', b'blocks', False, b'test computing diffs into blocks'),
2364 2364 (b'', b'xdiff', False, b'use xdiff algorithm'),
2365 2365 ],
2366 2366 b'-c|-m|FILE REV',
2367 2367 )
2368 2368 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
2369 2369 """benchmark a bdiff between revisions
2370 2370
2371 2371 By default, benchmark a bdiff between its delta parent and itself.
2372 2372
2373 2373 With ``--count``, benchmark bdiffs between delta parents and self for N
2374 2374 revisions starting at the specified revision.
2375 2375
2376 2376 With ``--alldata``, assume the requested revision is a changeset and
2377 2377 measure bdiffs for all changes related to that changeset (manifest
2378 2378 and filelogs).
2379 2379 """
2380 2380 opts = _byteskwargs(opts)
2381 2381
2382 2382 if opts[b'xdiff'] and not opts[b'blocks']:
2383 2383 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
2384 2384
2385 2385 if opts[b'alldata']:
2386 2386 opts[b'changelog'] = True
2387 2387
# With -c/-m the positional FILE argument is actually the revision.
2388 2388 if opts.get(b'changelog') or opts.get(b'manifest'):
2389 2389 file_, rev = None, file_
2390 2390 elif rev is None:
2391 2391 raise error.CommandError(b'perfbdiff', b'invalid arguments')
2392 2392
2393 2393 blocks = opts[b'blocks']
2394 2394 xdiff = opts[b'xdiff']
2395 2395 textpairs = []
2396 2396
2397 2397 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
2398 2398
# Collect the (old text, new text) pairs to diff before timing starts.
2399 2399 startrev = r.rev(r.lookup(rev))
2400 2400 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2401 2401 if opts[b'alldata']:
2402 2402 # Load revisions associated with changeset.
2403 2403 ctx = repo[rev]
2404 2404 mtext = _manifestrevision(repo, ctx.manifestnode())
2405 2405 for pctx in ctx.parents():
2406 2406 pman = _manifestrevision(repo, pctx.manifestnode())
2407 2407 textpairs.append((pman, mtext))
2408 2408
2409 2409 # Load filelog revisions by iterating manifest delta.
2410 2410 man = ctx.manifest()
2411 2411 pman = ctx.p1().manifest()
2412 2412 for filename, change in pman.diff(man).items():
2413 2413 fctx = repo.file(filename)
2414 2414 f1 = fctx.revision(change[0][0] or -1)
2415 2415 f2 = fctx.revision(change[1][0] or -1)
2416 2416 textpairs.append((f1, f2))
2417 2417 else:
2418 2418 dp = r.deltaparent(rev)
2419 2419 textpairs.append((r.revision(dp), r.revision(rev)))
2420 2420
2421 2421 withthreads = threads > 0
2422 2422 if not withthreads:
2423 2423
# Single-threaded benchmarked body: diff every pair inline.
2424 2424 def d():
2425 2425 for pair in textpairs:
2426 2426 if xdiff:
2427 2427 mdiff.bdiff.xdiffblocks(*pair)
2428 2428 elif blocks:
2429 2429 mdiff.bdiff.blocks(*pair)
2430 2430 else:
2431 2431 mdiff.textdiff(*pair)
2432 2432
2433 2433 else:
# Threaded mode: workers consume pairs from a queue; a None per thread
# ends each pass, and `ready` synchronizes passes (see _bdiffworker).
2434 2434 q = queue()
2435 2435 for i in _xrange(threads):
2436 2436 q.put(None)
2437 2437 ready = threading.Condition()
2438 2438 done = threading.Event()
2439 2439 for i in _xrange(threads):
2440 2440 threading.Thread(
2441 2441 target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
2442 2442 ).start()
2443 2443 q.join()
2444 2444
2445 2445 def d():
2446 2446 for pair in textpairs:
2447 2447 q.put(pair)
2448 2448 for i in _xrange(threads):
2449 2449 q.put(None)
2450 2450 with ready:
2451 2451 ready.notify_all()
2452 2452 q.join()
2453 2453
2454 2454 timer, fm = gettimer(ui, opts)
2455 2455 timer(d)
2456 2456 fm.end()
2457 2457
# Shut the worker threads down after timing.
2458 2458 if withthreads:
2459 2459 done.set()
2460 2460 for i in _xrange(threads):
2461 2461 q.put(None)
2462 2462 with ready:
2463 2463 ready.notify_all()
2464 2464
2465 2465
# NOTE(review): diff rendering — old name `perf--unidiff` removed, new
# `perf::unidiff|perfunidiff` added; only the latter is live.
2466 2466 @command(
2467 b'perf--unidiff',
2467 b'perf::unidiff|perfunidiff',
2468 2468 revlogopts
2469 2469 + formatteropts
2470 2470 + [
2471 2471 (
2472 2472 b'',
2473 2473 b'count',
2474 2474 1,
2475 2475 b'number of revisions to test (when using --startrev)',
2476 2476 ),
2477 2477 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
2478 2478 ],
2479 2479 b'-c|-m|FILE REV',
2480 2480 )
2481 2481 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
2482 2482 """benchmark a unified diff between revisions
2483 2483
2484 2484 This doesn't include any copy tracing - it's just a unified diff
2485 2485 of the texts.
2486 2486
2487 2487 By default, benchmark a diff between its delta parent and itself.
2488 2488
2489 2489 With ``--count``, benchmark diffs between delta parents and self for N
2490 2490 revisions starting at the specified revision.
2491 2491
2492 2492 With ``--alldata``, assume the requested revision is a changeset and
2493 2493 measure diffs for all changes related to that changeset (manifest
2494 2494 and filelogs).
2495 2495 """
2496 2496 opts = _byteskwargs(opts)
2497 2497 if opts[b'alldata']:
2498 2498 opts[b'changelog'] = True
2499 2499
# With -c/-m the positional FILE argument is actually the revision.
2500 2500 if opts.get(b'changelog') or opts.get(b'manifest'):
2501 2501 file_, rev = None, file_
2502 2502 elif rev is None:
2503 2503 raise error.CommandError(b'perfunidiff', b'invalid arguments')
2504 2504
2505 2505 textpairs = []
2506 2506
2507 2507 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
2508 2508
# Collect the text pairs to diff before timing starts (same strategy as
# perfbdiff above).
2509 2509 startrev = r.rev(r.lookup(rev))
2510 2510 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2511 2511 if opts[b'alldata']:
2512 2512 # Load revisions associated with changeset.
2513 2513 ctx = repo[rev]
2514 2514 mtext = _manifestrevision(repo, ctx.manifestnode())
2515 2515 for pctx in ctx.parents():
2516 2516 pman = _manifestrevision(repo, pctx.manifestnode())
2517 2517 textpairs.append((pman, mtext))
2518 2518
2519 2519 # Load filelog revisions by iterating manifest delta.
2520 2520 man = ctx.manifest()
2521 2521 pman = ctx.p1().manifest()
2522 2522 for filename, change in pman.diff(man).items():
2523 2523 fctx = repo.file(filename)
2524 2524 f1 = fctx.revision(change[0][0] or -1)
2525 2525 f2 = fctx.revision(change[1][0] or -1)
2526 2526 textpairs.append((f1, f2))
2527 2527 else:
2528 2528 dp = r.deltaparent(rev)
2529 2529 textpairs.append((r.revision(dp), r.revision(rev)))
2530 2530
2531 2531 def d():
2532 2532 for left, right in textpairs:
2533 2533 # The date strings don't matter, so we pass empty strings.
2534 2534 headerlines, hunks = mdiff.unidiff(
2535 2535 left, b'', right, b'', b'left', b'right', binary=False
2536 2536 )
2537 2537 # consume iterators in roughly the way patch.py does
2538 2538 b'\n'.join(headerlines)
2539 2539 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2540 2540
2541 2541 timer, fm = gettimer(ui, opts)
2542 2542 timer(d)
2543 2543 fm.end()
2544 2544
2545 2545
# Benchmark `hg diff` of the working directory under each whitespace-related
# diffopt combination ('', -w, -b, -B, -wB), buffering the output.
2546 @command(b'perf--diffwd', formatteropts)
2546 @command(b'perf::diffwd|perfdiffwd', formatteropts)
2547 2547 def perfdiffwd(ui, repo, **opts):
2548 2548 """Profile diff of working directory changes"""
2549 2549 opts = _byteskwargs(opts)
2550 2550 timer, fm = gettimer(ui, opts)
2551 2551 options = {
2552 2552 'w': 'ignore_all_space',
2553 2553 'b': 'ignore_space_change',
2554 2554 'B': 'ignore_blank_lines',
2555 2555 }
2556 2556
2557 2557 for diffopt in ('', 'w', 'b', 'B', 'wB'):
# NOTE(review): `opts` is rebound here, shadowing the command options
# captured above — intentional, since they are no longer needed.
2558 2558 opts = {options[c]: b'1' for c in diffopt}
2559 2559
2560 2560 def d():
2561 2561 ui.pushbuffer()
2562 2562 commands.diff(ui, repo, **opts)
2563 2563 ui.popbuffer()
2564 2564
2565 2565 diffopt = diffopt.encode('ascii')
2566 2566 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
2567 2567 timer(d, title=title)
2568 2568 fm.end()
2569 2569
2570 2570
# NOTE(review): diff rendering — the single-line `perf--revlogindex`
# registration was replaced by the multi-line `perf::revlogindex|...` form;
# only the latter is live.
2571 @command(b'perf--revlogindex', revlogopts + formatteropts, b'-c|-m|FILE')
2571 @command(
2572 b'perf::revlogindex|perfrevlogindex',
2573 revlogopts + formatteropts,
2574 b'-c|-m|FILE',
2575 )
2572 2576 def perfrevlogindex(ui, repo, file_=None, **opts):
2573 2577 """Benchmark operations against a revlog index.
2574 2578
2575 2579 This tests constructing a revlog instance, reading index data,
2576 2580 parsing index data, and performing various operations related to
2577 2581 index data.
2578 2582 """
2579 2583
2580 2584 opts = _byteskwargs(opts)
2581 2585
2582 2586 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
2583 2587
2584 2588 opener = getattr(rl, 'opener') # trick linter
2585 2589 indexfile = rl.indexfile
2586 2590 data = opener.read(indexfile)
2587 2591
# Only revlog format v1 is supported: the version and inline flag live in
# the first 4 bytes of the index.
2588 2592 header = struct.unpack(b'>I', data[0:4])[0]
2589 2593 version = header & 0xFFFF
2590 2594 if version == 1:
2591 2595 revlogio = revlog.revlogio()
2592 2596 inline = header & (1 << 16)
2593 2597 else:
2594 2598 raise error.Abort(b'unsupported revlog version: %d' % version)
2595 2599
2596 2600 rllen = len(rl)
2597 2601
# Sample nodes at fixed fractions of the revlog for the lookup benchmarks.
2598 2602 node0 = rl.node(0)
2599 2603 node25 = rl.node(rllen // 4)
2600 2604 node50 = rl.node(rllen // 2)
2601 2605 node75 = rl.node(rllen // 4 * 3)
2602 2606 node100 = rl.node(rllen - 1)
2603 2607
2604 2608 allrevs = range(rllen)
2605 2609 allrevsrev = list(reversed(allrevs))
2606 2610 allnodes = [rl.node(rev) for rev in range(rllen)]
2607 2611 allnodesrev = list(reversed(allnodes))
2608 2612
2609 2613 def constructor():
2610 2614 revlog.revlog(opener, indexfile)
2611 2615
2612 2616 def read():
2613 2617 with opener(indexfile) as fh:
2614 2618 fh.read()
2615 2619
2616 2620 def parseindex():
2617 2621 revlogio.parseindex(data, inline)
2618 2622
2619 2623 def getentry(revornode):
2620 2624 index = revlogio.parseindex(data, inline)[0]
2621 2625 index[revornode]
2622 2626
2623 2627 def getentries(revs, count=1):
2624 2628 index = revlogio.parseindex(data, inline)[0]
2625 2629
2626 2630 for i in range(count):
2627 2631 for rev in revs:
2628 2632 index[rev]
2629 2633
2630 2634 def resolvenode(node):
2631 2635 index = revlogio.parseindex(data, inline)[0]
2632 2636 rev = getattr(index, 'rev', None)
2633 2637 if rev is None:
2634 2638 nodemap = getattr(
2635 2639 revlogio.parseindex(data, inline)[0], 'nodemap', None
2636 2640 )
2637 2641 # This only works for the C code.
2638 2642 if nodemap is None:
2639 2643 return
2640 2644 rev = nodemap.__getitem__
2641 2645
2642 2646 try:
2643 2647 rev(node)
2644 2648 except error.RevlogError:
2645 2649 pass
2646 2650
2647 2651 def resolvenodes(nodes, count=1):
2648 2652 index = revlogio.parseindex(data, inline)[0]
2649 2653 rev = getattr(index, 'rev', None)
2650 2654 if rev is None:
2651 2655 nodemap = getattr(
2652 2656 revlogio.parseindex(data, inline)[0], 'nodemap', None
2653 2657 )
2654 2658 # This only works for the C code.
2655 2659 if nodemap is None:
2656 2660 return
2657 2661 rev = nodemap.__getitem__
2658 2662
2659 2663 for i in range(count):
2660 2664 for node in nodes:
2661 2665 try:
2662 2666 rev(node)
2663 2667 except error.RevlogError:
2664 2668 pass
2665 2669
# Each (callable, title) pair below is timed independently.
2666 2670 benches = [
2667 2671 (constructor, b'revlog constructor'),
2668 2672 (read, b'read'),
2669 2673 (parseindex, b'create index object'),
2670 2674 (lambda: getentry(0), b'retrieve index entry for rev 0'),
2671 2675 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
2672 2676 (lambda: resolvenode(node0), b'look up node at rev 0'),
2673 2677 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
2674 2678 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
2675 2679 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
2676 2680 (lambda: resolvenode(node100), b'look up node at tip'),
2677 2681 # 2x variation is to measure caching impact.
2678 2682 (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
2679 2683 (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
2680 2684 (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
2681 2685 (
2682 2686 lambda: resolvenodes(allnodesrev, 2),
2683 2687 b'look up all nodes 2x (reverse)',
2684 2688 ),
2685 2689 (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
2686 2690 (
2687 2691 lambda: getentries(allrevs, 2),
2688 2692 b'retrieve all index entries 2x (forward)',
2689 2693 ),
2690 2694 (
2691 2695 lambda: getentries(allrevsrev),
2692 2696 b'retrieve all index entries (reverse)',
2693 2697 ),
2694 2698 (
2695 2699 lambda: getentries(allrevsrev, 2),
2696 2700 b'retrieve all index entries 2x (reverse)',
2697 2701 ),
2698 2702 ]
2699 2703
2700 2704 for fn, title in benches:
2701 2705 timer, fm = gettimer(ui, opts)
2702 2706 timer(fn, title=title)
2703 2707 fm.end()
2704 2708
2705 2709
# NOTE(review): diff rendering — old name `perf--revlogrevisions` removed, new
# `perf::revlogrevisions|perfrevlogrevisions` added; only the latter is live.
2706 2710 @command(
2707 b'perf--revlogrevisions',
2711 b'perf::revlogrevisions|perfrevlogrevisions',
2708 2712 revlogopts
2709 2713 + formatteropts
2710 2714 + [
2711 2715 (b'd', b'dist', 100, b'distance between the revisions'),
2712 2716 (b's', b'startrev', 0, b'revision to start reading at'),
2713 2717 (b'', b'reverse', False, b'read in reverse'),
2714 2718 ],
2715 2719 b'-c|-m|FILE',
2716 2720 )
2717 2721 def perfrevlogrevisions(
2718 2722 ui, repo, file_=None, startrev=0, reverse=False, **opts
2719 2723 ):
2720 2724 """Benchmark reading a series of revisions from a revlog.
2721 2725
2722 2726 By default, we read every ``-d/--dist`` revision from 0 to tip of
2723 2727 the specified revlog.
2724 2728
2725 2729 The start revision can be defined via ``-s/--startrev``.
2726 2730 """
2727 2731 opts = _byteskwargs(opts)
2728 2732
2729 2733 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
2730 2734 rllen = getlen(ui)(rl)
2731 2735
# Negative startrev counts back from the end of the revlog.
2732 2736 if startrev < 0:
2733 2737 startrev = rllen + startrev
2734 2738
2735 2739 def d():
2736 2740 rl.clearcaches()
2737 2741
2738 2742 beginrev = startrev
2739 2743 endrev = rllen
2740 2744 dist = opts[b'dist']
2741 2745
2742 2746 if reverse:
2743 2747 beginrev, endrev = endrev - 1, beginrev - 1
2744 2748 dist = -1 * dist
2745 2749
2746 2750 for x in _xrange(beginrev, endrev, dist):
2747 2751 # Old revisions don't support passing int.
2748 2752 n = rl.node(x)
2749 2753 rl.revision(n)
2750 2754
2751 2755 timer, fm = gettimer(ui, opts)
2752 2756 timer(d)
2753 2757 fm.end()
2754 2758
2755 2759
# NOTE(review): diff rendering — old name `perf--revlogwrite` removed, new
# `perf::revlogwrite|perfrevlogwrite` added; only the latter is live.
2756 2760 @command(
2757 b'perf--revlogwrite',
2761 b'perf::revlogwrite|perfrevlogwrite',
2758 2762 revlogopts
2759 2763 + formatteropts
2760 2764 + [
2761 2765 (b's', b'startrev', 1000, b'revision to start writing at'),
2762 2766 (b'', b'stoprev', -1, b'last revision to write'),
2763 2767 (b'', b'count', 3, b'number of passes to perform'),
2764 2768 (b'', b'details', False, b'print timing for every revisions tested'),
2765 2769 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
2766 2770 (b'', b'lazydeltabase', True, b'try the provided delta first'),
2767 2771 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
2768 2772 ],
2769 2773 b'-c|-m|FILE',
2770 2774 )
2771 2775 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
2772 2776 """Benchmark writing a series of revisions to a revlog.
2773 2777
2774 2778 Possible source values are:
2775 2779 * `full`: add from a full text (default).
2776 2780 * `parent-1`: add from a delta to the first parent
2777 2781 * `parent-2`: add from a delta to the second parent if it exists
2778 2782 (use a delta from the first parent otherwise)
2779 2783 * `parent-smallest`: add from the smallest delta (either p1 or p2)
2780 2784 * `storage`: add from the existing precomputed deltas
2781 2785
2782 2786 Note: This performance command measures performance in a custom way. As a
2783 2787 result some of the global configuration of the 'perf' command does not
2784 2788 apply to it:
2785 2789
2786 2790 * ``pre-run``: disabled
2787 2791
2788 2792 * ``profile-benchmark``: disabled
2789 2793
2790 2794 * ``run-limits``: disabled use --count instead
2791 2795 """
2792 2796 opts = _byteskwargs(opts)
2793 2797
2794 2798 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
2795 2799 rllen = getlen(ui)(rl)
# Negative startrev/stoprev count back from the end of the revlog.
2796 2800 if startrev < 0:
2797 2801 startrev = rllen + startrev
2798 2802 if stoprev < 0:
2799 2803 stoprev = rllen + stoprev
2800 2804
2801 2805 lazydeltabase = opts['lazydeltabase']
2802 2806 source = opts['source']
2803 2807 clearcaches = opts['clear_caches']
2804 2808 validsource = (
2805 2809 b'full',
2806 2810 b'parent-1',
2807 2811 b'parent-2',
2808 2812 b'parent-smallest',
2809 2813 b'storage',
2810 2814 )
2811 2815 if source not in validsource:
2812 2816 raise error.Abort('invalid source type: %s' % source)
2813 2817
2814 2818 ### actually gather results
2815 2819 count = opts['count']
2816 2820 if count <= 0:
# NOTE(review): "invalide" is a pre-existing typo in this error message;
# left untouched since this is a diff rendering.
2817 2821 raise error.Abort('invalide run count: %d' % count)
2818 2822 allresults = []
2819 2823 for c in range(count):
2820 2824 timing = _timeonewrite(
2821 2825 ui,
2822 2826 rl,
2823 2827 source,
2824 2828 startrev,
2825 2829 stoprev,
2826 2830 c + 1,
2827 2831 lazydeltabase=lazydeltabase,
2828 2832 clearcaches=clearcaches,
2829 2833 )
2830 2834 allresults.append(timing)
2831 2835
2832 2836 ### consolidate the results in a single list
# Zip the per-pass timings: results[i] = (rev, [t_pass1, t_pass2, ...]).
2833 2837 results = []
2834 2838 for idx, (rev, t) in enumerate(allresults[0]):
2835 2839 ts = [t]
2836 2840 for other in allresults[1:]:
2837 2841 orev, ot = other[idx]
2838 2842 assert orev == rev
2839 2843 ts.append(ot)
2840 2844 results.append((rev, ts))
2841 2845 resultcount = len(results)
2842 2846
2843 2847 ### Compute and display relevant statistics
2844 2848
2845 2849 # get a formatter
2846 2850 fm = ui.formatter(b'perf', opts)
2847 2851 displayall = ui.configbool(b"perf", b"all-timing", False)
2848 2852
2849 2853 # print individual details if requested
2850 2854 if opts['details']:
2851 2855 for idx, item in enumerate(results, 1):
2852 2856 rev, data = item
2853 2857 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
2854 2858 formatone(fm, data, title=title, displayall=displayall)
2855 2859
2856 2860 # sorts results by median time
2857 2861 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
2858 2862 # list of (name, index) to display)
2859 2863 relevants = [
2860 2864 ("min", 0),
2861 2865 ("10%", resultcount * 10 // 100),
2862 2866 ("25%", resultcount * 25 // 100),
# NOTE(review): the "50%" entry is computed with `* 70 // 100` — this
# looks like a typo for `* 50 // 100` (the 70th percentile is reported
# under the "50%" label); confirm and fix upstream.
2863 2867 ("50%", resultcount * 70 // 100),
2864 2868 ("75%", resultcount * 75 // 100),
2865 2869 ("90%", resultcount * 90 // 100),
2866 2870 ("95%", resultcount * 95 // 100),
2867 2871 ("99%", resultcount * 99 // 100),
2868 2872 ("99.9%", resultcount * 999 // 1000),
2869 2873 ("99.99%", resultcount * 9999 // 10000),
2870 2874 ("99.999%", resultcount * 99999 // 100000),
2871 2875 ("max", -1),
2872 2876 ]
2873 2877 if not ui.quiet:
2874 2878 for name, idx in relevants:
2875 2879 data = results[idx]
2876 2880 title = '%s of %d, rev %d' % (name, resultcount, data[0])
2877 2881 formatone(fm, data[1], title=title, displayall=displayall)
2878 2882
2879 2883 # XXX summing that many float will not be very precise, we ignore this fact
2880 2884 # for now
2881 2885 totaltime = []
2882 2886 for item in allresults:
2883 2887 totaltime.append(
2884 2888 (
2885 2889 sum(x[1][0] for x in item),
2886 2890 sum(x[1][1] for x in item),
2887 2891 sum(x[1][2] for x in item),
2888 2892 )
2889 2893 )
2890 2894 formatone(
2891 2895 fm,
2892 2896 totaltime,
2893 2897 title="total time (%d revs)" % resultcount,
2894 2898 displayall=displayall,
2895 2899 )
2896 2900 fm.end()
2897 2901
2898 2902
# Minimal transaction stub: accepts `add` calls and does nothing, so revlog
# writes in _timeonewrite need no real repository transaction.
2899 2903 class _faketr(object):
2900 2904 def add(s, x, y, z=None):
2901 2905 return None
2902 2906
2903 2907
2904 2908 def _timeonewrite(
2905 2909 ui,
2906 2910 orig,
2907 2911 source,
2908 2912 startrev,
2909 2913 stoprev,
2910 2914 runidx=None,
2911 2915 lazydeltabase=True,
2912 2916 clearcaches=True,
2913 2917 ):
2914 2918 timings = []
2915 2919 tr = _faketr()
2916 2920 with _temprevlog(ui, orig, startrev) as dest:
2917 2921 dest._lazydeltabase = lazydeltabase
2918 2922 revs = list(orig.revs(startrev, stoprev))
2919 2923 total = len(revs)
2920 2924 topic = 'adding'
2921 2925 if runidx is not None:
2922 2926 topic += ' (run #%d)' % runidx
2923 2927 # Support both old and new progress API
2924 2928 if util.safehasattr(ui, 'makeprogress'):
2925 2929 progress = ui.makeprogress(topic, unit='revs', total=total)
2926 2930
2927 2931 def updateprogress(pos):
2928 2932 progress.update(pos)
2929 2933
2930 2934 def completeprogress():
2931 2935 progress.complete()
2932 2936
2933 2937 else:
2934 2938
2935 2939 def updateprogress(pos):
2936 2940 ui.progress(topic, pos, unit='revs', total=total)
2937 2941
2938 2942 def completeprogress():
2939 2943 ui.progress(topic, None, unit='revs', total=total)
2940 2944
2941 2945 for idx, rev in enumerate(revs):
2942 2946 updateprogress(idx)
2943 2947 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
2944 2948 if clearcaches:
2945 2949 dest.index.clearcaches()
2946 2950 dest.clearcaches()
2947 2951 with timeone() as r:
2948 2952 dest.addrawrevision(*addargs, **addkwargs)
2949 2953 timings.append((rev, r[0]))
2950 2954 updateprogress(total)
2951 2955 completeprogress()
2952 2956 return timings
2953 2957
2954 2958
2955 2959 def _getrevisionseed(orig, rev, tr, source):
2956 2960 from mercurial.node import nullid
2957 2961
2958 2962 linkrev = orig.linkrev(rev)
2959 2963 node = orig.node(rev)
2960 2964 p1, p2 = orig.parents(node)
2961 2965 flags = orig.flags(rev)
2962 2966 cachedelta = None
2963 2967 text = None
2964 2968
2965 2969 if source == b'full':
2966 2970 text = orig.revision(rev)
2967 2971 elif source == b'parent-1':
2968 2972 baserev = orig.rev(p1)
2969 2973 cachedelta = (baserev, orig.revdiff(p1, rev))
2970 2974 elif source == b'parent-2':
2971 2975 parent = p2
2972 2976 if p2 == nullid:
2973 2977 parent = p1
2974 2978 baserev = orig.rev(parent)
2975 2979 cachedelta = (baserev, orig.revdiff(parent, rev))
2976 2980 elif source == b'parent-smallest':
2977 2981 p1diff = orig.revdiff(p1, rev)
2978 2982 parent = p1
2979 2983 diff = p1diff
2980 2984 if p2 != nullid:
2981 2985 p2diff = orig.revdiff(p2, rev)
2982 2986 if len(p1diff) > len(p2diff):
2983 2987 parent = p2
2984 2988 diff = p2diff
2985 2989 baserev = orig.rev(parent)
2986 2990 cachedelta = (baserev, diff)
2987 2991 elif source == b'storage':
2988 2992 baserev = orig.deltaparent(rev)
2989 2993 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
2990 2994
2991 2995 return (
2992 2996 (text, tr, linkrev, p1, p2),
2993 2997 {'node': node, 'flags': flags, 'cachedelta': cachedelta},
2994 2998 )
2995 2999
2996 3000
2997 3001 @contextlib.contextmanager
2998 3002 def _temprevlog(ui, orig, truncaterev):
2999 3003 from mercurial import vfs as vfsmod
3000 3004
3001 3005 if orig._inline:
3002 3006 raise error.Abort('not supporting inline revlog (yet)')
3003 3007 revlogkwargs = {}
3004 3008 k = 'upperboundcomp'
3005 3009 if util.safehasattr(orig, k):
3006 3010 revlogkwargs[k] = getattr(orig, k)
3007 3011
3008 3012 origindexpath = orig.opener.join(orig.indexfile)
3009 3013 origdatapath = orig.opener.join(orig.datafile)
3010 3014 indexname = 'revlog.i'
3011 3015 dataname = 'revlog.d'
3012 3016
3013 3017 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
3014 3018 try:
3015 3019 # copy the data file in a temporary directory
3016 3020 ui.debug('copying data in %s\n' % tmpdir)
3017 3021 destindexpath = os.path.join(tmpdir, 'revlog.i')
3018 3022 destdatapath = os.path.join(tmpdir, 'revlog.d')
3019 3023 shutil.copyfile(origindexpath, destindexpath)
3020 3024 shutil.copyfile(origdatapath, destdatapath)
3021 3025
3022 3026 # remove the data we want to add again
3023 3027 ui.debug('truncating data to be rewritten\n')
3024 3028 with open(destindexpath, 'ab') as index:
3025 3029 index.seek(0)
3026 3030 index.truncate(truncaterev * orig._io.size)
3027 3031 with open(destdatapath, 'ab') as data:
3028 3032 data.seek(0)
3029 3033 data.truncate(orig.start(truncaterev))
3030 3034
3031 3035 # instantiate a new revlog from the temporary copy
3032 3036 ui.debug('truncating adding to be rewritten\n')
3033 3037 vfs = vfsmod.vfs(tmpdir)
3034 3038 vfs.options = getattr(orig.opener, 'options', None)
3035 3039
3036 3040 dest = revlog.revlog(
3037 3041 vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
3038 3042 )
3039 3043 if dest._inline:
3040 3044 raise error.Abort('not supporting inline revlog (yet)')
3041 3045 # make sure internals are initialized
3042 3046 dest.revision(len(dest) - 1)
3043 3047 yield dest
3044 3048 del dest, vfs
3045 3049 finally:
3046 3050 shutil.rmtree(tmpdir, True)
3047 3051
3048 3052
3049 3053 @command(
3050 b'perf--revlogchunks',
3054 b'perf::revlogchunks|perfrevlogchunks',
3051 3055 revlogopts
3052 3056 + formatteropts
3053 3057 + [
3054 3058 (b'e', b'engines', b'', b'compression engines to use'),
3055 3059 (b's', b'startrev', 0, b'revision to start at'),
3056 3060 ],
3057 3061 b'-c|-m|FILE',
3058 3062 )
3059 3063 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
3060 3064 """Benchmark operations on revlog chunks.
3061 3065
3062 3066 Logically, each revlog is a collection of fulltext revisions. However,
3063 3067 stored within each revlog are "chunks" of possibly compressed data. This
3064 3068 data needs to be read and decompressed or compressed and written.
3065 3069
3066 3070 This command measures the time it takes to read+decompress and recompress
3067 3071 chunks in a revlog. It effectively isolates I/O and compression performance.
3068 3072 For measurements of higher-level operations like resolving revisions,
3069 3073 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
3070 3074 """
3071 3075 opts = _byteskwargs(opts)
3072 3076
3073 3077 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
3074 3078
3075 3079 # _chunkraw was renamed to _getsegmentforrevs.
3076 3080 try:
3077 3081 segmentforrevs = rl._getsegmentforrevs
3078 3082 except AttributeError:
3079 3083 segmentforrevs = rl._chunkraw
3080 3084
3081 3085 # Verify engines argument.
3082 3086 if engines:
3083 3087 engines = {e.strip() for e in engines.split(b',')}
3084 3088 for engine in engines:
3085 3089 try:
3086 3090 util.compressionengines[engine]
3087 3091 except KeyError:
3088 3092 raise error.Abort(b'unknown compression engine: %s' % engine)
3089 3093 else:
3090 3094 engines = []
3091 3095 for e in util.compengines:
3092 3096 engine = util.compengines[e]
3093 3097 try:
3094 3098 if engine.available():
3095 3099 engine.revlogcompressor().compress(b'dummy')
3096 3100 engines.append(e)
3097 3101 except NotImplementedError:
3098 3102 pass
3099 3103
3100 3104 revs = list(rl.revs(startrev, len(rl) - 1))
3101 3105
3102 3106 def rlfh(rl):
3103 3107 if rl._inline:
3104 3108 return getsvfs(repo)(rl.indexfile)
3105 3109 else:
3106 3110 return getsvfs(repo)(rl.datafile)
3107 3111
3108 3112 def doread():
3109 3113 rl.clearcaches()
3110 3114 for rev in revs:
3111 3115 segmentforrevs(rev, rev)
3112 3116
3113 3117 def doreadcachedfh():
3114 3118 rl.clearcaches()
3115 3119 fh = rlfh(rl)
3116 3120 for rev in revs:
3117 3121 segmentforrevs(rev, rev, df=fh)
3118 3122
3119 3123 def doreadbatch():
3120 3124 rl.clearcaches()
3121 3125 segmentforrevs(revs[0], revs[-1])
3122 3126
3123 3127 def doreadbatchcachedfh():
3124 3128 rl.clearcaches()
3125 3129 fh = rlfh(rl)
3126 3130 segmentforrevs(revs[0], revs[-1], df=fh)
3127 3131
3128 3132 def dochunk():
3129 3133 rl.clearcaches()
3130 3134 fh = rlfh(rl)
3131 3135 for rev in revs:
3132 3136 rl._chunk(rev, df=fh)
3133 3137
3134 3138 chunks = [None]
3135 3139
3136 3140 def dochunkbatch():
3137 3141 rl.clearcaches()
3138 3142 fh = rlfh(rl)
3139 3143 # Save chunks as a side-effect.
3140 3144 chunks[0] = rl._chunks(revs, df=fh)
3141 3145
3142 3146 def docompress(compressor):
3143 3147 rl.clearcaches()
3144 3148
3145 3149 try:
3146 3150 # Swap in the requested compression engine.
3147 3151 oldcompressor = rl._compressor
3148 3152 rl._compressor = compressor
3149 3153 for chunk in chunks[0]:
3150 3154 rl.compress(chunk)
3151 3155 finally:
3152 3156 rl._compressor = oldcompressor
3153 3157
3154 3158 benches = [
3155 3159 (lambda: doread(), b'read'),
3156 3160 (lambda: doreadcachedfh(), b'read w/ reused fd'),
3157 3161 (lambda: doreadbatch(), b'read batch'),
3158 3162 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
3159 3163 (lambda: dochunk(), b'chunk'),
3160 3164 (lambda: dochunkbatch(), b'chunk batch'),
3161 3165 ]
3162 3166
3163 3167 for engine in sorted(engines):
3164 3168 compressor = util.compengines[engine].revlogcompressor()
3165 3169 benches.append(
3166 3170 (
3167 3171 functools.partial(docompress, compressor),
3168 3172 b'compress w/ %s' % engine,
3169 3173 )
3170 3174 )
3171 3175
3172 3176 for fn, title in benches:
3173 3177 timer, fm = gettimer(ui, opts)
3174 3178 timer(fn, title=title)
3175 3179 fm.end()
3176 3180
3177 3181
3178 3182 @command(
3179 b'perf--revlogrevision',
3183 b'perf::revlogrevision|perfrevlogrevision',
3180 3184 revlogopts
3181 3185 + formatteropts
3182 3186 + [(b'', b'cache', False, b'use caches instead of clearing')],
3183 3187 b'-c|-m|FILE REV',
3184 3188 )
3185 3189 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
3186 3190 """Benchmark obtaining a revlog revision.
3187 3191
3188 3192 Obtaining a revlog revision consists of roughly the following steps:
3189 3193
3190 3194 1. Compute the delta chain
3191 3195 2. Slice the delta chain if applicable
3192 3196 3. Obtain the raw chunks for that delta chain
3193 3197 4. Decompress each raw chunk
3194 3198 5. Apply binary patches to obtain fulltext
3195 3199 6. Verify hash of fulltext
3196 3200
3197 3201 This command measures the time spent in each of these phases.
3198 3202 """
3199 3203 opts = _byteskwargs(opts)
3200 3204
3201 3205 if opts.get(b'changelog') or opts.get(b'manifest'):
3202 3206 file_, rev = None, file_
3203 3207 elif rev is None:
3204 3208 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
3205 3209
3206 3210 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
3207 3211
3208 3212 # _chunkraw was renamed to _getsegmentforrevs.
3209 3213 try:
3210 3214 segmentforrevs = r._getsegmentforrevs
3211 3215 except AttributeError:
3212 3216 segmentforrevs = r._chunkraw
3213 3217
3214 3218 node = r.lookup(rev)
3215 3219 rev = r.rev(node)
3216 3220
3217 3221 def getrawchunks(data, chain):
3218 3222 start = r.start
3219 3223 length = r.length
3220 3224 inline = r._inline
3221 3225 iosize = r._io.size
3222 3226 buffer = util.buffer
3223 3227
3224 3228 chunks = []
3225 3229 ladd = chunks.append
3226 3230 for idx, item in enumerate(chain):
3227 3231 offset = start(item[0])
3228 3232 bits = data[idx]
3229 3233 for rev in item:
3230 3234 chunkstart = start(rev)
3231 3235 if inline:
3232 3236 chunkstart += (rev + 1) * iosize
3233 3237 chunklength = length(rev)
3234 3238 ladd(buffer(bits, chunkstart - offset, chunklength))
3235 3239
3236 3240 return chunks
3237 3241
3238 3242 def dodeltachain(rev):
3239 3243 if not cache:
3240 3244 r.clearcaches()
3241 3245 r._deltachain(rev)
3242 3246
3243 3247 def doread(chain):
3244 3248 if not cache:
3245 3249 r.clearcaches()
3246 3250 for item in slicedchain:
3247 3251 segmentforrevs(item[0], item[-1])
3248 3252
3249 3253 def doslice(r, chain, size):
3250 3254 for s in slicechunk(r, chain, targetsize=size):
3251 3255 pass
3252 3256
3253 3257 def dorawchunks(data, chain):
3254 3258 if not cache:
3255 3259 r.clearcaches()
3256 3260 getrawchunks(data, chain)
3257 3261
3258 3262 def dodecompress(chunks):
3259 3263 decomp = r.decompress
3260 3264 for chunk in chunks:
3261 3265 decomp(chunk)
3262 3266
3263 3267 def dopatch(text, bins):
3264 3268 if not cache:
3265 3269 r.clearcaches()
3266 3270 mdiff.patches(text, bins)
3267 3271
3268 3272 def dohash(text):
3269 3273 if not cache:
3270 3274 r.clearcaches()
3271 3275 r.checkhash(text, node, rev=rev)
3272 3276
3273 3277 def dorevision():
3274 3278 if not cache:
3275 3279 r.clearcaches()
3276 3280 r.revision(node)
3277 3281
3278 3282 try:
3279 3283 from mercurial.revlogutils.deltas import slicechunk
3280 3284 except ImportError:
3281 3285 slicechunk = getattr(revlog, '_slicechunk', None)
3282 3286
3283 3287 size = r.length(rev)
3284 3288 chain = r._deltachain(rev)[0]
3285 3289 if not getattr(r, '_withsparseread', False):
3286 3290 slicedchain = (chain,)
3287 3291 else:
3288 3292 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
3289 3293 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
3290 3294 rawchunks = getrawchunks(data, slicedchain)
3291 3295 bins = r._chunks(chain)
3292 3296 text = bytes(bins[0])
3293 3297 bins = bins[1:]
3294 3298 text = mdiff.patches(text, bins)
3295 3299
3296 3300 benches = [
3297 3301 (lambda: dorevision(), b'full'),
3298 3302 (lambda: dodeltachain(rev), b'deltachain'),
3299 3303 (lambda: doread(chain), b'read'),
3300 3304 ]
3301 3305
3302 3306 if getattr(r, '_withsparseread', False):
3303 3307 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
3304 3308 benches.append(slicing)
3305 3309
3306 3310 benches.extend(
3307 3311 [
3308 3312 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
3309 3313 (lambda: dodecompress(rawchunks), b'decompress'),
3310 3314 (lambda: dopatch(text, bins), b'patch'),
3311 3315 (lambda: dohash(text), b'hash'),
3312 3316 ]
3313 3317 )
3314 3318
3315 3319 timer, fm = gettimer(ui, opts)
3316 3320 for fn, title in benches:
3317 3321 timer(fn, title=title)
3318 3322 fm.end()
3319 3323
3320 3324
3321 3325 @command(
3322 b'perf--revset',
3326 b'perf::revset|perfrevset',
3323 3327 [
3324 3328 (b'C', b'clear', False, b'clear volatile cache between each call.'),
3325 3329 (b'', b'contexts', False, b'obtain changectx for each revision'),
3326 3330 ]
3327 3331 + formatteropts,
3328 3332 b"REVSET",
3329 3333 )
3330 3334 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
3331 3335 """benchmark the execution time of a revset
3332 3336
3333 3337 Use the --clean option if need to evaluate the impact of build volatile
3334 3338 revisions set cache on the revset execution. Volatile cache hold filtered
3335 3339 and obsolete related cache."""
3336 3340 opts = _byteskwargs(opts)
3337 3341
3338 3342 timer, fm = gettimer(ui, opts)
3339 3343
3340 3344 def d():
3341 3345 if clear:
3342 3346 repo.invalidatevolatilesets()
3343 3347 if contexts:
3344 3348 for ctx in repo.set(expr):
3345 3349 pass
3346 3350 else:
3347 3351 for r in repo.revs(expr):
3348 3352 pass
3349 3353
3350 3354 timer(d)
3351 3355 fm.end()
3352 3356
3353 3357
3354 3358 @command(
3355 b'perf--volatilesets',
3359 b'perf::volatilesets|perfvolatilesets',
3356 3360 [
3357 3361 (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
3358 3362 ]
3359 3363 + formatteropts,
3360 3364 )
3361 3365 def perfvolatilesets(ui, repo, *names, **opts):
3362 3366 """benchmark the computation of various volatile set
3363 3367
3364 3368 Volatile set computes element related to filtering and obsolescence."""
3365 3369 opts = _byteskwargs(opts)
3366 3370 timer, fm = gettimer(ui, opts)
3367 3371 repo = repo.unfiltered()
3368 3372
3369 3373 def getobs(name):
3370 3374 def d():
3371 3375 repo.invalidatevolatilesets()
3372 3376 if opts[b'clear_obsstore']:
3373 3377 clearfilecache(repo, b'obsstore')
3374 3378 obsolete.getrevs(repo, name)
3375 3379
3376 3380 return d
3377 3381
3378 3382 allobs = sorted(obsolete.cachefuncs)
3379 3383 if names:
3380 3384 allobs = [n for n in allobs if n in names]
3381 3385
3382 3386 for name in allobs:
3383 3387 timer(getobs(name), title=name)
3384 3388
3385 3389 def getfiltered(name):
3386 3390 def d():
3387 3391 repo.invalidatevolatilesets()
3388 3392 if opts[b'clear_obsstore']:
3389 3393 clearfilecache(repo, b'obsstore')
3390 3394 repoview.filterrevs(repo, name)
3391 3395
3392 3396 return d
3393 3397
3394 3398 allfilter = sorted(repoview.filtertable)
3395 3399 if names:
3396 3400 allfilter = [n for n in allfilter if n in names]
3397 3401
3398 3402 for name in allfilter:
3399 3403 timer(getfiltered(name), title=name)
3400 3404 fm.end()
3401 3405
3402 3406
3403 3407 @command(
3404 b'perf--branchmap',
3408 b'perf::branchmap|perfbranchmap',
3405 3409 [
3406 3410 (b'f', b'full', False, b'Includes build time of subset'),
3407 3411 (
3408 3412 b'',
3409 3413 b'clear-revbranch',
3410 3414 False,
3411 3415 b'purge the revbranch cache between computation',
3412 3416 ),
3413 3417 ]
3414 3418 + formatteropts,
3415 3419 )
3416 3420 def perfbranchmap(ui, repo, *filternames, **opts):
3417 3421 """benchmark the update of a branchmap
3418 3422
3419 3423 This benchmarks the full repo.branchmap() call with read and write disabled
3420 3424 """
3421 3425 opts = _byteskwargs(opts)
3422 3426 full = opts.get(b"full", False)
3423 3427 clear_revbranch = opts.get(b"clear_revbranch", False)
3424 3428 timer, fm = gettimer(ui, opts)
3425 3429
3426 3430 def getbranchmap(filtername):
3427 3431 """generate a benchmark function for the filtername"""
3428 3432 if filtername is None:
3429 3433 view = repo
3430 3434 else:
3431 3435 view = repo.filtered(filtername)
3432 3436 if util.safehasattr(view._branchcaches, '_per_filter'):
3433 3437 filtered = view._branchcaches._per_filter
3434 3438 else:
3435 3439 # older versions
3436 3440 filtered = view._branchcaches
3437 3441
3438 3442 def d():
3439 3443 if clear_revbranch:
3440 3444 repo.revbranchcache()._clear()
3441 3445 if full:
3442 3446 view._branchcaches.clear()
3443 3447 else:
3444 3448 filtered.pop(filtername, None)
3445 3449 view.branchmap()
3446 3450
3447 3451 return d
3448 3452
3449 3453 # add filter in smaller subset to bigger subset
3450 3454 possiblefilters = set(repoview.filtertable)
3451 3455 if filternames:
3452 3456 possiblefilters &= set(filternames)
3453 3457 subsettable = getbranchmapsubsettable()
3454 3458 allfilters = []
3455 3459 while possiblefilters:
3456 3460 for name in possiblefilters:
3457 3461 subset = subsettable.get(name)
3458 3462 if subset not in possiblefilters:
3459 3463 break
3460 3464 else:
3461 3465 assert False, b'subset cycle %s!' % possiblefilters
3462 3466 allfilters.append(name)
3463 3467 possiblefilters.remove(name)
3464 3468
3465 3469 # warm the cache
3466 3470 if not full:
3467 3471 for name in allfilters:
3468 3472 repo.filtered(name).branchmap()
3469 3473 if not filternames or b'unfiltered' in filternames:
3470 3474 # add unfiltered
3471 3475 allfilters.append(None)
3472 3476
3473 3477 if util.safehasattr(branchmap.branchcache, 'fromfile'):
3474 3478 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
3475 3479 branchcacheread.set(classmethod(lambda *args: None))
3476 3480 else:
3477 3481 # older versions
3478 3482 branchcacheread = safeattrsetter(branchmap, b'read')
3479 3483 branchcacheread.set(lambda *args: None)
3480 3484 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
3481 3485 branchcachewrite.set(lambda *args: None)
3482 3486 try:
3483 3487 for name in allfilters:
3484 3488 printname = name
3485 3489 if name is None:
3486 3490 printname = b'unfiltered'
3487 3491 timer(getbranchmap(name), title=printname)
3488 3492 finally:
3489 3493 branchcacheread.restore()
3490 3494 branchcachewrite.restore()
3491 3495 fm.end()
3492 3496
3493 3497
3494 3498 @command(
3495 b'perf--branchmapupdate',
3499 b'perf::branchmapupdate|perfbranchmapupdate',
3496 3500 [
3497 3501 (b'', b'base', [], b'subset of revision to start from'),
3498 3502 (b'', b'target', [], b'subset of revision to end with'),
3499 3503 (b'', b'clear-caches', False, b'clear cache between each runs'),
3500 3504 ]
3501 3505 + formatteropts,
3502 3506 )
3503 3507 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
3504 3508 """benchmark branchmap update from for <base> revs to <target> revs
3505 3509
3506 3510 If `--clear-caches` is passed, the following items will be reset before
3507 3511 each update:
3508 3512 * the changelog instance and associated indexes
3509 3513 * the rev-branch-cache instance
3510 3514
3511 3515 Examples:
3512 3516
3513 3517 # update for the one last revision
3514 3518 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
3515 3519
3516 3520 $ update for change coming with a new branch
3517 3521 $ hg perfbranchmapupdate --base 'stable' --target 'default'
3518 3522 """
3519 3523 from mercurial import branchmap
3520 3524 from mercurial import repoview
3521 3525
3522 3526 opts = _byteskwargs(opts)
3523 3527 timer, fm = gettimer(ui, opts)
3524 3528 clearcaches = opts[b'clear_caches']
3525 3529 unfi = repo.unfiltered()
3526 3530 x = [None] # used to pass data between closure
3527 3531
3528 3532 # we use a `list` here to avoid possible side effect from smartset
3529 3533 baserevs = list(scmutil.revrange(repo, base))
3530 3534 targetrevs = list(scmutil.revrange(repo, target))
3531 3535 if not baserevs:
3532 3536 raise error.Abort(b'no revisions selected for --base')
3533 3537 if not targetrevs:
3534 3538 raise error.Abort(b'no revisions selected for --target')
3535 3539
3536 3540 # make sure the target branchmap also contains the one in the base
3537 3541 targetrevs = list(set(baserevs) | set(targetrevs))
3538 3542 targetrevs.sort()
3539 3543
3540 3544 cl = repo.changelog
3541 3545 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
3542 3546 allbaserevs.sort()
3543 3547 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
3544 3548
3545 3549 newrevs = list(alltargetrevs.difference(allbaserevs))
3546 3550 newrevs.sort()
3547 3551
3548 3552 allrevs = frozenset(unfi.changelog.revs())
3549 3553 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
3550 3554 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
3551 3555
3552 3556 def basefilter(repo, visibilityexceptions=None):
3553 3557 return basefilterrevs
3554 3558
3555 3559 def targetfilter(repo, visibilityexceptions=None):
3556 3560 return targetfilterrevs
3557 3561
3558 3562 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
3559 3563 ui.status(msg % (len(allbaserevs), len(newrevs)))
3560 3564 if targetfilterrevs:
3561 3565 msg = b'(%d revisions still filtered)\n'
3562 3566 ui.status(msg % len(targetfilterrevs))
3563 3567
3564 3568 try:
3565 3569 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
3566 3570 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
3567 3571
3568 3572 baserepo = repo.filtered(b'__perf_branchmap_update_base')
3569 3573 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
3570 3574
3571 3575 # try to find an existing branchmap to reuse
3572 3576 subsettable = getbranchmapsubsettable()
3573 3577 candidatefilter = subsettable.get(None)
3574 3578 while candidatefilter is not None:
3575 3579 candidatebm = repo.filtered(candidatefilter).branchmap()
3576 3580 if candidatebm.validfor(baserepo):
3577 3581 filtered = repoview.filterrevs(repo, candidatefilter)
3578 3582 missing = [r for r in allbaserevs if r in filtered]
3579 3583 base = candidatebm.copy()
3580 3584 base.update(baserepo, missing)
3581 3585 break
3582 3586 candidatefilter = subsettable.get(candidatefilter)
3583 3587 else:
3584 3588 # no suitable subset where found
3585 3589 base = branchmap.branchcache()
3586 3590 base.update(baserepo, allbaserevs)
3587 3591
3588 3592 def setup():
3589 3593 x[0] = base.copy()
3590 3594 if clearcaches:
3591 3595 unfi._revbranchcache = None
3592 3596 clearchangelog(repo)
3593 3597
3594 3598 def bench():
3595 3599 x[0].update(targetrepo, newrevs)
3596 3600
3597 3601 timer(bench, setup=setup)
3598 3602 fm.end()
3599 3603 finally:
3600 3604 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
3601 3605 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3602 3606
3603 3607
3604 3608 @command(
3605 b'perf--branchmapload',
3609 b'perf::branchmapload|perfbranchmapload',
3606 3610 [
3607 3611 (b'f', b'filter', b'', b'Specify repoview filter'),
3608 3612 (b'', b'list', False, b'List brachmap filter caches'),
3609 3613 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
3610 3614 ]
3611 3615 + formatteropts,
3612 3616 )
3613 3617 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
3614 3618 """benchmark reading the branchmap"""
3615 3619 opts = _byteskwargs(opts)
3616 3620 clearrevlogs = opts[b'clear_revlogs']
3617 3621
3618 3622 if list:
3619 3623 for name, kind, st in repo.cachevfs.readdir(stat=True):
3620 3624 if name.startswith(b'branch2'):
3621 3625 filtername = name.partition(b'-')[2] or b'unfiltered'
3622 3626 ui.status(
3623 3627 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
3624 3628 )
3625 3629 return
3626 3630 if not filter:
3627 3631 filter = None
3628 3632 subsettable = getbranchmapsubsettable()
3629 3633 if filter is None:
3630 3634 repo = repo.unfiltered()
3631 3635 else:
3632 3636 repo = repoview.repoview(repo, filter)
3633 3637
3634 3638 repo.branchmap() # make sure we have a relevant, up to date branchmap
3635 3639
3636 3640 try:
3637 3641 fromfile = branchmap.branchcache.fromfile
3638 3642 except AttributeError:
3639 3643 # older versions
3640 3644 fromfile = branchmap.read
3641 3645
3642 3646 currentfilter = filter
3643 3647 # try once without timer, the filter may not be cached
3644 3648 while fromfile(repo) is None:
3645 3649 currentfilter = subsettable.get(currentfilter)
3646 3650 if currentfilter is None:
3647 3651 raise error.Abort(
3648 3652 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
3649 3653 )
3650 3654 repo = repo.filtered(currentfilter)
3651 3655 timer, fm = gettimer(ui, opts)
3652 3656
3653 3657 def setup():
3654 3658 if clearrevlogs:
3655 3659 clearchangelog(repo)
3656 3660
3657 3661 def bench():
3658 3662 fromfile(repo)
3659 3663
3660 3664 timer(bench, setup=setup)
3661 3665 fm.end()
3662 3666
3663 3667
3664 @command(b'perf--loadmarkers')
3668 @command(b'perf::loadmarkers|perfloadmarkers')
3665 3669 def perfloadmarkers(ui, repo):
3666 3670 """benchmark the time to parse the on-disk markers for a repo
3667 3671
3668 3672 Result is the number of markers in the repo."""
3669 3673 timer, fm = gettimer(ui)
3670 3674 svfs = getsvfs(repo)
3671 3675 timer(lambda: len(obsolete.obsstore(svfs)))
3672 3676 fm.end()
3673 3677
3674 3678
3675 3679 @command(
3676 b'perf--lrucachedict',
3680 b'perf::lrucachedict|perflrucachedict',
3677 3681 formatteropts
3678 3682 + [
3679 3683 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
3680 3684 (b'', b'mincost', 0, b'smallest cost of items in cache'),
3681 3685 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
3682 3686 (b'', b'size', 4, b'size of cache'),
3683 3687 (b'', b'gets', 10000, b'number of key lookups'),
3684 3688 (b'', b'sets', 10000, b'number of key sets'),
3685 3689 (b'', b'mixed', 10000, b'number of mixed mode operations'),
3686 3690 (
3687 3691 b'',
3688 3692 b'mixedgetfreq',
3689 3693 50,
3690 3694 b'frequency of get vs set ops in mixed mode',
3691 3695 ),
3692 3696 ],
3693 3697 norepo=True,
3694 3698 )
3695 3699 def perflrucache(
3696 3700 ui,
3697 3701 mincost=0,
3698 3702 maxcost=100,
3699 3703 costlimit=0,
3700 3704 size=4,
3701 3705 gets=10000,
3702 3706 sets=10000,
3703 3707 mixed=10000,
3704 3708 mixedgetfreq=50,
3705 3709 **opts
3706 3710 ):
3707 3711 opts = _byteskwargs(opts)
3708 3712
3709 3713 def doinit():
3710 3714 for i in _xrange(10000):
3711 3715 util.lrucachedict(size)
3712 3716
3713 3717 costrange = list(range(mincost, maxcost + 1))
3714 3718
3715 3719 values = []
3716 3720 for i in _xrange(size):
3717 3721 values.append(random.randint(0, _maxint))
3718 3722
3719 3723 # Get mode fills the cache and tests raw lookup performance with no
3720 3724 # eviction.
3721 3725 getseq = []
3722 3726 for i in _xrange(gets):
3723 3727 getseq.append(random.choice(values))
3724 3728
3725 3729 def dogets():
3726 3730 d = util.lrucachedict(size)
3727 3731 for v in values:
3728 3732 d[v] = v
3729 3733 for key in getseq:
3730 3734 value = d[key]
3731 3735 value # silence pyflakes warning
3732 3736
3733 3737 def dogetscost():
3734 3738 d = util.lrucachedict(size, maxcost=costlimit)
3735 3739 for i, v in enumerate(values):
3736 3740 d.insert(v, v, cost=costs[i])
3737 3741 for key in getseq:
3738 3742 try:
3739 3743 value = d[key]
3740 3744 value # silence pyflakes warning
3741 3745 except KeyError:
3742 3746 pass
3743 3747
3744 3748 # Set mode tests insertion speed with cache eviction.
3745 3749 setseq = []
3746 3750 costs = []
3747 3751 for i in _xrange(sets):
3748 3752 setseq.append(random.randint(0, _maxint))
3749 3753 costs.append(random.choice(costrange))
3750 3754
3751 3755 def doinserts():
3752 3756 d = util.lrucachedict(size)
3753 3757 for v in setseq:
3754 3758 d.insert(v, v)
3755 3759
3756 3760 def doinsertscost():
3757 3761 d = util.lrucachedict(size, maxcost=costlimit)
3758 3762 for i, v in enumerate(setseq):
3759 3763 d.insert(v, v, cost=costs[i])
3760 3764
3761 3765 def dosets():
3762 3766 d = util.lrucachedict(size)
3763 3767 for v in setseq:
3764 3768 d[v] = v
3765 3769
3766 3770 # Mixed mode randomly performs gets and sets with eviction.
3767 3771 mixedops = []
3768 3772 for i in _xrange(mixed):
3769 3773 r = random.randint(0, 100)
3770 3774 if r < mixedgetfreq:
3771 3775 op = 0
3772 3776 else:
3773 3777 op = 1
3774 3778
3775 3779 mixedops.append(
3776 3780 (op, random.randint(0, size * 2), random.choice(costrange))
3777 3781 )
3778 3782
3779 3783 def domixed():
3780 3784 d = util.lrucachedict(size)
3781 3785
3782 3786 for op, v, cost in mixedops:
3783 3787 if op == 0:
3784 3788 try:
3785 3789 d[v]
3786 3790 except KeyError:
3787 3791 pass
3788 3792 else:
3789 3793 d[v] = v
3790 3794
3791 3795 def domixedcost():
3792 3796 d = util.lrucachedict(size, maxcost=costlimit)
3793 3797
3794 3798 for op, v, cost in mixedops:
3795 3799 if op == 0:
3796 3800 try:
3797 3801 d[v]
3798 3802 except KeyError:
3799 3803 pass
3800 3804 else:
3801 3805 d.insert(v, v, cost=cost)
3802 3806
3803 3807 benches = [
3804 3808 (doinit, b'init'),
3805 3809 ]
3806 3810
3807 3811 if costlimit:
3808 3812 benches.extend(
3809 3813 [
3810 3814 (dogetscost, b'gets w/ cost limit'),
3811 3815 (doinsertscost, b'inserts w/ cost limit'),
3812 3816 (domixedcost, b'mixed w/ cost limit'),
3813 3817 ]
3814 3818 )
3815 3819 else:
3816 3820 benches.extend(
3817 3821 [
3818 3822 (dogets, b'gets'),
3819 3823 (doinserts, b'inserts'),
3820 3824 (dosets, b'sets'),
3821 3825 (domixed, b'mixed'),
3822 3826 ]
3823 3827 )
3824 3828
3825 3829 for fn, title in benches:
3826 3830 timer, fm = gettimer(ui, opts)
3827 3831 timer(fn, title=title)
3828 3832 fm.end()
3829 3833
3830 3834
3831 3835 @command(
3832 b'perf--write',
3836 b'perf::write|perfwrite',
3833 3837 formatteropts
3834 3838 + [
3835 3839 (b'', b'write-method', b'write', b'ui write method'),
3836 3840 (b'', b'nlines', 100, b'number of lines'),
3837 3841 (b'', b'nitems', 100, b'number of items (per line)'),
3838 3842 (b'', b'item', b'x', b'item that is written'),
3839 3843 (b'', b'batch-line', None, b'pass whole line to write method at once'),
3840 3844 (b'', b'flush-line', None, b'flush after each line'),
3841 3845 ],
3842 3846 )
3843 3847 def perfwrite(ui, repo, **opts):
3844 3848 """microbenchmark ui.write (and others)"""
3845 3849 opts = _byteskwargs(opts)
3846 3850
3847 3851 write = getattr(ui, _sysstr(opts[b'write_method']))
3848 3852 nlines = int(opts[b'nlines'])
3849 3853 nitems = int(opts[b'nitems'])
3850 3854 item = opts[b'item']
3851 3855 batch_line = opts.get(b'batch_line')
3852 3856 flush_line = opts.get(b'flush_line')
3853 3857
3854 3858 if batch_line:
3855 3859 line = item * nitems + b'\n'
3856 3860
3857 3861 def benchmark():
3858 3862 for i in pycompat.xrange(nlines):
3859 3863 if batch_line:
3860 3864 write(line)
3861 3865 else:
3862 3866 for i in pycompat.xrange(nitems):
3863 3867 write(item)
3864 3868 write(b'\n')
3865 3869 if flush_line:
3866 3870 ui.flush()
3867 3871 ui.flush()
3868 3872
3869 3873 timer, fm = gettimer(ui, opts)
3870 3874 timer(benchmark)
3871 3875 fm.end()
3872 3876
3873 3877
3874 3878 def uisetup(ui):
3875 3879 if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
3876 3880 commands, b'debugrevlogopts'
3877 3881 ):
3878 3882 # for "historical portability":
3879 3883 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
3880 3884 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
3881 3885 # openrevlog() should cause failure, because it has been
3882 3886 # available since 3.5 (or 49c583ca48c4).
3883 3887 def openrevlog(orig, repo, cmd, file_, opts):
3884 3888 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
3885 3889 raise error.Abort(
3886 3890 b"This version doesn't support --dir option",
3887 3891 hint=b"use 3.5 or later",
3888 3892 )
3889 3893 return orig(repo, cmd, file_, opts)
3890 3894
3891 3895 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
3892 3896
3893 3897
3894 3898 @command(
3895 b'perf--progress',
3899 b'perf::progress|perfprogress',
3896 3900 formatteropts
3897 3901 + [
3898 3902 (b'', b'topic', b'topic', b'topic for progress messages'),
3899 3903 (b'c', b'total', 1000000, b'total value we are progressing to'),
3900 3904 ],
3901 3905 norepo=True,
3902 3906 )
3903 3907 def perfprogress(ui, topic=None, total=None, **opts):
3904 3908 """printing of progress bars"""
3905 3909 opts = _byteskwargs(opts)
3906 3910
3907 3911 timer, fm = gettimer(ui, opts)
3908 3912
3909 3913 def doprogress():
3910 3914 with ui.makeprogress(topic, total=total) as progress:
3911 3915 for i in _xrange(total):
3912 3916 progress.increment()
3913 3917
3914 3918 timer(doprogress)
3915 3919 fm.end()
@@ -1,425 +1,425
1 1 #require test-repo
2 2
3 3 Set vars:
4 4
5 5 $ . "$TESTDIR/helpers-testrepo.sh"
6 6 $ CONTRIBDIR="$TESTDIR/../contrib"
7 7
8 8 Prepare repo:
9 9
10 10 $ hg init
11 11
12 12 $ echo this is file a > a
13 13 $ hg add a
14 14 $ hg commit -m first
15 15
16 16 $ echo adding to file a >> a
17 17 $ hg commit -m second
18 18
19 19 $ echo adding more to file a >> a
20 20 $ hg commit -m third
21 21
22 22 $ hg up -r 0
23 23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 24 $ echo merge-this >> a
25 25 $ hg commit -m merge-able
26 26 created new head
27 27
28 28 $ hg up -r 2
29 29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
30 30
31 31 perfstatus
32 32
33 33 $ cat >> $HGRCPATH << EOF
34 34 > [extensions]
35 35 > perf=$CONTRIBDIR/perf.py
36 36 > [perf]
37 37 > presleep=0
38 38 > stub=on
39 39 > parentscount=1
40 40 > EOF
41 41 $ hg help -e perf
42 42 perf extension - helper extension to measure performance
43 43
44 44 Configurations
45 45 ==============
46 46
47 47 "perf"
48 48 ------
49 49
50 50 "all-timing"
51 51 When set, additional statistics will be reported for each benchmark: best,
52 52 worst, median average. If not set only the best timing is reported
53 53 (default: off).
54 54
55 55 "presleep"
56 56 number of second to wait before any group of runs (default: 1)
57 57
58 58 "pre-run"
59 59 number of run to perform before starting measurement.
60 60
61 61 "profile-benchmark"
62 62 Enable profiling for the benchmarked section. (The first iteration is
63 63 benchmarked)
64 64
65 65 "run-limits"
66 66 Control the number of runs each benchmark will perform. The option value
67 67 should be a list of '<time>-<numberofrun>' pairs. After each run the
68 68 conditions are considered in order with the following logic:
69 69
70 70 If benchmark has been running for <time> seconds, and we have performed
71 71 <numberofrun> iterations, stop the benchmark,
72 72
73 73 The default value is: '3.0-100, 10.0-3'
74 74
75 75 "stub"
76 76 When set, benchmarks will only be run once, useful for testing (default:
77 77 off)
78 78
79 79 list of commands:
80 80
81 perf--addremove
81 perf::addremove
82 82 (no help text available)
83 perf--ancestors
83 perf::ancestors
84 84 (no help text available)
85 perf--ancestorset
85 perf::ancestorset
86 86 (no help text available)
87 perf--annotate
87 perf::annotate
88 88 (no help text available)
89 perf--bdiff benchmark a bdiff between revisions
90 perf--bookmarks
89 perf::bdiff benchmark a bdiff between revisions
90 perf::bookmarks
91 91 benchmark parsing bookmarks from disk to memory
92 perf--branchmap
92 perf::branchmap
93 93 benchmark the update of a branchmap
94 perf--branchmapload
94 perf::branchmapload
95 95 benchmark reading the branchmap
96 perf--branchmapupdate
96 perf::branchmapupdate
97 97 benchmark branchmap update from for <base> revs to <target>
98 98 revs
99 perf--bundleread
99 perf::bundleread
100 100 Benchmark reading of bundle files.
101 perf--cca (no help text available)
102 perf--changegroupchangelog
101 perf::cca (no help text available)
102 perf::changegroupchangelog
103 103 Benchmark producing a changelog group for a changegroup.
104 perf--changeset
104 perf::changeset
105 105 (no help text available)
106 perf--ctxfiles
106 perf::ctxfiles
107 107 (no help text available)
108 perf--diffwd Profile diff of working directory changes
109 perf--dirfoldmap
108 perf::diffwd Profile diff of working directory changes
109 perf::dirfoldmap
110 110 benchmap a 'dirstate._map.dirfoldmap.get()' request
111 perf--dirs (no help text available)
112 perf--dirstate
111 perf::dirs (no help text available)
112 perf::dirstate
113 113 benchmap the time of various distate operations
114 perf--dirstatedirs
114 perf::dirstatedirs
115 115 benchmap a 'dirstate.hasdir' call from an empty 'dirs' cache
116 perf--dirstatefoldmap
116 perf::dirstatefoldmap
117 117 benchmap a 'dirstate._map.filefoldmap.get()' request
118 perf--dirstatewrite
118 perf::dirstatewrite
119 119 benchmap the time it take to write a dirstate on disk
120 perf--discovery
120 perf::discovery
121 121 benchmark discovery between local repo and the peer at given
122 122 path
123 perf--fncacheencode
123 perf::fncacheencode
124 124 (no help text available)
125 perf--fncacheload
125 perf::fncacheload
126 126 (no help text available)
127 perf--fncachewrite
127 perf::fncachewrite
128 128 (no help text available)
129 perf--heads benchmark the computation of a changelog heads
130 perf--helper-mergecopies
129 perf::heads benchmark the computation of a changelog heads
130 perf::helper-mergecopies
131 131 find statistics about potential parameters for
132 132 'perfmergecopies'
133 perf--helper-pathcopies
133 perf::helper-pathcopies
134 134 find statistic about potential parameters for the
135 135 'perftracecopies'
136 perf--ignore benchmark operation related to computing ignore
137 perf--index benchmark index creation time followed by a lookup
138 perf--linelogedits
136 perf::ignore benchmark operation related to computing ignore
137 perf::index benchmark index creation time followed by a lookup
138 perf::linelogedits
139 139 (no help text available)
140 perf--loadmarkers
140 perf::loadmarkers
141 141 benchmark the time to parse the on-disk markers for a repo
142 perf--log (no help text available)
143 perf--lookup (no help text available)
144 perf--lrucachedict
142 perf::log (no help text available)
143 perf::lookup (no help text available)
144 perf::lrucachedict
145 145 (no help text available)
146 perf--manifest
146 perf::manifest
147 147 benchmark the time to read a manifest from disk and return a
148 148 usable
149 perf--mergecalculate
149 perf::mergecalculate
150 150 (no help text available)
151 perf--mergecopies
151 perf::mergecopies
152 152 measure runtime of 'copies.mergecopies'
153 perf--moonwalk
153 perf::moonwalk
154 154 benchmark walking the changelog backwards
155 perf--nodelookup
155 perf::nodelookup
156 156 (no help text available)
157 perf--nodemap
157 perf::nodemap
158 158 benchmark the time necessary to look up revision from a cold
159 159 nodemap
160 perf--parents
160 perf::parents
161 161 benchmark the time necessary to fetch one changeset's parents.
162 perf--pathcopies
162 perf::pathcopies
163 163 benchmark the copy tracing logic
164 perf--phases benchmark phasesets computation
165 perf--phasesremote
164 perf::phases benchmark phasesets computation
165 perf::phasesremote
166 166 benchmark time needed to analyse phases of the remote server
167 perf--progress
167 perf::progress
168 168 printing of progress bars
169 perf--rawfiles
169 perf::rawfiles
170 170 (no help text available)
171 perf--revlogchunks
171 perf::revlogchunks
172 172 Benchmark operations on revlog chunks.
173 perf--revlogindex
173 perf::revlogindex
174 174 Benchmark operations against a revlog index.
175 perf--revlogrevision
175 perf::revlogrevision
176 176 Benchmark obtaining a revlog revision.
177 perf--revlogrevisions
177 perf::revlogrevisions
178 178 Benchmark reading a series of revisions from a revlog.
179 perf--revlogwrite
179 perf::revlogwrite
180 180 Benchmark writing a series of revisions to a revlog.
181 perf--revrange
181 perf::revrange
182 182 (no help text available)
183 perf--revset benchmark the execution time of a revset
184 perf--startup
183 perf::revset benchmark the execution time of a revset
184 perf::startup
185 185 (no help text available)
186 perf--status benchmark the performance of a single status call
187 perf--tags (no help text available)
188 perf--templating
186 perf::status benchmark the performance of a single status call
187 perf::tags (no help text available)
188 perf::templating
189 189 test the rendering time of a given template
190 perf--unidiff
190 perf::unidiff
191 191 benchmark a unified diff between revisions
192 perf--volatilesets
192 perf::volatilesets
193 193 benchmark the computation of various volatile set
194 perf--walk (no help text available)
195 perf--write microbenchmark ui.write (and others)
194 perf::walk (no help text available)
195 perf::write microbenchmark ui.write (and others)
196 196
197 197 (use 'hg help -v perf' to show built-in aliases and global options)
198 198
199 199 $ hg help perfaddremove
200 hg perf--addremove
200 hg perf::addremove
201 201
202 202 aliases: perfaddremove
203 203
204 204 (no help text available)
205 205
206 206 options:
207 207
208 208 -T --template TEMPLATE display with template
209 209
210 210 (some details hidden, use --verbose to show complete help)
211 211
212 212 $ hg perfaddremove
213 213 $ hg perfancestors
214 214 $ hg perfancestorset 2
215 215 $ hg perfannotate a
216 216 $ hg perfbdiff -c 1
217 217 $ hg perfbdiff --alldata 1
218 218 $ hg perfunidiff -c 1
219 219 $ hg perfunidiff --alldata 1
220 220 $ hg perfbookmarks
221 221 $ hg perfbranchmap
222 222 $ hg perfbranchmapload
223 223 $ hg perfbranchmapupdate --base "not tip" --target "tip"
224 224 benchmark of branchmap with 3 revisions with 1 new ones
225 225 $ hg perfcca
226 226 $ hg perfchangegroupchangelog
227 227 $ hg perfchangegroupchangelog --cgversion 01
228 228 $ hg perfchangeset 2
229 229 $ hg perfctxfiles 2
230 230 $ hg perfdiffwd
231 231 $ hg perfdirfoldmap
232 232 $ hg perfdirs
233 233 $ hg perfdirstate
234 234 $ hg perfdirstate --contains
235 235 $ hg perfdirstate --iteration
236 236 $ hg perfdirstatedirs
237 237 $ hg perfdirstatefoldmap
238 238 $ hg perfdirstatewrite
239 239 #if repofncache
240 240 $ hg perffncacheencode
241 241 $ hg perffncacheload
242 242 $ hg debugrebuildfncache
243 243 fncache already up to date
244 244 $ hg perffncachewrite
245 245 $ hg debugrebuildfncache
246 246 fncache already up to date
247 247 #endif
248 248 $ hg perfheads
249 249 $ hg perfignore
250 250 $ hg perfindex
251 251 $ hg perflinelogedits -n 1
252 252 $ hg perfloadmarkers
253 253 $ hg perflog
254 254 $ hg perflookup 2
255 255 $ hg perflrucache
256 256 $ hg perfmanifest 2
257 257 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
258 258 $ hg perfmanifest -m 44fe2c8352bb
259 259 abort: manifest revision must be integer or full node
260 260 [255]
261 261 $ hg perfmergecalculate -r 3
262 262 $ hg perfmoonwalk
263 263 $ hg perfnodelookup 2
264 264 $ hg perfpathcopies 1 2
265 265 $ hg perfprogress --total 1000
266 266 $ hg perfrawfiles 2
267 267 $ hg perfrevlogindex -c
268 268 #if reporevlogstore
269 269 $ hg perfrevlogrevisions .hg/store/data/a.i
270 270 #endif
271 271 $ hg perfrevlogrevision -m 0
272 272 $ hg perfrevlogchunks -c
273 273 $ hg perfrevrange
274 274 $ hg perfrevset 'all()'
275 275 $ hg perfstartup
276 276 $ hg perfstatus
277 277 $ hg perfstatus --dirstate
278 278 $ hg perftags
279 279 $ hg perftemplating
280 280 $ hg perfvolatilesets
281 281 $ hg perfwalk
282 282 $ hg perfparents
283 283 $ hg perfdiscovery -q .
284 284
285 285 Test run control
286 286 ----------------
287 287
288 288 Simple single entry
289 289
290 290 $ hg perfparents --config perf.stub=no --config perf.run-limits='0.000000001-15'
291 291 ! wall * comb * user * sys * (best of 15) (glob)
292 292
293 293 Multiple entries
294 294
295 295 $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-5'
296 296 ! wall * comb * user * sys * (best of 5) (glob)
297 297
298 298 error case are ignored
299 299
300 300 $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-5'
301 301 malformatted run limit entry, missing "-": 500
302 302 ! wall * comb * user * sys * (best of 5) (glob)
303 303 $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-12, 0.000000001-5'
304 304 malformatted run limit entry, could not convert string to float: aaa: aaa-12 (no-py3 !)
305 305 malformatted run limit entry, could not convert string to float: 'aaa': aaa-12 (py3 !)
306 306 ! wall * comb * user * sys * (best of 5) (glob)
307 307 $ hg perfparents --config perf.stub=no --config perf.run-limits='12-aaaaaa, 0.000000001-5'
308 308 malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 12-aaaaaa
309 309 ! wall * comb * user * sys * (best of 5) (glob)
310 310
311 311 test actual output
312 312 ------------------
313 313
314 314 normal output:
315 315
316 316 $ hg perfheads --config perf.stub=no
317 317 ! wall * comb * user * sys * (best of *) (glob)
318 318
319 319 detailed output:
320 320
321 321 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
322 322 ! wall * comb * user * sys * (best of *) (glob)
323 323 ! wall * comb * user * sys * (max of *) (glob)
324 324 ! wall * comb * user * sys * (avg of *) (glob)
325 325 ! wall * comb * user * sys * (median of *) (glob)
326 326
327 327 test json output
328 328 ----------------
329 329
330 330 normal output:
331 331
332 332 $ hg perfheads --template json --config perf.stub=no
333 333 [
334 334 {
335 335 "comb": *, (glob)
336 336 "count": *, (glob)
337 337 "sys": *, (glob)
338 338 "user": *, (glob)
339 339 "wall": * (glob)
340 340 }
341 341 ]
342 342
343 343 detailed output:
344 344
345 345 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
346 346 [
347 347 {
348 348 "avg.comb": *, (glob)
349 349 "avg.count": *, (glob)
350 350 "avg.sys": *, (glob)
351 351 "avg.user": *, (glob)
352 352 "avg.wall": *, (glob)
353 353 "comb": *, (glob)
354 354 "count": *, (glob)
355 355 "max.comb": *, (glob)
356 356 "max.count": *, (glob)
357 357 "max.sys": *, (glob)
358 358 "max.user": *, (glob)
359 359 "max.wall": *, (glob)
360 360 "median.comb": *, (glob)
361 361 "median.count": *, (glob)
362 362 "median.sys": *, (glob)
363 363 "median.user": *, (glob)
364 364 "median.wall": *, (glob)
365 365 "sys": *, (glob)
366 366 "user": *, (glob)
367 367 "wall": * (glob)
368 368 }
369 369 ]
370 370
371 371 Test pre-run feature
372 372 --------------------
373 373
374 374 (perf discovery has some spurious output)
375 375
376 376 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=0
377 377 ! wall * comb * user * sys * (best of 1) (glob)
378 378 searching for changes
379 379 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=1
380 380 ! wall * comb * user * sys * (best of 1) (glob)
381 381 searching for changes
382 382 searching for changes
383 383 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=3
384 384 ! wall * comb * user * sys * (best of 1) (glob)
385 385 searching for changes
386 386 searching for changes
387 387 searching for changes
388 388 searching for changes
389 389
390 390 test profile-benchmark option
391 391 ------------------------------
392 392
393 393 Function to check that statprof ran
394 394 $ statprofran () {
395 395 > egrep 'Sample count:|No samples recorded' > /dev/null
396 396 > }
397 397 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.profile-benchmark=yes 2>&1 | statprofran
398 398
399 399 Check perf.py for historical portability
400 400 ----------------------------------------
401 401
402 402 $ cd "$TESTDIR/.."
403 403
404 404 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
405 405 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
406 406 > "$TESTDIR"/check-perf-code.py contrib/perf.py
407 407 contrib/perf.py:\d+: (re)
408 408 > from mercurial import (
409 409 import newer module separately in try clause for early Mercurial
410 410 contrib/perf.py:\d+: (re)
411 411 > from mercurial import (
412 412 import newer module separately in try clause for early Mercurial
413 413 contrib/perf.py:\d+: (re)
414 414 > origindexpath = orig.opener.join(orig.indexfile)
415 415 use getvfs()/getsvfs() for early Mercurial
416 416 contrib/perf.py:\d+: (re)
417 417 > origdatapath = orig.opener.join(orig.datafile)
418 418 use getvfs()/getsvfs() for early Mercurial
419 419 contrib/perf.py:\d+: (re)
420 420 > vfs = vfsmod.vfs(tmpdir)
421 421 use getvfs()/getsvfs() for early Mercurial
422 422 contrib/perf.py:\d+: (re)
423 423 > vfs.options = getattr(orig.opener, 'options', None)
424 424 use getvfs()/getsvfs() for early Mercurial
425 425 [1]
General Comments 0
You need to be logged in to leave comments. Login now