##// END OF EJS Templates
perf: add a perf::stream-generate command...
marmoute -
r51570:b8de54ac default
parent child Browse files
Show More
@@ -1,4337 +1,4369 b''
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance
3 3
4 4 Configurations
5 5 ==============
6 6
7 7 ``perf``
8 8 --------
9 9
10 10 ``all-timing``
11 11 When set, additional statistics will be reported for each benchmark: best,
12 12 worst, median average. If not set only the best timing is reported
13 13 (default: off).
14 14
15 15 ``presleep``
16 16 number of seconds to wait before any group of runs (default: 1)
17 17
18 18 ``pre-run``
19 19 number of runs to perform before starting measurement.
20 20
21 21 ``profile-benchmark``
22 22 Enable profiling for the benchmarked section.
23 23 (The first iteration is benchmarked)
24 24
25 25 ``run-limits``
26 26 Control the number of runs each benchmark will perform. The option value
27 27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 28 conditions are considered in order with the following logic:
29 29
30 30 If benchmark has been running for <time> seconds, and we have performed
31 31 <numberofrun> iterations, stop the benchmark,
32 32
33 33 The default value is: `3.0-100, 10.0-3`
34 34
35 35 ``stub``
36 36 When set, benchmarks will only be run once, useful for testing
37 37 (default: off)
38 38 '''
39 39
40 40 # "historical portability" policy of perf.py:
41 41 #
42 42 # We have to do:
43 43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 46 # - make historical perf command work correctly with as wide Mercurial
47 47 # version as possible
48 48 #
49 49 # We have to do, if possible with reasonable cost:
50 50 # - make recent perf command for historical feature work correctly
51 51 # with early Mercurial
52 52 #
53 53 # We don't have to do:
54 54 # - make perf command for recent feature work correctly with early
55 55 # Mercurial
56 56
57 57 import contextlib
58 58 import functools
59 59 import gc
60 60 import os
61 61 import random
62 62 import shutil
63 63 import struct
64 64 import sys
65 65 import tempfile
66 66 import threading
67 67 import time
68 68
69 69 import mercurial.revlog
70 70 from mercurial import (
71 71 changegroup,
72 72 cmdutil,
73 73 commands,
74 74 copies,
75 75 error,
76 76 extensions,
77 77 hg,
78 78 mdiff,
79 79 merge,
80 80 util,
81 81 )
82 82
83 83 # for "historical portability":
84 84 # try to import modules separately (in dict order), and ignore
85 85 # failure, because these aren't available with early Mercurial
86 86 try:
87 87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 88 except ImportError:
89 89 pass
90 90 try:
91 91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 92 except ImportError:
93 93 pass
94 94 try:
95 95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96 96
97 97 dir(registrar) # forcibly load it
98 98 except ImportError:
99 99 registrar = None
100 100 try:
101 101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 102 except ImportError:
103 103 pass
104 104 try:
105 105 from mercurial.utils import repoviewutil # since 5.0
106 106 except ImportError:
107 107 repoviewutil = None
108 108 try:
109 109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 110 except ImportError:
111 111 pass
112 112 try:
113 113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 114 except ImportError:
115 115 pass
116 116
117 117 try:
118 118 from mercurial import profiling
119 119 except ImportError:
120 120 profiling = None
121 121
122 122 try:
123 123 from mercurial.revlogutils import constants as revlog_constants
124 124
125 125 perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')
126 126
127 127 def revlog(opener, *args, **kwargs):
128 128 return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)
129 129
130 130
131 131 except (ImportError, AttributeError):
132 132 perf_rl_kind = None
133 133
134 134 def revlog(opener, *args, **kwargs):
135 135 return mercurial.revlog.revlog(opener, *args, **kwargs)
136 136
137 137
138 138 def identity(a):
139 139 return a
140 140
141 141
142 142 try:
143 143 from mercurial import pycompat
144 144
145 145 getargspec = pycompat.getargspec # added to module after 4.5
146 146 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
147 147 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
148 148 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
149 149 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
150 150 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
151 151 if pycompat.ispy3:
152 152 _maxint = sys.maxsize # per py3 docs for replacing maxint
153 153 else:
154 154 _maxint = sys.maxint
155 155 except (NameError, ImportError, AttributeError):
156 156 import inspect
157 157
158 158 getargspec = inspect.getargspec
159 159 _byteskwargs = identity
160 160 _bytestr = str
161 161 fsencode = identity # no py3 support
162 162 _maxint = sys.maxint # no py3 support
163 163 _sysstr = lambda x: x # no py3 support
164 164 _xrange = xrange
165 165
166 166 try:
167 167 # 4.7+
168 168 queue = pycompat.queue.Queue
169 169 except (NameError, AttributeError, ImportError):
170 170 # <4.7.
171 171 try:
172 172 queue = pycompat.queue
173 173 except (NameError, AttributeError, ImportError):
174 174 import Queue as queue
175 175
176 176 try:
177 177 from mercurial import logcmdutil
178 178
179 179 makelogtemplater = logcmdutil.maketemplater
180 180 except (AttributeError, ImportError):
181 181 try:
182 182 makelogtemplater = cmdutil.makelogtemplater
183 183 except (AttributeError, ImportError):
184 184 makelogtemplater = None
185 185
186 186 # for "historical portability":
187 187 # define util.safehasattr forcibly, because util.safehasattr has been
188 188 # available since 1.9.3 (or 94b200a11cf7)
189 189 _undefined = object()
190 190
191 191
192 192 def safehasattr(thing, attr):
193 193 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
194 194
195 195
196 196 setattr(util, 'safehasattr', safehasattr)
197 197
198 198 # for "historical portability":
199 199 # define util.timer forcibly, because util.timer has been available
200 200 # since ae5d60bb70c9
201 201 if safehasattr(time, 'perf_counter'):
202 202 util.timer = time.perf_counter
203 203 elif os.name == b'nt':
204 204 util.timer = time.clock
205 205 else:
206 206 util.timer = time.time
207 207
208 208 # for "historical portability":
209 209 # use locally defined empty option list, if formatteropts isn't
210 210 # available, because commands.formatteropts has been available since
211 211 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
212 212 # available since 2.2 (or ae5f92e154d3)
213 213 formatteropts = getattr(
214 214 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
215 215 )
216 216
217 217 # for "historical portability":
218 218 # use locally defined option list, if debugrevlogopts isn't available,
219 219 # because commands.debugrevlogopts has been available since 3.7 (or
220 220 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
221 221 # since 1.9 (or a79fea6b3e77).
222 222 revlogopts = getattr(
223 223 cmdutil,
224 224 "debugrevlogopts",
225 225 getattr(
226 226 commands,
227 227 "debugrevlogopts",
228 228 [
229 229 (b'c', b'changelog', False, b'open changelog'),
230 230 (b'm', b'manifest', False, b'open manifest'),
231 231 (b'', b'dir', False, b'open directory manifest'),
232 232 ],
233 233 ),
234 234 )
235 235
236 236 cmdtable = {}
237 237
238 238
239 239 # for "historical portability":
240 240 # define parsealiases locally, because cmdutil.parsealiases has been
241 241 # available since 1.5 (or 6252852b4332)
242 242 def parsealiases(cmd):
243 243 return cmd.split(b"|")
244 244
245 245
246 246 if safehasattr(registrar, 'command'):
247 247 command = registrar.command(cmdtable)
248 248 elif safehasattr(cmdutil, 'command'):
249 249 command = cmdutil.command(cmdtable)
250 250 if 'norepo' not in getargspec(command).args:
251 251 # for "historical portability":
252 252 # wrap original cmdutil.command, because "norepo" option has
253 253 # been available since 3.1 (or 75a96326cecb)
254 254 _command = command
255 255
256 256 def command(name, options=(), synopsis=None, norepo=False):
257 257 if norepo:
258 258 commands.norepo += b' %s' % b' '.join(parsealiases(name))
259 259 return _command(name, list(options), synopsis)
260 260
261 261
262 262 else:
263 263 # for "historical portability":
264 264 # define "@command" annotation locally, because cmdutil.command
265 265 # has been available since 1.9 (or 2daa5179e73f)
266 266 def command(name, options=(), synopsis=None, norepo=False):
267 267 def decorator(func):
268 268 if synopsis:
269 269 cmdtable[name] = func, list(options), synopsis
270 270 else:
271 271 cmdtable[name] = func, list(options)
272 272 if norepo:
273 273 commands.norepo += b' %s' % b' '.join(parsealiases(name))
274 274 return func
275 275
276 276 return decorator
277 277
278 278
279 279 try:
280 280 import mercurial.registrar
281 281 import mercurial.configitems
282 282
283 283 configtable = {}
284 284 configitem = mercurial.registrar.configitem(configtable)
285 285 configitem(
286 286 b'perf',
287 287 b'presleep',
288 288 default=mercurial.configitems.dynamicdefault,
289 289 experimental=True,
290 290 )
291 291 configitem(
292 292 b'perf',
293 293 b'stub',
294 294 default=mercurial.configitems.dynamicdefault,
295 295 experimental=True,
296 296 )
297 297 configitem(
298 298 b'perf',
299 299 b'parentscount',
300 300 default=mercurial.configitems.dynamicdefault,
301 301 experimental=True,
302 302 )
303 303 configitem(
304 304 b'perf',
305 305 b'all-timing',
306 306 default=mercurial.configitems.dynamicdefault,
307 307 experimental=True,
308 308 )
309 309 configitem(
310 310 b'perf',
311 311 b'pre-run',
312 312 default=mercurial.configitems.dynamicdefault,
313 313 )
314 314 configitem(
315 315 b'perf',
316 316 b'profile-benchmark',
317 317 default=mercurial.configitems.dynamicdefault,
318 318 )
319 319 configitem(
320 320 b'perf',
321 321 b'run-limits',
322 322 default=mercurial.configitems.dynamicdefault,
323 323 experimental=True,
324 324 )
325 325 except (ImportError, AttributeError):
326 326 pass
327 327 except TypeError:
328 328 # compatibility fix for a11fd395e83f
329 329 # hg version: 5.2
330 330 configitem(
331 331 b'perf',
332 332 b'presleep',
333 333 default=mercurial.configitems.dynamicdefault,
334 334 )
335 335 configitem(
336 336 b'perf',
337 337 b'stub',
338 338 default=mercurial.configitems.dynamicdefault,
339 339 )
340 340 configitem(
341 341 b'perf',
342 342 b'parentscount',
343 343 default=mercurial.configitems.dynamicdefault,
344 344 )
345 345 configitem(
346 346 b'perf',
347 347 b'all-timing',
348 348 default=mercurial.configitems.dynamicdefault,
349 349 )
350 350 configitem(
351 351 b'perf',
352 352 b'pre-run',
353 353 default=mercurial.configitems.dynamicdefault,
354 354 )
355 355 configitem(
356 356 b'perf',
357 357 b'profile-benchmark',
358 358 default=mercurial.configitems.dynamicdefault,
359 359 )
360 360 configitem(
361 361 b'perf',
362 362 b'run-limits',
363 363 default=mercurial.configitems.dynamicdefault,
364 364 )
365 365
366 366
367 367 def getlen(ui):
368 368 if ui.configbool(b"perf", b"stub", False):
369 369 return lambda x: 1
370 370 return len
371 371
372 372
373 373 class noop:
374 374 """dummy context manager"""
375 375
376 376 def __enter__(self):
377 377 pass
378 378
379 379 def __exit__(self, *args):
380 380 pass
381 381
382 382
383 383 NOOPCTX = noop()
384 384
385 385
386 386 def gettimer(ui, opts=None):
387 387 """return a timer function and formatter: (timer, formatter)
388 388
389 389 This function exists to gather the creation of formatter in a single
390 390 place instead of duplicating it in all performance commands."""
391 391
392 392 # enforce an idle period before execution to counteract power management
393 393 # experimental config: perf.presleep
394 394 time.sleep(getint(ui, b"perf", b"presleep", 1))
395 395
396 396 if opts is None:
397 397 opts = {}
398 398 # redirect all to stderr unless buffer api is in use
399 399 if not ui._buffers:
400 400 ui = ui.copy()
401 401 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
402 402 if uifout:
403 403 # for "historical portability":
404 404 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
405 405 uifout.set(ui.ferr)
406 406
407 407 # get a formatter
408 408 uiformatter = getattr(ui, 'formatter', None)
409 409 if uiformatter:
410 410 fm = uiformatter(b'perf', opts)
411 411 else:
412 412 # for "historical portability":
413 413 # define formatter locally, because ui.formatter has been
414 414 # available since 2.2 (or ae5f92e154d3)
415 415 from mercurial import node
416 416
417 417 class defaultformatter:
418 418 """Minimized composition of baseformatter and plainformatter"""
419 419
420 420 def __init__(self, ui, topic, opts):
421 421 self._ui = ui
422 422 if ui.debugflag:
423 423 self.hexfunc = node.hex
424 424 else:
425 425 self.hexfunc = node.short
426 426
427 427 def __nonzero__(self):
428 428 return False
429 429
430 430 __bool__ = __nonzero__
431 431
432 432 def startitem(self):
433 433 pass
434 434
435 435 def data(self, **data):
436 436 pass
437 437
438 438 def write(self, fields, deftext, *fielddata, **opts):
439 439 self._ui.write(deftext % fielddata, **opts)
440 440
441 441 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
442 442 if cond:
443 443 self._ui.write(deftext % fielddata, **opts)
444 444
445 445 def plain(self, text, **opts):
446 446 self._ui.write(text, **opts)
447 447
448 448 def end(self):
449 449 pass
450 450
451 451 fm = defaultformatter(ui, b'perf', opts)
452 452
453 453 # stub function, runs code only once instead of in a loop
454 454 # experimental config: perf.stub
455 455 if ui.configbool(b"perf", b"stub", False):
456 456 return functools.partial(stub_timer, fm), fm
457 457
458 458 # experimental config: perf.all-timing
459 459 displayall = ui.configbool(b"perf", b"all-timing", False)
460 460
461 461 # experimental config: perf.run-limits
462 462 limitspec = ui.configlist(b"perf", b"run-limits", [])
463 463 limits = []
464 464 for item in limitspec:
465 465 parts = item.split(b'-', 1)
466 466 if len(parts) < 2:
467 467 ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
468 468 continue
469 469 try:
470 470 time_limit = float(_sysstr(parts[0]))
471 471 except ValueError as e:
472 472 ui.warn(
473 473 (
474 474 b'malformatted run limit entry, %s: %s\n'
475 475 % (_bytestr(e), item)
476 476 )
477 477 )
478 478 continue
479 479 try:
480 480 run_limit = int(_sysstr(parts[1]))
481 481 except ValueError as e:
482 482 ui.warn(
483 483 (
484 484 b'malformatted run limit entry, %s: %s\n'
485 485 % (_bytestr(e), item)
486 486 )
487 487 )
488 488 continue
489 489 limits.append((time_limit, run_limit))
490 490 if not limits:
491 491 limits = DEFAULTLIMITS
492 492
493 493 profiler = None
494 494 if profiling is not None:
495 495 if ui.configbool(b"perf", b"profile-benchmark", False):
496 496 profiler = profiling.profile(ui)
497 497
498 498 prerun = getint(ui, b"perf", b"pre-run", 0)
499 499 t = functools.partial(
500 500 _timer,
501 501 fm,
502 502 displayall=displayall,
503 503 limits=limits,
504 504 prerun=prerun,
505 505 profiler=profiler,
506 506 )
507 507 return t, fm
508 508
509 509
510 510 def stub_timer(fm, func, setup=None, title=None):
511 511 if setup is not None:
512 512 setup()
513 513 func()
514 514
515 515
516 516 @contextlib.contextmanager
517 517 def timeone():
518 518 r = []
519 519 ostart = os.times()
520 520 cstart = util.timer()
521 521 yield r
522 522 cstop = util.timer()
523 523 ostop = os.times()
524 524 a, b = ostart, ostop
525 525 r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
526 526
527 527
528 528 # list of stop condition (elapsed time, minimal run count)
529 529 DEFAULTLIMITS = (
530 530 (3.0, 100),
531 531 (10.0, 3),
532 532 )
533 533
534 534
535 535 @contextlib.contextmanager
536 536 def noop_context():
537 537 yield
538 538
539 539
540 540 def _timer(
541 541 fm,
542 542 func,
543 543 setup=None,
544 544 context=noop_context,
545 545 title=None,
546 546 displayall=False,
547 547 limits=DEFAULTLIMITS,
548 548 prerun=0,
549 549 profiler=None,
550 550 ):
551 551 gc.collect()
552 552 results = []
553 553 begin = util.timer()
554 554 count = 0
555 555 if profiler is None:
556 556 profiler = NOOPCTX
557 557 for i in range(prerun):
558 558 if setup is not None:
559 559 setup()
560 560 with context():
561 561 func()
562 562 keepgoing = True
563 563 while keepgoing:
564 564 if setup is not None:
565 565 setup()
566 566 with context():
567 567 with profiler:
568 568 with timeone() as item:
569 569 r = func()
570 570 profiler = NOOPCTX
571 571 count += 1
572 572 results.append(item[0])
573 573 cstop = util.timer()
574 574 # Look for a stop condition.
575 575 elapsed = cstop - begin
576 576 for t, mincount in limits:
577 577 if elapsed >= t and count >= mincount:
578 578 keepgoing = False
579 579 break
580 580
581 581 formatone(fm, results, title=title, result=r, displayall=displayall)
582 582
583 583
584 584 def formatone(fm, timings, title=None, result=None, displayall=False):
585 585 count = len(timings)
586 586
587 587 fm.startitem()
588 588
589 589 if title:
590 590 fm.write(b'title', b'! %s\n', title)
591 591 if result:
592 592 fm.write(b'result', b'! result: %s\n', result)
593 593
594 594 def display(role, entry):
595 595 prefix = b''
596 596 if role != b'best':
597 597 prefix = b'%s.' % role
598 598 fm.plain(b'!')
599 599 fm.write(prefix + b'wall', b' wall %f', entry[0])
600 600 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
601 601 fm.write(prefix + b'user', b' user %f', entry[1])
602 602 fm.write(prefix + b'sys', b' sys %f', entry[2])
603 603 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
604 604 fm.plain(b'\n')
605 605
606 606 timings.sort()
607 607 min_val = timings[0]
608 608 display(b'best', min_val)
609 609 if displayall:
610 610 max_val = timings[-1]
611 611 display(b'max', max_val)
612 612 avg = tuple([sum(x) / count for x in zip(*timings)])
613 613 display(b'avg', avg)
614 614 median = timings[len(timings) // 2]
615 615 display(b'median', median)
616 616
617 617
618 618 # utilities for historical portability
619 619
620 620
621 621 def getint(ui, section, name, default):
622 622 # for "historical portability":
623 623 # ui.configint has been available since 1.9 (or fa2b596db182)
624 624 v = ui.config(section, name, None)
625 625 if v is None:
626 626 return default
627 627 try:
628 628 return int(v)
629 629 except ValueError:
630 630 raise error.ConfigError(
631 631 b"%s.%s is not an integer ('%s')" % (section, name, v)
632 632 )
633 633
634 634
635 635 def safeattrsetter(obj, name, ignoremissing=False):
636 636 """Ensure that 'obj' has 'name' attribute before subsequent setattr
637 637
638 638 This function is aborted, if 'obj' doesn't have 'name' attribute
639 639 at runtime. This avoids overlooking removal of an attribute, which
640 640 breaks assumption of performance measurement, in the future.
641 641
642 642 This function returns the object to (1) assign a new value, and
643 643 (2) restore an original value to the attribute.
644 644
645 645 If 'ignoremissing' is true, missing 'name' attribute doesn't cause
646 646 abortion, and this function returns None. This is useful to
647 647 examine an attribute, which isn't ensured in all Mercurial
648 648 versions.
649 649 """
650 650 if not util.safehasattr(obj, name):
651 651 if ignoremissing:
652 652 return None
653 653 raise error.Abort(
654 654 (
655 655 b"missing attribute %s of %s might break assumption"
656 656 b" of performance measurement"
657 657 )
658 658 % (name, obj)
659 659 )
660 660
661 661 origvalue = getattr(obj, _sysstr(name))
662 662
663 663 class attrutil:
664 664 def set(self, newvalue):
665 665 setattr(obj, _sysstr(name), newvalue)
666 666
667 667 def restore(self):
668 668 setattr(obj, _sysstr(name), origvalue)
669 669
670 670 return attrutil()
671 671
672 672
673 673 # utilities to examine each internal API changes
674 674
675 675
676 676 def getbranchmapsubsettable():
677 677 # for "historical portability":
678 678 # subsettable is defined in:
679 679 # - branchmap since 2.9 (or 175c6fd8cacc)
680 680 # - repoview since 2.5 (or 59a9f18d4587)
681 681 # - repoviewutil since 5.0
682 682 for mod in (branchmap, repoview, repoviewutil):
683 683 subsettable = getattr(mod, 'subsettable', None)
684 684 if subsettable:
685 685 return subsettable
686 686
687 687 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
688 688 # branchmap and repoview modules exist, but subsettable attribute
689 689 # doesn't)
690 690 raise error.Abort(
691 691 b"perfbranchmap not available with this Mercurial",
692 692 hint=b"use 2.5 or later",
693 693 )
694 694
695 695
696 696 def getsvfs(repo):
697 697 """Return appropriate object to access files under .hg/store"""
698 698 # for "historical portability":
699 699 # repo.svfs has been available since 2.3 (or 7034365089bf)
700 700 svfs = getattr(repo, 'svfs', None)
701 701 if svfs:
702 702 return svfs
703 703 else:
704 704 return getattr(repo, 'sopener')
705 705
706 706
707 707 def getvfs(repo):
708 708 """Return appropriate object to access files under .hg"""
709 709 # for "historical portability":
710 710 # repo.vfs has been available since 2.3 (or 7034365089bf)
711 711 vfs = getattr(repo, 'vfs', None)
712 712 if vfs:
713 713 return vfs
714 714 else:
715 715 return getattr(repo, 'opener')
716 716
717 717
718 718 def repocleartagscachefunc(repo):
719 719 """Return the function to clear tags cache according to repo internal API"""
720 720 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
721 721 # in this case, setattr(repo, '_tagscache', None) or so isn't
722 722 # correct way to clear tags cache, because existing code paths
723 723 # expect _tagscache to be a structured object.
724 724 def clearcache():
725 725 # _tagscache has been filteredpropertycache since 2.5 (or
726 726 # 98c867ac1330), and delattr() can't work in such case
727 727 if '_tagscache' in vars(repo):
728 728 del repo.__dict__['_tagscache']
729 729
730 730 return clearcache
731 731
732 732 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
733 733 if repotags: # since 1.4 (or 5614a628d173)
734 734 return lambda: repotags.set(None)
735 735
736 736 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
737 737 if repotagscache: # since 0.6 (or d7df759d0e97)
738 738 return lambda: repotagscache.set(None)
739 739
740 740 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
741 741 # this point, but it isn't so problematic, because:
742 742 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
743 743 # in perftags() causes failure soon
744 744 # - perf.py itself has been available since 1.1 (or eb240755386d)
745 745 raise error.Abort(b"tags API of this hg command is unknown")
746 746
747 747
748 748 # utilities to clear cache
749 749
750 750
751 751 def clearfilecache(obj, attrname):
752 752 unfiltered = getattr(obj, 'unfiltered', None)
753 753 if unfiltered is not None:
754 754 obj = obj.unfiltered()
755 755 if attrname in vars(obj):
756 756 delattr(obj, attrname)
757 757 obj._filecache.pop(attrname, None)
758 758
759 759
760 760 def clearchangelog(repo):
761 761 if repo is not repo.unfiltered():
762 762 object.__setattr__(repo, '_clcachekey', None)
763 763 object.__setattr__(repo, '_clcache', None)
764 764 clearfilecache(repo.unfiltered(), 'changelog')
765 765
766 766
767 767 # perf commands
768 768
769 769
770 770 @command(b'perf::walk|perfwalk', formatteropts)
771 771 def perfwalk(ui, repo, *pats, **opts):
772 772 opts = _byteskwargs(opts)
773 773 timer, fm = gettimer(ui, opts)
774 774 m = scmutil.match(repo[None], pats, {})
775 775 timer(
776 776 lambda: len(
777 777 list(
778 778 repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
779 779 )
780 780 )
781 781 )
782 782 fm.end()
783 783
784 784
785 785 @command(b'perf::annotate|perfannotate', formatteropts)
786 786 def perfannotate(ui, repo, f, **opts):
787 787 opts = _byteskwargs(opts)
788 788 timer, fm = gettimer(ui, opts)
789 789 fc = repo[b'.'][f]
790 790 timer(lambda: len(fc.annotate(True)))
791 791 fm.end()
792 792
793 793
794 794 @command(
795 795 b'perf::status|perfstatus',
796 796 [
797 797 (b'u', b'unknown', False, b'ask status to look for unknown files'),
798 798 (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
799 799 ]
800 800 + formatteropts,
801 801 )
802 802 def perfstatus(ui, repo, **opts):
803 803 """benchmark the performance of a single status call
804 804
805 805 The repository data are preserved between each call.
806 806
807 807 By default, only the status of the tracked file are requested. If
808 808 `--unknown` is passed, the "unknown" files are also tracked.
809 809 """
810 810 opts = _byteskwargs(opts)
811 811 # m = match.always(repo.root, repo.getcwd())
812 812 # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
813 813 # False))))
814 814 timer, fm = gettimer(ui, opts)
815 815 if opts[b'dirstate']:
816 816 dirstate = repo.dirstate
817 817 m = scmutil.matchall(repo)
818 818 unknown = opts[b'unknown']
819 819
820 820 def status_dirstate():
821 821 s = dirstate.status(
822 822 m, subrepos=[], ignored=False, clean=False, unknown=unknown
823 823 )
824 824 sum(map(bool, s))
825 825
826 826 if util.safehasattr(dirstate, 'running_status'):
827 827 with dirstate.running_status(repo):
828 828 timer(status_dirstate)
829 829 dirstate.invalidate()
830 830 else:
831 831 timer(status_dirstate)
832 832 else:
833 833 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
834 834 fm.end()
835 835
836 836
837 837 @command(b'perf::addremove|perfaddremove', formatteropts)
838 838 def perfaddremove(ui, repo, **opts):
839 839 opts = _byteskwargs(opts)
840 840 timer, fm = gettimer(ui, opts)
841 841 try:
842 842 oldquiet = repo.ui.quiet
843 843 repo.ui.quiet = True
844 844 matcher = scmutil.match(repo[None])
845 845 opts[b'dry_run'] = True
846 846 if 'uipathfn' in getargspec(scmutil.addremove).args:
847 847 uipathfn = scmutil.getuipathfn(repo)
848 848 timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
849 849 else:
850 850 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
851 851 finally:
852 852 repo.ui.quiet = oldquiet
853 853 fm.end()
854 854
855 855
856 856 def clearcaches(cl):
857 857 # behave somewhat consistently across internal API changes
858 858 if util.safehasattr(cl, b'clearcaches'):
859 859 cl.clearcaches()
860 860 elif util.safehasattr(cl, b'_nodecache'):
861 861 # <= hg-5.2
862 862 from mercurial.node import nullid, nullrev
863 863
864 864 cl._nodecache = {nullid: nullrev}
865 865 cl._nodepos = None
866 866
867 867
868 868 @command(b'perf::heads|perfheads', formatteropts)
869 869 def perfheads(ui, repo, **opts):
870 870 """benchmark the computation of a changelog heads"""
871 871 opts = _byteskwargs(opts)
872 872 timer, fm = gettimer(ui, opts)
873 873 cl = repo.changelog
874 874
875 875 def s():
876 876 clearcaches(cl)
877 877
878 878 def d():
879 879 len(cl.headrevs())
880 880
881 881 timer(d, setup=s)
882 882 fm.end()
883 883
884 884
885 885 @command(
886 886 b'perf::tags|perftags',
887 887 formatteropts
888 888 + [
889 889 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
890 890 ],
891 891 )
892 892 def perftags(ui, repo, **opts):
893 893 opts = _byteskwargs(opts)
894 894 timer, fm = gettimer(ui, opts)
895 895 repocleartagscache = repocleartagscachefunc(repo)
896 896 clearrevlogs = opts[b'clear_revlogs']
897 897
898 898 def s():
899 899 if clearrevlogs:
900 900 clearchangelog(repo)
901 901 clearfilecache(repo.unfiltered(), 'manifest')
902 902 repocleartagscache()
903 903
904 904 def t():
905 905 return len(repo.tags())
906 906
907 907 timer(t, setup=s)
908 908 fm.end()
909 909
910 910
911 911 @command(b'perf::ancestors|perfancestors', formatteropts)
912 912 def perfancestors(ui, repo, **opts):
913 913 opts = _byteskwargs(opts)
914 914 timer, fm = gettimer(ui, opts)
915 915 heads = repo.changelog.headrevs()
916 916
917 917 def d():
918 918 for a in repo.changelog.ancestors(heads):
919 919 pass
920 920
921 921 timer(d)
922 922 fm.end()
923 923
924 924
925 925 @command(b'perf::ancestorset|perfancestorset', formatteropts)
926 926 def perfancestorset(ui, repo, revset, **opts):
927 927 opts = _byteskwargs(opts)
928 928 timer, fm = gettimer(ui, opts)
929 929 revs = repo.revs(revset)
930 930 heads = repo.changelog.headrevs()
931 931
932 932 def d():
933 933 s = repo.changelog.ancestors(heads)
934 934 for rev in revs:
935 935 rev in s
936 936
937 937 timer(d)
938 938 fm.end()
939 939
940 940
941 941 @command(
942 942 b'perf::delta-find',
943 943 revlogopts + formatteropts,
944 944 b'-c|-m|FILE REV',
945 945 )
946 946 def perf_delta_find(ui, repo, arg_1, arg_2=None, **opts):
947 947 """benchmark the process of finding a valid delta for a revlog revision
948 948
949 949 When a revlog receives a new revision (e.g. from a commit, or from an
950 950 incoming bundle), it searches for a suitable delta-base to produce a delta.
951 951 This perf command measures how much time we spend in this process. It
952 952 operates on an already stored revision.
953 953
954 954 See `hg help debug-delta-find` for another related command.
955 955 """
956 956 from mercurial import revlogutils
957 957 import mercurial.revlogutils.deltas as deltautil
958 958
959 959 opts = _byteskwargs(opts)
960 960 if arg_2 is None:
961 961 file_ = None
962 962 rev = arg_1
963 963 else:
964 964 file_ = arg_1
965 965 rev = arg_2
966 966
967 967 repo = repo.unfiltered()
968 968
969 969 timer, fm = gettimer(ui, opts)
970 970
971 971 rev = int(rev)
972 972
973 973 revlog = cmdutil.openrevlog(repo, b'perf::delta-find', file_, opts)
974 974
975 975 deltacomputer = deltautil.deltacomputer(revlog)
976 976
977 977 node = revlog.node(rev)
978 978 p1r, p2r = revlog.parentrevs(rev)
979 979 p1 = revlog.node(p1r)
980 980 p2 = revlog.node(p2r)
981 981 full_text = revlog.revision(rev)
982 982 textlen = len(full_text)
983 983 cachedelta = None
984 984 flags = revlog.flags(rev)
985 985
986 986 revinfo = revlogutils.revisioninfo(
987 987 node,
988 988 p1,
989 989 p2,
990 990 [full_text], # btext
991 991 textlen,
992 992 cachedelta,
993 993 flags,
994 994 )
995 995
996 996 # Note: we should probably purge the potential caches (like the full
997 997 # manifest cache) between runs.
998 998 def find_one():
999 999 with revlog._datafp() as fh:
1000 1000 deltacomputer.finddeltainfo(revinfo, fh, target_rev=rev)
1001 1001
1002 1002 timer(find_one)
1003 1003 fm.end()
1004 1004
1005 1005
1006 1006 @command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
1007 1007 def perfdiscovery(ui, repo, path, **opts):
1008 1008 """benchmark discovery between local repo and the peer at given path"""
1009 1009 repos = [repo, None]
1010 1010 timer, fm = gettimer(ui, opts)
1011 1011
1012 1012 try:
1013 1013 from mercurial.utils.urlutil import get_unique_pull_path_obj
1014 1014
1015 1015 path = get_unique_pull_path_obj(b'perfdiscovery', ui, path)
1016 1016 except ImportError:
1017 1017 try:
1018 1018 from mercurial.utils.urlutil import get_unique_pull_path
1019 1019
1020 1020 path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
1021 1021 except ImportError:
1022 1022 path = ui.expandpath(path)
1023 1023
1024 1024 def s():
1025 1025 repos[1] = hg.peer(ui, opts, path)
1026 1026
1027 1027 def d():
1028 1028 setdiscovery.findcommonheads(ui, *repos)
1029 1029
1030 1030 timer(d, setup=s)
1031 1031 fm.end()
1032 1032
1033 1033
@command(
    b'perf::bookmarks|perfbookmarks',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    drop_revlogs = opts[b'clear_revlogs']

    def setup():
        # make sure the bookmarks (and optionally the revlogs) will be
        # reloaded from disk on every timed run
        if drop_revlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')

    def run():
        # accessing the property triggers the actual parsing
        repo._bookmarks

    timer(run, setup=setup)
    fm.end()
1058 1058
1059 1059
@command(
    b'perf::bundle',
    [
        (
            b'r',
            b'rev',
            [],
            b'changesets to bundle',
            b'REV',
        ),
        (
            b't',
            b'type',
            b'none',
            b'bundlespec to use (see `hg help bundlespec`)',
            b'TYPE',
        ),
    ]
    + formatteropts,
    b'REVS',
)
def perfbundle(ui, repo, *revs, **opts):
    """benchmark the creation of a bundle from a repository

    For now, this only supports "none" compression.
    """
    # compatibility shim: parsebundlespec moved from `exchange` to
    # `bundlecaches` in newer Mercurial versions
    try:
        from mercurial import bundlecaches

        parsebundlespec = bundlecaches.parsebundlespec
    except ImportError:
        from mercurial import exchange

        parsebundlespec = exchange.parsebundlespec

    from mercurial import discovery
    from mercurial import bundle2

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    cl = repo.changelog
    revs = list(revs)
    revs.extend(opts.get(b'rev', ()))
    revs = scmutil.revrange(repo, revs)
    if not revs:
        raise error.Abort(b"not revision specified")
    # make it a consistent set (ie: without topological gaps)
    old_len = len(revs)
    revs = list(repo.revs(b"%ld::%ld", revs, revs))
    if old_len != len(revs):
        new_count = len(revs) - old_len
        msg = b"add %d new revisions to make it a consistent set\n"
        ui.write_err(msg % new_count)

    # compute the outgoing set the bundle will cover: heads of the selected
    # revisions, based on the heads of the ancestors outside the set
    targets = [cl.node(r) for r in repo.revs(b"heads(::%ld)", revs)]
    bases = [cl.node(r) for r in repo.revs(b"heads(::%ld - %ld)", revs, revs)]
    outgoing = discovery.outgoing(repo, bases, targets)

    bundle_spec = opts.get(b'type')

    bundle_spec = parsebundlespec(repo, bundle_spec, strict=False)

    # map the bundlespec to a changegroup version when it does not carry one
    cgversion = bundle_spec.params.get(b"cg.version")
    if cgversion is None:
        if bundle_spec.version == b'v1':
            cgversion = b'01'
        if bundle_spec.version == b'v2':
            cgversion = b'02'
    if cgversion not in changegroup.supportedoutgoingversions(repo):
        err = b"repository does not support bundle version %s"
        raise error.Abort(err % cgversion)

    if cgversion == b'01':  # bundle1
        bversion = b'HG10' + bundle_spec.wirecompression
        bcompression = None
    elif cgversion in (b'02', b'03'):
        bversion = b'HG20'
        bcompression = bundle_spec.wirecompression
    else:
        err = b'perf::bundle: unexpected changegroup version %s'
        raise error.ProgrammingError(err % cgversion)

    if bcompression is None:
        bcompression = b'UN'

    # only uncompressed bundles are supported for now (see docstring)
    if bcompression != b'UN':
        err = b'perf::bundle: compression currently unsupported: %s'
        raise error.ProgrammingError(err % bcompression)

    def do_bundle():
        # write to os.devnull so only bundle generation cost is measured
        bundle2.writenewbundle(
            ui,
            repo,
            b'perf::bundle',
            os.devnull,
            bversion,
            outgoing,
            bundle_spec.params,
        )

    timer(do_bundle)
    fm.end()
1163 1163
1164 1164
@command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    # benchmark one full pass of `fn` over a freshly opened bundle
    def makebench(fn):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    # benchmark reading the bundle payload in `size`-byte chunks
    def makereadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    # baseline: raw file reads without any bundle layer on top
    def makestdioread(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    # benchmark reading each bundle2 part in `size`-byte chunks
    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # open the bundle once to detect its flavor and pick matching benchmarks
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend(
                [
                    (makebench(deltaiter), b'cg1 deltaiter()'),
                    (makebench(iterchunks), b'cg1 getchunks()'),
                    (makereadnbytes(8192), b'cg1 read(8k)'),
                    (makereadnbytes(16384), b'cg1 read(16k)'),
                    (makereadnbytes(32768), b'cg1 read(32k)'),
                    (makereadnbytes(131072), b'cg1 read(128k)'),
                ]
            )
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend(
                [
                    (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                    (makebench(iterparts), b'bundle2 iterparts()'),
                    (
                        makebench(iterpartsseekable),
                        b'bundle2 iterparts() seekable',
                    ),
                    (makebench(seek), b'bundle2 part seek()'),
                    (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                    (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                    (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                    (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
                ]
            )
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1289 1289
1290 1290
@command(
    b'perf::changegroupchangelog|perfchangegroupchangelog',
    formatteropts
    + [
        (b'', b'cgversion', b'02', b'changegroup version'),
        (b'r', b'rev', b'', b'revisions to add to changegroup'),
    ],
)
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        # consume the chunk generator so the generation work actually runs
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()
1326 1326
1327 1327
@command(b'perf::dirs|perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    # benchmark `dirstate.hasdir`, dropping the cached directory map after
    # each call so every run recomputes it
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # force the dirstate to be loaded before timing starts
    b'a' in dirstate

    def run():
        dirstate.hasdir(b'a')
        try:
            del dirstate._map._dirs
        except AttributeError:
            # some dirstate implementations do not expose this cache
            pass

    timer(run)
    fm.end()
1344 1344
1345 1345
@command(
    b'perf::dirstate|perfdirstate',
    [
        (
            b'',
            b'iteration',
            None,
            b'benchmark a full iteration for the dirstate',
        ),
        (
            b'',
            b'contains',
            None,
            b'benchmark a large amount of `nf in dirstate` calls',
        ),
    ]
    + formatteropts,
)
def perfdirstate(ui, repo, **opts):
    """benchmark the time of various dirstate operations

    By default benchmark the time necessary to load a dirstate from scratch.
    The dirstate is loaded to the point where a "contains" request can be
    answered.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # make sure the dirstate is loaded before any timing happens
    b"a" in repo.dirstate

    if opts[b'iteration'] and opts[b'contains']:
        msg = b'only specify one of --iteration or --contains'
        raise error.Abort(msg)

    if opts[b'iteration']:
        # benchmark a full iteration over the (already loaded) dirstate
        setup = None
        dirstate = repo.dirstate

        def d():
            for f in dirstate:
                pass

    elif opts[b'contains']:
        # benchmark membership tests: the real paths plus as many reversed
        # paths, so half the lookups hit and half miss
        setup = None
        dirstate = repo.dirstate
        allfiles = list(dirstate)
        # also add file path that will be "missing" from the dirstate
        allfiles.extend([f[::-1] for f in allfiles])

        def d():
            for f in allfiles:
                f in dirstate

    else:
        # default: benchmark loading the dirstate from scratch

        def setup():
            repo.dirstate.invalidate()

        def d():
            b"a" in repo.dirstate

    timer(d, setup=setup)
    fm.end()
1408 1408
1409 1409
@command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark a 'dirstate.hasdir' call from an empty `dirs` cache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # make sure the dirstate is loaded before any timing happens
    repo.dirstate.hasdir(b"a")

    def setup():
        # drop the cached directory map so each run has to rebuild it
        try:
            del repo.dirstate._map._dirs
        except AttributeError:
            pass

    def run():
        repo.dirstate.hasdir(b"a")

    timer(run, setup=setup)
    fm.end()
1428 1428
1429 1429
@command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.filefoldmap.get()` request

    The dirstate filefoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # warm everything up once before any timing happens
    dirstate._map.filefoldmap.get(b'a')

    def setup():
        # drop the cached fold map so each run rebuilds it
        del dirstate._map.filefoldmap

    def run():
        dirstate._map.filefoldmap.get(b'a')

    timer(run, setup=setup)
    fm.end()
1449 1449
1450 1450
@command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.dirfoldmap.get()` request

    The dirstate dirfoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # warm everything up once before any timing happens
    dirstate._map.dirfoldmap.get(b'a')

    def setup():
        # drop both the fold map and the directory cache it is built from
        del dirstate._map.dirfoldmap
        try:
            del dirstate._map._dirs
        except AttributeError:
            pass

    def run():
        dirstate._map.dirfoldmap.get(b'a')

    timer(run, setup=setup)
    fm.end()
1474 1474
1475 1475
@command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark the time it takes to write a dirstate on disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # make sure the dirstate is loaded before any timing happens
    b"a" in ds

    def setup():
        # mark the dirstate dirty so `write` actually writes something
        ds._dirty = True

    def d():
        ds.write(repo.currenttransaction())

    # hold the working-copy lock around the measurement, as real dirstate
    # writes do
    with repo.wlock():
        timer(d, setup=setup)
    fm.end()
1493 1493
1494 1494
def _getmergerevs(repo, opts):
    """parse command arguments to return the revisions involved in a merge

    input: options dictionary with `rev`, `from` and `base` keys
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        fromrev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[fromrev]
    else:
        # no explicit base: use the common ancestor of the two sides
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
1516 1516
1517 1517
@command(
    b'perf::mergecalculate|perfmergecalculate',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecalculate(ui, repo, **opts):
    """benchmark `merge.calculateupdates` for the selected revisions"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(
            repo,
            wctx,
            rctx,
            [ancestor],
            branchmerge=False,
            force=False,
            acceptremote=True,
            followcopies=True,
        )

    timer(d)
    fm.end()
1549 1549
1550 1550
@command(
    b'perf::mergecopies|perfmergecopies',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def run():
        # exercise the full merge copy-tracing between the two contexts
        copies.mergecopies(repo, wctx, rctx, ancestor)

    timer(run)
    fm.end()
1573 1573
1574 1574
@command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    src_ctx = scmutil.revsingle(repo, rev1, rev1)
    dst_ctx = scmutil.revsingle(repo, rev2, rev2)

    def run():
        copies.pathcopies(src_ctx, dst_ctx)

    timer(run)
    fm.end()
1588 1588
1589 1589
@command(
    b'perf::phases|perfphases',
    [
        (b'', b'full', False, b'include file reading time too'),
    ],
    b"",
)
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')

    def d():
        phases = _phases
        if full:
            # with --full, also drop the file cache so the phase data is
            # re-read from disk on every run
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)

    timer(d)
    fm.end()
1614 1614
1615 1615
@command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import bin
    from mercurial import (
        exchange,
        hg,
        phases,
    )

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            b'default repository not configured!',
            hint=b"see 'hg help config.paths'",
        )
    # compatibility with both the newer `path` object API and the older
    # pushloc/loc attributes
    if util.safehasattr(path, 'main_path'):
        path = path.get_push_variant()
        dest = path.loc
    else:
        dest = path.pushloc or path.loc
    ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(
            b'listkeys', {b'namespace': b'phases'}
        ).result()
    # release the peer before the actual measurement below
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.statusnoi18n(b'publishing: yes\n')
    else:
        ui.statusnoi18n(b'publishing: no\n')

    # nodemap lookup API moved over time; pick whichever is available
    has_node = getattr(repo.changelog.index, 'has_node', None)
    if has_node is None:
        has_node = repo.changelog.nodemap.__contains__
    nonpublishroots = 0
    # NOTE(review): `iteritems` is Python 2 style; presumably `remotephases`
    # is a compat mapping providing it on Python 3 — confirm
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if has_node(node) and int(phase):
            nonpublishroots += 1
    ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
    ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)

    def d():
        phases.remotephasessummary(repo, remotesubset, remotephases)

    timer(d)
    fm.end()
1678 1678
1679 1679
@command(
    b'perf::manifest|perfmanifest',
    [
        (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
        (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ]
    + formatteropts,
    b'REV|NODE',
)
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # interpret `rev` as a changeset revision and use its manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # full hexadecimal manifest node
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                # manifest storage access API changed over time
                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(
                    b'manifest revision must be integer or full node'
                )

    def d():
        # drop in-memory (and optionally on-disk) caches so the read is cold
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()

    timer(d)
    fm.end()
1723 1723
1724 1724
@command(b'perf::changeset|perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    # benchmark reading one changeset entry from the changelog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    node = scmutil.revsingle(repo, rev).node()

    def run():
        repo.changelog.read(node)

    timer(run)
    fm.end()
1737 1737
1738 1738
@command(b'perf::ignore|perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def setup():
        # drop the dirstate content and the cached ignore matcher so every
        # run rebuilds the ignore data from scratch
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def run():
        # accessing the property triggers the ignore computation
        dirstate._ignore

    timer(run, setup=setup, title=b"load")
    fm.end()
1755 1755
1756 1756
@command(
    b'perf::index|perfindex',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'no-lookup', None, b'do not revision lookup post creation'),
    ]
    + formatteropts,
)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:

    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        # fix: `_byteskwargs` converted every option key to bytes, so the
        # `rev` option must be read with a bytes key (the previous str key
        # raised KeyError on Python 3); the Abort message is bytes too, in
        # line with the rest of this file.
        if opts[b'rev']:
            raise error.Abort(b'--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func

    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)

    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)

    timer(d, setup=setup)
    fm.end()
1819 1819
1820 1820
@command(
    b'perf::nodemap|perfnodemap',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ]
    + formatteropts,
)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]

    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        cl = makecl(unfi)
        # `index.get_rev` is the modern nodemap API; fall back to the older
        # `nodemap.get` when it is not available
        if util.safehasattr(cl.index, 'get_rev'):
            nodeget[0] = cl.index.get_rev
        else:
            nodeget[0] = cl.nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:

        def setup():
            setnodeget()

    else:
        setnodeget()
        d()  # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
1891 1891
1892 1892
@command(b'perf::startup|perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark the startup cost of running `hg version -q`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def d():
        # clear HGRCPATH so configuration reading does not skew the
        # measurement; output is discarded
        if os.name != 'nt':
            os.system(
                b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
            )
        else:
            os.environ['HGRCPATH'] = r' '
            os.system("%s version -q > NUL" % sys.argv[0])

    timer(d)
    fm.end()
1909 1909
1910 1910
1911 1911 def _find_stream_generator(version):
1912 1912 """find the proper generator function for this stream version"""
1913 1913 import mercurial.streamclone
1914 1914
1915 1915 available = {}
1916 1916
1917 1917 # try to fetch a v1 generator
1918 1918 generatev1 = getattr(mercurial.streamclone, "generatev1", None)
1919 1919 if generatev1 is not None:
1920 1920
1921 1921 def generate(repo):
1922 1922 entries, bytes, data = generatev2(repo, None, None, True)
1923 1923 return data
1924 1924
1925 1925 available[b'v1'] = generatev1
1926 1926 # try to fetch a v2 generator
1927 1927 generatev2 = getattr(mercurial.streamclone, "generatev2", None)
1928 1928 if generatev2 is not None:
1929 1929
1930 1930 def generate(repo):
1931 1931 entries, bytes, data = generatev2(repo, None, None, True)
1932 1932 return data
1933 1933
1934 1934 available[b'v2'] = generate
1935 1935 # try to fetch a v3 generator
1936 1936 generatev3 = getattr(mercurial.streamclone, "generatev3", None)
1937 1937 if generatev3 is not None:
1938 1938
1939 1939 def generate(repo):
1940 1940 entries, bytes, data = generatev3(repo, None, None, True)
1941 1941 return data
1942 1942
1943 1943 available[b'v3-exp'] = generate
1944 1944
1945 1945 # resolve the request
1946 1946 if version == b"latest":
1947 1947 # latest is the highest non experimental version
1948 1948 latest_key = max(v for v in available if b'-exp' not in v)
1949 1949 return available[latest_key]
1950 1950 elif version in available:
1951 1951 return available[version]
1952 1952 else:
1953 1953 msg = b"unkown or unavailable version: %s"
1954 1954 msg %= version
1955 1955 hint = b"available versions: %s"
1956 1956 hint %= b', '.join(sorted(available))
1957 1957 raise error.Abort(msg, hint=hint)
1958 1958
1959 1959
@command(
    b'perf::stream-locked-section',
    [
        (
            b'',
            b'stream-version',
            b'latest',
            b'stream version to us ("v1", "v2" or "latest", (the default))',
        ),
    ]
    + formatteropts,
)
def perf_stream_clone_scan(ui, repo, stream_version, **opts):
    """benchmark the initial, repo-locked, section of a stream-clone"""

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # deletion of the generator may trigger some cleanup that we do not want to
    # measure
    result_holder = [None]

    def setupone():
        # drop the previous run's generator outside of the timed section
        result_holder[0] = None

    generate = _find_stream_generator(stream_version)

    def runone():
        # the lock is held for the duration of the initialisation
        result_holder[0] = generate(repo)

    timer(runone, setup=setupone, title=b"load")
    fm.end()
1993 1993
1994 1994
@command(
    b'perf::stream-generate',
    [
        (
            b'',
            b'stream-version',
            b'latest',
            b'stream version to us ("v1", "v2" or "latest", (the default))',
        ),
    ]
    + formatteropts,
)
def perf_stream_clone_generate(ui, repo, stream_version, **opts):
    """benchmark the full generation of a stream clone"""

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # deletion of the generator may trigger some cleanup that we do not want to
    # measure
    generate = _find_stream_generator(stream_version)

    def runone():
        # consume the entire stream so the full generation cost is measured
        for chunk in generate(repo):
            pass

    timer(runone, title=b"generate")
    fm.end()
2025
2026
@command(b'perf::parents|perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    # resolve the node identifiers up front; only `parents` calls are timed
    nl = [repo.changelog.node(i) for i in _xrange(count)]

    def d():
        for n in nl:
            repo.changelog.parents(n)

    timer(d)
    fm.end()
2020 2052
2021 2053
@command(b'perf::ctxfiles|perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark computing the file list of a single changectx"""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)

    def run():
        len(repo[x].files())

    timer(run)
    fm.end()
2033 2065
2034 2066
@command(b'perf::rawfiles|perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the raw files list straight from the changelog"""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def run():
        # field 3 of a parsed changelog entry is the file list
        len(cl.read(x)[3])

    timer(run)
    fm.end()
2047 2079
2048 2080
@command(b'perf::lookup|perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision specifier to a node"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def run():
        return len(repo.lookup(rev))

    timer(run)
    fm.end()
2055 2087
2056 2088
@command(
    b'perf::linelogedits|perflinelogedits',
    [
        (b'n', b'edits', 10000, b'number of edits'),
        (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
    ],
    norepo=True,
)
def perflinelogedits(ui, **opts):
    """benchmark replaying a deterministic stream of random linelog edits"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed: the edit stream is deterministic across runs.  The order
    # of the randint() calls below must not change.
    random.seed(0)
    randint = random.randint
    nblines = 0
    edit_args = []
    for rev in _xrange(edits):
        a1 = randint(0, nblines)
        a2 = randint(a1, min(nblines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        nblines += (b2 - b1) - (a2 - a1)
        edit_args.append((rev, a1, a2, b1, b2))

    def run():
        ll = linelog.linelog()
        for args in edit_args:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(run)
    fm.end()
2094 2126
2095 2127
@command(b'perf::revrange|perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revset specifications"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # bind once so the timed closure does a local lookup only
    revrange = scmutil.revrange

    def run():
        return len(revrange(repo, specs))

    timer(run)
    fm.end()
2103 2135
2104 2136
@command(b'perf::nodelookup|perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark resolving a node to its revision in the changelog index"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog

    # disable lazy parser in old hg
    mercurial.revlog._prereadsize = 2 ** 24
    n = scmutil.revsingle(repo, rev).node()

    try:
        # modern revlog API takes a radix...
        cl = revlog(getsvfs(repo), radix=b"00changelog")
    except TypeError:
        # ...older versions want the explicit index file name
        cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")

    def run():
        cl.rev(n)
        clearcaches(cl)

    timer(run)
    fm.end()
2125 2157
2126 2158
@command(
    b'perf::log|perflog',
    [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
)
def perflog(ui, repo, rev=None, **opts):
    """benchmark running `hg log` with its output buffered away"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    ui.pushbuffer()

    def run():
        commands.log(
            ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
        )

    timer(run)
    ui.popbuffer()
    fm.end()
2144 2176
2145 2177
@command(b'perf::moonwalk|perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def moonwalk():
        last = len(repo) - 1
        for rev in repo.changelog.revs(start=last, stop=-1):
            ctx = repo[rev]
            # reading the branch forces the changelog entry to be parsed,
            # not just the index row
            ctx.branch()

    timer(moonwalk)
    fm.end()
2162 2194
2163 2195
@command(
    b'perf::templating|perftemplating',
    [
        (b'r', b'rev', [], b'revisions to run the template on'),
    ]
    + formatteropts,
)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort(
            b"perftemplating not available with this Mercurial",
            hint=b"use 4.3 or later",
        )

    opts = _byteskwargs(opts)

    # render into a ui writing to /dev/null so output cost is not measured
    nullui = ui.copy()
    nullui.fout = open(os.devnull, 'wb')
    nullui.disablepager()
    try:
        revs = opts.get(b'rev')
        if not revs:
            revs = [b'all()']
        revs = list(scmutil.revrange(repo, revs))

        defaulttemplate = (
            b'{date|shortdate} [{rev}:{node|short}]'
            b' {author|person}: {desc|firstline}\n'
        )
        if testedtemplate is None:
            testedtemplate = defaulttemplate
        displayer = makelogtemplater(nullui, repo, testedtemplate)

        def format():
            for r in revs:
                ctx = repo[r]
                displayer.show(ctx)
                displayer.flush(ctx)

        timer, fm = gettimer(ui, opts)
        timer(format)
        fm.end()
    finally:
        # the devnull handle used to be leaked; close it explicitly
        nullui.fout.close()
2206 2238
2207 2239
def _displaystats(ui, opts, entries, data):
    """render percentile statistics for collected measurements

    `entries` is a list of `(key, title)` pairs; `data[key]` is a list of
    tuples whose first element is the measured value (remaining elements
    identify the measured item).
    """
    # use a second formatter because the data are quite different, not sure
    # how it flies with the templater.
    fm = ui.formatter(b'perf-stats', opts)
    for key, title in entries:
        values = data[key]
        # percentile indices must be based on the number of collected values;
        # the previous `len(data)` used the number of keys in the dict,
        # yielding bogus percentiles.
        nbvalues = len(values)
        values.sort()
        stats = {
            'key': key,
            'title': title,
            'nbitems': len(values),
            'min': values[0][0],
            '10%': values[(nbvalues * 10) // 100][0],
            '25%': values[(nbvalues * 25) // 100][0],
            '50%': values[(nbvalues * 50) // 100][0],
            '75%': values[(nbvalues * 75) // 100][0],
            '80%': values[(nbvalues * 80) // 100][0],
            '85%': values[(nbvalues * 85) // 100][0],
            '90%': values[(nbvalues * 90) // 100][0],
            '95%': values[(nbvalues * 95) // 100][0],
            '99%': values[(nbvalues * 99) // 100][0],
            'max': values[-1][0],
        }
        fm.startitem()
        fm.data(**stats)
        # make node pretty for the human output
        fm.plain('### %s (%d items)\n' % (title, len(values)))
        lines = [
            'min',
            '10%',
            '25%',
            '50%',
            '75%',
            '80%',
            '85%',
            '90%',
            '95%',
            '99%',
            'max',
        ]
        for l in lines:
            fm.plain('%s: %s\n' % (l, stats[l]))
    fm.end()
2252 2284
2253 2285
@command(
    b'perf::helper-mergecopies|perfhelper-mergecopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelpermergecopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perfmergecopies`

    This command find (base, p1, p2) triplet relevant for copytracing
    benchmarking in the context of a merge. It reports values for some of the
    parameters that impact merge copy tracing time during merge.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision triplets are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # one (column header, %-format) pair per output column
    output_template = [
        ("base", "%(base)12s"),
        ("p1", "%(p1.node)12s"),
        ("p2", "%(p2.node)12s"),
        ("p1.nb-revs", "%(p1.nbrevs)12d"),
        ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
        ("p1.renames", "%(p1.renamedfiles)12d"),
        ("p1.time", "%(p1.time)12.3f"),
        ("p2.nb-revs", "%(p2.nbrevs)12d"),
        ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
        ("p2.renames", "%(p2.renamedfiles)12d"),
        ("p2.time", "%(p2.time)12.3f"),
        ("renames", "%(nbrenamedfiles)12d"),
        ("total.time", "%(time)12.3f"),
    ]
    if not dotiming:
        # drop the columns that are only populated when --timing is set
        output_template = [
            i
            for i in output_template
            if not ('time' in i[0] or 'renames' in i[0])
        ]
    header_names = [h for (h, v) in output_template]
    output = ' '.join([v for (h, v) in output_template]) + '\n'
    header = ' '.join(['%12s'] * len(header_names)) + '\n'
    fm.plain(header % tuple(header_names))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['parentnbrenames'] = []
            alldata['totalnbrenames'] = []
            alldata['parenttime'] = []
            alldata['totaltime'] = []

    # revisions of interest: merges within the requested set
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1()
        p2 = ctx.p2()
        # one (base, p1, p2) triplet per greatest common ancestor head
        bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
        for b in bases:
            b = repo[b]
            p1missing = copies._computeforwardmissing(b, p1)
            p2missing = copies._computeforwardmissing(b, p2)
            data = {
                b'base': b.hex(),
                b'p1.node': p1.hex(),
                b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
                b'p1.nbmissingfiles': len(p1missing),
                b'p2.node': p2.hex(),
                b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
                b'p2.nbmissingfiles': len(p2missing),
            }
            if dostats:
                if p1missing:
                    alldata['nbrevs'].append(
                        (data['p1.nbrevs'], b.hex(), p1.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p1.nbmissingfiles'], b.hex(), p1.hex())
                    )
                if p2missing:
                    alldata['nbrevs'].append(
                        (data['p2.nbrevs'], b.hex(), p2.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p2.nbmissingfiles'], b.hex(), p2.hex())
                    )
            if dotiming:
                begin = util.timer()
                mergedata = copies.mergecopies(repo, p1, p2, b)
                end = util.timer()
                # not very stable timing since we did only one run
                data['time'] = end - begin
                # mergedata contains five dicts: "copy", "movewithdir",
                # "diverge", "renamedelete" and "dirmove".
                # The first 4 are about renamed file so lets count that.
                renames = len(mergedata[0])
                renames += len(mergedata[1])
                renames += len(mergedata[2])
                renames += len(mergedata[3])
                data['nbrenamedfiles'] = renames
                begin = util.timer()
                p1renames = copies.pathcopies(b, p1)
                end = util.timer()
                data['p1.time'] = end - begin
                begin = util.timer()
                p2renames = copies.pathcopies(b, p2)
                end = util.timer()
                data['p2.time'] = end - begin
                data['p1.renamedfiles'] = len(p1renames)
                data['p2.renamedfiles'] = len(p2renames)

                if dostats:
                    if p1missing:
                        alldata['parentnbrenames'].append(
                            (data['p1.renamedfiles'], b.hex(), p1.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p1.time'], b.hex(), p1.hex())
                        )
                    if p2missing:
                        alldata['parentnbrenames'].append(
                            (data['p2.renamedfiles'], b.hex(), p2.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p2.time'], b.hex(), p2.hex())
                        )
                    if p1missing or p2missing:
                        alldata['totalnbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                b.hex(),
                                p1.hex(),
                                p2.hex(),
                            )
                        )
                        alldata['totaltime'].append(
                            (data['time'], b.hex(), p1.hex(), p2.hex())
                        )
            fm.startitem()
            fm.data(**data)
            # make node pretty for the human output
            out = data.copy()
            out['base'] = fm.hexfunc(b.node())
            out['p1.node'] = fm.hexfunc(p1.node())
            out['p2.node'] = fm.hexfunc(p2.node())
            fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(
                ('parentnbrenames', 'rename from one parent to base')
            )
            entries.append(('totalnbrenames', 'total number of renames'))
            entries.append(('parenttime', 'time for one parent'))
            entries.append(('totaltime', 'time for both parents'))
        _displaystats(ui, opts, entries, alldata)
2435 2467
2436 2468
@command(
    b'perf::helper-pathcopies|perfhelper-pathcopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # the timing columns only exist when --timing is requested
    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d "
            "%(nbrenamedfiles)12d %(time)18.5f\n"
        )
        header_names = (
            "source",
            "destination",
            "nb-revs",
            "nb-files",
            "nb-renames",
            "time",
        )
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d\n"
        )
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['nbrenames'] = []
            alldata['time'] = []

    # revisions of interest: merges within the requested set
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    # nothing to trace for this pair; skip it entirely
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
                    b'nbmissingfiles': len(missing),
                }
                if dostats:
                    alldata['nbrevs'].append(
                        (
                            data['nbrevs'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                    alldata['nbmissingfiles'].append(
                        (
                            data['nbmissingfiles'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                    if dostats:
                        alldata['time'].append(
                            (
                                data['time'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                        alldata['nbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                fm.startitem()
                fm.data(**data)
                # make node pretty for the human output
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
    if dostats:
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('nbrenames', 'renamed files'))
            entries.append(('time', 'time'))
        _displaystats(ui, opts, entries, alldata)
2575 2607
2576 2608
@command(b'perf::cca|perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark constructing a case-collision auditor for the dirstate"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def run():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(run)
    fm.end()
2583 2615
2584 2616
@command(b'perf::fncacheload|perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache file from the store"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store

    def run():
        store.fncache._load()

    timer(run)
    fm.end()
2596 2628
2597 2629
@command(b'perf::fncachewrite|perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark rewriting the fncache file inside a transaction"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    try:
        s.fncache._load()
        tr = repo.transaction(b'perffncachewrite')
        tr.addbackup(b'fncache')

        def d():
            # mark dirty so every run actually writes the file
            s.fncache._dirty = True
            s.fncache.write(tr)

        timer(d)
        tr.close()
    finally:
        # previously the lock was leaked if the benchmark raised
        lock.release()
    fm.end()
2616 2648
2617 2649
@command(b'perf::fncacheencode|perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark encoding every path currently held in the fncache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    # load outside the timed section; only encoding is measured
    s.fncache._load()

    def run():
        for p in s.fncache.entries:
            s.encode(p)

    timer(run)
    fm.end()
2631 2663
2632 2664
def _bdiffworker(q, blocks, xdiff, ready, done):
    # Worker loop for the threaded variant of perfbdiff: pull text pairs from
    # queue `q` and diff them until a None sentinel is seen, then park on the
    # `ready` condition until the next timed round begins (or until `done` is
    # set for shutdown).
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done()  # for the None one
        with ready:
            ready.wait()
2648 2680
2649 2681
def _manifestrevision(repo, mnode):
    """return the raw revision text for manifest node `mnode`"""
    ml = repo.manifestlog
    if util.safehasattr(ml, b'getstorage'):
        # modern manifestlog API
        store = ml.getstorage(b'')
    else:
        # fall back for older Mercurial versions
        store = ml._revlog
    return store.revision(mnode)
2659 2691
2660 2692
@command(
    b'perf::bdiff|perfbdiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
        (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
        (b'', b'blocks', False, b'test computing diffs into blocks'),
        (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],
    b'-c|-m|FILE REV',
)
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    # gather all text pairs up front; only diffing is timed below
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                # change appears to be ((old node, ...), (new node, ...));
                # a missing side falls back to rev -1 -- TODO confirm
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:

        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)

    else:
        # start the worker threads and let them drain the initial sentinels
        # before the timed section begins (see _bdiffworker)
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(
                target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
            ).start()
        q.join()

        def d():
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # shut the workers down: set the stop flag, unblock their q.get(),
        # then wake any worker waiting on the condition
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
2776 2808
@command(
    b'perf::unbundle',
    formatteropts,
    b'BUNDLE_FILE',
)
def perf_unbundle(ui, repo, fname, **opts):
    """benchmark application of a bundle in a repository.

    This does not include the final transaction processing"""

    from mercurial import exchange
    from mercurial import bundle2
    from mercurial import transaction

    opts = _byteskwargs(opts)

    ### some compatibility hotfix
    #
    # the data attribute is dropped in 63edc384d3b7 a changeset introducing a
    # critical regression that break transaction rollback for files that are
    # de-inlined.
    method = transaction.transaction._addentry
    pre_63edc384d3b7 = "data" in getargspec(method).args
    # the `detailed_exit_code` attribute is introduced in 33c0c25d0b0f
    # a changeset that is a close descendant of 18415fc918a1, the changeset
    # that conclude the fix run for the bug introduced in 63edc384d3b7.
    args = getargspec(error.Abort.__init__).args
    post_18415fc918a1 = "detailed_exit_code" in args

    old_max_inline = None
    try:
        if not (pre_63edc384d3b7 or post_18415fc918a1):
            # disable inlining
            old_max_inline = mercurial.revlog._maxinline
            # large enough to never happen
            mercurial.revlog._maxinline = 2 ** 50

        with repo.lock():
            # [generator, transaction] pair for the current run
            bundle = [None, None]
            orig_quiet = repo.ui.quiet
            try:
                repo.ui.quiet = True
                with open(fname, mode="rb") as f:

                    def noop_report(*args, **kwargs):
                        pass

                    def setup():
                        # abort the previous run's transaction, then reopen
                        # the bundle from the start for the next run
                        gen, tr = bundle
                        if tr is not None:
                            tr.abort()
                        bundle[:] = [None, None]
                        f.seek(0)
                        bundle[0] = exchange.readbundle(ui, f, fname)
                        bundle[1] = repo.transaction(b'perf::unbundle')
                        # silence the transaction
                        bundle[1]._report = noop_report

                    def apply():
                        gen, tr = bundle
                        bundle2.applybundle(
                            repo,
                            gen,
                            tr,
                            source=b'perf::unbundle',
                            url=fname,
                        )

                    timer, fm = gettimer(ui, opts)
                    timer(apply, setup=setup)
                    fm.end()
            finally:
                # restore the original verbosity; this was previously written
                # as `repo.ui.quiet == orig_quiet`, a no-op comparison that
                # left the ui permanently quiet after the benchmark
                repo.ui.quiet = orig_quiet
                gen, tr = bundle
                if tr is not None:
                    tr.abort()
    finally:
        if old_max_inline is not None:
            mercurial.revlog._maxinline = old_max_inline
2856 2888
2857 2889
@command(
    b'perf::unidiff|perfunidiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ],
    b'-c|-m|FILE REV',
)
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    # gather all text pairs up front; only diffing is timed below
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                # change appears to be ((old node, ...), (new node, ...));
                # a missing side falls back to rev -1 -- TODO confirm
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False
            )
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
2936 2968
2937 2969
@command(b'perf::diffwd|perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # map each short diff flag to its keyword-argument name
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    # benchmark each whitespace-option combination separately
    for flags in ('', 'w', 'b', 'B', 'wB'):
        diff_opts = {options[c]: b'1' for c in flags}

        def run():
            ui.pushbuffer()
            commands.diff(ui, repo, **diff_opts)
            ui.popbuffer()

        encoded = flags.encode('ascii')
        if encoded:
            title = b'diffopts: -' + encoded
        else:
            title = b'diffopts: none'
        timer(run, title=title)
    fm.end()
2961 2993
2962 2994
@command(
    b'perf::revlogindex|perfrevlogindex',
    revlogopts + formatteropts,
    b'-c|-m|FILE',
)
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    # compat with hg <= 5.8
    radix = getattr(rl, 'radix', None)
    indexfile = getattr(rl, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(rl, 'indexfile')
    data = opener.read(indexfile)

    # first 4 bytes: flags in the high 16 bits, version in the low 16 bits
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        inline = header & (1 << 16)
    else:
        raise error.Abort(b'unsupported revlog version: %d' % version)

    # parse_index_v1 moved over time; fall back to the pre-move location
    parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
    if parse_index_v1 is None:
        parse_index_v1 = mercurial.revlog.revlogio().parseindex

    rllen = len(rl)

    # sample nodes spread across the revlog for the lookup benchmarks
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        # cost of instantiating a revlog object
        if radix is not None:
            revlog(opener, radix=radix)
        else:
            # hg <= 5.8
            revlog(opener, indexfile=indexfile)

    def read():
        # raw index file read, without any parsing
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        parse_index_v1(data, inline)

    def getentry(revornode):
        index = parse_index_v1(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = parse_index_v1(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        # node -> rev lookup; newer indexes expose `rev`, older C indexes
        # expose a `nodemap` mapping instead
        index = parse_index_v1(data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        try:
            rev(node)
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        index = parse_index_v1(data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        for i in range(count):
            for node in nodes:
                try:
                    rev(node)
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
        (
            lambda: resolvenodes(allnodesrev, 2),
            b'look up all nodes 2x (reverse)',
        ),
        (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
        (
            lambda: getentries(allrevs, 2),
            b'retrieve all index entries 2x (forward)',
        ),
        (
            lambda: getentries(allrevsrev),
            b'retrieve all index entries (reverse)',
        ),
        (
            lambda: getentries(allrevsrev, 2),
            b'retrieve all index entries 2x (reverse)',
        ),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
    fm.end()
3108 3140
3109 3141
@command(
    b'perf::revlogrevisions|perfrevlogrevisions',
    revlogopts
    + formatteropts
    + [
        (b'd', b'dist', 100, b'distance between the revisions'),
        (b's', b'startrev', 0, b'revision to start reading at'),
        (b'', b'reverse', False, b'read in reverse'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogrevisions(
    ui, repo, file_=None, startrev=0, reverse=False, **opts
):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # a negative start revision counts back from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev

    def d():
        rl.clearcaches()

        beginrev = startrev
        endrev = rllen
        dist = opts[b'dist']

        if reverse:
            # walk from tip towards startrev using a negative step
            beginrev, endrev = endrev - 1, beginrev - 1
            dist = -1 * dist

        for x in _xrange(beginrev, endrev, dist):
            # Old revisions don't support passing int.
            n = rl.node(x)
            rl.revision(n)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
3158 3190
3159 3191
@command(
    b'perf::revlogwrite|perfrevlogwrite',
    revlogopts
    + formatteropts
    + [
        (b's', b'startrev', 1000, b'revision to start writing at'),
        (b'', b'stoprev', -1, b'last revision to write'),
        (b'', b'count', 3, b'number of passes to perform'),
        (b'', b'details', False, b'print timing for every revisions tested'),
        (b'', b'source', b'full', b'the kind of data feed in the revlog'),
        (b'', b'lazydeltabase', True, b'try the provided delta first'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
      (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    Note: This performance command measures performance in a custom way. As a
    result some of the global configuration of the 'perf' command does not
    apply to it:

    * ``pre-run``: disabled

    * ``profile-benchmark``: disabled

    * ``run-limits``: disabled use --count instead
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative revisions count back from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (
        b'full',
        b'parent-1',
        b'parent-2',
        b'parent-smallest',
        b'storage',
    )
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        # each pass re-adds the selected revisions to a fresh temporary copy
        timing = _timeonewrite(
            ui,
            rl,
            source,
            startrev,
            stoprev,
            c + 1,
            lazydeltabase=lazydeltabase,
            clearcaches=clearcaches,
        )
        allresults.append(timing)

    ### consolidate the results in a single list
    # turn N passes of [(rev, t), ...] into [(rev, [t1..tN]), ...]
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # BUG FIX: the median bucket previously used `resultcount * 70 // 100`,
        # reporting roughly the 70th percentile under the "50%" label
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append(
            (
                sum(x[1][0] for x in item),
                sum(x[1][1] for x in item),
                sum(x[1][2] for x in item),
            )
        )
    formatone(
        fm,
        totaltime,
        title="total time (%d revs)" % resultcount,
        displayall=displayall,
    )
    fm.end()
3301 3333
3302 3334
3303 3335 class _faketr:
3304 3336 def add(s, x, y, z=None):
3305 3337 return None
3306 3338
3307 3339
def _timeonewrite(
    ui,
    orig,
    source,
    startrev,
    stoprev,
    runidx=None,
    lazydeltabase=True,
    clearcaches=True,
):
    """Run one pass of re-adding revisions [startrev, stoprev] of `orig`
    into a truncated temporary copy, timing each `addrawrevision` call.

    `source` selects how each revision is fed (see perfrevlogwrite).
    Returns a list of (rev, timing) tuples, one entry per revision added.
    """
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)

            def updateprogress(pos):
                progress.update(pos)

            def completeprogress():
                progress.complete()

        else:

            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)

            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                dest.index.clearcaches()
                dest.clearcaches()
            # only the actual revision insertion is inside the timed section
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
3357 3389
3358 3390
def _getrevisionseed(orig, rev, tr, source):
    """Build the (args, kwargs) pair for `addrawrevision` to re-add `rev`.

    Depending on `source`, the revision content is provided either as a
    full text or as a cached delta against one of its parents / its
    storage delta parent.
    """
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        baserev = orig.rev(p1)
        cachedelta = (baserev, orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to p1 when there is no second parent
        parent = p2
        if p2 == nullid:
            parent = p1
        baserev = orig.rev(parent)
        cachedelta = (baserev, orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        # pick whichever parent yields the smaller delta
        p1diff = orig.revdiff(p1, rev)
        parent = p1
        diff = p1diff
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(p1diff) > len(p2diff):
                parent = p2
                diff = p2diff
        baserev = orig.rev(parent)
        cachedelta = (baserev, diff)
    elif source == b'storage':
        # reuse the delta already stored in the revlog
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return (
        (text, tr, linkrev, p1, p2),
        {'node': node, 'flags': flags, 'cachedelta': cachedelta},
    )
3399 3431
3400 3432
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Context manager yielding a writable copy of `orig` truncated to
    `truncaterev`, backed by a temporary directory removed on exit.

    Inline revlogs are rejected because the index/data split below
    assumes separate .i and .d files.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    k = 'upperboundcomp'
    if util.safehasattr(orig, k):
        # forward the delta upper-bound setting when the revlog has one
        revlogkwargs[k] = getattr(orig, k)

    indexfile = getattr(orig, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(orig, 'indexfile')
    origindexpath = orig.opener.join(indexfile)

    datafile = getattr(orig, '_datafile', getattr(orig, 'datafile'))
    origdatapath = orig.opener.join(datafile)
    radix = b'revlog'
    indexname = b'revlog.i'
    dataname = b'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        try:
            dest = revlog(vfs, radix=radix, **revlogkwargs)
        except TypeError:
            # hg <= 5.8 used explicit index/data file names instead of radix
            dest = revlog(
                vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
            )
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
3461 3493
3462 3494
@command(
    b'perf::revlogchunks|perfrevlogchunks',
    revlogopts
    + formatteropts
    + [
        (b'e', b'engines', b'', b'compression engines to use'),
        (b's', b'startrev', 0, b'revision to start at'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = {e.strip() for e in engines.split(b',')}
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # default: every available engine able to compress revlog data
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # return a raw file handle on the file that actually holds the chunks
        if rl._inline:
            indexfile = getattr(rl, '_indexfile', None)
            if indexfile is None:
                # compatibility with <= hg-5.8
                indexfile = getattr(rl, 'indexfile')
            return getsvfs(repo)(indexfile)
        else:
            # BUG FIX: this previously read `getattr(rl, 'datafile', ...)`,
            # looking up the same legacy attribute twice; try the modern
            # `_datafile` first, consistent with _temprevlog above
            datafile = getattr(rl, '_datafile', getattr(rl, 'datafile'))
            return getsvfs(repo)(datafile)

    def doread():
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    # the compression benchmarks consume the chunks saved by dochunkbatch,
    # so they must run after it
    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append(
            (
                functools.partial(docompress, compressor),
                b'compress w/ %s' % engine,
            )
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
    fm.end()
3595 3627
3596 3628
@command(
    b'perf::revlogrevision|perfrevlogrevision',
    revlogopts
    + formatteropts
    + [(b'', b'cache', False, b'use caches instead of clearing')],
    b'-c|-m|FILE REV',
)
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # with -c/-m the positional argument is actually the revision
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # slice the raw segments back into per-revision compressed chunks
        start = r.start
        length = r.length
        inline = r._inline
        try:
            iosize = r.index.entry_size
        except AttributeError:
            # hg <= 5.8 index layout
            iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    # slicechunk moved into revlogutils.deltas; fall back for older hg
    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    # precompute inputs shared by the phase benchmarks above
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend(
        [
            (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
            (lambda: dodecompress(rawchunks), b'decompress'),
            (lambda: dopatch(text, bins), b'patch'),
            (lambda: dohash(text), b'hash'),
        ]
    )

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
3741 3773
3742 3774
@command(
    b'perf::revset|perfrevset',
    [
        (b'C', b'clear', False, b'clear volatile cache between each call.'),
        (b'', b'contexts', False, b'obtain changectx for each revision'),
    ]
    + formatteropts,
    b"REVSET",
)
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if need to evaluate the impact of build volatile
    revisions set cache on the revset execution. Volatile cache hold filtered
    and obsolete related cache."""
    # DOC FIX: the docstring previously referenced a non-existent `--clean`
    # option; the declared flag is `-C/--clear`.
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def d():
        # optionally drop volatile caches so each run recomputes them
        if clear:
            repo.invalidatevolatilesets()
        if contexts:
            # also pay the cost of building a changectx per revision
            for ctx in repo.set(expr):
                pass
        else:
            for r in repo.revs(expr):
                pass

    timer(d)
    fm.end()
3774 3806
3775 3807
@command(
    b'perf::volatilesets|perfvolatilesets',
    [
        (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
    ]
    + formatteropts,
)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def getobs(name):
        # build a benchmark closure computing the obsolescence set `name`
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            obsolete.getrevs(repo, name)

        return d

    allobs = sorted(obsolete.cachefuncs)
    if names:
        # restrict to the explicitly requested sets
        allobs = [n for n in allobs if n in names]

    for name in allobs:
        timer(getobs(name), title=name)

    def getfiltered(name):
        # build a benchmark closure computing the filtered revs for `name`
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            repoview.filterrevs(repo, name)

        return d

    allfilter = sorted(repoview.filtertable)
    if names:
        allfilter = [n for n in allfilter if n in names]

    for name in allfilter:
        timer(getfiltered(name), title=name)
    fm.end()
3823 3855
3824 3856
@command(
    b'perf::branchmap|perfbranchmap',
    [
        (b'f', b'full', False, b'Includes build time of subset'),
        (
            b'',
            b'clear-revbranch',
            False,
            b'purge the revbranch cache between computation',
        ),
    ]
    + formatteropts,
)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)

    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches

        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                view._branchcaches.clear()
            else:
                # only drop the cache entry for this filter level
                filtered.pop(filtername, None)
            view.branchmap()

        return d

    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        # pick a filter whose subset is not still pending (topological order)
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # patch out on-disk branchmap reading/writing so only the in-memory
    # update is measured
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=printname)
    finally:
        # always restore the patched read/write implementations
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
3914 3946
3915 3947
@command(
    b'perf::branchmapupdate|perfbranchmapupdate',
    [
        (b'', b'base', [], b'subset of revision to start from'),
        (b'', b'target', [], b'subset of revision to end with'),
        (b'', b'clear-caches', False, b'clear cache between each runs'),
    ]
    + formatteropts,
)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from for <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    # update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None]  # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    # revisions that the benchmarked update() call will have to add
    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    # build the two complementary filters used to expose exactly the
    # "base" and "target" subsets through temporary repoview filters
    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        # register the temporary filters globally; the finally clause
        # below must undo this even if the benchmark aborts
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset were found, build the base branchmap
            # from scratch
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            # each run starts from a fresh copy of the base branchmap
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            # the measured operation: extending the branchmap with newrevs
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
4025 4057
@command(
    b'perf::branchmapload|perfbranchmapload',
    [
        (b'f', b'filter', b'', b'Specify repoview filter'),
        # fix help-text typo: "brachmap" -> "branchmap"
        (b'', b'list', False, b'List branchmap filter caches'),
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ]
    + formatteropts,
)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
    # NOTE: `filter` and `list` shadow builtins, but they are part of the
    # command interface (option names), so they cannot be renamed.
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # --list mode: only report which branchmap cache files exist
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(
                    b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
                )
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap()  # make sure we have a relevant, up to date branchmap

    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached; walk up the
    # subset chain until a cached branchmap is found
    while fromfile(repo) is None:
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(
                b'No branchmap cached for %s repo' % (filter or b'unfiltered')
            )
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)

    def setup():
        if clearrevlogs:
            clearchangelog(repo)

    def bench():
        fromfile(repo)

    timer(bench, setup=setup)
    fm.end()
4085 4117
@command(b'perf::loadmarkers|perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)

    def d():
        # instantiating obsstore parses all on-disk markers; its length
        # is the reported result
        return len(obsolete.obsstore(repo, svfs))

    timer(d)
    fm.end()
4095 4127
4096 4128
@command(
    b'perf::lrucachedict|perflrucachedict',
    formatteropts
    + [
        (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
        (b'', b'mincost', 0, b'smallest cost of items in cache'),
        (b'', b'maxcost', 100, b'maximum cost of items in cache'),
        (b'', b'size', 4, b'size of cache'),
        (b'', b'gets', 10000, b'number of key lookups'),
        (b'', b'sets', 10000, b'number of key sets'),
        (b'', b'mixed', 10000, b'number of mixed mode operations'),
        (
            b'',
            b'mixedgetfreq',
            50,
            b'frequency of get vs set ops in mixed mode',
        ),
    ],
    norepo=True,
)
def perflrucache(
    ui,
    mincost=0,
    maxcost=100,
    costlimit=0,
    size=4,
    gets=10000,
    sets=10000,
    mixed=10000,
    mixedgetfreq=50,
    **opts
):
    # Benchmark util.lrucachedict: construction, gets, inserts, sets and a
    # randomized mixed workload, with and without a total-cost limit.
    # (No docstring on purpose: the command is listed as
    # "(no help text available)" and tests rely on that.)
    opts = _byteskwargs(opts)

    def doinit():
        # raw construction cost of the cache object
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value  # silence pyflakes warning

    def dogetscost():
        # NOTE: `costs` is defined *below* (with the set-mode data) and is
        # captured here by closure; this only works because the function is
        # not called until after `costs` is populated. It also indexes
        # `costs` with up to `size` entries, so it presumably assumes
        # sets >= size -- TODO confirm.
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value  # silence pyflakes warning
            except KeyError:
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        # same as doinserts but through __setitem__ (no cost accounting)
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0  # get
        else:
            op = 1  # set

        mixedops.append(
            (op, random.randint(0, size * 2), random.choice(costrange))
        )

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    # the cost-limited and unlimited variants are mutually exclusive
    if costlimit:
        benches.extend(
            [
                (dogetscost, b'gets w/ cost limit'),
                (doinsertscost, b'inserts w/ cost limit'),
                (domixedcost, b'mixed w/ cost limit'),
            ]
        )
    else:
        benches.extend(
            [
                (dogets, b'gets'),
                (doinserts, b'inserts'),
                (dosets, b'sets'),
                (domixed, b'mixed'),
            ]
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
4252 4284
@command(
    b'perf::write|perfwrite',
    formatteropts
    + [
        (b'', b'write-method', b'write', b'ui write method'),
        (b'', b'nlines', 100, b'number of lines'),
        (b'', b'nitems', 100, b'number of items (per line)'),
        (b'', b'item', b'x', b'item that is written'),
        (b'', b'batch-line', None, b'pass whole line to write method at once'),
        (b'', b'flush-line', None, b'flush after each line'),
    ],
)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write (and others)"""
    opts = _byteskwargs(opts)

    # resolve the ui method to benchmark (write, write_err, ...)
    write = getattr(ui, _sysstr(opts[b'write_method']))
    nlines = int(opts[b'nlines'])
    nitems = int(opts[b'nitems'])
    item = opts[b'item']
    batch_line = opts.get(b'batch_line')
    flush_line = opts.get(b'flush_line')

    if batch_line:
        # pre-build the full line once so each iteration is a single call
        full_line = item * nitems + b'\n'

    def bench():
        for _ in pycompat.xrange(nlines):
            if batch_line:
                write(full_line)
            else:
                # one call per item, then the newline
                for _ in pycompat.xrange(nitems):
                    write(item)
                write(b'\n')
            if flush_line:
                ui.flush()
        ui.flush()

    timer, fm = gettimer(ui, opts)
    timer(bench)
    fm.end()
4294 4326
4295 4327
def uisetup(ui):
    """extension setup hook

    Install a compatibility shim for very old Mercurial versions so that
    using ``--dir`` with commands going through ``cmdutil.openrevlog``
    fails with a clear message instead of misbehaving.
    """
    if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
        commands, b'debugrevlogopts'
    ):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(
                    b"This version doesn't support --dir option",
                    hint=b"use 3.5 or later",
                )
            return orig(repo, cmd, file_, opts)

        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
4314 4346
4315 4347
@command(
    b'perf::progress|perfprogress',
    formatteropts
    + [
        (b'', b'topic', b'topic', b'topic for progress messages'),
        (b'c', b'total', 1000000, b'total value we are progressing to'),
    ],
    norepo=True,
)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def d():
        # drive a progress bar from 0 to `total`, one increment at a time
        with ui.makeprogress(topic, total=total) as progress:
            step = progress.increment
            for _ in _xrange(total):
                step()

    timer(d)
    fm.end()
@@ -1,437 +1,439 b''
1 1 #require test-repo
2 2
3 3 Set vars:
4 4
5 5 $ . "$TESTDIR/helpers-testrepo.sh"
6 6 $ CONTRIBDIR="$TESTDIR/../contrib"
7 7
8 8 Prepare repo:
9 9
10 10 $ hg init
11 11
12 12 $ echo this is file a > a
13 13 $ hg add a
14 14 $ hg commit -m first
15 15
16 16 $ echo adding to file a >> a
17 17 $ hg commit -m second
18 18
19 19 $ echo adding more to file a >> a
20 20 $ hg commit -m third
21 21
22 22 $ hg up -r 0
23 23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 24 $ echo merge-this >> a
25 25 $ hg commit -m merge-able
26 26 created new head
27 27
28 28 $ hg up -r 2
29 29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
30 30
31 31 perfstatus
32 32
33 33 $ cat >> $HGRCPATH << EOF
34 34 > [extensions]
35 35 > perf=$CONTRIBDIR/perf.py
36 36 > [perf]
37 37 > presleep=0
38 38 > stub=on
39 39 > parentscount=1
40 40 > EOF
41 41 $ hg help -e perf
42 42 perf extension - helper extension to measure performance
43 43
44 44 Configurations
45 45 ==============
46 46
47 47 "perf"
48 48 ------
49 49
50 50 "all-timing"
51 51 When set, additional statistics will be reported for each benchmark: best,
52 52 worst, median average. If not set only the best timing is reported
53 53 (default: off).
54 54
55 55 "presleep"
56 56 number of second to wait before any group of runs (default: 1)
57 57
58 58 "pre-run"
59 59 number of run to perform before starting measurement.
60 60
61 61 "profile-benchmark"
62 62 Enable profiling for the benchmarked section. (The first iteration is
63 63 benchmarked)
64 64
65 65 "run-limits"
66 66 Control the number of runs each benchmark will perform. The option value
67 67 should be a list of '<time>-<numberofrun>' pairs. After each run the
68 68 conditions are considered in order with the following logic:
69 69
70 70 If benchmark has been running for <time> seconds, and we have performed
71 71 <numberofrun> iterations, stop the benchmark,
72 72
73 73 The default value is: '3.0-100, 10.0-3'
74 74
75 75 "stub"
76 76 When set, benchmarks will only be run once, useful for testing (default:
77 77 off)
78 78
79 79 list of commands:
80 80
81 81 perf::addremove
82 82 (no help text available)
83 83 perf::ancestors
84 84 (no help text available)
85 85 perf::ancestorset
86 86 (no help text available)
87 87 perf::annotate
88 88 (no help text available)
89 89 perf::bdiff benchmark a bdiff between revisions
90 90 perf::bookmarks
91 91 benchmark parsing bookmarks from disk to memory
92 92 perf::branchmap
93 93 benchmark the update of a branchmap
94 94 perf::branchmapload
95 95 benchmark reading the branchmap
96 96 perf::branchmapupdate
97 97 benchmark branchmap update from for <base> revs to <target>
98 98 revs
99 99 perf::bundle benchmark the creation of a bundle from a repository
100 100 perf::bundleread
101 101 Benchmark reading of bundle files.
102 102 perf::cca (no help text available)
103 103 perf::changegroupchangelog
104 104 Benchmark producing a changelog group for a changegroup.
105 105 perf::changeset
106 106 (no help text available)
107 107 perf::ctxfiles
108 108 (no help text available)
109 109 perf::delta-find
110 110 benchmark the process of finding a valid delta for a revlog
111 111 revision
112 112 perf::diffwd Profile diff of working directory changes
113 113 perf::dirfoldmap
114 114 benchmap a 'dirstate._map.dirfoldmap.get()' request
115 115 perf::dirs (no help text available)
116 116 perf::dirstate
117 117 benchmap the time of various distate operations
118 118 perf::dirstatedirs
119 119 benchmap a 'dirstate.hasdir' call from an empty 'dirs' cache
120 120 perf::dirstatefoldmap
121 121 benchmap a 'dirstate._map.filefoldmap.get()' request
122 122 perf::dirstatewrite
123 123 benchmap the time it take to write a dirstate on disk
124 124 perf::discovery
125 125 benchmark discovery between local repo and the peer at given
126 126 path
127 127 perf::fncacheencode
128 128 (no help text available)
129 129 perf::fncacheload
130 130 (no help text available)
131 131 perf::fncachewrite
132 132 (no help text available)
133 133 perf::heads benchmark the computation of a changelog heads
134 134 perf::helper-mergecopies
135 135 find statistics about potential parameters for
136 136 'perfmergecopies'
137 137 perf::helper-pathcopies
138 138 find statistic about potential parameters for the
139 139 'perftracecopies'
140 140 perf::ignore benchmark operation related to computing ignore
141 141 perf::index benchmark index creation time followed by a lookup
142 142 perf::linelogedits
143 143 (no help text available)
144 144 perf::loadmarkers
145 145 benchmark the time to parse the on-disk markers for a repo
146 146 perf::log (no help text available)
147 147 perf::lookup (no help text available)
148 148 perf::lrucachedict
149 149 (no help text available)
150 150 perf::manifest
151 151 benchmark the time to read a manifest from disk and return a
152 152 usable
153 153 perf::mergecalculate
154 154 (no help text available)
155 155 perf::mergecopies
156 156 measure runtime of 'copies.mergecopies'
157 157 perf::moonwalk
158 158 benchmark walking the changelog backwards
159 159 perf::nodelookup
160 160 (no help text available)
161 161 perf::nodemap
162 162 benchmark the time necessary to look up revision from a cold
163 163 nodemap
164 164 perf::parents
165 165 benchmark the time necessary to fetch one changeset's parents.
166 166 perf::pathcopies
167 167 benchmark the copy tracing logic
168 168 perf::phases benchmark phasesets computation
169 169 perf::phasesremote
170 170 benchmark time needed to analyse phases of the remote server
171 171 perf::progress
172 172 printing of progress bars
173 173 perf::rawfiles
174 174 (no help text available)
175 175 perf::revlogchunks
176 176 Benchmark operations on revlog chunks.
177 177 perf::revlogindex
178 178 Benchmark operations against a revlog index.
179 179 perf::revlogrevision
180 180 Benchmark obtaining a revlog revision.
181 181 perf::revlogrevisions
182 182 Benchmark reading a series of revisions from a revlog.
183 183 perf::revlogwrite
184 184 Benchmark writing a series of revisions to a revlog.
185 185 perf::revrange
186 186 (no help text available)
187 187 perf::revset benchmark the execution time of a revset
188 188 perf::startup
189 189 (no help text available)
190 190 perf::status benchmark the performance of a single status call
191 perf::stream-generate
192 benchmark the full generation of a stream clone
191 193 perf::stream-locked-section
192 194 benchmark the initial, repo-locked, section of a stream-clone
193 195 perf::tags (no help text available)
194 196 perf::templating
195 197 test the rendering time of a given template
196 198 perf::unbundle
197 199 benchmark application of a bundle in a repository.
198 200 perf::unidiff
199 201 benchmark a unified diff between revisions
200 202 perf::volatilesets
201 203 benchmark the computation of various volatile set
202 204 perf::walk (no help text available)
203 205 perf::write microbenchmark ui.write (and others)
204 206
205 207 (use 'hg help -v perf' to show built-in aliases and global options)
206 208
207 209 $ hg help perfaddremove
208 210 hg perf::addremove
209 211
210 212 aliases: perfaddremove
211 213
212 214 (no help text available)
213 215
214 216 options:
215 217
216 218 -T --template TEMPLATE display with template
217 219
218 220 (some details hidden, use --verbose to show complete help)
219 221
220 222 $ hg perfaddremove
221 223 $ hg perfancestors
222 224 $ hg perfancestorset 2
223 225 $ hg perfannotate a
224 226 $ hg perfbdiff -c 1
225 227 $ hg perfbdiff --alldata 1
226 228 $ hg perfunidiff -c 1
227 229 $ hg perfunidiff --alldata 1
228 230 $ hg perfbookmarks
229 231 $ hg perfbranchmap
230 232 $ hg perfbranchmapload
231 233 $ hg perfbranchmapupdate --base "not tip" --target "tip"
232 234 benchmark of branchmap with 3 revisions with 1 new ones
233 235 $ hg perfcca
234 236 $ hg perfchangegroupchangelog
235 237 $ hg perfchangegroupchangelog --cgversion 01
236 238 $ hg perfchangeset 2
237 239 $ hg perfctxfiles 2
238 240 $ hg perfdiffwd
239 241 $ hg perfdirfoldmap
240 242 $ hg perfdirs
241 243 $ hg perfdirstate
242 244 $ hg perfdirstate --contains
243 245 $ hg perfdirstate --iteration
244 246 $ hg perfdirstatedirs
245 247 $ hg perfdirstatefoldmap
246 248 $ hg perfdirstatewrite
247 249 #if repofncache
248 250 $ hg perffncacheencode
249 251 $ hg perffncacheload
250 252 $ hg debugrebuildfncache
251 253 fncache already up to date
252 254 $ hg perffncachewrite
253 255 $ hg debugrebuildfncache
254 256 fncache already up to date
255 257 #endif
256 258 $ hg perfheads
257 259 $ hg perfignore
258 260 $ hg perfindex
259 261 $ hg perflinelogedits -n 1
260 262 $ hg perfloadmarkers
261 263 $ hg perflog
262 264 $ hg perflookup 2
263 265 $ hg perflrucache
264 266 $ hg perfmanifest 2
265 267 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
266 268 $ hg perfmanifest -m 44fe2c8352bb
267 269 abort: manifest revision must be integer or full node
268 270 [255]
269 271 $ hg perfmergecalculate -r 3
270 272 $ hg perfmoonwalk
271 273 $ hg perfnodelookup 2
272 274 $ hg perfpathcopies 1 2
273 275 $ hg perfprogress --total 1000
274 276 $ hg perfrawfiles 2
275 277 $ hg perfrevlogindex -c
276 278 #if reporevlogstore
277 279 $ hg perfrevlogrevisions .hg/store/data/a.i
278 280 #endif
279 281 $ hg perfrevlogrevision -m 0
280 282 $ hg perfrevlogchunks -c
281 283 $ hg perfrevrange
282 284 $ hg perfrevset 'all()'
283 285 $ hg perfstartup
284 286 $ hg perfstatus
285 287 $ hg perfstatus --dirstate
286 288 $ hg perftags
287 289 $ hg perftemplating
288 290 $ hg perfvolatilesets
289 291 $ hg perfwalk
290 292 $ hg perfparents
291 293 $ hg perfdiscovery -q .
292 294
293 295 Test run control
294 296 ----------------
295 297
296 298 Simple single entry
297 299
298 300 $ hg perfparents --config perf.stub=no --config perf.run-limits='0.000000001-15'
299 301 ! wall * comb * user * sys * (best of 15) (glob)
300 302
301 303 Multiple entries
302 304
303 305 $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-5'
304 306 ! wall * comb * user * sys * (best of 5) (glob)
305 307
306 308 error case are ignored
307 309
308 310 $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-5'
309 311 malformatted run limit entry, missing "-": 500
310 312 ! wall * comb * user * sys * (best of 5) (glob)
311 313 $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-12, 0.000000001-5'
312 314 malformatted run limit entry, could not convert string to float: 'aaa': aaa-12
313 315 ! wall * comb * user * sys * (best of 5) (glob)
314 316 $ hg perfparents --config perf.stub=no --config perf.run-limits='12-aaaaaa, 0.000000001-5'
315 317 malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 12-aaaaaa
316 318 ! wall * comb * user * sys * (best of 5) (glob)
317 319
318 320 test actual output
319 321 ------------------
320 322
321 323 normal output:
322 324
323 325 $ hg perfheads --config perf.stub=no
324 326 ! wall * comb * user * sys * (best of *) (glob)
325 327
326 328 detailed output:
327 329
328 330 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
329 331 ! wall * comb * user * sys * (best of *) (glob)
330 332 ! wall * comb * user * sys * (max of *) (glob)
331 333 ! wall * comb * user * sys * (avg of *) (glob)
332 334 ! wall * comb * user * sys * (median of *) (glob)
333 335
334 336 test json output
335 337 ----------------
336 338
337 339 normal output:
338 340
339 341 $ hg perfheads --template json --config perf.stub=no
340 342 [
341 343 {
342 344 "comb": *, (glob)
343 345 "count": *, (glob)
344 346 "sys": *, (glob)
345 347 "user": *, (glob)
346 348 "wall": * (glob)
347 349 }
348 350 ]
349 351
350 352 detailed output:
351 353
352 354 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
353 355 [
354 356 {
355 357 "avg.comb": *, (glob)
356 358 "avg.count": *, (glob)
357 359 "avg.sys": *, (glob)
358 360 "avg.user": *, (glob)
359 361 "avg.wall": *, (glob)
360 362 "comb": *, (glob)
361 363 "count": *, (glob)
362 364 "max.comb": *, (glob)
363 365 "max.count": *, (glob)
364 366 "max.sys": *, (glob)
365 367 "max.user": *, (glob)
366 368 "max.wall": *, (glob)
367 369 "median.comb": *, (glob)
368 370 "median.count": *, (glob)
369 371 "median.sys": *, (glob)
370 372 "median.user": *, (glob)
371 373 "median.wall": *, (glob)
372 374 "sys": *, (glob)
373 375 "user": *, (glob)
374 376 "wall": * (glob)
375 377 }
376 378 ]
377 379
378 380 Test pre-run feature
379 381 --------------------
380 382
381 383 (perf discovery has some spurious output)
382 384
383 385 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=0
384 386 ! wall * comb * user * sys * (best of 1) (glob)
385 387 searching for changes
386 388 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=1
387 389 ! wall * comb * user * sys * (best of 1) (glob)
388 390 searching for changes
389 391 searching for changes
390 392 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=3
391 393 ! wall * comb * user * sys * (best of 1) (glob)
392 394 searching for changes
393 395 searching for changes
394 396 searching for changes
395 397 searching for changes
396 398 $ hg perf::bundle 'last(all(), 5)'
397 399 $ hg bundle --exact --rev 'last(all(), 5)' last-5.hg
398 400 4 changesets found
399 401 $ hg perf::unbundle last-5.hg
400 402
401 403
402 404 test profile-benchmark option
403 405 ------------------------------
404 406
405 407 Function to check that statprof ran
406 408 $ statprofran () {
407 409 > egrep 'Sample count:|No samples recorded' > /dev/null
408 410 > }
409 411 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.profile-benchmark=yes 2>&1 | statprofran
410 412
411 413 Check perf.py for historical portability
412 414 ----------------------------------------
413 415
414 416 $ cd "$TESTDIR/.."
415 417
416 418 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
417 419 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
418 420 > "$TESTDIR"/check-perf-code.py contrib/perf.py
419 421 contrib/perf.py:\d+: (re)
420 422 > from mercurial import (
421 423 import newer module separately in try clause for early Mercurial
422 424 contrib/perf.py:\d+: (re)
423 425 > from mercurial import (
424 426 import newer module separately in try clause for early Mercurial
425 427 contrib/perf.py:\d+: (re)
426 428 > origindexpath = orig.opener.join(indexfile)
427 429 use getvfs()/getsvfs() for early Mercurial
428 430 contrib/perf.py:\d+: (re)
429 431 > origdatapath = orig.opener.join(datafile)
430 432 use getvfs()/getsvfs() for early Mercurial
431 433 contrib/perf.py:\d+: (re)
432 434 > vfs = vfsmod.vfs(tmpdir)
433 435 use getvfs()/getsvfs() for early Mercurial
434 436 contrib/perf.py:\d+: (re)
435 437 > vfs.options = getattr(orig.opener, 'options', None)
436 438 use getvfs()/getsvfs() for early Mercurial
437 439 [1]
General Comments 0
You need to be logged in to leave comments. Login now