##// END OF EJS Templates
perf: add a new "context" argument to timer...
marmoute -
r51569:28620be8 default
parent child Browse files
Show More
@@ -1,4329 +1,4337 b''
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance
3 3
4 4 Configurations
5 5 ==============
6 6
7 7 ``perf``
8 8 --------
9 9
10 10 ``all-timing``
11 11 When set, additional statistics will be reported for each benchmark: best,
12 12 worst, median average. If not set only the best timing is reported
13 13 (default: off).
14 14
15 15 ``presleep``
16 16 number of second to wait before any group of runs (default: 1)
17 17
18 18 ``pre-run``
19 19 number of run to perform before starting measurement.
20 20
21 21 ``profile-benchmark``
22 22 Enable profiling for the benchmarked section.
23 23 (The first iteration is benchmarked)
24 24
25 25 ``run-limits``
26 26 Control the number of runs each benchmark will perform. The option value
27 27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 28 conditions are considered in order with the following logic:
29 29
30 30 If benchmark has been running for <time> seconds, and we have performed
31 31 <numberofrun> iterations, stop the benchmark,
32 32
33 33 The default value is: `3.0-100, 10.0-3`
34 34
35 35 ``stub``
36 36 When set, benchmarks will only be run once, useful for testing
37 37 (default: off)
38 38 '''
39 39
40 40 # "historical portability" policy of perf.py:
41 41 #
42 42 # We have to do:
43 43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 46 # - make historical perf command work correctly with as wide Mercurial
47 47 # version as possible
48 48 #
49 49 # We have to do, if possible with reasonable cost:
50 50 # - make recent perf command for historical feature work correctly
51 51 # with early Mercurial
52 52 #
53 53 # We don't have to do:
54 54 # - make perf command for recent feature work correctly with early
55 55 # Mercurial
56 56
57 57 import contextlib
58 58 import functools
59 59 import gc
60 60 import os
61 61 import random
62 62 import shutil
63 63 import struct
64 64 import sys
65 65 import tempfile
66 66 import threading
67 67 import time
68 68
69 69 import mercurial.revlog
70 70 from mercurial import (
71 71 changegroup,
72 72 cmdutil,
73 73 commands,
74 74 copies,
75 75 error,
76 76 extensions,
77 77 hg,
78 78 mdiff,
79 79 merge,
80 80 util,
81 81 )
82 82
83 83 # for "historical portability":
84 84 # try to import modules separately (in dict order), and ignore
85 85 # failure, because these aren't available with early Mercurial
86 86 try:
87 87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 88 except ImportError:
89 89 pass
90 90 try:
91 91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 92 except ImportError:
93 93 pass
94 94 try:
95 95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96 96
97 97 dir(registrar) # forcibly load it
98 98 except ImportError:
99 99 registrar = None
100 100 try:
101 101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 102 except ImportError:
103 103 pass
104 104 try:
105 105 from mercurial.utils import repoviewutil # since 5.0
106 106 except ImportError:
107 107 repoviewutil = None
108 108 try:
109 109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 110 except ImportError:
111 111 pass
112 112 try:
113 113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 114 except ImportError:
115 115 pass
116 116
117 117 try:
118 118 from mercurial import profiling
119 119 except ImportError:
120 120 profiling = None
121 121
try:
    from mercurial.revlogutils import constants as revlog_constants

    # modern hg: the revlog constructor takes an explicit "kind"; tag
    # revlogs created by this extension as perf-created
    perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')

    def revlog(opener, *args, **kwargs):
        """Build a revlog, injecting the perf "kind" (modern hg only)."""
        return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)


except (ImportError, AttributeError):
    # older hg: no revlog kinds exist; call the constructor unchanged
    perf_rl_kind = None

    def revlog(opener, *args, **kwargs):
        """Build a revlog with the historical (kind-less) signature."""
        return mercurial.revlog.revlog(opener, *args, **kwargs)
136 136
137 137
def identity(a):
    """Return *a* unchanged (no-op fallback for missing pycompat helpers)."""
    return a
140 140
141 141
142 142 try:
143 143 from mercurial import pycompat
144 144
145 145 getargspec = pycompat.getargspec # added to module after 4.5
146 146 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
147 147 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
148 148 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
149 149 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
150 150 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
151 151 if pycompat.ispy3:
152 152 _maxint = sys.maxsize # per py3 docs for replacing maxint
153 153 else:
154 154 _maxint = sys.maxint
155 155 except (NameError, ImportError, AttributeError):
156 156 import inspect
157 157
158 158 getargspec = inspect.getargspec
159 159 _byteskwargs = identity
160 160 _bytestr = str
161 161 fsencode = identity # no py3 support
162 162 _maxint = sys.maxint # no py3 support
163 163 _sysstr = lambda x: x # no py3 support
164 164 _xrange = xrange
165 165
166 166 try:
167 167 # 4.7+
168 168 queue = pycompat.queue.Queue
169 169 except (NameError, AttributeError, ImportError):
170 170 # <4.7.
171 171 try:
172 172 queue = pycompat.queue
173 173 except (NameError, AttributeError, ImportError):
174 174 import Queue as queue
175 175
176 176 try:
177 177 from mercurial import logcmdutil
178 178
179 179 makelogtemplater = logcmdutil.maketemplater
180 180 except (AttributeError, ImportError):
181 181 try:
182 182 makelogtemplater = cmdutil.makelogtemplater
183 183 except (AttributeError, ImportError):
184 184 makelogtemplater = None
185 185
186 186 # for "historical portability":
187 187 # define util.safehasattr forcibly, because util.safehasattr has been
188 188 # available since 1.9.3 (or 94b200a11cf7)
189 189 _undefined = object()
190 190
191 191
def safehasattr(thing, attr):
    """Return True if `thing` has attribute `attr` (bytes or str name).

    Local replacement for util.safehasattr (only available since hg
    1.9.3); the `_undefined` sentinel ensures attributes whose value is
    falsy (None, 0, b'') still count as present.
    """
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined
194 194
195 195
196 196 setattr(util, 'safehasattr', safehasattr)
197 197
198 198 # for "historical portability":
199 199 # define util.timer forcibly, because util.timer has been available
200 200 # since ae5d60bb70c9
201 201 if safehasattr(time, 'perf_counter'):
202 202 util.timer = time.perf_counter
203 203 elif os.name == b'nt':
204 204 util.timer = time.clock
205 205 else:
206 206 util.timer = time.time
207 207
208 208 # for "historical portability":
209 209 # use locally defined empty option list, if formatteropts isn't
210 210 # available, because commands.formatteropts has been available since
211 211 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
212 212 # available since 2.2 (or ae5f92e154d3)
213 213 formatteropts = getattr(
214 214 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
215 215 )
216 216
217 217 # for "historical portability":
218 218 # use locally defined option list, if debugrevlogopts isn't available,
219 219 # because commands.debugrevlogopts has been available since 3.7 (or
220 220 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
221 221 # since 1.9 (or a79fea6b3e77).
222 222 revlogopts = getattr(
223 223 cmdutil,
224 224 "debugrevlogopts",
225 225 getattr(
226 226 commands,
227 227 "debugrevlogopts",
228 228 [
229 229 (b'c', b'changelog', False, b'open changelog'),
230 230 (b'm', b'manifest', False, b'open manifest'),
231 231 (b'', b'dir', False, b'open directory manifest'),
232 232 ],
233 233 ),
234 234 )
235 235
236 236 cmdtable = {}
237 237
238 238
239 239 # for "historical portability":
240 240 # define parsealiases locally, because cmdutil.parsealiases has been
241 241 # available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a command spec like b"perf::tags|perftags" into its aliases."""
    aliases = cmd.split(b"|")
    return aliases
244 244
245 245
# Pick the newest available way to register commands into cmdtable,
# falling back through progressively older Mercurial APIs.
if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if 'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command

        def command(name, options=(), synopsis=None, norepo=False):
            # emulate norepo= by appending aliases to commands.norepo,
            # which is how pre-3.1 hg tracked no-repo commands
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)


else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func

        return decorator
277 277
278 278
279 279 try:
280 280 import mercurial.registrar
281 281 import mercurial.configitems
282 282
283 283 configtable = {}
284 284 configitem = mercurial.registrar.configitem(configtable)
285 285 configitem(
286 286 b'perf',
287 287 b'presleep',
288 288 default=mercurial.configitems.dynamicdefault,
289 289 experimental=True,
290 290 )
291 291 configitem(
292 292 b'perf',
293 293 b'stub',
294 294 default=mercurial.configitems.dynamicdefault,
295 295 experimental=True,
296 296 )
297 297 configitem(
298 298 b'perf',
299 299 b'parentscount',
300 300 default=mercurial.configitems.dynamicdefault,
301 301 experimental=True,
302 302 )
303 303 configitem(
304 304 b'perf',
305 305 b'all-timing',
306 306 default=mercurial.configitems.dynamicdefault,
307 307 experimental=True,
308 308 )
309 309 configitem(
310 310 b'perf',
311 311 b'pre-run',
312 312 default=mercurial.configitems.dynamicdefault,
313 313 )
314 314 configitem(
315 315 b'perf',
316 316 b'profile-benchmark',
317 317 default=mercurial.configitems.dynamicdefault,
318 318 )
319 319 configitem(
320 320 b'perf',
321 321 b'run-limits',
322 322 default=mercurial.configitems.dynamicdefault,
323 323 experimental=True,
324 324 )
325 325 except (ImportError, AttributeError):
326 326 pass
327 327 except TypeError:
328 328 # compatibility fix for a11fd395e83f
329 329 # hg version: 5.2
330 330 configitem(
331 331 b'perf',
332 332 b'presleep',
333 333 default=mercurial.configitems.dynamicdefault,
334 334 )
335 335 configitem(
336 336 b'perf',
337 337 b'stub',
338 338 default=mercurial.configitems.dynamicdefault,
339 339 )
340 340 configitem(
341 341 b'perf',
342 342 b'parentscount',
343 343 default=mercurial.configitems.dynamicdefault,
344 344 )
345 345 configitem(
346 346 b'perf',
347 347 b'all-timing',
348 348 default=mercurial.configitems.dynamicdefault,
349 349 )
350 350 configitem(
351 351 b'perf',
352 352 b'pre-run',
353 353 default=mercurial.configitems.dynamicdefault,
354 354 )
355 355 configitem(
356 356 b'perf',
357 357 b'profile-benchmark',
358 358 default=mercurial.configitems.dynamicdefault,
359 359 )
360 360 configitem(
361 361 b'perf',
362 362 b'run-limits',
363 363 default=mercurial.configitems.dynamicdefault,
364 364 )
365 365
366 366
def getlen(ui):
    """Return len(), or a stub always reporting 1 when perf.stub is set."""
    use_stub = ui.configbool(b"perf", b"stub", False)
    if use_stub:
        return lambda sequence: 1
    return len
371 371
372 372
class noop:
    """Context manager that does nothing; stands in for a disabled profiler."""

    def __enter__(self):
        return None

    def __exit__(self, *args):
        # returning a falsy value lets exceptions propagate unchanged
        return None


# shared do-nothing instance, reused instead of allocating per run
NOOPCTX = noop()
384 384
385 385
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter:
            """Minimized composition of baseformatter and plainformatter"""

            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short

            def __nonzero__(self):
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    # each entry is `<seconds>-<minimal run count>` (see module docstring);
    # malformed entries are warned about and skipped
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
            continue
        try:
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    # experimental config: perf.profile-benchmark
    # when enabled, _timer profiles only the first measured iteration
    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
    )
    return t, fm
508 508
509 509
def stub_timer(fm, func, setup=None, title=None, context=None):
    """Run ``func`` exactly once, without timing (perf.stub mode).

    This must accept the same keyword arguments as ``_timer`` because
    ``gettimer`` substitutes it via ``functools.partial`` when
    ``perf.stub`` is set, and callers pass the same keywords either way.
    ``_timer`` grew a ``context`` argument in this change, so accept it
    here too — otherwise any command passing ``context=`` would raise
    TypeError under stub mode.  ``title`` is accepted for interface
    compatibility but unused.

    setup: optional callable invoked before the run
    context: optional context-manager factory entered around the call;
        default None means a plain call (equivalent to a no-op context)
    """
    if setup is not None:
        setup()
    if context is None:
        # no context factory supplied: behave exactly as before
        func()
    else:
        with context():
            func()
514 514
515 515
@contextlib.contextmanager
def timeone():
    """Context manager yielding a list that receives one timing tuple.

    On exit, the yielded list holds a single (wall, user, sys) triple
    measured around the managed block.
    """
    measurements = []
    os_before = os.times()
    wall_before = util.timer()
    yield measurements
    wall_after = util.timer()
    os_after = os.times()
    measurements.append(
        (
            wall_after - wall_before,
            os_after[0] - os_before[0],
            os_after[1] - os_before[1],
        )
    )
526 526
527 527
528 528 # list of stop condition (elapsed time, minimal run count)
529 529 DEFAULTLIMITS = (
530 530 (3.0, 100),
531 531 (10.0, 3),
532 532 )
533 533
534 534
@contextlib.contextmanager
def noop_context():
    """Do-nothing context manager; default for `_timer`'s `context` arg."""
    yield
538
539
def _timer(
    fm,
    func,
    setup=None,
    context=noop_context,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
):
    """Repeatedly run ``func`` and report timings through formatter ``fm``.

    setup: optional callable invoked (untimed) before every run
    context: context-manager factory entered around every run; time spent
        entering/exiting it is not measured (only ``func`` is timed)
    prerun: number of untimed warm-up runs before measurement starts
    limits: iterable of (elapsed seconds, minimal run count) stop
        conditions; the first satisfied condition ends the benchmark
    profiler: optional profiler context; only the first measured
        iteration is profiled (it is swapped for NOOPCTX afterwards)
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    for i in range(prerun):
        if setup is not None:
            setup()
        with context():
            func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with context():
            with profiler:
                with timeone() as item:
                    r = func()
        # profile only the first measured iteration
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    # r is the last run's return value, shown as "! result: ..."
    formatone(fm, results, title=title, result=r, displayall=displayall)
574 582
575 583
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's timing statistics to formatter ``fm``.

    timings: list of (wall, user, sys) tuples; sorted in place.
    Always reports the best run; with ``displayall`` also reports the
    max, average and median runs.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def display(role, entry):
        # entry is one (wall, user, sys) tuple; "best" gets no field prefix
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    min_val = timings[0]
    display(b'best', min_val)
    if displayall:
        max_val = timings[-1]
        display(b'max', max_val)
        avg = tuple([sum(x) / count for x in zip(*timings)])
        display(b'avg', avg)
        median = timings[len(timings) // 2]
        display(b'median', median)
608 616
609 617
610 618 # utilities for historical portability
611 619
612 620
def getint(ui, section, name, default):
    """Read an integer config value, tolerating very old Mercurial.

    for "historical portability": ui.configint has only been available
    since 1.9 (or fa2b596db182), so parse the raw value ourselves.
    """
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        value = int(raw)
    except ValueError:
        raise error.ConfigError(
            b"%s.%s is not an integer ('%s')" % (section, name, raw)
        )
    return value
625 633
626 634
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort(
            (
                b"missing attribute %s of %s might break assumption"
                b" of performance measurement"
            )
            % (name, obj)
        )

    # captured here so restore() can put back the pre-existing value
    origvalue = getattr(obj, _sysstr(name))

    class attrutil:
        # small accessor pair closing over obj/name/origvalue
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
663 671
664 672
665 673 # utilities to examine each internal API changes
666 674
667 675
def getbranchmapsubsettable():
    """Return the `subsettable` mapping from wherever this hg defines it."""
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    # - repoviewutil since 5.0
    # (repoviewutil may be None on old versions; getattr handles that)
    for mod in (branchmap, repoview, repoviewutil):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )
686 694
687 695
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store"""
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf);
    # earlier versions exposed the same vfs as repo.sopener.
    store_vfs = getattr(repo, 'svfs', None)
    if store_vfs:
        return store_vfs
    return getattr(repo, 'sopener')
697 705
698 706
def getvfs(repo):
    """Return appropriate object to access files under .hg"""
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf);
    # earlier versions exposed the same vfs as repo.opener.
    repo_vfs = getattr(repo, 'vfs', None)
    if repo_vfs:
        return repo_vfs
    return getattr(repo, 'opener')
708 716
709 717
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API"""
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if '_tagscache' in vars(repo):
                del repo.__dict__['_tagscache']

        return clearcache

    # 1.4 <= hg < 2.0: plain attribute, resettable to None
    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    # 0.6 <= hg < 1.4: older attribute name, same reset strategy
    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")
738 746
739 747
740 748 # utilities to clear cache
741 749
742 750
def clearfilecache(obj, attrname):
    """Drop a filecache'd attribute so the next access recomputes it."""
    # operate on the unfiltered object when the repo exposes one
    unfilter = getattr(obj, 'unfiltered', None)
    if unfilter is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
750 758
751 759
def clearchangelog(repo):
    """Force *repo*'s changelog to be reloaded on next access."""
    # on a filtered repoview, also reset its private changelog cache slots
    # (bypassing any attribute guards via object.__setattr__)
    if repo is not repo.unfiltered():
        object.__setattr__(repo, '_clcachekey', None)
        object.__setattr__(repo, '_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')
757 765
758 766
759 767 # perf commands
760 768
761 769
@command(b'perf::walk|perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    """benchmark walking the dirstate (tracked and unknown files)"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    # length of the materialized walk result is reported as the benchmark
    # result
    timer(
        lambda: len(
            list(
                repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
            )
        )
    )
    fm.end()
775 783
776 784
@command(b'perf::annotate|perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    """benchmark annotating file *f* at the working directory parent"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    # annotate(True) follows copies/renames — TODO confirm flag semantics
    timer(lambda: len(fc.annotate(True)))
    fm.end()
784 792
785 793
@command(
    b'perf::status|perfstatus',
    [
        (b'u', b'unknown', False, b'ask status to look for unknown files'),
        (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
    ]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call

    The repository data are preserved between each call.

    By default, only the status of the tracked file are requested. If
    `--unknown` is passed, the "unknown" files are also tracked.
    """
    opts = _byteskwargs(opts)
    # m = match.always(repo.root, repo.getcwd())
    # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    #                                                False))))
    timer, fm = gettimer(ui, opts)
    if opts[b'dirstate']:
        # benchmark the low-level dirstate.status() call directly
        dirstate = repo.dirstate
        m = scmutil.matchall(repo)
        unknown = opts[b'unknown']

        def status_dirstate():
            s = dirstate.status(
                m, subrepos=[], ignored=False, clean=False, unknown=unknown
            )
            # consume the result so laziness doesn't skew the timing
            sum(map(bool, s))

        if util.safehasattr(dirstate, 'running_status'):
            # newer hg: status must run inside running_status; invalidate
            # afterwards, presumably to discard per-run dirstate state
            with dirstate.running_status(repo):
                timer(status_dirstate)
                dirstate.invalidate()
        else:
            timer(status_dirstate)
    else:
        timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
827 835
828 836
@command(b'perf::addremove|perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    """benchmark a dry-run addremove over the whole working copy"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    try:
        # silence per-file output while benchmarking; restored in finally
        oldquiet = repo.ui.quiet
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        # dry_run so repeated runs never mutate the repository
        opts[b'dry_run'] = True
        if 'uipathfn' in getargspec(scmutil.addremove).args:
            # newer addremove signature takes a uipathfn argument
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
846 854
847 855
def clearcaches(cl):
    """Clear a changelog/revlog's lookup caches across hg versions."""
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # <= hg-5.2
        from mercurial.node import nullid, nullrev

        # reset the node->rev cache to its pristine single-entry state
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
858 866
859 867
@command(b'perf::heads|perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def s():
        # untimed setup: drop the changelog caches so each run recomputes
        clearcaches(cl)

    def d():
        len(cl.headrevs())

    timer(d, setup=s)
    fm.end()
875 883
876 884
@command(
    b'perf::tags|perftags',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perftags(ui, repo, **opts):
    """benchmark computing the repository's tags"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']

    def s():
        # untimed setup: optionally reload revlogs, always drop tags cache
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()

    def t():
        return len(repo.tags())

    timer(t, setup=s)
    fm.end()
901 909
902 910
@command(b'perf::ancestors|perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    """benchmark iterating all ancestors of the changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()

    def d():
        # exhaust the ancestors iterator; the iteration itself is the work
        for a in repo.changelog.ancestors(heads):
            pass

    timer(d)
    fm.end()
915 923
916 924
@command(b'perf::ancestorset|perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    """benchmark lazy-membership tests of REVSET revs in the ancestor set"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()

    def d():
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            # membership test only; result deliberately discarded
            rev in s

    timer(d)
    fm.end()
931 939
932 940
@command(
    b'perf::delta-find',
    revlogopts + formatteropts,
    b'-c|-m|FILE REV',
)
def perf_delta_find(ui, repo, arg_1, arg_2=None, **opts):
    """benchmark the process of finding a valid delta for a revlog revision

    When a revlog receives a new revision (e.g. from a commit, or from an
    incoming bundle), it searches for a suitable delta-base to produce a delta.
    This perf command measures how much time we spend in this process. It
    operates on an already stored revision.

    See `hg help debug-delta-find` for another related command.
    """
    from mercurial import revlogutils
    import mercurial.revlogutils.deltas as deltautil

    opts = _byteskwargs(opts)
    # single argument means REV only (revlog picked by -c/-m flags)
    if arg_2 is None:
        file_ = None
        rev = arg_1
    else:
        file_ = arg_1
        rev = arg_2

    repo = repo.unfiltered()

    timer, fm = gettimer(ui, opts)

    rev = int(rev)

    revlog = cmdutil.openrevlog(repo, b'perf::delta-find', file_, opts)

    deltacomputer = deltautil.deltacomputer(revlog)

    # rebuild the revisioninfo that a real addition of this revision
    # would hand to the delta computer
    node = revlog.node(rev)
    p1r, p2r = revlog.parentrevs(rev)
    p1 = revlog.node(p1r)
    p2 = revlog.node(p2r)
    full_text = revlog.revision(rev)
    textlen = len(full_text)
    cachedelta = None
    flags = revlog.flags(rev)

    revinfo = revlogutils.revisioninfo(
        node,
        p1,
        p2,
        [full_text],  # btext
        textlen,
        cachedelta,
        flags,
    )

    # Note: we should probably purge the potential caches (like the full
    # manifest cache) between runs.
    def find_one():
        with revlog._datafp() as fh:
            deltacomputer.finddeltainfo(revinfo, fh, target_rev=rev)

    timer(find_one)
    fm.end()
996 1004
997 1005
@command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path"""
    # repos[1] is (re)filled with a fresh peer in the setup step
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)

    # resolve `path` through whichever urlutil API this hg version has,
    # oldest fallback being plain ui.expandpath
    try:
        from mercurial.utils.urlutil import get_unique_pull_path_obj

        path = get_unique_pull_path_obj(b'perfdiscovery', ui, path)
    except ImportError:
        try:
            from mercurial.utils.urlutil import get_unique_pull_path

            path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
        except ImportError:
            path = ui.expandpath(path)

    def s():
        # untimed setup: reconnect to the peer before every run
        repos[1] = hg.peer(ui, opts, path)

    def d():
        setdiscovery.findcommonheads(ui, *repos)

    timer(d, setup=s)
    fm.end()
1024 1032
1025 1033
@command(
    b'perf::bookmarks|perfbookmarks',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']

    def s():
        # untimed setup: optionally reload revlogs, always drop the
        # cached bookmarks so each run re-parses from disk
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')

    def d():
        # attribute access triggers the (re)parse being measured
        repo._bookmarks

    timer(d, setup=s)
    fm.end()
1050 1058
1051 1059
@command(
    b'perf::bundle',
    [
        (
            b'r',
            b'rev',
            [],
            b'changesets to bundle',
            b'REV',
        ),
        (
            b't',
            b'type',
            b'none',
            b'bundlespec to use (see `hg help bundlespec`)',
            b'TYPE',
        ),
    ]
    + formatteropts,
    b'REVS',
)
def perfbundle(ui, repo, *revs, **opts):
    """benchmark the creation of a bundle from a repository

    For now, this only supports "none" compression.
    """
    # historical-portability: parsebundlespec moved to bundlecaches recently
    try:
        from mercurial import bundlecaches

        parsebundlespec = bundlecaches.parsebundlespec
    except ImportError:
        from mercurial import exchange

        parsebundlespec = exchange.parsebundlespec

    from mercurial import discovery
    from mercurial import bundle2

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    cl = repo.changelog
    revs = list(revs)
    revs.extend(opts.get(b'rev', ()))
    revs = scmutil.revrange(repo, revs)
    if not revs:
        # fix: error message previously read "not revision specified"
        raise error.Abort(b"no revision specified")
    # make it a consistent set (ie: without topological gaps)
    old_len = len(revs)
    revs = list(repo.revs(b"%ld::%ld", revs, revs))
    if old_len != len(revs):
        new_count = len(revs) - old_len
        msg = b"add %d new revisions to make it a consistent set\n"
        ui.write_err(msg % new_count)

    # compute the outgoing set the bundle will contain
    targets = [cl.node(r) for r in repo.revs(b"heads(::%ld)", revs)]
    bases = [cl.node(r) for r in repo.revs(b"heads(::%ld - %ld)", revs, revs)]
    outgoing = discovery.outgoing(repo, bases, targets)

    bundle_spec = opts.get(b'type')

    bundle_spec = parsebundlespec(repo, bundle_spec, strict=False)

    # map the bundle format version to a changegroup version
    cgversion = bundle_spec.params.get(b"cg.version")
    if cgversion is None:
        if bundle_spec.version == b'v1':
            cgversion = b'01'
        if bundle_spec.version == b'v2':
            cgversion = b'02'
    if cgversion not in changegroup.supportedoutgoingversions(repo):
        err = b"repository does not support bundle version %s"
        raise error.Abort(err % cgversion)

    if cgversion == b'01':  # bundle1
        bversion = b'HG10' + bundle_spec.wirecompression
        bcompression = None
    elif cgversion in (b'02', b'03'):
        bversion = b'HG20'
        bcompression = bundle_spec.wirecompression
    else:
        err = b'perf::bundle: unexpected changegroup version %s'
        raise error.ProgrammingError(err % cgversion)

    if bcompression is None:
        bcompression = b'UN'

    # only "none" compression is supported for now (see docstring)
    if bcompression != b'UN':
        err = b'perf::bundle: compression currently unsupported: %s'
        raise error.ProgrammingError(err % bcompression)

    def do_bundle():
        # write to os.devnull: we want to measure generation, not disk I/O
        bundle2.writenewbundle(
            ui,
            repo,
            b'perf::bundle',
            os.devnull,
            bversion,
            outgoing,
            bundle_spec.params,
        )

    timer(do_bundle)
    fm.end()
1155 1163
1156 1164
@command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    # benchmark factory: re-open and re-parse the bundle, then run `fn` on it
    def makebench(fn):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    # read the parsed bundle stream in `size`-byte chunks
    def makereadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    # baseline: raw file reads without any bundle parsing
    def makestdioread(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    # raw-read baselines are always applicable
    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # peek at the bundle once to register only the applicable benchmarks
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend(
                [
                    (makebench(deltaiter), b'cg1 deltaiter()'),
                    (makebench(iterchunks), b'cg1 getchunks()'),
                    (makereadnbytes(8192), b'cg1 read(8k)'),
                    (makereadnbytes(16384), b'cg1 read(16k)'),
                    (makereadnbytes(32768), b'cg1 read(32k)'),
                    (makereadnbytes(131072), b'cg1 read(128k)'),
                ]
            )
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend(
                [
                    (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                    (makebench(iterparts), b'bundle2 iterparts()'),
                    (
                        makebench(iterpartsseekable),
                        b'bundle2 iterparts() seekable',
                    ),
                    (makebench(seek), b'bundle2 part seek()'),
                    (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                    (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                    (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                    (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
                ]
            )
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1281 1289
1282 1290
@command(
    b'perf::changegroupchangelog|perfchangegroupchangelog',
    formatteropts
    + [
        (b'', b'cgversion', b'02', b'changegroup version'),
        (b'r', b'rev', b'', b'revisions to add to changegroup'),
    ],
)
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        # the generator does the actual work; drain it completely
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()
1318 1326
1319 1327
@command(b'perf::dirs|perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark `dirstate.hasdir` including the `_dirs` cache rebuild"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # warm the dirstate itself so loading is excluded from the timing
    b'a' in dirstate

    def run():
        dirstate.hasdir(b'a')
        # throw the `_dirs` cache away so the next run recomputes it
        try:
            del dirstate._map._dirs
        except AttributeError:
            pass

    timer(run)
    fm.end()
1336 1344
1337 1345
@command(
    b'perf::dirstate|perfdirstate',
    [
        (
            b'',
            b'iteration',
            None,
            b'benchmark a full iteration for the dirstate',
        ),
        (
            b'',
            b'contains',
            None,
            b'benchmark a large amount of `nf in dirstate` calls',
        ),
    ]
    + formatteropts,
)
def perfdirstate(ui, repo, **opts):
    """benchmark the time of various dirstate operations

    By default benchmark the time necessary to load a dirstate from scratch.
    The dirstate is loaded to the point where a "contains" request can be
    answered.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # warm the dirstate before selecting a benchmark mode
    b"a" in repo.dirstate

    if opts[b'iteration'] and opts[b'contains']:
        msg = b'only specify one of --iteration or --contains'
        raise error.Abort(msg)

    if opts[b'iteration']:
        # time a full iteration over all tracked files
        setup = None
        dirstate = repo.dirstate

        def d():
            for f in dirstate:
                pass

    elif opts[b'contains']:
        # time many membership tests, half of them guaranteed misses
        setup = None
        dirstate = repo.dirstate
        allfiles = list(dirstate)
        # also add file path that will be "missing" from the dirstate
        allfiles.extend([f[::-1] for f in allfiles])

        def d():
            for f in allfiles:
                f in dirstate

    else:
        # default mode: time a cold load of the dirstate

        def setup():
            repo.dirstate.invalidate()

        def d():
            b"a" in repo.dirstate

    timer(d, setup=setup)
    fm.end()
1400 1408
1401 1409
@command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark a 'dirstate.hasdir' call from an empty `dirs` cache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # warm the dirstate itself; only the `_dirs` rebuild should be timed
    repo.dirstate.hasdir(b"a")

    def setup():
        # drop the cached `_dirs` structure so each run recomputes it
        try:
            del repo.dirstate._map._dirs
        except AttributeError:
            pass

    def d():
        repo.dirstate.hasdir(b"a")

    timer(d, setup=setup)
    fm.end()
1420 1428
1421 1429
@command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.filefoldmap.get()` request

    The dirstate filefoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # warm everything except the filefoldmap itself
    dirstate._map.filefoldmap.get(b'a')

    def setup():
        del dirstate._map.filefoldmap

    def d():
        dirstate._map.filefoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()
1441 1449
1442 1450
@command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.dirfoldmap.get()` request

    The dirstate dirfoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # warm everything except the dirfoldmap itself
    dirstate._map.dirfoldmap.get(b'a')

    def setup():
        # drop both the dirfoldmap and the `_dirs` structure it is built from
        del dirstate._map.dirfoldmap
        try:
            del dirstate._map._dirs
        except AttributeError:
            pass

    def d():
        dirstate._map.dirfoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()
1466 1474
1467 1475
@command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark the time it takes to write a dirstate on disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # load the dirstate first so only the write is measured
    b"a" in ds

    def setup():
        # mark the dirstate dirty so write() actually writes something
        ds._dirty = True

    def d():
        ds.write(repo.currenttransaction())

    with repo.wlock():
        timer(d, setup=setup)
    fm.end()
1485 1493
1486 1494
def _getmergerevs(repo, opts):
    """parse command argument to return rev involved in merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        # explicit base overrides the computed common ancestor
        fromrev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[fromrev]
    else:
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
1508 1516
1509 1517
@command(
    b'perf::mergecalculate|perfmergecalculate',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecalculate(ui, repo, **opts):
    """benchmark the merge action computation (`merge.calculateupdates`)"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(
            repo,
            wctx,
            rctx,
            [ancestor],
            branchmerge=False,
            force=False,
            acceptremote=True,
            followcopies=True,
        )

    timer(d)
    fm.end()
1541 1549
1542 1550
@command(
    b'perf::mergecopies|perfmergecopies',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    local_ctx, other_ctx, base_ctx = _getmergerevs(repo, opts)

    def run():
        copies.mergecopies(repo, local_ctx, other_ctx, base_ctx)

    timer(run)
    fm.end()
1565 1573
1566 1574
@command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # resolve both revisions once, outside the timed section
    source_ctx = scmutil.revsingle(repo, rev1, rev1)
    target_ctx = scmutil.revsingle(repo, rev2, rev2)

    timer(lambda: copies.pathcopies(source_ctx, target_ctx))
    fm.end()
1580 1588
1581 1589
@command(
    b'perf::phases|perfphases',
    [
        (b'', b'full', False, b'include file reading time too'),
    ],
    b"",
)
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')

    def d():
        phases = _phases
        if full:
            # with --full, also drop the filecache entry so the phase
            # data is re-read from disk
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)

    timer(d)
    fm.end()
1606 1614
1607 1615
@command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import bin
    from mercurial import (
        exchange,
        hg,
        phases,
    )

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            b'default repository not configured!',
            hint=b"see 'hg help config.paths'",
        )
    # historical-portability: `main_path` only exists on recent path objects
    if util.safehasattr(path, 'main_path'):
        path = path.get_push_variant()
        dest = path.loc
    else:
        dest = path.pushloc or path.loc
    ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(
            b'listkeys', {b'namespace': b'phases'}
        ).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.statusnoi18n(b'publishing: yes\n')
    else:
        ui.statusnoi18n(b'publishing: no\n')

    has_node = getattr(repo.changelog.index, 'has_node', None)
    if has_node is None:
        # historical-portability: fall back to the old nodemap API
        has_node = repo.changelog.nodemap.__contains__
    nonpublishroots = 0
    # NOTE(review): iteritems() is a py2-era API; presumably `remotephases`
    # is a compat mapping type here — confirm before modernizing to items().
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if has_node(node) and int(phase):
            nonpublishroots += 1
    ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
    ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)

    def d():
        phases.remotephasessummary(repo, remotesubset, remotephases)

    timer(d)
    fm.end()
1670 1678
1671 1679
@command(
    b'perf::manifest|perfmanifest',
    [
        (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
        (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ]
    + formatteropts,
    b'REV|NODE',
)
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # REV names a changeset: resolve it to its manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # full hex node of the manifest itself
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                # historical-portability: getstorage is the modern API
                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(
                    b'manifest revision must be integer or full node'
                )

    def d():
        # clear caches so the read is cold every run
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()

    timer(d)
    fm.end()
1715 1723
1716 1724
@command(b'perf::changeset|perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading one changeset entry from the changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # resolve the revision once, outside the timed section
    node = scmutil.revsingle(repo, rev).node()

    def run():
        repo.changelog.read(node)

    timer(run)
    fm.end()
1729 1737
1730 1738
@command(b'perf::ignore|perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def prepare():
        # ensure each run rebuilds the ignore matcher from scratch
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def load():
        # property access triggers the ignore computation
        dirstate._ignore

    timer(load, setup=prepare, title=b"load")
    fm.end()
1747 1755
1748 1756
@command(
    b'perf::index|perfindex',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'no-lookup', None, b'do not revision lookup post creation'),
    ]
    + formatteropts,
)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matter. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matter.

    Example of useful set to test:

    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        if opts['rev']:
            raise error.Abort('--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func

    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)

    def d():
        # building the changelog (index creation) is part of the timing
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)

    timer(d, setup=setup)
    fm.end()
1811 1819
1812 1820
@command(
    b'perf::nodemap|perfnodemap',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ]
    + formatteropts,
)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can vary. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focuses on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]

    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        cl = makecl(unfi)
        # historical-portability: index.get_rev is the modern API
        if util.safehasattr(cl.index, 'get_rev'):
            nodeget[0] = cl.index.get_rev
        else:
            nodeget[0] = cl.nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:

        def setup():
            setnodeget()

    else:
        setnodeget()
        d()  # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
1883 1891
1884 1892
@command(b'perf::startup|perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark the startup time of a plain `hg version` invocation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def d():
        # HGRCPATH is cleared so user/system config does not skew the timing
        if os.name != 'nt':
            os.system(
                b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
            )
        else:
            os.environ['HGRCPATH'] = r' '
            os.system("%s version -q > NUL" % sys.argv[0])

    timer(d)
    fm.end()
1901 1909
1902 1910
def _find_stream_generator(version):
    """find the proper generator function for this stream version

    Every returned callable takes a repository and returns the stream-clone
    data generator for that version, or raises ``error.Abort`` when the
    requested version is unknown or unavailable in this Mercurial.
    """
    import mercurial.streamclone

    available = {}

    # try to fetch a v1 generator
    generatev1 = getattr(mercurial.streamclone, "generatev1", None)
    if generatev1 is not None:

        def generate_v1(repo):
            # fix: the previous code registered raw generatev1 (returning
            # the whole (entries, bytes, data) triplet) and carried a dead
            # closure that mistakenly called generatev2. Normalize v1 to
            # the same "return the data stream" contract as v2/v3.
            # NOTE(review): assumes generatev1 takes only the repo and
            # returns a triplet — confirm against streamclone.py.
            entries, bytes, data = generatev1(repo)
            return data

        available[b'v1'] = generate_v1
    # try to fetch a v2 generator
    generatev2 = getattr(mercurial.streamclone, "generatev2", None)
    if generatev2 is not None:

        def generate_v2(repo):
            entries, bytes, data = generatev2(repo, None, None, True)
            return data

        available[b'v2'] = generate_v2
    # try to fetch a v3 generator
    generatev3 = getattr(mercurial.streamclone, "generatev3", None)
    if generatev3 is not None:

        def generate_v3(repo):
            entries, bytes, data = generatev3(repo, None, None, True)
            return data

        available[b'v3-exp'] = generate_v3

    # resolve the request
    if version == b"latest":
        # latest is the highest non experimental version
        latest_key = max(v for v in available if b'-exp' not in v)
        return available[latest_key]
    elif version in available:
        return available[version]
    else:
        msg = b"unknown or unavailable version: %s"
        msg %= version
        hint = b"available versions: %s"
        hint %= b', '.join(sorted(available))
        raise error.Abort(msg, hint=hint)
1950 1958
1951 1959
@command(
    b'perf::stream-locked-section',
    [
        (
            b'',
            b'stream-version',
            b'latest',
            # fix: help text read "to us" and omitted the accepted
            # "v3-exp" value
            b'stream version to use ("v1", "v2", "v3-exp" or "latest", '
            b'the default)',
        ),
    ]
    + formatteropts,
)
def perf_stream_clone_scan(ui, repo, stream_version, **opts):
    """benchmark the initial, repo-locked, section of a stream-clone"""

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # deletion of the generator may trigger some cleanup that we do not want to
    # measure
    result_holder = [None]

    def setupone():
        # release the previous run's generator before the next timed run
        result_holder[0] = None

    generate = _find_stream_generator(stream_version)

    def runone():
        # the lock is held for the duration the initialisation
        result_holder[0] = generate(repo)

    timer(runone, setup=setupone, title=b"load")
    fm.end()
1985 1993
1986 1994
@command(b'perf::parents|perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nodes = [repo.changelog.node(i) for i in _xrange(count)]

    def run():
        parents = repo.changelog.parents
        for node in nodes:
            parents(node)

    timer(run)
    fm.end()
2012 2020
2013 2021
@command(b'perf::ctxfiles|perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark computing the file list of a change context"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)

    timer(lambda: len(repo[rev].files()))
    fm.end()
2025 2033
2026 2034
@command(b'perf::rawfiles|perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the raw file list straight from the changelog"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    changelog = repo.changelog

    def read_files():
        # index 3 of the parsed changelog entry is the file list
        len(changelog.read(rev)[3])

    timer(read_files)
    fm.end()
2039 2047
2040 2048
@command(b'perf::lookup|perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision specifier to a node"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def resolve():
        return len(repo.lookup(rev))

    timer(resolve)
    fm.end()
2047 2055
2048 2056
@command(
    b'perf::linelogedits|perflinelogedits',
    [
        (b'n', b'edits', 10000, b'number of edits'),
        (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
    ],
    norepo=True,
)
def perflinelogedits(ui, **opts):
    """benchmark applying a stream of random edits to a linelog"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    num_edits = opts[b'edits']
    max_hunk_lines = opts[b'max_hunk_lines']

    # upper bound for the start offset of the replacement hunk
    max_b1 = 100000
    # seed so every run replays the same pseudo-random edit stream
    random.seed(0)
    randint = random.randint
    line_count = 0
    edit_args = []
    for rev in _xrange(num_edits):
        # NB: the four randint() calls below must stay in this exact order
        # to reproduce the historical edit stream for a given seed
        a1 = randint(0, line_count)
        a2 = randint(a1, min(line_count, a1 + max_hunk_lines))
        b1 = randint(0, max_b1)
        b2 = randint(b1, b1 + max_hunk_lines)
        line_count += (b2 - b1) - (a2 - a1)
        edit_args.append((rev, a1, a2, b1, b2))

    def replay():
        ll = linelog.linelog()
        for args in edit_args:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(replay)
    fm.end()
2086 2094
2087 2095
@command(b'perf::revrange|perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revision range specifiers"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def resolve():
        return len(scmutil.revrange(repo, specs))

    timer(resolve)
    fm.end()
2095 2103
2096 2104
@command(b'perf::nodelookup|perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark resolving a node to its revision number on a cold revlog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog

    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()

    # open a pristine changelog revlog; `radix` only exists on modern
    # Mercurial, so fall back to `indexfile` for older versions
    try:
        cl = revlog(getsvfs(repo), radix=b"00changelog")
    except TypeError:
        cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")

    def d():
        cl.rev(n)
        # drop the revlog caches so every run is a cold lookup
        clearcaches(cl)

    timer(d)
    fm.end()
2117 2125
2118 2126
@command(
    b'perf::log|perflog',
    [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
)
def perflog(ui, repo, rev=None, **opts):
    """benchmark running `hg log`, optionally with rename following"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    # swallow the log output so printing is not what gets measured
    ui.pushbuffer()

    def run_log():
        commands.log(
            ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
        )

    timer(run_log)
    ui.popbuffer()
    fm.end()
2136 2144
2137 2145
@command(b'perf::moonwalk|perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def walk_backwards():
        last = len(repo) - 1
        for rev in repo.changelog.revs(start=last, stop=-1):
            # reading the branch forces the changelog entry itself to be
            # parsed, not just the index
            repo[rev].branch()

    timer(walk_backwards)
    fm.end()
2154 2162
2155 2163
@command(
    b'perf::templating|perftemplating',
    [
        (b'r', b'rev', [], b'revisions to run the template on'),
    ]
    + formatteropts,
)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template

    Output is rendered to a ui writing into ``os.devnull`` so that terminal
    I/O does not dominate the measurement.
    """
    if makelogtemplater is None:
        raise error.Abort(
            b"perftemplating not available with this Mercurial",
            hint=b"use 4.3 or later",
        )

    opts = _byteskwargs(opts)

    nullui = ui.copy()
    # discard rendered output; the file is closed once timing is done so
    # the descriptor does not leak (it previously stayed open forever)
    nullui.fout = open(os.devnull, 'wb')
    nullui.disablepager()
    try:
        revs = opts.get(b'rev')
        if not revs:
            revs = [b'all()']
        revs = list(scmutil.revrange(repo, revs))

        defaulttemplate = (
            b'{date|shortdate} [{rev}:{node|short}]'
            b' {author|person}: {desc|firstline}\n'
        )
        if testedtemplate is None:
            testedtemplate = defaulttemplate
        displayer = makelogtemplater(nullui, repo, testedtemplate)

        def format():
            for r in revs:
                ctx = repo[r]
                displayer.show(ctx)
                displayer.flush(ctx)

        timer, fm = gettimer(ui, opts)
        timer(format)
        fm.end()
    finally:
        nullui.fout.close()
2198 2206
2199 2207
2200 2208 def _displaystats(ui, opts, entries, data):
2201 2209 # use a second formatter because the data are quite different, not sure
2202 2210 # how it flies with the templater.
2203 2211 fm = ui.formatter(b'perf-stats', opts)
2204 2212 for key, title in entries:
2205 2213 values = data[key]
2206 2214 nbvalues = len(data)
2207 2215 values.sort()
2208 2216 stats = {
2209 2217 'key': key,
2210 2218 'title': title,
2211 2219 'nbitems': len(values),
2212 2220 'min': values[0][0],
2213 2221 '10%': values[(nbvalues * 10) // 100][0],
2214 2222 '25%': values[(nbvalues * 25) // 100][0],
2215 2223 '50%': values[(nbvalues * 50) // 100][0],
2216 2224 '75%': values[(nbvalues * 75) // 100][0],
2217 2225 '80%': values[(nbvalues * 80) // 100][0],
2218 2226 '85%': values[(nbvalues * 85) // 100][0],
2219 2227 '90%': values[(nbvalues * 90) // 100][0],
2220 2228 '95%': values[(nbvalues * 95) // 100][0],
2221 2229 '99%': values[(nbvalues * 99) // 100][0],
2222 2230 'max': values[-1][0],
2223 2231 }
2224 2232 fm.startitem()
2225 2233 fm.data(**stats)
2226 2234 # make node pretty for the human output
2227 2235 fm.plain('### %s (%d items)\n' % (title, len(values)))
2228 2236 lines = [
2229 2237 'min',
2230 2238 '10%',
2231 2239 '25%',
2232 2240 '50%',
2233 2241 '75%',
2234 2242 '80%',
2235 2243 '85%',
2236 2244 '90%',
2237 2245 '95%',
2238 2246 '99%',
2239 2247 'max',
2240 2248 ]
2241 2249 for l in lines:
2242 2250 fm.plain('%s: %s\n' % (l, stats[l]))
2243 2251 fm.end()
2244 2252
2245 2253
@command(
    b'perf::helper-mergecopies|perfhelper-mergecopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelpermergecopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perfmergecopies`

    This command find (base, p1, p2) triplet relevant for copytracing
    benchmarking in the context of a merge. It reports values for some of the
    parameters that impact merge copy tracing time during merge.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision triplets are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # (column title, %-format against the per-triplet `data` dict)
    output_template = [
        ("base", "%(base)12s"),
        ("p1", "%(p1.node)12s"),
        ("p2", "%(p2.node)12s"),
        ("p1.nb-revs", "%(p1.nbrevs)12d"),
        ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
        ("p1.renames", "%(p1.renamedfiles)12d"),
        ("p1.time", "%(p1.time)12.3f"),
        ("p2.nb-revs", "%(p2.nbrevs)12d"),
        ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
        ("p2.renames", "%(p2.renamedfiles)12d"),
        ("p2.time", "%(p2.time)12.3f"),
        ("renames", "%(nbrenamedfiles)12d"),
        ("total.time", "%(time)12.3f"),
    ]
    if not dotiming:
        # without --timing the rename/time columns are never filled in
        output_template = [
            i
            for i in output_template
            if not ('time' in i[0] or 'renames' in i[0])
        ]
    header_names = [h for (h, v) in output_template]
    output = ' '.join([v for (h, v) in output_template]) + '\n'
    header = ' '.join(['%12s'] * len(header_names)) + '\n'
    fm.plain(header % tuple(header_names))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        # distributions fed to _displaystats() at the end
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['parentnbrenames'] = []
            alldata['totalnbrenames'] = []
            alldata['parenttime'] = []
            alldata['totaltime'] = []

    # only merge commits have a (base, p1, p2) triplet worth reporting
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1()
        p2 = ctx.p2()
        bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
        for b in bases:
            b = repo[b]
            p1missing = copies._computeforwardmissing(b, p1)
            p2missing = copies._computeforwardmissing(b, p2)
            data = {
                b'base': b.hex(),
                b'p1.node': p1.hex(),
                b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
                b'p1.nbmissingfiles': len(p1missing),
                b'p2.node': p2.hex(),
                b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
                b'p2.nbmissingfiles': len(p2missing),
            }
            if dostats:
                if p1missing:
                    alldata['nbrevs'].append(
                        (data['p1.nbrevs'], b.hex(), p1.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p1.nbmissingfiles'], b.hex(), p1.hex())
                    )
                if p2missing:
                    alldata['nbrevs'].append(
                        (data['p2.nbrevs'], b.hex(), p2.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p2.nbmissingfiles'], b.hex(), p2.hex())
                    )
            if dotiming:
                begin = util.timer()
                mergedata = copies.mergecopies(repo, p1, p2, b)
                end = util.timer()
                # not very stable timing since we did only one run
                data['time'] = end - begin
                # mergedata contains five dicts: "copy", "movewithdir",
                # "diverge", "renamedelete" and "dirmove".
                # The first 4 are about renamed file so lets count that.
                renames = len(mergedata[0])
                renames += len(mergedata[1])
                renames += len(mergedata[2])
                renames += len(mergedata[3])
                data['nbrenamedfiles'] = renames
                begin = util.timer()
                p1renames = copies.pathcopies(b, p1)
                end = util.timer()
                data['p1.time'] = end - begin
                begin = util.timer()
                p2renames = copies.pathcopies(b, p2)
                end = util.timer()
                data['p2.time'] = end - begin
                data['p1.renamedfiles'] = len(p1renames)
                data['p2.renamedfiles'] = len(p2renames)

                if dostats:
                    if p1missing:
                        alldata['parentnbrenames'].append(
                            (data['p1.renamedfiles'], b.hex(), p1.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p1.time'], b.hex(), p1.hex())
                        )
                    if p2missing:
                        alldata['parentnbrenames'].append(
                            (data['p2.renamedfiles'], b.hex(), p2.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p2.time'], b.hex(), p2.hex())
                        )
                    if p1missing or p2missing:
                        alldata['totalnbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                b.hex(),
                                p1.hex(),
                                p2.hex(),
                            )
                        )
                        alldata['totaltime'].append(
                            (data['time'], b.hex(), p1.hex(), p2.hex())
                        )
            fm.startitem()
            fm.data(**data)
            # make node pretty for the human output
            out = data.copy()
            out['base'] = fm.hexfunc(b.node())
            out['p1.node'] = fm.hexfunc(p1.node())
            out['p2.node'] = fm.hexfunc(p2.node())
            fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(
                ('parentnbrenames', 'rename from one parent to base')
            )
            entries.append(('totalnbrenames', 'total number of renames'))
            entries.append(('parenttime', 'time for one parent'))
            entries.append(('totaltime', 'time for both parents'))
        _displaystats(ui, opts, entries, alldata)
2427 2435
2428 2436
@command(
    b'perf::helper-pathcopies|perfhelper-pathcopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # with --timing two extra columns (renames, time) are produced
    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d "
            "%(nbrenamedfiles)12d %(time)18.5f\n"
        )
        header_names = (
            "source",
            "destination",
            "nb-revs",
            "nb-files",
            "nb-renames",
            "time",
        )
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d\n"
        )
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        # distributions fed to _displaystats() at the end
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['nbrenames'] = []
            alldata['time'] = []

    # only merge commits produce interesting (base, parent) pairs
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    # nothing to trace between this base and parent
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
                    b'nbmissingfiles': len(missing),
                }
                if dostats:
                    alldata['nbrevs'].append(
                        (
                            data['nbrevs'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                    alldata['nbmissingfiles'].append(
                        (
                            data['nbmissingfiles'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                    if dostats:
                        alldata['time'].append(
                            (
                                data['time'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                        alldata['nbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                fm.startitem()
                fm.data(**data)
                # make node pretty for the human output
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
    if dostats:
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('nbrenames', 'renamed files'))
            entries.append(('time', 'time'))
        _displaystats(ui, opts, entries, alldata)
2567 2575
2568 2576
@command(b'perf::cca|perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark constructing a case-collision auditor"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def build_auditor():
        return scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(build_auditor)
    fm.end()
2575 2583
2576 2584
@command(b'perf::fncacheload|perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache file"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store

    def load():
        store.fncache._load()

    timer(load)
    fm.end()
2588 2596
2589 2597
@command(b'perf::fncachewrite|perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark writing the fncache file

    The write happens inside a throw-away transaction holding the repo
    lock; the lock is now released even if timing or transaction setup
    fails (it previously leaked on error).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    try:
        s.fncache._load()
        tr = repo.transaction(b'perffncachewrite')
        tr.addbackup(b'fncache')

        def d():
            # force a rewrite even though nothing changed
            s.fncache._dirty = True
            s.fncache.write(tr)

        timer(d)
        tr.close()
    finally:
        lock.release()
    fm.end()
2608 2616
2609 2617
@command(b'perf::fncacheencode|perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark encoding every entry of the fncache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    store.fncache._load()

    def encode_all():
        for path in store.fncache.entries:
            store.encode(path)

    timer(encode_all)
    fm.end()
2623 2631
2624 2632
def _bdiffworker(q, blocks, xdiff, ready, done):
    """Worker loop used by the threaded variant of perfbdiff.

    Consumes text pairs from queue ``q`` and diffs them until the ``done``
    event is set. A ``None`` item marks the end of one batch; ``blocks``
    and ``xdiff`` select which diff primitive is exercised.
    """
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done()  # for the None one
        with ready:
            # block until the main thread feeds another batch or signals
            # shutdown through `done` + notify_all()
            ready.wait()
2640 2648
2641 2649
def _manifestrevision(repo, mnode):
    """Return the raw manifest text for manifest node ``mnode``."""
    ml = repo.manifestlog

    # modern Mercurial exposes storage via getstorage(); fall back to the
    # private revlog attribute on older versions
    if util.safehasattr(ml, b'getstorage'):
        return ml.getstorage(b'').revision(mnode)
    return ml._revlog.revision(mnode)
2651 2659
2652 2660
@command(
    b'perf::bdiff|perfbdiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
        (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
        (b'', b'blocks', False, b'test computing diffs into blocks'),
        (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],
    b'-c|-m|FILE REV',
)
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    # list of (old_text, new_text) pairs to diff; gathered up front so the
    # timed section only measures diffing, not revision reading
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    # NOTE(review): the upper bound excludes the very last revision
    # (len(r) - 1); confirm whether that is intentional
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:

        def d():
            # single-threaded: diff every pair inline
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)

    else:
        # threaded: workers consume pairs from a queue; a None item marks
        # the end of a batch (see _bdiffworker)
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(
                target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
            ).start()
        q.join()

        def d():
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # wake the workers one last time so they observe `done` and exit
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
2767 2775
2768 2776
@command(
    b'perf::unbundle',
    formatteropts,
    b'BUNDLE_FILE',
)
def perf_unbundle(ui, repo, fname, **opts):
    """benchmark application of a bundle in a repository.

    This does not include the final transaction processing"""

    from mercurial import exchange
    from mercurial import bundle2
    from mercurial import transaction

    opts = _byteskwargs(opts)

    ### some compatibility hotfix
    #
    # the data attribute is dropped in 63edc384d3b7 a changeset introducing a
    # critical regression that break transaction rollback for files that are
    # de-inlined.
    method = transaction.transaction._addentry
    pre_63edc384d3b7 = "data" in getargspec(method).args
    # the `detailed_exit_code` attribute is introduced in 33c0c25d0b0f
    # a changeset that is a close descendant of 18415fc918a1, the changeset
    # that conclude the fix run for the bug introduced in 63edc384d3b7.
    args = getargspec(error.Abort.__init__).args
    post_18415fc918a1 = "detailed_exit_code" in args

    old_max_inline = None
    try:
        if not (pre_63edc384d3b7 or post_18415fc918a1):
            # disable inlining
            old_max_inline = mercurial.revlog._maxinline
            # large enough to never happen
            mercurial.revlog._maxinline = 2 ** 50

        with repo.lock():
            # [generator, transaction] for the current run; kept in a list
            # so the setup/apply closures can swap them in place
            bundle = [None, None]
            orig_quiet = repo.ui.quiet
            try:
                repo.ui.quiet = True
                with open(fname, mode="rb") as f:

                    def noop_report(*args, **kwargs):
                        pass

                    def setup():
                        gen, tr = bundle
                        if tr is not None:
                            tr.abort()
                        bundle[:] = [None, None]
                        f.seek(0)
                        bundle[0] = exchange.readbundle(ui, f, fname)
                        bundle[1] = repo.transaction(b'perf::unbundle')
                        # silence the transaction
                        bundle[1]._report = noop_report

                    def apply():
                        gen, tr = bundle
                        bundle2.applybundle(
                            repo,
                            gen,
                            tr,
                            source=b'perf::unbundle',
                            url=fname,
                        )

                    timer, fm = gettimer(ui, opts)
                    timer(apply, setup=setup)
                    fm.end()
            finally:
                # restore the previous verbosity; this was previously a bare
                # `==` comparison (a no-op) that left the ui quiet forever
                repo.ui.quiet = orig_quiet
                gen, tr = bundle
                if tr is not None:
                    tr.abort()
    finally:
        if old_max_inline is not None:
            mercurial.revlog._maxinline = old_max_inline
2848 2856
2849 2857
@command(
    b'perf::unidiff|perfunidiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ],
    b'-c|-m|FILE REV',
)
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    # list of (old_text, new_text) pairs; gathered up front so the timed
    # section only measures diffing, not revision reading
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    # NOTE(review): the upper bound excludes the very last revision
    # (len(r) - 1); confirm whether that is intentional
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False
            )
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
2928 2936
2929 2937
@command(b'perf::diffwd|perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # map single-letter diff flag -> commands.diff keyword argument
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        diff_kwargs = {options[flag]: b'1' for flag in diffopt}

        def run_diff(kwargs=diff_kwargs):
            ui.pushbuffer()
            commands.diff(ui, repo, **kwargs)
            ui.popbuffer()

        encoded = diffopt.encode('ascii')
        title = b'diffopts: %s' % (encoded and (b'-' + encoded) or b'none')
        timer(run_diff, title=title)
    fm.end()
2953 2961
2954 2962
@command(
    b'perf::revlogindex|perfrevlogindex',
    revlogopts + formatteropts,
    b'-c|-m|FILE',
)
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    # compat with hg <= 5.8
    radix = getattr(rl, 'radix', None)
    indexfile = getattr(rl, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(rl, 'indexfile')
    # raw bytes of the on-disk index; all the parse/lookup benchmarks below
    # operate on this single in-memory copy
    data = opener.read(indexfile)

    # first 4 bytes: flags in the high 16 bits, revlog version in the low 16
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        inline = header & (1 << 16)
    else:
        raise error.Abort(b'unsupported revlog version: %d' % version)

    # hg <= 5.8 exposed the parser as revlogio().parseindex
    parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
    if parse_index_v1 is None:
        parse_index_v1 = mercurial.revlog.revlogio().parseindex

    rllen = len(rl)

    # sample nodes at fixed positions through the revlog for lookup benchmarks
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        # time instantiating a revlog object
        if radix is not None:
            revlog(opener, radix=radix)
        else:
            # hg <= 5.8
            revlog(opener, indexfile=indexfile)

    def read():
        # time raw I/O of the index file
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        # time parsing the raw index bytes into an index object
        parse_index_v1(data, inline)

    def getentry(revornode):
        index = parse_index_v1(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        # `count` > 1 repeats the walk to expose caching effects
        index = parse_index_v1(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        index = parse_index_v1(data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        try:
            rev(node)
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        index = parse_index_v1(data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        for i in range(count):
            for node in nodes:
                try:
                    rev(node)
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
        (
            lambda: resolvenodes(allnodesrev, 2),
            b'look up all nodes 2x (reverse)',
        ),
        (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
        (
            lambda: getentries(allrevs, 2),
            b'retrieve all index entries 2x (forward)',
        ),
        (
            lambda: getentries(allrevsrev),
            b'retrieve all index entries (reverse)',
        ),
        (
            lambda: getentries(allrevsrev, 2),
            b'retrieve all index entries 2x (reverse)',
        ),
    ]

    # a fresh timer/formatter per benchmark so each one is reported separately
    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3100 3108
3101 3109
@command(
    b'perf::revlogrevisions|perfrevlogrevisions',
    revlogopts
    + formatteropts
    + [
        (b'd', b'dist', 100, b'distance between the revisions'),
        (b's', b'startrev', 0, b'revision to start reading at'),
        (b'', b'reverse', False, b'read in reverse'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogrevisions(
    ui, repo, file_=None, startrev=0, reverse=False, **opts
):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # a negative start revision counts from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev

    def d():
        rl.clearcaches()

        step = opts[b'dist']
        if reverse:
            # walk from tip down to (and excluding) startrev - 1
            first, last, step = rllen - 1, startrev - 1, -step
        else:
            first, last = startrev, rllen

        for x in _xrange(first, last, step):
            # Old revisions don't support passing int.
            rl.revision(rl.node(x))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
3150 3158
3151 3159
@command(
    b'perf::revlogwrite|perfrevlogwrite',
    revlogopts
    + formatteropts
    + [
        (b's', b'startrev', 1000, b'revision to start writing at'),
        (b'', b'stoprev', -1, b'last revision to write'),
        (b'', b'count', 3, b'number of passes to perform'),
        (b'', b'details', False, b'print timing for every revisions tested'),
        (b'', b'source', b'full', b'the kind of data feed in the revlog'),
        (b'', b'lazydeltabase', True, b'try the provided delta first'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
                  (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    Note: This performance command measures performance in a custom way. As a
    result some of the global configuration of the 'perf' command does not
    apply to it:

    * ``pre-run``: disabled

    * ``profile-benchmark``: disabled

    * ``run-limits``: disabled use --count instead
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative revisions count from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (
        b'full',
        b'parent-1',
        b'parent-2',
        b'parent-smallest',
        b'storage',
    )
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        # message typo fixed ("invalide" -> "invalid")
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(
            ui,
            rl,
            source,
            startrev,
            stoprev,
            c + 1,
            lazydeltabase=lazydeltabase,
            clearcaches=clearcaches,
        )
        allresults.append(timing)

    ### consolidate the results in a single list
    # allresults is a list of passes; transpose it into, for each revision,
    # the list of timings across all passes
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # fix: this entry previously used `resultcount * 70 // 100`, so the
        # value labeled "50%" was actually the 70th percentile
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append(
            (
                sum(x[1][0] for x in item),
                sum(x[1][1] for x in item),
                sum(x[1][2] for x in item),
            )
        )
    formatone(
        fm,
        totaltime,
        title="total time (%d revs)" % resultcount,
        displayall=displayall,
    )
    fm.end()
3293 3301
3294 3302
3295 3303 class _faketr:
3296 3304 def add(s, x, y, z=None):
3297 3305 return None
3298 3306
3299 3307
def _timeonewrite(
    ui,
    orig,
    source,
    startrev,
    stoprev,
    runidx=None,
    lazydeltabase=True,
    clearcaches=True,
):
    """Replay revisions [startrev, stoprev] of `orig` into a temporary
    revlog, timing each individual `addrawrevision` call.

    `source` selects how the revision data is fed (see perfrevlogwrite's
    docstring). `runidx`, if given, is only used to label the progress bar.
    Returns a list of (rev, timing) pairs, one per replayed revision,
    where timing is the value collected by timeone().
    """
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)

            def updateprogress(pos):
                progress.update(pos)

            def completeprogress():
                progress.complete()

        else:

            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)

            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                # drop both the index-level and revlog-level caches so every
                # revision is timed from a cold state
                dest.index.clearcaches()
                dest.clearcaches()
            # only the addrawrevision call itself is timed; seed preparation
            # and cache clearing happen outside the timed region
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
3349 3357
3350 3358
def _getrevisionseed(orig, rev, tr, source):
    """Build the (args, kwargs) pair for `addrawrevision` that replays
    revision `rev` of `orig`, feeding the data as selected by `source`
    (full text or one of the delta variants)."""
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    text = None
    cachedelta = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        cachedelta = (orig.rev(p1), orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to the first parent when there is no second one
        parent = p1 if p2 == nullid else p2
        cachedelta = (orig.rev(parent), orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        parent, diff = p1, orig.revdiff(p1, rev)
        if p2 != nullid:
            otherdiff = orig.revdiff(p2, rev)
            if len(diff) > len(otherdiff):
                parent, diff = p2, otherdiff
        cachedelta = (orig.rev(parent), diff)
    elif source == b'storage':
        # reuse the delta exactly as stored in the original revlog
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return (
        (text, tr, linkrev, p1, p2),
        {'node': node, 'flags': flags, 'cachedelta': cachedelta},
    )
3391 3399
3392 3400
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Context manager yielding a writable copy of `orig` truncated so that
    revisions >= `truncaterev` are missing and can be re-added.

    The copy lives in a temporary directory that is removed on exit.
    Inline revlogs are not supported (index and data must be separate files).
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    # forward upperboundcomp when the revlog class supports it
    k = 'upperboundcomp'
    if util.safehasattr(orig, k):
        revlogkwargs[k] = getattr(orig, k)

    indexfile = getattr(orig, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(orig, 'indexfile')
    origindexpath = orig.opener.join(indexfile)

    # modern revlogs use `_datafile`; older ones `datafile`
    datafile = getattr(orig, '_datafile', getattr(orig, 'datafile'))
    origdatapath = orig.opener.join(datafile)
    radix = b'revlog'
    indexname = b'revlog.i'
    dataname = b'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        # open in append mode so truncate() works, then cut the index at the
        # entry boundary and the data file at the matching byte offset
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        try:
            dest = revlog(vfs, radix=radix, **revlogkwargs)
        except TypeError:
            # hg <= 5.8: no `radix` argument, pass explicit file names
            dest = revlog(
                vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
            )
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
3453 3461
3454 3462
@command(
    b'perf::revlogchunks|perfrevlogchunks',
    revlogopts
    + formatteropts
    + [
        (b'e', b'engines', b'', b'compression engines to use'),
        (b's', b'startrev', 0, b'revision to start at'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = {e.strip() for e in engines.split(b',')}
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # default to every available engine that can actually compress
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # Return a raw file handle on whichever file stores the chunks:
        # the index file for inline revlogs, the data file otherwise.
        if rl._inline:
            indexfile = getattr(rl, '_indexfile', None)
            if indexfile is None:
                # compatibility with <= hg-5.8
                indexfile = getattr(rl, 'indexfile')
            return getsvfs(repo)(indexfile)
        else:
            # fix: the primary lookup must be the modern `_datafile`
            # attribute; the previous code looked up 'datafile' twice, so
            # the compat fallback never provided the hg >= 6.0 name
            datafile = getattr(rl, '_datafile', getattr(rl, 'datafile'))
            return getsvfs(repo)(datafile)

    def doread():
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append(
            (
                functools.partial(docompress, compressor),
                b'compress w/ %s' % engine,
            )
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3587 3595
3588 3596
@command(
    b'perf::revlogrevision|perfrevlogrevision',
    revlogopts
    + formatteropts
    + [(b'', b'cache', False, b'use caches instead of clearing')],
    b'-c|-m|FILE REV',
)
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    # with -c/-m the positional FILE argument is actually the revision
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        """Slice each revision's raw (still compressed) chunk out of the
        segments read for the sliced delta chain."""
        start = r.start
        length = r.length
        inline = r._inline
        try:
            iosize = r.index.entry_size
        except AttributeError:
            # compat: older revlogs expose the entry size on the io object
            iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    # slicechunk moved into revlogutils.deltas; fall back for older hg
    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    # precompute every intermediate product once, so each benchmark below
    # exercises only its own phase
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    # chain slicing only exists on sparse-read capable revlogs
    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend(
        [
            (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
            (lambda: dodecompress(rawchunks), b'decompress'),
            (lambda: dopatch(text, bins), b'patch'),
            (lambda: dohash(text), b'hash'),
        ]
    )

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
3733 3741
3734 3742
@command(
    b'perf::revset|perfrevset',
    [
        (b'C', b'clear', False, b'clear volatile cache between each call.'),
        (b'', b'contexts', False, b'obtain changectx for each revision'),
    ]
    + formatteropts,
    b"REVSET",
)
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of building the
    volatile revision set caches on the revset execution. Volatile caches hold
    filtered and obsolescence related data."""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def d():
        if clear:
            repo.invalidatevolatilesets()
        if contexts:
            # materialize a changectx per revision
            for ctx in repo.set(expr):
                pass
        else:
            # iterate plain revision numbers only
            for r in repo.revs(expr):
                pass

    timer(d)
    fm.end()
3766 3774
3767 3775
@command(
    b'perf::volatilesets|perfvolatilesets',
    [
        (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
    ]
    + formatteropts,
)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()
    wanted = set(names)

    def makebench(compute, name):
        # benchmark `compute(repo, name)` starting from cold volatile sets
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            compute(repo, name)

        return d

    # obsolescence-related sets
    for name in sorted(obsolete.cachefuncs):
        if wanted and name not in wanted:
            continue
        timer(makebench(obsolete.getrevs, name), title=name)

    # repoview filter sets
    for name in sorted(repoview.filtertable):
        if wanted and name not in wanted:
            continue
        timer(makebench(repoview.filterrevs, name), title=name)
    fm.end()
3815 3823
3816 3824
@command(
    b'perf::branchmap|perfbranchmap',
    [
        (b'f', b'full', False, b'Includes build time of subset'),
        (
            b'',
            b'clear-revbranch',
            False,
            b'purge the revbranch cache between computation',
        ),
    ]
    + formatteropts,
)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)

    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches

        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # drop every cached branchmap so the subset is rebuilt too
                view._branchcaches.clear()
            else:
                # only drop this view's cache; subsets stay warm
                filtered.pop(filtername, None)
            view.branchmap()

        return d

    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    # topological sort: a filter is emitted only after the subset it is
    # built from has been emitted (or is not requested at all)
    while possiblefilters:
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable on-disk branchmap reads and writes so only the in-memory
    # computation is measured; restored in the finally block below
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=printname)
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
3906 3914
3907 3915
@command(
    b'perf::branchmapupdate|perfbranchmapupdate',
    [
        (b'', b'base', [], b'subset of revision to start from'),
        (b'', b'target', [], b'subset of revision to end with'),
        (b'', b'clear-caches', False, b'clear cache between each runs'),
    ]
    + formatteropts,
)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from for <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    $ update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    # imported here (not at module top) to stay loadable on Mercurial
    # versions that may lack these modules ("historical portability")
    from mercurial import branchmap
    from mercurial import repoview

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None]  # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    # the update being benchmarked covers exactly these revisions
    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    # repoview filter functions: hide everything outside the base
    # (resp. target) ancestor sets so we can build filtered repos that
    # expose exactly those revisions
    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        # temporarily register our two ad-hoc filters; removed in `finally`
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            # walk the subset chain until we find a cached branchmap valid
            # for the base repo, then top it up with the missing revisions
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset was found; build the base branchmap from
            # scratch (not part of the timed section)
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            # start each run from a fresh copy of the base branchmap
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            # timed section: apply only the new revisions
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        # always unregister the temporary filters, even on error
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
4016 4024
4017 4025
@command(
    b'perf::branchmapload|perfbranchmapload',
    [
        (b'f', b'filter', b'', b'Specify repoview filter'),
        # fixed help-string typo: "brachmap" -> "branchmap"
        (b'', b'list', False, b'List branchmap filter caches'),
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ]
    + formatteropts,
)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap

    With ``--list``, only enumerate the on-disk branchmap cache files and
    their sizes instead of benchmarking.  With ``--filter``, read the
    branchmap of the given repoview filter (default: unfiltered).
    """
    # NOTE: `filter` and `list` shadow builtins, but they are part of the
    # established command interface and cannot be renamed safely.
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # listing mode: report each cached branchmap file and its size
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(
                    b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
                )
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap()  # make sure we have a relevant, up to date branchmap

    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions exposed the reader as a module-level function
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached; fall back along
    # the subset chain until a cached branchmap is found
    while fromfile(repo) is None:
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(
                b'No branchmap cached for %s repo' % (filter or b'unfiltered')
            )
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)

    def setup():
        if clearrevlogs:
            clearchangelog(repo)

    def bench():
        # timed section: parse the on-disk branchmap cache
        fromfile(repo)

    timer(bench, setup=setup)
    fm.end()
4076 4084
4077 4085
@command(b'perf::loadmarkers|perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    store_vfs = getsvfs(repo)

    def parsemarkers():
        # instantiating the obsstore parses every on-disk marker; the
        # marker count is what the benchmark reports
        return len(obsolete.obsstore(repo, store_vfs))

    timer(parsemarkers)
    fm.end()
4087 4095
4088 4096
@command(
    b'perf::lrucachedict|perflrucachedict',
    formatteropts
    + [
        (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
        (b'', b'mincost', 0, b'smallest cost of items in cache'),
        (b'', b'maxcost', 100, b'maximum cost of items in cache'),
        (b'', b'size', 4, b'size of cache'),
        (b'', b'gets', 10000, b'number of key lookups'),
        (b'', b'sets', 10000, b'number of key sets'),
        (b'', b'mixed', 10000, b'number of mixed mode operations'),
        (
            b'',
            b'mixedgetfreq',
            50,
            b'frequency of get vs set ops in mixed mode',
        ),
    ],
    norepo=True,
)
def perflrucache(
    ui,
    mincost=0,
    maxcost=100,
    costlimit=0,
    size=4,
    gets=10000,
    sets=10000,
    mixed=10000,
    mixedgetfreq=50,
    **opts
):
    """benchmark ``util.lrucachedict`` operations

    Measures construction, gets, inserts/sets, and a randomized mix of
    gets and sets.  When ``--costlimit`` is non-zero, the cost-aware
    variants of the benchmarks are run instead.
    """
    opts = _byteskwargs(opts)

    def doinit():
        # raw construction cost of the cache object
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    # keys used for the pure-get benchmark (one per cache slot, so no
    # eviction can occur during lookups)
    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value  # silence pyflakes warning

    def dogetscost():
        # cost-aware variant; `costs` is defined below but bound at call
        # time, so the closure sees it (assumes size <= sets)
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value  # silence pyflakes warning
            except KeyError:
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        # same as doinserts but via __setitem__ instead of insert()
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        # op 0 = get, op 1 = set; ratio controlled by --mixedgetfreq
        if r < mixedgetfreq:
            op = 0
        else:
            op = 1

        mixedops.append(
            (op, random.randint(0, size * 2), random.choice(costrange))
        )

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    # cost-aware and plain benchmarks are mutually exclusive
    if costlimit:
        benches.extend(
            [
                (dogetscost, b'gets w/ cost limit'),
                (doinsertscost, b'inserts w/ cost limit'),
                (domixedcost, b'mixed w/ cost limit'),
            ]
        )
    else:
        benches.extend(
            [
                (dogets, b'gets'),
                (doinserts, b'inserts'),
                (dosets, b'sets'),
                (domixed, b'mixed'),
            ]
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
4243 4251
4244 4252
@command(
    b'perf::write|perfwrite',
    formatteropts
    + [
        (b'', b'write-method', b'write', b'ui write method'),
        (b'', b'nlines', 100, b'number of lines'),
        (b'', b'nitems', 100, b'number of items (per line)'),
        (b'', b'item', b'x', b'item that is written'),
        (b'', b'batch-line', None, b'pass whole line to write method at once'),
        (b'', b'flush-line', None, b'flush after each line'),
    ],
)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write (and others)"""
    opts = _byteskwargs(opts)

    # resolve the ui method to benchmark (write, status, ...)
    write_func = getattr(ui, _sysstr(opts[b'write_method']))
    line_count = int(opts[b'nlines'])
    items_per_line = int(opts[b'nitems'])
    token = opts[b'item']
    batch_line = opts.get(b'batch_line')
    flush_line = opts.get(b'flush_line')

    if batch_line:
        # pre-build the full line once so only the write call is measured
        full_line = token * items_per_line + b'\n'

    def benchmark():
        for _line in pycompat.xrange(line_count):
            if batch_line:
                write_func(full_line)
            else:
                # one write call per item, then the newline
                for _item in pycompat.xrange(items_per_line):
                    write_func(token)
                write_func(b'\n')
            if flush_line:
                ui.flush()
        ui.flush()

    timer, fm = gettimer(ui, opts)
    timer(benchmark)
    fm.end()
4286 4294
4287 4295
def uisetup(ui):
    """extension load-time hook

    On Mercurial 1.9 - 3.7 (which have cmdutil.openrevlog but not
    commands.debugrevlogopts), wrap openrevlog so the unsupported
    ``--dir`` option fails with a clear message instead of misbehaving.
    """
    if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
        commands, b'debugrevlogopts'
    ):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            # reject --dir when the repo has no dirlog support
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(
                    b"This version doesn't support --dir option",
                    hint=b"use 3.5 or later",
                )
            return orig(repo, cmd, file_, opts)

        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
4306 4314
4307 4315
@command(
    b'perf::progress|perfprogress',
    formatteropts
    + [
        (b'', b'topic', b'topic', b'topic for progress messages'),
        (b'c', b'total', 1000000, b'total value we are progressing to'),
    ],
    norepo=True,
)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def bench_progress():
        # drive a progress bar from 0 to `total`, one increment at a time
        with ui.makeprogress(topic, total=total) as progress:
            for _step in _xrange(total):
                progress.increment()

    timer(bench_progress)
    fm.end()
General Comments 0
You need to be logged in to leave comments. Login now