##// END OF EJS Templates
wrapfunction: use sysstr instead of bytes as argument in "perf"...
marmoute -
r51688:193a6e9a default
parent child Browse files
Show More
@@ -1,4448 +1,4449 b''
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance
3 3
4 4 Configurations
5 5 ==============
6 6
7 7 ``perf``
8 8 --------
9 9
10 10 ``all-timing``
11 11 When set, additional statistics will be reported for each benchmark: best,
12 12 worst, median average. If not set only the best timing is reported
13 13 (default: off).
14 14
15 15 ``presleep``
16 16 number of seconds to wait before any group of runs (default: 1)
17 17
18 18 ``pre-run``
19 19 number of runs to perform before starting measurement.
20 20
21 21 ``profile-benchmark``
22 22 Enable profiling for the benchmarked section.
23 23 (The first iteration is benchmarked)
24 24
25 25 ``run-limits``
26 26 Control the number of runs each benchmark will perform. The option value
27 27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 28 conditions are considered in order with the following logic:
29 29
30 30 If benchmark has been running for <time> seconds, and we have performed
31 31 <numberofrun> iterations, stop the benchmark,
32 32
33 33 The default value is: `3.0-100, 10.0-3`
34 34
35 35 ``stub``
36 36 When set, benchmarks will only be run once, useful for testing
37 37 (default: off)
38 38 '''
39 39
40 40 # "historical portability" policy of perf.py:
41 41 #
42 42 # We have to do:
43 43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 46 # - make historical perf command work correctly with as wide Mercurial
47 47 # version as possible
48 48 #
49 49 # We have to do, if possible with reasonable cost:
50 50 # - make recent perf command for historical feature work correctly
51 51 # with early Mercurial
52 52 #
53 53 # We don't have to do:
54 54 # - make perf command for recent feature work correctly with early
55 55 # Mercurial
56 56
57 57 import contextlib
58 58 import functools
59 59 import gc
60 60 import os
61 61 import random
62 62 import shutil
63 63 import struct
64 64 import sys
65 65 import tempfile
66 66 import threading
67 67 import time
68 68
69 69 import mercurial.revlog
70 70 from mercurial import (
71 71 changegroup,
72 72 cmdutil,
73 73 commands,
74 74 copies,
75 75 error,
76 76 extensions,
77 77 hg,
78 78 mdiff,
79 79 merge,
80 80 util,
81 81 )
82 82
83 83 # for "historical portability":
84 84 # try to import modules separately (in dict order), and ignore
85 85 # failure, because these aren't available with early Mercurial
86 86 try:
87 87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 88 except ImportError:
89 89 pass
90 90 try:
91 91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 92 except ImportError:
93 93 pass
94 94 try:
95 95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96 96
97 97 dir(registrar) # forcibly load it
98 98 except ImportError:
99 99 registrar = None
100 100 try:
101 101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 102 except ImportError:
103 103 pass
104 104 try:
105 105 from mercurial.utils import repoviewutil # since 5.0
106 106 except ImportError:
107 107 repoviewutil = None
108 108 try:
109 109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 110 except ImportError:
111 111 pass
112 112 try:
113 113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 114 except ImportError:
115 115 pass
116 116
117 117 try:
118 118 from mercurial import profiling
119 119 except ImportError:
120 120 profiling = None
121 121
122 122 try:
123 123 from mercurial.revlogutils import constants as revlog_constants
124 124
125 125 perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')
126 126
127 127 def revlog(opener, *args, **kwargs):
128 128 return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)
129 129
130 130
131 131 except (ImportError, AttributeError):
132 132 perf_rl_kind = None
133 133
134 134 def revlog(opener, *args, **kwargs):
135 135 return mercurial.revlog.revlog(opener, *args, **kwargs)
136 136
137 137
def identity(a):
    """Return ``a`` unchanged; used as a no-op conversion fallback."""
    return a
140 140
141 141
142 142 try:
143 143 from mercurial import pycompat
144 144
145 145 getargspec = pycompat.getargspec # added to module after 4.5
146 146 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
147 147 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
148 148 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
149 149 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
150 150 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
151 151 if pycompat.ispy3:
152 152 _maxint = sys.maxsize # per py3 docs for replacing maxint
153 153 else:
154 154 _maxint = sys.maxint
155 155 except (NameError, ImportError, AttributeError):
156 156 import inspect
157 157
158 158 getargspec = inspect.getargspec
159 159 _byteskwargs = identity
160 160 _bytestr = str
161 161 fsencode = identity # no py3 support
162 162 _maxint = sys.maxint # no py3 support
163 163 _sysstr = lambda x: x # no py3 support
164 164 _xrange = xrange
165 165
166 166 try:
167 167 # 4.7+
168 168 queue = pycompat.queue.Queue
169 169 except (NameError, AttributeError, ImportError):
170 170 # <4.7.
171 171 try:
172 172 queue = pycompat.queue
173 173 except (NameError, AttributeError, ImportError):
174 174 import Queue as queue
175 175
176 176 try:
177 177 from mercurial import logcmdutil
178 178
179 179 makelogtemplater = logcmdutil.maketemplater
180 180 except (AttributeError, ImportError):
181 181 try:
182 182 makelogtemplater = cmdutil.makelogtemplater
183 183 except (AttributeError, ImportError):
184 184 makelogtemplater = None
185 185
186 186 # for "historical portability":
187 187 # define util.safehasattr forcibly, because util.safehasattr has been
188 188 # available since 1.9.3 (or 94b200a11cf7)
189 189 _undefined = object()
190 190
191 191
def safehasattr(thing, attr):
    """Return True if ``thing`` has attribute ``attr`` (bytes or str name).

    Implemented with getattr-plus-sentinel so a missing attribute is
    detected without triggering exception-swallowing hasattr behavior.
    """
    sentinel = _undefined
    return getattr(thing, _sysstr(attr), sentinel) is not sentinel
194 194
195 195
196 196 setattr(util, 'safehasattr', safehasattr)
197 197
198 198 # for "historical portability":
199 199 # define util.timer forcibly, because util.timer has been available
200 200 # since ae5d60bb70c9
201 201 if safehasattr(time, 'perf_counter'):
202 202 util.timer = time.perf_counter
203 203 elif os.name == b'nt':
204 204 util.timer = time.clock
205 205 else:
206 206 util.timer = time.time
207 207
208 208 # for "historical portability":
209 209 # use locally defined empty option list, if formatteropts isn't
210 210 # available, because commands.formatteropts has been available since
211 211 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
212 212 # available since 2.2 (or ae5f92e154d3)
213 213 formatteropts = getattr(
214 214 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
215 215 )
216 216
217 217 # for "historical portability":
218 218 # use locally defined option list, if debugrevlogopts isn't available,
219 219 # because commands.debugrevlogopts has been available since 3.7 (or
220 220 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
221 221 # since 1.9 (or a79fea6b3e77).
222 222 revlogopts = getattr(
223 223 cmdutil,
224 224 "debugrevlogopts",
225 225 getattr(
226 226 commands,
227 227 "debugrevlogopts",
228 228 [
229 229 (b'c', b'changelog', False, b'open changelog'),
230 230 (b'm', b'manifest', False, b'open manifest'),
231 231 (b'', b'dir', False, b'open directory manifest'),
232 232 ],
233 233 ),
234 234 )
235 235
236 236 cmdtable = {}
237 237
238 238
# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a b"name|alias1|..." command spec into its list of names."""
    separator = b"|"
    return cmd.split(separator)
244 244
245 245
246 246 if safehasattr(registrar, 'command'):
247 247 command = registrar.command(cmdtable)
248 248 elif safehasattr(cmdutil, 'command'):
249 249 command = cmdutil.command(cmdtable)
250 250 if 'norepo' not in getargspec(command).args:
251 251 # for "historical portability":
252 252 # wrap original cmdutil.command, because "norepo" option has
253 253 # been available since 3.1 (or 75a96326cecb)
254 254 _command = command
255 255
256 256 def command(name, options=(), synopsis=None, norepo=False):
257 257 if norepo:
258 258 commands.norepo += b' %s' % b' '.join(parsealiases(name))
259 259 return _command(name, list(options), synopsis)
260 260
261 261
262 262 else:
263 263 # for "historical portability":
264 264 # define "@command" annotation locally, because cmdutil.command
265 265 # has been available since 1.9 (or 2daa5179e73f)
266 266 def command(name, options=(), synopsis=None, norepo=False):
267 267 def decorator(func):
268 268 if synopsis:
269 269 cmdtable[name] = func, list(options), synopsis
270 270 else:
271 271 cmdtable[name] = func, list(options)
272 272 if norepo:
273 273 commands.norepo += b' %s' % b' '.join(parsealiases(name))
274 274 return func
275 275
276 276 return decorator
277 277
278 278
279 279 try:
280 280 import mercurial.registrar
281 281 import mercurial.configitems
282 282
283 283 configtable = {}
284 284 configitem = mercurial.registrar.configitem(configtable)
285 285 configitem(
286 286 b'perf',
287 287 b'presleep',
288 288 default=mercurial.configitems.dynamicdefault,
289 289 experimental=True,
290 290 )
291 291 configitem(
292 292 b'perf',
293 293 b'stub',
294 294 default=mercurial.configitems.dynamicdefault,
295 295 experimental=True,
296 296 )
297 297 configitem(
298 298 b'perf',
299 299 b'parentscount',
300 300 default=mercurial.configitems.dynamicdefault,
301 301 experimental=True,
302 302 )
303 303 configitem(
304 304 b'perf',
305 305 b'all-timing',
306 306 default=mercurial.configitems.dynamicdefault,
307 307 experimental=True,
308 308 )
309 309 configitem(
310 310 b'perf',
311 311 b'pre-run',
312 312 default=mercurial.configitems.dynamicdefault,
313 313 )
314 314 configitem(
315 315 b'perf',
316 316 b'profile-benchmark',
317 317 default=mercurial.configitems.dynamicdefault,
318 318 )
319 319 configitem(
320 320 b'perf',
321 321 b'run-limits',
322 322 default=mercurial.configitems.dynamicdefault,
323 323 experimental=True,
324 324 )
325 325 except (ImportError, AttributeError):
326 326 pass
327 327 except TypeError:
328 328 # compatibility fix for a11fd395e83f
329 329 # hg version: 5.2
330 330 configitem(
331 331 b'perf',
332 332 b'presleep',
333 333 default=mercurial.configitems.dynamicdefault,
334 334 )
335 335 configitem(
336 336 b'perf',
337 337 b'stub',
338 338 default=mercurial.configitems.dynamicdefault,
339 339 )
340 340 configitem(
341 341 b'perf',
342 342 b'parentscount',
343 343 default=mercurial.configitems.dynamicdefault,
344 344 )
345 345 configitem(
346 346 b'perf',
347 347 b'all-timing',
348 348 default=mercurial.configitems.dynamicdefault,
349 349 )
350 350 configitem(
351 351 b'perf',
352 352 b'pre-run',
353 353 default=mercurial.configitems.dynamicdefault,
354 354 )
355 355 configitem(
356 356 b'perf',
357 357 b'profile-benchmark',
358 358 default=mercurial.configitems.dynamicdefault,
359 359 )
360 360 configitem(
361 361 b'perf',
362 362 b'run-limits',
363 363 default=mercurial.configitems.dynamicdefault,
364 364 )
365 365
366 366
def getlen(ui):
    """Return ``len``, or a constant-1 function when perf.stub is set."""
    if not ui.configbool(b"perf", b"stub", False):
        return len
    return lambda sequence: 1
371 371
372 372
class noop:
    """Context manager that does nothing on entry or exit.

    Exceptions raised inside the ``with`` body propagate normally
    because ``__exit__`` returns a falsy value.
    """

    def __enter__(self):
        return None

    def __exit__(self, *exc_info):
        return None
381 381
382 382
383 383 NOOPCTX = noop()
384 384
385 385
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter:
            """Minimized composition of baseformatter and plainformatter"""

            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short

            def __nonzero__(self):
                # always falsy, matching plain (non-templated) formatters
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    # each entry has the form b'<seconds>-<minimum-run-count>'
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
            continue
        try:
            # float()/int() need native strings, hence _sysstr
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    # experimental config: perf.profile-benchmark
    # when enabled, the first benchmarked iteration runs under the profiler
    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
    )
    return t, fm
508 508
509 509
def stub_timer(fm, func, setup=None, title=None):
    """Run ``func`` exactly once without measuring (perf.stub mode).

    ``fm`` and ``title`` are accepted only for signature compatibility
    with ``_timer`` and are otherwise unused.
    """
    if setup:
        setup()
    func()
514 514
515 515
@contextlib.contextmanager
def timeone():
    """Yield a list that receives one (wall, user, sys) tuple on exit."""
    result = []
    times_before = os.times()
    clock_before = util.timer()
    yield result
    clock_after = util.timer()
    times_after = os.times()
    result.append(
        (
            clock_after - clock_before,
            times_after[0] - times_before[0],
            times_after[1] - times_before[1],
        )
    )
526 526
527 527
528 528 # list of stop condition (elapsed time, minimal run count)
529 529 DEFAULTLIMITS = (
530 530 (3.0, 100),
531 531 (10.0, 3),
532 532 )
533 533
534 534
@contextlib.contextmanager
def noop_context():
    """Context manager form of "do nothing" (default for ``_timer``)."""
    yield None
538 538
539 539
def _timer(
    fm,
    func,
    setup=None,
    context=noop_context,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
):
    # Repeatedly run ``func`` until one of the (elapsed-seconds, min-count)
    # pairs in ``limits`` is satisfied, then report timings through ``fm``.
    # ``setup`` runs before, and ``context`` wraps, every invocation.
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    # warm-up iterations: executed but never measured
    for i in range(prerun):
        if setup is not None:
            setup()
        with context():
            func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with context():
            with profiler:
                with timeone() as item:
                    r = func()
        # only the first measured iteration is profiled
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    # ``r`` is the last return value of ``func``; shown as "! result: ..."
    formatone(fm, results, title=title, result=r, displayall=displayall)
582 582
583 583
def formatone(fm, timings, title=None, result=None, displayall=False):
    # Emit one benchmark's summary through formatter ``fm``.
    # ``timings`` is a list of (wall, user, sys) tuples, one per run.
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def display(role, entry):
        # one output line per statistic; non-"best" roles get a
        # "<role>." field-name prefix so formatter output stays unique
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    # tuples sort by wall time first, so [0] is the fastest run
    timings.sort()
    min_val = timings[0]
    display(b'best', min_val)
    if displayall:
        max_val = timings[-1]
        display(b'max', max_val)
        # element-wise average of (wall, user, sys) across all runs
        avg = tuple([sum(x) / count for x in zip(*timings)])
        display(b'avg', avg)
        median = timings[len(timings) // 2]
        display(b'median', median)
616 616
617 617
618 618 # utilities for historical portability
619 619
620 620
def getint(ui, section, name, default):
    """Read config ``section.name`` as an int, or ``default`` if unset.

    For "historical portability": ui.configint has only been available
    since 1.9 (or fa2b596db182), so the raw value is parsed here.
    Raises error.ConfigError when the value is not an integer.
    """
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError(
            b"%s.%s is not an integer ('%s')" % (section, name, raw)
        )
633 633
634 634
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort(
            (
                b"missing attribute %s of %s might break assumption"
                b" of performance measurement"
            )
            % (name, obj)
        )

    # ``name`` may be bytes; _sysstr converts it for getattr/setattr
    origvalue = getattr(obj, _sysstr(name))

    class attrutil:
        # small handle exposing set()/restore() over the captured attribute

        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            # put back the value observed when safeattrsetter() was called
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
671 671
672 672
673 673 # utilities to examine each internal API changes
674 674
675 675
def getbranchmapsubsettable():
    """Return the branchmap ``subsettable`` mapping, wherever it lives."""
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    # - repoviewutil since 5.0
    for mod in (branchmap, repoview, repoviewutil):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )
694 694
695 695
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store"""
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf);
    # older repos expose the same thing as "sopener"
    storevfs = getattr(repo, 'svfs', None)
    if not storevfs:
        return getattr(repo, 'sopener')
    return storevfs
705 705
706 706
def getvfs(repo):
    """Return appropriate object to access files under .hg"""
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf);
    # older repos expose the same thing as "opener"
    repovfs = getattr(repo, 'vfs', None)
    if not repovfs:
        return getattr(repo, 'opener')
    return repovfs
716 716
717 717
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API"""
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if '_tagscache' in vars(repo):
                del repo.__dict__['_tagscache']

        return clearcache

    # fall back to progressively older cache attributes
    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    # in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")
746 746
747 747
748 748 # utilities to clear cache
749 749
750 750
def clearfilecache(obj, attrname):
    """Drop ``attrname`` from ``obj``'s filecache so it is recomputed."""
    # operate on the unfiltered repo when ``obj`` supports it
    getunfiltered = getattr(obj, 'unfiltered', None)
    if getunfiltered is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
758 758
759 759
def clearchangelog(repo):
    """Invalidate cached changelog state so the next access reloads it."""
    unfiltered = repo.unfiltered()
    if repo is not unfiltered:
        # filtered repos keep their own changelog cache slots; reset them
        # with object.__setattr__ to bypass any attribute interception
        object.__setattr__(repo, '_clcachekey', None)
        object.__setattr__(repo, '_clcache', None)
    clearfilecache(unfiltered, 'changelog')
765 765
766 766
767 767 # perf commands
768 768
769 769
@command(b'perf::walk|perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    # time a full dirstate walk over the files selected by ``pats``
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    matcher = scmutil.match(repo[None], pats, {})

    def walk_all():
        walker = repo.dirstate.walk(
            matcher, subrepos=[], unknown=True, ignored=False
        )
        return len(list(walker))

    timer(walk_all)
    fm.end()
783 783
784 784
@command(b'perf::annotate|perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    # time annotating file ``f`` at the working directory parent
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fctx = repo[b'.'][f]
    timer(lambda: len(fctx.annotate(True)))
    fm.end()
792 792
793 793
@command(
    b'perf::status|perfstatus',
    [
        (b'u', b'unknown', False, b'ask status to look for unknown files'),
        (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
    ]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call

    The repository data are preserved between each call.

    By default, only the status of the tracked file are requested. If
    `--unknown` is passed, the "unknown" files are also tracked.
    """
    opts = _byteskwargs(opts)
    # m = match.always(repo.root, repo.getcwd())
    # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    # False))))
    timer, fm = gettimer(ui, opts)
    if opts[b'dirstate']:
        # --dirstate: benchmark the low-level dirstate.status() call
        dirstate = repo.dirstate
        m = scmutil.matchall(repo)
        unknown = opts[b'unknown']

        def status_dirstate():
            s = dirstate.status(
                m, subrepos=[], ignored=False, clean=False, unknown=unknown
            )
            # consume the result so lazy parts are actually computed
            sum(map(bool, s))

        # NOTE(review): running_status only exists in newer Mercurial;
        # presumably it keeps the dirstate in a consistent "status in
        # progress" state across runs — confirm against dirstate docs
        if util.safehasattr(dirstate, 'running_status'):
            with dirstate.running_status(repo):
                timer(status_dirstate)
                dirstate.invalidate()
        else:
            timer(status_dirstate)
    else:
        # default: benchmark the high-level repo.status() call
        timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
835 835
836 836
@command(b'perf::addremove|perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    # Benchmark a dry-run addremove across the whole working directory.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    try:
        oldquiet = repo.ui.quiet
        # silence per-file output that addremove would otherwise emit
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        # dry-run so repeated timed runs never mutate the dirstate
        opts[b'dry_run'] = True
        if 'uipathfn' in getargspec(scmutil.addremove).args:
            # newer scmutil.addremove signature takes a uipathfn argument
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
854 854
855 855
def clearcaches(cl):
    """Clear a changelog/revlog's lookup caches, across hg versions."""
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # <= hg-5.2
        from mercurial.node import nullid, nullrev

        # reset the node->rev cache to its initial (null-only) state
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
866 866
867 867
@command(b'perf::heads|perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def reset():
        # drop lookup caches so each run recomputes from scratch
        clearcaches(cl)

    def compute_heads():
        len(cl.headrevs())

    timer(compute_heads, setup=reset)
    fm.end()
883 883
884 884
@command(
    b'perf::tags|perftags',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perftags(ui, repo, **opts):
    # time the computation of the repository's tags
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']

    def reset():
        if clearrevlogs:
            # also reload changelog and manifest from disk each run
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()

    def compute_tags():
        return len(repo.tags())

    timer(compute_tags, setup=reset)
    fm.end()
909 909
910 910
@command(b'perf::ancestors|perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    # time iterating over every ancestor of the current heads
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()

    def iterate_ancestors():
        for _rev in repo.changelog.ancestors(heads):
            pass

    timer(iterate_ancestors)
    fm.end()
923 923
924 924
@command(b'perf::ancestorset|perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    # time membership tests of REVSET revisions against the ancestor
    # lazy-set of the current heads
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()

    def test_membership():
        ancestors = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in ancestors

    timer(test_membership)
    fm.end()
939 939
940 940
@command(
    b'perf::delta-find',
    revlogopts + formatteropts,
    b'-c|-m|FILE REV',
)
def perf_delta_find(ui, repo, arg_1, arg_2=None, **opts):
    """benchmark the process of finding a valid delta for a revlog revision

    When a revlog receives a new revision (e.g. from a commit, or from an
    incoming bundle), it searches for a suitable delta-base to produce a delta.
    This perf command measures how much time we spend in this process. It
    operates on an already stored revision.

    See `hg help debug-delta-find` for another related command.
    """
    from mercurial import revlogutils
    import mercurial.revlogutils.deltas as deltautil

    opts = _byteskwargs(opts)
    # one argument: REV of the -c/-m revlog; two arguments: FILE REV
    if arg_2 is None:
        file_ = None
        rev = arg_1
    else:
        file_ = arg_1
        rev = arg_2

    repo = repo.unfiltered()

    timer, fm = gettimer(ui, opts)

    rev = int(rev)

    revlog = cmdutil.openrevlog(repo, b'perf::delta-find', file_, opts)

    deltacomputer = deltautil.deltacomputer(revlog)

    # gather everything the delta search saw when the revision was
    # originally stored
    node = revlog.node(rev)
    p1r, p2r = revlog.parentrevs(rev)
    p1 = revlog.node(p1r)
    p2 = revlog.node(p2r)
    full_text = revlog.revision(rev)
    textlen = len(full_text)
    cachedelta = None
    flags = revlog.flags(rev)

    revinfo = revlogutils.revisioninfo(
        node,
        p1,
        p2,
        [full_text],  # btext
        textlen,
        cachedelta,
        flags,
    )

    # Note: we should probably purge the potential caches (like the full
    # manifest cache) between runs.
    def find_one():
        with revlog._datafp() as fh:
            deltacomputer.finddeltainfo(revinfo, fh, target_rev=rev)

    timer(find_one)
    fm.end()
1004 1004
1005 1005
@command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path"""
    # repos[1] is replaced with a fresh peer by the setup function
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)

    # resolve ``path`` with whichever url API this Mercurial provides,
    # trying the newest first
    try:
        from mercurial.utils.urlutil import get_unique_pull_path_obj

        path = get_unique_pull_path_obj(b'perfdiscovery', ui, path)
    except ImportError:
        try:
            from mercurial.utils.urlutil import get_unique_pull_path

            path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
        except ImportError:
            path = ui.expandpath(path)

    def s():
        # fresh peer each run so no discovery state carries over
        repos[1] = hg.peer(ui, opts, path)

    def d():
        setdiscovery.findcommonheads(ui, *repos)

    timer(d, setup=s)
    fm.end()
1032 1032
1033 1033
@command(
    b'perf::bookmarks|perfbookmarks',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']

    def setup():
        # drop the caches so every run re-reads bookmarks from disk
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')

    def run():
        # accessing the property triggers the parse we want to measure
        repo._bookmarks

    timer(run, setup=setup)
    fm.end()
1058 1058
1059 1059
@command(
    b'perf::bundle',
    [
        (
            b'r',
            b'rev',
            [],
            b'changesets to bundle',
            b'REV',
        ),
        (
            b't',
            b'type',
            b'none',
            b'bundlespec to use (see `hg help bundlespec`)',
            b'TYPE',
        ),
    ]
    + formatteropts,
    b'REVS',
)
def perfbundle(ui, repo, *revs, **opts):
    """benchmark the creation of a bundle from a repository

    For now, this only supports "none" compression.
    """
    # historical portability: parsebundlespec moved from `exchange` to
    # `bundlecaches`
    try:
        from mercurial import bundlecaches

        parsebundlespec = bundlecaches.parsebundlespec
    except ImportError:
        from mercurial import exchange

        parsebundlespec = exchange.parsebundlespec

    from mercurial import discovery
    from mercurial import bundle2

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    cl = repo.changelog
    revs = list(revs)
    revs.extend(opts.get(b'rev', ()))
    revs = scmutil.revrange(repo, revs)
    if not revs:
        # fix: error message previously read "not revision specified"
        raise error.Abort(b"no revision specified")
    # make it a consistent set (ie: without topological gaps)
    old_len = len(revs)
    revs = list(repo.revs(b"%ld::%ld", revs, revs))
    if old_len != len(revs):
        new_count = len(revs) - old_len
        msg = b"add %d new revisions to make it a consistent set\n"
        ui.write_err(msg % new_count)

    targets = [cl.node(r) for r in repo.revs(b"heads(::%ld)", revs)]
    bases = [cl.node(r) for r in repo.revs(b"heads(::%ld - %ld)", revs, revs)]
    outgoing = discovery.outgoing(repo, bases, targets)

    bundle_spec = opts.get(b'type')

    bundle_spec = parsebundlespec(repo, bundle_spec, strict=False)

    cgversion = bundle_spec.params.get(b"cg.version")
    if cgversion is None:
        if bundle_spec.version == b'v1':
            cgversion = b'01'
        if bundle_spec.version == b'v2':
            cgversion = b'02'
    if cgversion not in changegroup.supportedoutgoingversions(repo):
        err = b"repository does not support bundle version %s"
        raise error.Abort(err % cgversion)

    if cgversion == b'01':  # bundle1
        bversion = b'HG10' + bundle_spec.wirecompression
        bcompression = None
    elif cgversion in (b'02', b'03'):
        bversion = b'HG20'
        bcompression = bundle_spec.wirecompression
    else:
        err = b'perf::bundle: unexpected changegroup version %s'
        raise error.ProgrammingError(err % cgversion)

    if bcompression is None:
        bcompression = b'UN'

    if bcompression != b'UN':
        err = b'perf::bundle: compression currently unsupported: %s'
        raise error.ProgrammingError(err % bcompression)

    def do_bundle():
        # write to os.devnull: only bundle generation is being measured
        bundle2.writenewbundle(
            ui,
            repo,
            b'perf::bundle',
            os.devnull,
            bversion,
            outgoing,
            bundle_spec.params,
        )

    timer(do_bundle)
    fm.end()
1163 1163
1164 1164
@command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    # Each maker below returns a zero-argument callable suitable for timer();
    # every run re-opens the bundle file from scratch.

    def makebench(fn):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        # read the parsed bundle stream in fixed-size chunks
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # raw file read without any bundle parsing: the pure I/O baseline
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        # read each bundle2 part in fixed-size chunks
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # open the bundle once, only to detect its type and pick the matching
    # set of benchmarks
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend(
                [
                    (makebench(deltaiter), b'cg1 deltaiter()'),
                    (makebench(iterchunks), b'cg1 getchunks()'),
                    (makereadnbytes(8192), b'cg1 read(8k)'),
                    (makereadnbytes(16384), b'cg1 read(16k)'),
                    (makereadnbytes(32768), b'cg1 read(32k)'),
                    (makereadnbytes(131072), b'cg1 read(128k)'),
                ]
            )
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend(
                [
                    (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                    (makebench(iterparts), b'bundle2 iterparts()'),
                    (
                        makebench(iterpartsseekable),
                        b'bundle2 iterparts() seekable',
                    ),
                    (makebench(seek), b'bundle2 part seek()'),
                    (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                    (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                    (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                    (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
                ]
            )
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1289 1289
1290 1290
@command(
    b'perf::changegroupchangelog|perfchangegroupchangelog',
    formatteropts
    + [
        (b'', b'cgversion', b'02', b'changegroup version'),
        (b'r', b'rev', b'', b'revisions to add to changegroup'),
    ],
)
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    bundler = changegroup.getbundler(cgversion, repo)
    nodes = []
    for r in repo.revs(rev or b'all()'):
        nodes.append(cl.lookup(r))

    def run():
        # exhaust the chunk generator; the chunks themselves are discarded
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(run)

    fm.end()
1326 1326
1327 1327
@command(b'perf::dirs|perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    # benchmark `dirstate.hasdir` with the directory cache dropped each run
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # make sure the dirstate is loaded before timing starts
    b'a' in dirstate

    def run():
        dirstate.hasdir(b'a')
        # drop the directory cache so the next run recomputes it
        try:
            del dirstate._map._dirs
        except AttributeError:
            pass

    timer(run)
    fm.end()
1344 1344
1345 1345
@command(
    b'perf::dirstate|perfdirstate',
    [
        (
            b'',
            b'iteration',
            None,
            b'benchmark a full iteration for the dirstate',
        ),
        (
            b'',
            b'contains',
            None,
            b'benchmark a large amount of `nf in dirstate` calls',
        ),
    ]
    + formatteropts,
)
def perfdirstate(ui, repo, **opts):
    """benchmark the time of various dirstate operations

    By default benchmark the time necessary to load a dirstate from scratch.
    The dirstate is loaded to the point where a "contains" request can be
    answered.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # warm the dirstate so it is fully loaded before we pick a mode
    b"a" in repo.dirstate

    if opts[b'iteration'] and opts[b'contains']:
        msg = b'only specify one of --iteration or --contains'
        raise error.Abort(msg)

    if opts[b'iteration']:
        # time a full iteration over the loaded dirstate
        setup = None
        dirstate = repo.dirstate

        def d():
            for f in dirstate:
                pass

    elif opts[b'contains']:
        # time many membership tests, half hits and half misses
        setup = None
        dirstate = repo.dirstate
        allfiles = list(dirstate)
        # also add file path that will be "missing" from the dirstate
        allfiles.extend([f[::-1] for f in allfiles])

        def d():
            for f in allfiles:
                f in dirstate

    else:
        # default mode: time loading the dirstate from scratch

        def setup():
            repo.dirstate.invalidate()

        def d():
            b"a" in repo.dirstate

    timer(d, setup=setup)
    fm.end()
1408 1408
1409 1409
@command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark a 'dirstate.hasdir' call from an empty `dirs` cache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # warm the dirstate itself so only the `_dirs` cache rebuild is measured
    repo.dirstate.hasdir(b"a")

    def setup():
        # drop the cached directory map before every run
        try:
            del repo.dirstate._map._dirs
        except AttributeError:
            pass

    def run():
        repo.dirstate.hasdir(b"a")

    timer(run, setup=setup)
    fm.end()
1428 1428
1429 1429
@command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.filefoldmap.get()` request

    The dirstate filefoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime everything once so only the cache rebuild is measured
    dirstate._map.filefoldmap.get(b'a')

    def setup():
        # drop the cached folded-filename map before every run
        del dirstate._map.filefoldmap

    def run():
        dirstate._map.filefoldmap.get(b'a')

    timer(run, setup=setup)
    fm.end()
1449 1449
1450 1450
@command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.dirfoldmap.get()` request

    The dirstate dirfoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime everything once so only the cache rebuild is measured
    dirstate._map.dirfoldmap.get(b'a')

    def setup():
        # drop both the folded-dir cache and the underlying dir cache
        del dirstate._map.dirfoldmap
        try:
            del dirstate._map._dirs
        except AttributeError:
            pass

    def run():
        dirstate._map.dirfoldmap.get(b'a')

    timer(run, setup=setup)
    fm.end()
1474 1474
1475 1475
@command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark the time it takes to write a dirstate on disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # make sure the dirstate is loaded before timing
    b"a" in ds

    def setup():
        # mark the dirstate dirty so write() actually hits the disk
        ds._dirty = True

    def run():
        ds.write(repo.currenttransaction())

    with repo.wlock():
        timer(run, setup=setup)
    fm.end()
1493 1493
1494 1494
def _getmergerevs(repo, opts):
    """parse command arguments to return the revisions involved in a merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        # an explicit base overrides the computed common ancestor
        fromrev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[fromrev]
    else:
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
1516 1516
1517 1517
@command(
    b'perf::mergecalculate|perfmergecalculate',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecalculate(ui, repo, **opts):
    # benchmark `merge.calculateupdates` between the revisions from `opts`
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def run():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(
            repo,
            wctx,
            rctx,
            [ancestor],
            branchmerge=False,
            force=False,
            acceptremote=True,
            followcopies=True,
        )

    timer(run)
    fm.end()
1549 1549
1550 1550
@command(
    b'perf::mergecopies|perfmergecopies',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def run():
        copies.mergecopies(repo, wctx, rctx, ancestor)

    timer(run)
    fm.end()
1573 1573
1574 1574
@command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    src_ctx = scmutil.revsingle(repo, rev1, rev1)
    dst_ctx = scmutil.revsingle(repo, rev2, rev2)

    def run():
        copies.pathcopies(src_ctx, dst_ctx)

    timer(run)
    fm.end()
1588 1588
1589 1589
@command(
    b'perf::phases|perfphases',
    [
        (b'', b'full', False, b'include file reading time too'),
    ],
    b"",
)
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cache = repo._phasecache
    full = opts.get(b'full')

    def run():
        if full:
            # also measure re-reading the phase data from disk
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        else:
            phases = cache
        phases.invalidate()
        phases.loadphaserevs(repo)

    timer(run)
    fm.end()
1614 1614
1615 1615
@command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import bin
    from mercurial import (
        exchange,
        hg,
        phases,
    )

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            b'default repository not configured!',
            hint=b"see 'hg help config.paths'",
        )
    # historical portability: newer Mercurial exposes path objects with a
    # `main_path`/`get_push_variant` API, older ones use `pushloc`/`loc`
    if util.safehasattr(path, 'main_path'):
        path = path.get_push_variant()
        dest = path.loc
    else:
        dest = path.pushloc or path.loc
    ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(
            b'listkeys', {b'namespace': b'phases'}
        ).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.statusnoi18n(b'publishing: yes\n')
    else:
        ui.statusnoi18n(b'publishing: no\n')

    # historical portability: `index.has_node` appeared later than
    # `nodemap.__contains__`
    has_node = getattr(repo.changelog.index, 'has_node', None)
    if has_node is None:
        has_node = repo.changelog.nodemap.__contains__
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if has_node(node) and int(phase):
            nonpublishroots += 1
    ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
    ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)

    def d():
        # only the phase analysis is timed; all network calls happened above
        phases.remotephasessummary(repo, remotesubset, remotephases)

    timer(d)
    fm.end()
1678 1678
1679 1679
@command(
    b'perf::manifest|perfmanifest',
    [
        (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
        (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ]
    + formatteropts,
    b'REV|NODE',
)
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # `rev` is a changeset revision; resolve it to its manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # a full hexadecimal manifest node id was given directly
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                # historical portability: `getstorage` appeared later than
                # the private `_revlog` attribute
                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(
                    b'manifest revision must be integer or full node'
                )

    def d():
        # drop in-memory (and optionally on-disk) caches before each read
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()

    timer(d)
    fm.end()
1723 1723
1724 1724
@command(b'perf::changeset|perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    # benchmark reading a single changeset entry from the changelog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    node = scmutil.revsingle(repo, rev).node()

    def run():
        repo.changelog.read(node)
        # repo.changelog._cache = None

    timer(run)
    fm.end()
1737 1737
1738 1738
@command(b'perf::ignore|perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def setup():
        # drop the cached `_ignore` matcher so each run rebuilds it
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def run():
        dirstate._ignore

    timer(run, setup=setup, title=b"load")
    fm.end()
1755 1755
1756 1756
@command(
    b'perf::index|perfindex',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'no-lookup', None, b'do not revision lookup post creation'),
    ]
    + formatteropts,
)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:

    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        # fix: after _byteskwargs the option keys are bytes, so the former
        # str-keyed `opts['rev']` lookup raised KeyError whenever
        # --no-lookup was used; the abort message is bytes for consistency
        if opts[b'rev']:
            raise error.Abort(b'--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func

    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)

    def d():
        # rebuild the changelog (index creation) then look every node up
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)

    timer(d, setup=setup)
    fm.end()
1819 1819
1820 1820
@command(
    b'perf::nodemap|perfnodemap',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ]
    + formatteropts,
)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]

    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        cl = makecl(unfi)
        # historical portability: `index.get_rev` replaced `nodemap.get`
        if util.safehasattr(cl.index, 'get_rev'):
            nodeget[0] = cl.index.get_rev
        else:
            nodeget[0] = cl.nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:

        def setup():
            setnodeget()

    else:
        setnodeget()
        d()  # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
1891 1891
1892 1892
@command(b'perf::startup|perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    # benchmark the startup cost of the `hg` executable itself
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def run():
        if os.name == 'nt':
            os.environ['HGRCPATH'] = r' '
            os.system("%s version -q > NUL" % sys.argv[0])
        else:
            os.system(
                b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
            )

    timer(run)
    fm.end()
1909 1909
1910 1910
def _find_stream_generator(version):
    """Return a `generate(repo)` callable producing the stream-clone data.

    `version` selects the stream format (b'v1', b'v2', b'v3-exp' or
    b'latest'); raises `error.Abort` when the requested version is unknown
    or unavailable in this Mercurial.
    """
    import mercurial.streamclone

    available = {}

    # try to fetch a v1 generator
    generatev1 = getattr(mercurial.streamclone, "generatev1", None)
    if generatev1 is not None:

        def generate_v1(repo):
            # generatev1 only takes the repo and returns
            # (entry count, byte count, data iterator)
            entries, bytecount, data = generatev1(repo)
            return data

        # fix: register the wrapper that returns the data stream; the
        # previous code registered raw `generatev1` (whose return value is a
        # tuple, not the stream) and its dead wrapper wrongly called
        # generatev2
        available[b'v1'] = generate_v1
    # try to fetch a v2 generator
    generatev2 = getattr(mercurial.streamclone, "generatev2", None)
    if generatev2 is not None:

        def generate_v2(repo):
            entries, bytecount, data = generatev2(repo, None, None, True)
            return data

        available[b'v2'] = generate_v2
    # try to fetch a v3 generator
    generatev3 = getattr(mercurial.streamclone, "generatev3", None)
    if generatev3 is not None:

        def generate_v3(repo):
            entries, bytecount, data = generatev3(repo, None, None, True)
            return data

        available[b'v3-exp'] = generate_v3

    # resolve the request
    if version == b"latest":
        # latest is the highest non experimental version
        latest_key = max(v for v in available if b'-exp' not in v)
        return available[latest_key]
    elif version in available:
        return available[version]
    else:
        msg = b"unknown or unavailable version: %s"
        msg %= version
        hint = b"available versions: %s"
        hint %= b', '.join(sorted(available))
        raise error.Abort(msg, hint=hint)
1958 1958
1959 1959
@command(
    b'perf::stream-locked-section',
    [
        (
            b'',
            b'stream-version',
            b'latest',
            b'stream version to use ("v1", "v2", "v3" or "latest", (the default))',
        ),
    ]
    + formatteropts,
)
def perf_stream_clone_scan(ui, repo, stream_version, **opts):
    """benchmark the initial, repo-locked, section of a stream-clone"""

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    generate = _find_stream_generator(stream_version)

    # deletion of the generator may trigger some cleanup that we do not want
    # to measure, so keep the latest result alive between runs
    result_holder = [None]

    def setup():
        result_holder[0] = None

    def run():
        # the lock is held for the duration the initialisation
        result_holder[0] = generate(repo)

    timer(run, setup=setup, title=b"load")
    fm.end()
1993 1993
1994 1994
@command(
    b'perf::stream-generate',
    [
        (
            b'',
            b'stream-version',
            b'latest',
            # fix: help text previously read "stream version to us"
            b'stream version to use ("v1", "v2" or "latest", (the default))',
        ),
    ]
    + formatteropts,
)
def perf_stream_clone_generate(ui, repo, stream_version, **opts):
    """benchmark the full generation of a stream clone"""

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # deletion of the generator may trigger some cleanup that we do not want to
    # measure

    generate = _find_stream_generator(stream_version)

    def runone():
        # the lock is held for the duration the initialisation
        for chunk in generate(repo):
            pass

    timer(runone, title=b"generate")
    fm.end()
2025 2025
2026 2026
@command(
    b'perf::stream-consume',
    formatteropts,
)
def perf_stream_clone_consume(ui, repo, filename, **opts):
    """benchmark the full application of a stream clone

    This include the creation of the repository
    """
    # try except to appease check code
    # The imports are done one module at a time so that the error message
    # names exactly which module is missing on an old Mercurial.
    msg = b"mercurial too old, missing necessary module: %s"
    try:
        from mercurial import bundle2
    except ImportError as exc:
        msg %= _bytestr(exc)
        raise error.Abort(msg)
    try:
        from mercurial import exchange
    except ImportError as exc:
        msg %= _bytestr(exc)
        raise error.Abort(msg)
    try:
        from mercurial import hg
    except ImportError as exc:
        msg %= _bytestr(exc)
        raise error.Abort(msg)
    try:
        from mercurial import localrepo
    except ImportError as exc:
        msg %= _bytestr(exc)
        raise error.Abort(msg)

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # deletion of the generator may trigger some cleanup that we do not want to
    # measure
    if not (os.path.isfile(filename) and os.access(filename, os.R_OK)):
        raise error.Abort("not a readable file: %s" % filename)

    # communication channel between the per-run context manager below and
    # runone(): [0] is the open bundle file object, [1] the target directory
    run_variables = [None, None]

    @contextlib.contextmanager
    def context():
        # a fresh temporary directory per run, so every run applies the
        # stream into a brand new repository
        with open(filename, mode='rb') as bundle:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tmp_dir = fsencode(tmp_dir)
                run_variables[0] = bundle
                run_variables[1] = tmp_dir
                yield
                run_variables[0] = None
                run_variables[1] = None

    def runone():
        bundle = run_variables[0]
        tmp_dir = run_variables[1]
        # only pass ui when no srcrepo
        localrepo.createrepository(
            repo.ui, tmp_dir, requirements=repo.requirements
        )
        target = hg.repository(repo.ui, tmp_dir)
        gen = exchange.readbundle(target.ui, bundle, bundle.name)
        # stream v1
        if util.safehasattr(gen, 'apply'):
            gen.apply(target)
        else:
            with target.transaction(b"perf::stream-consume") as tr:
                bundle2.applybundle(
                    target,
                    gen,
                    tr,
                    source=b'unbundle',
                    url=filename,
                )

    timer(runone, context=context, title=b"consume")
    fm.end()
2104 2104
2105 2105
@command(b'perf::parents|perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # experimental config: perf.parentscount decides how many of the first
    # revisions the benchmark walks over
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nodes = [repo.changelog.node(i) for i in _xrange(count)]

    def run():
        for node in nodes:
            repo.changelog.parents(node)

    timer(run)
    fm.end()
2131 2131
2132 2132
@command(b'perf::ctxfiles|perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark computing the file list of a changeset through its context"""
    opts = _byteskwargs(opts)
    target = int(x)
    timer, fm = gettimer(ui, opts)

    def run():
        len(repo[target].files())

    timer(run)
    fm.end()
2144 2144
2145 2145
@command(b'perf::rawfiles|perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the file list straight from a changelog entry"""
    opts = _byteskwargs(opts)
    target = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def run():
        # index 3 of the raw changelog entry is the list of touched files
        len(cl.read(target)[3])

    timer(run)
    fm.end()
2158 2158
2159 2159
@command(b'perf::lookup|perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision identifier to a node"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def run():
        return len(repo.lookup(rev))

    timer(run)
    fm.end()
2166 2166
2167 2167
@command(
    b'perf::linelogedits|perflinelogedits',
    [
        (b'n', b'edits', 10000, b'number of edits'),
        (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
    ],
    norepo=True,
)
def perflinelogedits(ui, **opts):
    """benchmark applying a deterministic stream of random edits to a linelog"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    editcount = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed: the generated edit stream is identical on every invocation
    random.seed(0)
    randint = random.randint
    currentlines = 0
    editspecs = []
    for rev in _xrange(editcount):
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        editspecs.append((rev, a1, a2, b1, b2))

    def run():
        ll = linelog.linelog()
        for spec in editspecs:
            ll.replacelines(*spec)

    timer, fm = gettimer(ui, opts)
    timer(run)
    fm.end()
2205 2205
2206 2206
@command(b'perf::revrange|perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving a sequence of revset specifications"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    resolve = scmutil.revrange

    def run():
        return len(resolve(repo, specs))

    timer(run)
    fm.end()
2214 2214
2215 2215
@command(b'perf::nodelookup|perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark node-to-rev lookup on a freshly instantiated changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog

    # disable lazy parser in old hg
    mercurial.revlog._prereadsize = 2 ** 24
    node = scmutil.revsingle(repo, rev).node()

    # newer revlogs take `radix`, older ones take `indexfile`
    try:
        cl = revlog(getsvfs(repo), radix=b"00changelog")
    except TypeError:
        cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")

    def run():
        cl.rev(node)
        # wipe the caches so each iteration performs a real lookup
        clearcaches(cl)

    timer(run)
    fm.end()
2236 2236
2237 2237
@command(
    b'perf::log|perflog',
    [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
)
def perflog(ui, repo, rev=None, **opts):
    """benchmark running `hg log`, output is buffered and discarded"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    ui.pushbuffer()

    def run():
        commands.log(
            ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
        )

    timer(run)
    ui.popbuffer()
    fm.end()
2255 2255
2256 2256
@command(b'perf::moonwalk|perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def walk_backwards():
        for rev in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            # reading the branch pulls in the changelog data, not just the
            # index entry
            repo[rev].branch()

    timer(walk_backwards)
    fm.end()
2273 2273
2274 2274
@command(
    b'perf::templating|perftemplating',
    [
        (b'r', b'rev', [], b'revisions to run the template on'),
    ]
    + formatteropts,
)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort(
            b"perftemplating not available with this Mercurial",
            hint=b"use 4.3 or later",
        )

    opts = _byteskwargs(opts)

    # render into /dev/null so output cost does not pollute the measurement
    nullui = ui.copy()
    nullui.fout = open(os.devnull, 'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (
        b'{date|shortdate} [{rev}:{node|short}]'
        b' {author|person}: {desc|firstline}\n'
    )
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)

    def render_all():
        for rev in revs:
            ctx = repo[rev]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(render_all)
    fm.end()
2317 2317
2318 2318
def _displaystats(ui, opts, entries, data):
    """Display percentile statistics for each measured data series.

    ``entries`` is a list of ``(key, title)`` pairs and ``data`` maps each
    ``key`` to a list of tuples whose first element is the measured value.
    """
    # use a second formatter because the data are quite different, not sure
    # how it flies with the templater.
    fm = ui.formatter(b'perf-stats', opts)
    for key, title in entries:
        values = data[key]
        if not values:
            # nothing was collected for this entry; indexing the empty list
            # below would raise, so skip it entirely
            continue
        # fix: percentile indices must be based on the number of collected
        # values; the previous ``len(data)`` (number of stat categories)
        # made every percentile read ``values[0]``
        nbvalues = len(values)
        values.sort()
        stats = {
            'key': key,
            'title': title,
            'nbitems': len(values),
            'min': values[0][0],
            '10%': values[(nbvalues * 10) // 100][0],
            '25%': values[(nbvalues * 25) // 100][0],
            '50%': values[(nbvalues * 50) // 100][0],
            '75%': values[(nbvalues * 75) // 100][0],
            '80%': values[(nbvalues * 80) // 100][0],
            '85%': values[(nbvalues * 85) // 100][0],
            '90%': values[(nbvalues * 90) // 100][0],
            '95%': values[(nbvalues * 95) // 100][0],
            '99%': values[(nbvalues * 99) // 100][0],
            'max': values[-1][0],
        }
        fm.startitem()
        fm.data(**stats)
        # make node pretty for the human output
        fm.plain('### %s (%d items)\n' % (title, len(values)))
        lines = [
            'min',
            '10%',
            '25%',
            '50%',
            '75%',
            '80%',
            '85%',
            '90%',
            '95%',
            '99%',
            'max',
        ]
        for l in lines:
            fm.plain('%s: %s\n' % (l, stats[l]))
    fm.end()
2363 2363
2364 2364
@command(
    b'perf::helper-mergecopies|perfhelper-mergecopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelpermergecopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perfmergecopies`

    This command find (base, p1, p2) triplet relevant for copytracing
    benchmarking in the context of a merge. It reports values for some of the
    parameters that impact merge copy tracing time during merge.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision triplets are very costly.
    """
    # note: the mutable default for ``revs`` is safe here — it is only
    # rebound below, never mutated in place
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # (column title, %-format directive) pairs; the format keys match the
    # ``data`` dict built for each (base, p1, p2) triplet below
    output_template = [
        ("base", "%(base)12s"),
        ("p1", "%(p1.node)12s"),
        ("p2", "%(p2.node)12s"),
        ("p1.nb-revs", "%(p1.nbrevs)12d"),
        ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
        ("p1.renames", "%(p1.renamedfiles)12d"),
        ("p1.time", "%(p1.time)12.3f"),
        ("p2.nb-revs", "%(p2.nbrevs)12d"),
        ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
        ("p2.renames", "%(p2.renamedfiles)12d"),
        ("p2.time", "%(p2.time)12.3f"),
        ("renames", "%(nbrenamedfiles)12d"),
        ("total.time", "%(time)12.3f"),
    ]
    if not dotiming:
        # without --timing the rename/time columns are never filled in,
        # so drop them from the table
        output_template = [
            i
            for i in output_template
            if not ('time' in i[0] or 'renames' in i[0])
        ]
    header_names = [h for (h, v) in output_template]
    output = ' '.join([v for (h, v) in output_template]) + '\n'
    header = ' '.join(['%12s'] * len(header_names)) + '\n'
    fm.plain(header % tuple(header_names))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['parentnbrenames'] = []
            alldata['totalnbrenames'] = []
            alldata['parenttime'] = []
            alldata['totaltime'] = []

    # only merge commits are relevant for (base, p1, p2) triplets
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1()
        p2 = ctx.p2()
        bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
        for b in bases:
            b = repo[b]
            p1missing = copies._computeforwardmissing(b, p1)
            p2missing = copies._computeforwardmissing(b, p2)
            data = {
                b'base': b.hex(),
                b'p1.node': p1.hex(),
                b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
                b'p1.nbmissingfiles': len(p1missing),
                b'p2.node': p2.hex(),
                b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
                b'p2.nbmissingfiles': len(p2missing),
            }
            if dostats:
                if p1missing:
                    alldata['nbrevs'].append(
                        (data['p1.nbrevs'], b.hex(), p1.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p1.nbmissingfiles'], b.hex(), p1.hex())
                    )
                if p2missing:
                    alldata['nbrevs'].append(
                        (data['p2.nbrevs'], b.hex(), p2.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p2.nbmissingfiles'], b.hex(), p2.hex())
                    )
            if dotiming:
                begin = util.timer()
                mergedata = copies.mergecopies(repo, p1, p2, b)
                end = util.timer()
                # not very stable timing since we did only one run
                data['time'] = end - begin
                # mergedata contains five dicts: "copy", "movewithdir",
                # "diverge", "renamedelete" and "dirmove".
                # The first 4 are about renamed file so lets count that.
                renames = len(mergedata[0])
                renames += len(mergedata[1])
                renames += len(mergedata[2])
                renames += len(mergedata[3])
                data['nbrenamedfiles'] = renames
                begin = util.timer()
                p1renames = copies.pathcopies(b, p1)
                end = util.timer()
                data['p1.time'] = end - begin
                begin = util.timer()
                p2renames = copies.pathcopies(b, p2)
                end = util.timer()
                data['p2.time'] = end - begin
                data['p1.renamedfiles'] = len(p1renames)
                data['p2.renamedfiles'] = len(p2renames)

                if dostats:
                    if p1missing:
                        alldata['parentnbrenames'].append(
                            (data['p1.renamedfiles'], b.hex(), p1.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p1.time'], b.hex(), p1.hex())
                        )
                    if p2missing:
                        alldata['parentnbrenames'].append(
                            (data['p2.renamedfiles'], b.hex(), p2.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p2.time'], b.hex(), p2.hex())
                        )
                    if p1missing or p2missing:
                        alldata['totalnbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                b.hex(),
                                p1.hex(),
                                p2.hex(),
                            )
                        )
                        alldata['totaltime'].append(
                            (data['time'], b.hex(), p1.hex(), p2.hex())
                        )
            fm.startitem()
            fm.data(**data)
            # make node pretty for the human output
            out = data.copy()
            out['base'] = fm.hexfunc(b.node())
            out['p1.node'] = fm.hexfunc(p1.node())
            out['p2.node'] = fm.hexfunc(p2.node())
            fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(
                ('parentnbrenames', 'rename from one parent to base')
            )
            entries.append(('totalnbrenames', 'total number of renames'))
            entries.append(('parenttime', 'time for one parent'))
            entries.append(('totaltime', 'time for both parents'))
        _displaystats(ui, opts, entries, alldata)
2546 2546
2547 2547
@command(
    b'perf::helper-pathcopies|perfhelper-pathcopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    # note: the mutable default for ``revs`` is safe here — it is only
    # rebound below, never mutated in place
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # with --timing two extra columns (renames, time) are emitted per pair
    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d "
            "%(nbrenamedfiles)12d %(time)18.5f\n"
        )
        header_names = (
            "source",
            "destination",
            "nb-revs",
            "nb-files",
            "nb-renames",
            "time",
        )
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d\n"
        )
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['nbrenames'] = []
            alldata['time'] = []

    # each merge revision yields (base, parent) pairs worth measuring
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
                    b'nbmissingfiles': len(missing),
                }
                if dostats:
                    alldata['nbrevs'].append(
                        (
                            data['nbrevs'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                    alldata['nbmissingfiles'].append(
                        (
                            data['nbmissingfiles'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                    if dostats:
                        alldata['time'].append(
                            (
                                data['time'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                        alldata['nbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                fm.startitem()
                fm.data(**data)
                # make node pretty for the human output
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
    if dostats:
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('nbrenames', 'renamed files'))
            entries.append(('time', 'time'))
        _displaystats(ui, opts, entries, alldata)
2686 2686
2687 2687
@command(b'perf::cca|perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark constructing a case-collision auditor over the dirstate"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def run():
        return scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(run)
    fm.end()
2694 2694
2695 2695
@command(b'perf::fncacheload|perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache from disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store

    def run():
        store.fncache._load()

    timer(run)
    fm.end()
2707 2707
2708 2708
@command(b'perf::fncachewrite|perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark rewriting the fncache file inside a transaction"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    lock = repo.lock()
    store.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    tr.addbackup(b'fncache')

    def run():
        # force the dirty flag so every iteration performs a real write
        store.fncache._dirty = True
        store.fncache.write(tr)

    timer(run)
    tr.close()
    lock.release()
    fm.end()
2727 2727
2728 2728
@command(b'perf::fncacheencode|perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark encoding every path currently listed in the fncache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    store.fncache._load()

    def run():
        for path in store.fncache.entries:
            store.encode(path)

    timer(run)
    fm.end()
2742 2742
2743 2743
def _bdiffworker(q, blocks, xdiff, ready, done):
    """Worker-thread loop for the threaded variant of ``perfbdiff``.

    Pulls ``(text1, text2)`` pairs from queue ``q`` and diffs them until a
    ``None`` sentinel is read; ``blocks``/``xdiff`` select which diff flavour
    is exercised. After draining a batch, the worker parks on the ``ready``
    condition until the driver wakes it for the next run (or has set ``done``
    to request shutdown).
    """
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done()  # for the None one
        with ready:
            # wait for the driver to publish a new batch (or shutdown)
            ready.wait()
2759 2759
2760 2760
def _manifestrevision(repo, mnode):
    """Return the raw manifest text for manifest node ``mnode``.

    Works with both modern manifestlog objects (``getstorage``) and older
    ones that expose the revlog directly.
    """
    ml = repo.manifestlog
    if util.safehasattr(ml, b'getstorage'):
        return ml.getstorage(b'').revision(mnode)
    return ml._revlog.revision(mnode)
2770 2770
2771 2771
@command(
    b'perf::bdiff|perfbdiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
        (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
        (b'', b'blocks', False, b'test computing diffs into blocks'),
        (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],
    b'-c|-m|FILE REV',
)
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    # with -c/-m the positional argument is the revision, not a file
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    # gather all the (old, new) text pairs up front so only the diffing
    # itself is measured
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:

        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)

    else:
        # threaded mode: start a pool of _bdiffworker threads and feed them
        # through a shared queue; the initial None sentinels park the
        # workers on the ``ready`` condition before the first measured run
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(
                target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
            ).start()
        q.join()

        def d():
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # shut the worker threads down: wake them up once more with the
        # ``done`` event set so their loops terminate
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
2886 2886
2887 2887
@command(
    b'perf::unbundle',
    formatteropts,
    b'BUNDLE_FILE',
)
def perf_unbundle(ui, repo, fname, **opts):
    """benchmark application of a bundle in a repository.

    This does not include the final transaction processing

    Each run applies the bundle inside a fresh transaction which is aborted
    between runs, so the repository is left unchanged.
    """

    from mercurial import exchange
    from mercurial import bundle2
    from mercurial import transaction

    opts = _byteskwargs(opts)

    ### some compatibility hotfix
    #
    # the data attribute is dropped in 63edc384d3b7 a changeset introducing a
    # critical regression that break transaction rollback for files that are
    # de-inlined.
    method = transaction.transaction._addentry
    pre_63edc384d3b7 = "data" in getargspec(method).args
    # the `detailed_exit_code` attribute is introduced in 33c0c25d0b0f
    # a changeset that is a close descendant of 18415fc918a1, the changeset
    # that conclude the fix run for the bug introduced in 63edc384d3b7.
    args = getargspec(error.Abort.__init__).args
    post_18415fc918a1 = "detailed_exit_code" in args

    old_max_inline = None
    try:
        if not (pre_63edc384d3b7 or post_18415fc918a1):
            # disable inlining
            old_max_inline = mercurial.revlog._maxinline
            # large enough to never happen
            mercurial.revlog._maxinline = 2 ** 50

        with repo.lock():
            # [0] is the bundle generator, [1] the open transaction of the
            # current run; shared between setup() and apply()
            bundle = [None, None]
            orig_quiet = repo.ui.quiet
            try:
                repo.ui.quiet = True
                with open(fname, mode="rb") as f:

                    def noop_report(*args, **kwargs):
                        pass

                    def setup():
                        # abort any transaction left over from the previous
                        # run, then re-read the bundle from the start
                        gen, tr = bundle
                        if tr is not None:
                            tr.abort()
                        bundle[:] = [None, None]
                        f.seek(0)
                        bundle[0] = exchange.readbundle(ui, f, fname)
                        bundle[1] = repo.transaction(b'perf::unbundle')
                        # silence the transaction
                        bundle[1]._report = noop_report

                    def apply():
                        gen, tr = bundle
                        bundle2.applybundle(
                            repo,
                            gen,
                            tr,
                            source=b'perf::unbundle',
                            url=fname,
                        )

                    timer, fm = gettimer(ui, opts)
                    timer(apply, setup=setup)
                    fm.end()
            finally:
                # fix: this previously read `repo.ui.quiet == orig_quiet`,
                # a comparison with no effect, so quiet mode was never
                # restored after the benchmark
                repo.ui.quiet = orig_quiet
                gen, tr = bundle
                if tr is not None:
                    tr.abort()
    finally:
        if old_max_inline is not None:
            mercurial.revlog._maxinline = old_max_inline
2967 2967
2968 2968
@command(
    b'perf::unidiff|perfunidiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ],
    b'-c|-m|FILE REV',
)
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        # --alldata implies reading revisions from the changelog
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # without a file argument, the first positional is the revision
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    # (left, right) fulltexts are collected up front so that only the
    # diffing itself is inside the timed function below
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    # NOTE(review): the upper bound ``len(r) - 1`` excludes the tip revision
    # even when --count would reach it — confirm whether that is intentional
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                # ``change`` holds (old, new) node info; -1 means null rev
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            # default: diff each revision against its delta parent
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False
            )
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
3047 3047
3048 3048
@command(b'perf::diffwd|perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # short diff flag -> keyword argument understood by `hg diff`
    flagmap = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    # benchmark the plain diff plus each whitespace-handling variant
    for flags in ('', 'w', 'b', 'B', 'wB'):
        diffargs = {flagmap[flag]: b'1' for flag in flags}

        def run(kwargs=diffargs):
            # capture output so it does not pollute the benchmark display
            ui.pushbuffer()
            commands.diff(ui, repo, **kwargs)
            ui.popbuffer()

        encoded = flags.encode('ascii')
        if encoded:
            title = b'diffopts: %s' % (b'-' + encoded)
        else:
            title = b'diffopts: none'
        timer(run, title=title)
    fm.end()
3072 3072
3073 3073
@command(
    b'perf::revlogindex|perfrevlogindex',
    revlogopts + formatteropts,
    b'-c|-m|FILE',
)
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    # compat with hg <= 5.8
    radix = getattr(rl, 'radix', None)
    indexfile = getattr(rl, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(rl, 'indexfile')
    data = opener.read(indexfile)

    # first 4 bytes of the index: low 16 bits are the version number,
    # bit 16 is the inline-data flag
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        inline = header & (1 << 16)
    else:
        raise error.Abort(b'unsupported revlog version: %d' % version)

    parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
    if parse_index_v1 is None:
        # older versions expose index parsing through revlogio
        parse_index_v1 = mercurial.revlog.revlogio().parseindex

    rllen = len(rl)

    # sample nodes at fixed fractions of the revlog for lookup benchmarks
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        if radix is not None:
            revlog(opener, radix=radix)
        else:
            # hg <= 5.8
            revlog(opener, indexfile=indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        parse_index_v1(data, inline)

    def getentry(revornode):
        index = parse_index_v1(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = parse_index_v1(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        index = parse_index_v1(data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        try:
            rev(node)
        except error.RevlogError:
            # missing nodes are expected (see 'look up missing node' bench)
            pass

    def resolvenodes(nodes, count=1):
        index = parse_index_v1(data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        for i in range(count):
            for node in nodes:
                try:
                    rev(node)
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
        (
            lambda: resolvenodes(allnodesrev, 2),
            b'look up all nodes 2x (reverse)',
        ),
        (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
        (
            lambda: getentries(allrevs, 2),
            b'retrieve all index entries 2x (forward)',
        ),
        (
            lambda: getentries(allrevsrev),
            b'retrieve all index entries (reverse)',
        ),
        (
            lambda: getentries(allrevsrev, 2),
            b'retrieve all index entries 2x (reverse)',
        ),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3219 3219
3220 3220
@command(
    b'perf::revlogrevisions|perfrevlogrevisions',
    revlogopts
    + formatteropts
    + [
        (b'd', b'dist', 100, b'distance between the revisions'),
        (b's', b'startrev', 0, b'revision to start reading at'),
        (b'', b'reverse', False, b'read in reverse'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogrevisions(
    ui, repo, file_=None, startrev=0, reverse=False, **opts
):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # a negative start revision counts back from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev

    def d():
        rl.clearcaches()

        step = opts[b'dist']
        if reverse:
            # walk from tip down to (and including) startrev
            first, last = rllen - 1, startrev - 1
            step = -step
        else:
            first, last = startrev, rllen

        for pos in _xrange(first, last, step):
            # Old revisions don't support passing int.
            rl.revision(rl.node(pos))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
3269 3269
3270 3270
@command(
    b'perf::revlogwrite|perfrevlogwrite',
    revlogopts
    + formatteropts
    + [
        (b's', b'startrev', 1000, b'revision to start writing at'),
        (b'', b'stoprev', -1, b'last revision to write'),
        (b'', b'count', 3, b'number of passes to perform'),
        (b'', b'details', False, b'print timing for every revisions tested'),
        (b'', b'source', b'full', b'the kind of data feed in the revlog'),
        (b'', b'lazydeltabase', True, b'try the provided delta first'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
                  (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    Note: This performance command measures performance in a custom way. As a
    result some of the global configuration of the 'perf' command does not
    apply to it:

    * ``pre-run``: disabled

    * ``profile-benchmark``: disabled

    * ``run-limits``: disabled use --count instead
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative revisions count back from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (
        b'full',
        b'parent-1',
        b'parent-2',
        b'parent-smallest',
        b'storage',
    )
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        # message typo fixed: was 'invalide run count'
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(
            ui,
            rl,
            source,
            startrev,
            stoprev,
            c + 1,
            lazydeltabase=lazydeltabase,
            clearcaches=clearcaches,
        )
        allresults.append(timing)

    ### consolidate the results in a single list
    # allresults is one [(rev, timing), ...] list per pass; regroup as
    # [(rev, [timing-pass-1, timing-pass-2, ...]), ...]
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # fixed: was `resultcount * 70 // 100`, which reported the 70th
        # percentile under the "50%" label
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append(
            (
                sum(x[1][0] for x in item),
                sum(x[1][1] for x in item),
                sum(x[1][2] for x in item),
            )
        )
    formatone(
        fm,
        totaltime,
        title="total time (%d revs)" % resultcount,
        displayall=displayall,
    )
    fm.end()
3412 3412
3413 3413
3414 3414 class _faketr:
3415 3415 def add(s, x, y, z=None):
3416 3416 return None
3417 3417
3418 3418
def _timeonewrite(
    ui,
    orig,
    source,
    startrev,
    stoprev,
    runidx=None,
    lazydeltabase=True,
    clearcaches=True,
):
    """Re-add revisions [startrev, stoprev] of ``orig`` to a temporary revlog.

    Returns a list of (rev, timing) pairs, one per revision added.  Only the
    ``addrawrevision`` call itself is timed; seed computation and cache
    clearing happen outside the timed section.  ``runidx`` is only used to
    label the progress bar.
    """
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)

            def updateprogress(pos):
                progress.update(pos)

            def completeprogress():
                progress.complete()

        else:

            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)

            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                dest.index.clearcaches()
                dest.clearcaches()
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
3468 3468
3469 3469
def _getrevisionseed(orig, rev, tr, source):
    """Build the (args, kwargs) pair to pass to ``addrawrevision``.

    Depending on ``source``, the revision content is provided either as a
    full text or as a cached delta against a chosen base revision (see
    perfrevlogwrite's docstring for the possible values).
    """
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        baserev = orig.rev(p1)
        cachedelta = (baserev, orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to p1 when there is no second parent
        parent = p2
        if p2 == nullid:
            parent = p1
        baserev = orig.rev(parent)
        cachedelta = (baserev, orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        # pick whichever parent yields the smaller delta
        p1diff = orig.revdiff(p1, rev)
        parent = p1
        diff = p1diff
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(p1diff) > len(p2diff):
                parent = p2
                diff = p2diff
        baserev = orig.rev(parent)
        cachedelta = (baserev, diff)
    elif source == b'storage':
        # reuse the delta base already chosen by the original revlog
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return (
        (text, tr, linkrev, p1, p2),
        {'node': node, 'flags': flags, 'cachedelta': cachedelta},
    )
3510 3510
3511 3511
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Yield a writable copy of ``orig`` truncated before ``truncaterev``.

    The index and data files are copied into a temporary directory, cut at
    the given revision, and reopened as a fresh revlog so revisions can be
    re-added to it.  The temporary directory is removed on exit.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    k = 'upperboundcomp'
    if util.safehasattr(orig, k):
        revlogkwargs[k] = getattr(orig, k)

    indexfile = getattr(orig, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(orig, 'indexfile')
    origindexpath = orig.opener.join(indexfile)

    datafile = getattr(orig, '_datafile', getattr(orig, 'datafile'))
    origdatapath = orig.opener.join(datafile)
    radix = b'revlog'
    indexname = b'revlog.i'
    dataname = b'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            # each index entry has a fixed size, so this cuts at a record
            # boundary
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        try:
            dest = revlog(vfs, radix=radix, **revlogkwargs)
        except TypeError:
            # hg <= 5.8 takes explicit file names instead of a radix
            dest = revlog(
                vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
            )
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
3572 3572
3573 3573
@command(
    b'perf::revlogchunks|perfrevlogchunks',
    revlogopts
    + formatteropts
    + [
        (b'e', b'engines', b'', b'compression engines to use'),
        (b's', b'startrev', 0, b'revision to start at'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = {e.strip() for e in engines.split(b',')}
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # default to every available engine that can actually compress
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # Return a raw file handle on the file backing the revlog data.
        if rl._inline:
            indexfile = getattr(rl, '_indexfile', None)
            if indexfile is None:
                # compatibility with <= hg-5.8
                indexfile = getattr(rl, 'indexfile')
            return getsvfs(repo)(indexfile)
        else:
            # fixed: the first lookup previously read 'datafile' twice, so
            # the modern '_datafile' attribute (hg >= 5.9) was never used
            datafile = getattr(rl, '_datafile', getattr(rl, 'datafile'))
            return getsvfs(repo)(datafile)

    def doread():
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append(
            (
                functools.partial(docompress, compressor),
                b'compress w/ %s' % engine,
            )
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3706 3706
3707 3707
@command(
    b'perf::revlogrevision|perfrevlogrevision',
    revlogopts
    + formatteropts
    + [(b'', b'cache', False, b'use caches instead of clearing')],
    b'-c|-m|FILE REV',
)
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # without a file argument, the first positional is the revision
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        start = r.start
        length = r.length
        inline = r._inline
        try:
            iosize = r.index.entry_size
        except AttributeError:
            # compatibility with older revlog index API
            iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with the data;
                    # skip past them to reach the chunk itself
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    # precompute the inputs for each phase so every benchmark below only
    # measures its own step
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend(
        [
            (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
            (lambda: dodecompress(rawchunks), b'decompress'),
            (lambda: dopatch(text, bins), b'patch'),
            (lambda: dohash(text), b'hash'),
        ]
    )

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
3852 3852
3853 3853
@command(
    b'perf::revset|perfrevset',
    [
        (b'C', b'clear', False, b'clear volatile cache between each call.'),
        (b'', b'contexts', False, b'obtain changectx for each revision'),
    ]
    + formatteropts,
    b"REVSET",
)
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option to evaluate the impact of building the volatile
    revision set caches on revset execution. Volatile caches hold filtered
    and obsolescence related data."""
    # docstring fixed: it referred to a nonexistent --clean option; the
    # declared flag is -C/--clear
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def d():
        if clear:
            # drop caches derived from filtering/obsolescence so every run
            # pays the cost of rebuilding them
            repo.invalidatevolatilesets()
        if contexts:
            # materialize a full changectx per revision
            for ctx in repo.set(expr):
                pass
        else:
            # plain revision numbers only
            for r in repo.revs(expr):
                pass

    timer(d)
    fm.end()
3885 3885
3886 3886
@command(
    b'perf::volatilesets|perfvolatilesets',
    [
        (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
    ]
    + formatteropts,
)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def makebench(compute, target):
        """build a benchmark callable running ``compute(repo, target)``
        against freshly invalidated volatile sets"""

        def run():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            compute(repo, target)

        return run

    # obsolescence related sets, optionally restricted to requested names
    obsnames = sorted(obsolete.cachefuncs)
    if names:
        obsnames = [n for n in obsnames if n in names]
    for name in obsnames:
        timer(makebench(obsolete.getrevs, name), title=name)

    # repository view filters, optionally restricted to requested names
    filternames = sorted(repoview.filtertable)
    if names:
        filternames = [n for n in filternames if n in names]
    for name in filternames:
        timer(makebench(repoview.filterrevs, name), title=name)
    fm.end()
3934 3934
3935 3935
@command(
    b'perf::branchmap|perfbranchmap',
    [
        (b'f', b'full', False, b'Includes build time of subset'),
        (
            b'',
            b'clear-revbranch',
            False,
            b'purge the revbranch cache between computation',
        ),
    ]
    + formatteropts,
)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)

    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        # locate the per-filter cache mapping; its layout changed across
        # Mercurial versions ("historical portability")
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches

        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # drop every cached branchmap so subsets are rebuilt too
                view._branchcaches.clear()
            else:
                # only drop the cache for this specific filter level
                filtered.pop(filtername, None)
            view.branchmap()

        return d

    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        # pick a filter whose subset is already scheduled (or has none),
        # so allfilters ends up topologically ordered subset-first
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable on-disk branchmap reads and writes so only the in-memory
    # computation is measured; attribute locations differ across versions
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=printname)
    finally:
        # always undo the monkeypatching, even if a benchmark fails
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
4025 4025
4026 4026
@command(
    b'perf::branchmapupdate|perfbranchmapupdate',
    [
        (b'', b'base', [], b'subset of revision to start from'),
        (b'', b'target', [], b'subset of revision to end with'),
        (b'', b'clear-caches', False, b'clear cache between each runs'),
    ]
    + formatteropts,
)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from for <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    $ update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None]  # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    # expand both selections to their full set of ancestors
    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    # the revisions the benchmarked update will actually have to process
    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    # ad-hoc repoview filters exposing exactly the base / target subsets
    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        # register the temporary filters in the global filter table
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset where found
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            # start every run from a fresh copy of the base branchmap
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        # always drop the temporary filters from the global table
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
4135 4135
4136 4136
@command(
    b'perf::branchmapload|perfbranchmapload',
    [
        (b'f', b'filter', b'', b'Specify repoview filter'),
        (b'', b'list', False, b'List brachmap filter caches'),
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ]
    + formatteropts,
)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # --list: just print the cached branchmap files and their sizes
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(
                    b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
                )
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap()  # make sure we have a relevant, up to date branchmap

    # "historical portability": the on-disk reader moved over time
    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached
    while fromfile(repo) is None:
        # walk up the subset chain until we find a filter level with an
        # on-disk cache we can actually benchmark
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(
                b'No branchmap cached for %s repo' % (filter or b'unfiltered')
            )
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)

    def setup():
        if clearrevlogs:
            clearchangelog(repo)

    def bench():
        fromfile(repo)

    timer(bench, setup=setup)
    fm.end()
4195 4195
4196 4196
@command(b'perf::loadmarkers|perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    store_vfs = getsvfs(repo)

    def count_markers():
        # instantiating the obsstore parses every on-disk marker
        return len(obsolete.obsstore(repo, store_vfs))

    timer(count_markers)
    fm.end()
4206 4206
4207 4207
@command(
    b'perf::lrucachedict|perflrucachedict',
    formatteropts
    + [
        (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
        (b'', b'mincost', 0, b'smallest cost of items in cache'),
        (b'', b'maxcost', 100, b'maximum cost of items in cache'),
        (b'', b'size', 4, b'size of cache'),
        (b'', b'gets', 10000, b'number of key lookups'),
        (b'', b'sets', 10000, b'number of key sets'),
        (b'', b'mixed', 10000, b'number of mixed mode operations'),
        (
            b'',
            b'mixedgetfreq',
            50,
            b'frequency of get vs set ops in mixed mode',
        ),
    ],
    norepo=True,
)
def perflrucache(
    ui,
    mincost=0,
    maxcost=100,
    costlimit=0,
    size=4,
    gets=10000,
    sets=10000,
    mixed=10000,
    mixedgetfreq=50,
    **opts
):
    """benchmark util.lrucachedict: init, gets, inserts, sets and mixed use

    With a non-zero --costlimit, the cost-aware variants of the benchmarks
    are run instead of the plain ones.
    """
    opts = _byteskwargs(opts)

    def doinit():
        # raw construction cost of the cache object
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value  # silence pyflakes warning

    def dogetscost():
        # `costs` is defined below; it exists before any benchmark runs
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value  # silence pyflakes warning
            except KeyError:
                # cost-based eviction may have dropped the key
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        # same workload as doinserts but via __setitem__
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0
        else:
            op = 1

        mixedops.append(
            (op, random.randint(0, size * 2), random.choice(costrange))
        )

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    # pick the cost-aware or plain benchmark set depending on --costlimit
    if costlimit:
        benches.extend(
            [
                (dogetscost, b'gets w/ cost limit'),
                (doinsertscost, b'inserts w/ cost limit'),
                (domixedcost, b'mixed w/ cost limit'),
            ]
        )
    else:
        benches.extend(
            [
                (dogets, b'gets'),
                (doinserts, b'inserts'),
                (dosets, b'sets'),
                (domixed, b'mixed'),
            ]
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
4362 4362
4363 4363
@command(
    b'perf::write|perfwrite',
    formatteropts
    + [
        (b'', b'write-method', b'write', b'ui write method'),
        (b'', b'nlines', 100, b'number of lines'),
        (b'', b'nitems', 100, b'number of items (per line)'),
        (b'', b'item', b'x', b'item that is written'),
        (b'', b'batch-line', None, b'pass whole line to write method at once'),
        (b'', b'flush-line', None, b'flush after each line'),
    ],
)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write (and others)"""
    opts = _byteskwargs(opts)

    # resolve the ui method to benchmark (write, write_err, ...)
    write = getattr(ui, _sysstr(opts[b'write_method']))
    line_count = int(opts[b'nlines'])
    item_count = int(opts[b'nitems'])
    token = opts[b'item']
    whole_lines = opts.get(b'batch_line')
    flush_each = opts.get(b'flush_line')

    if whole_lines:
        # precompute the full line so only the write call is timed
        full_line = token * item_count + b'\n'

    def benchmark():
        for _ in pycompat.xrange(line_count):
            if whole_lines:
                write(full_line)
            else:
                for _ in pycompat.xrange(item_count):
                    write(token)
                write(b'\n')
            if flush_each:
                ui.flush()
        ui.flush()

    timer, fm = gettimer(ui, opts)
    timer(benchmark)
    fm.end()
4405 4405
4406 4406
def uisetup(ui):
    """extension setup: patch old Mercurials so perf commands keep working

    On Mercurial 1.9 - 3.7 (detected by cmdutil.openrevlog existing while
    commands.debugrevlogopts does not), wrap cmdutil.openrevlog so that the
    unsupported ``--dir`` option aborts with a clear message instead of
    failing obscurely.
    """
    if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
        commands, b'debugrevlogopts'
    ):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(
                    b"This version doesn't support --dir option",
                    hint=b"use 3.5 or later",
                )
            return orig(repo, cmd, file_, opts)

        # wrapfunction expects the attribute name as a native str, not bytes;
        # wrap exactly once (the stale bytes-name call was diff residue)
        name = _sysstr(b'openrevlog')
        extensions.wrapfunction(cmdutil, name, openrevlog)
4425 4426
4426 4427
@command(
    b'perf::progress|perfprogress',
    formatteropts
    + [
        (b'', b'topic', b'topic', b'topic for progress messages'),
        (b'c', b'total', 1000000, b'total value we are progressing to'),
    ],
    norepo=True,
)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def drive_progress():
        # time one full sweep of the progress bar, one increment per step
        with ui.makeprogress(topic, total=total) as bar:
            for _ in _xrange(total):
                bar.increment()

    timer(drive_progress)
    fm.end()
General Comments 0
You need to be logged in to leave comments. Login now