##// END OF EJS Templates
perf: add a --as-push option to perf::unbundle...
marmoute -
r52329:827b8971 default
parent child Browse files
Show More
@@ -1,4638 +1,4650 b''
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance
3 3
4 4 Configurations
5 5 ==============
6 6
7 7 ``perf``
8 8 --------
9 9
10 10 ``all-timing``
11 11 When set, additional statistics will be reported for each benchmark: best,
12 12 worst, median average. If not set only the best timing is reported
13 13 (default: off).
14 14
15 15 ``presleep``
16 16 number of second to wait before any group of runs (default: 1)
17 17
18 18 ``pre-run``
19 19 number of run to perform before starting measurement.
20 20
21 21 ``profile-benchmark``
22 22 Enable profiling for the benchmarked section.
23 23 (The first iteration is benchmarked)
24 24
25 25 ``run-limits``
26 26 Control the number of runs each benchmark will perform. The option value
27 27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 28 conditions are considered in order with the following logic:
29 29
30 30 If benchmark has been running for <time> seconds, and we have performed
31 31 <numberofrun> iterations, stop the benchmark,
32 32
33 33 The default value is: `3.0-100, 10.0-3`
34 34
35 35 ``stub``
36 36 When set, benchmarks will only be run once, useful for testing
37 37 (default: off)
38 38 '''
39 39
40 40 # "historical portability" policy of perf.py:
41 41 #
42 42 # We have to do:
43 43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 46 # - make historical perf command work correctly with as wide Mercurial
47 47 # version as possible
48 48 #
49 49 # We have to do, if possible with reasonable cost:
50 50 # - make recent perf command for historical feature work correctly
51 51 # with early Mercurial
52 52 #
53 53 # We don't have to do:
54 54 # - make perf command for recent feature work correctly with early
55 55 # Mercurial
56 56
57 57 import contextlib
58 58 import functools
59 59 import gc
60 60 import os
61 61 import random
62 62 import shutil
63 63 import struct
64 64 import sys
65 65 import tempfile
66 66 import threading
67 67 import time
68 68
69 69 import mercurial.revlog
70 70 from mercurial import (
71 71 changegroup,
72 72 cmdutil,
73 73 commands,
74 74 copies,
75 75 error,
76 76 extensions,
77 77 hg,
78 78 mdiff,
79 79 merge,
80 80 util,
81 81 )
82 82
83 83 # for "historical portability":
84 84 # try to import modules separately (in dict order), and ignore
85 85 # failure, because these aren't available with early Mercurial
86 86 try:
87 87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 88 except ImportError:
89 89 pass
90 90 try:
91 91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 92 except ImportError:
93 93 pass
94 94 try:
95 95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96 96
97 97 dir(registrar) # forcibly load it
98 98 except ImportError:
99 99 registrar = None
100 100 try:
101 101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 102 except ImportError:
103 103 pass
104 104 try:
105 105 from mercurial.utils import repoviewutil # since 5.0
106 106 except ImportError:
107 107 repoviewutil = None
108 108 try:
109 109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 110 except ImportError:
111 111 pass
112 112 try:
113 113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 114 except ImportError:
115 115 pass
116 116
117 117 try:
118 118 from mercurial import profiling
119 119 except ImportError:
120 120 profiling = None
121 121
try:
    # modern Mercurial: revlogs carry a "kind"; register one for perf
    from mercurial.revlogutils import constants as revlog_constants

    perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')

    def revlog(opener, *args, **kwargs):
        # wrapper injecting the perf revlog kind on recent Mercurial
        return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)


except (ImportError, AttributeError):
    perf_rl_kind = None

    def revlog(opener, *args, **kwargs):
        # older Mercurial: revlog() takes no "kind" argument
        return mercurial.revlog.revlog(opener, *args, **kwargs)


def identity(a):
    """Return *a* unchanged; used as a no-op conversion fallback below."""
    return a


try:
    from mercurial import pycompat

    getargspec = pycompat.getargspec  # added to module after 4.5
    _byteskwargs = pycompat.byteskwargs  # since 4.1 (or fbc3f73dc802)
    _sysstr = pycompat.sysstr  # since 4.0 (or 2219f4f82ede)
    _bytestr = pycompat.bytestr  # since 4.2 (or b70407bd84d5)
    _xrange = pycompat.xrange  # since 4.8 (or 7eba8f83129b)
    fsencode = pycompat.fsencode  # since 3.9 (or f4a5e0e86a7e)
    if pycompat.ispy3:
        _maxint = sys.maxsize  # per py3 docs for replacing maxint
    else:
        _maxint = sys.maxint
except (NameError, ImportError, AttributeError):
    # very old Mercurial without pycompat: python 2 only, so the
    # bytes/str conversion helpers all degrade to no-ops
    import inspect

    getargspec = inspect.getargspec
    _byteskwargs = identity
    _bytestr = str
    fsencode = identity  # no py3 support
    _maxint = sys.maxint  # no py3 support
    _sysstr = lambda x: x  # no py3 support
    _xrange = xrange

try:
    # 4.7+
    queue = pycompat.queue.Queue
except (NameError, AttributeError, ImportError):
    # <4.7.
    try:
        queue = pycompat.queue
    except (NameError, AttributeError, ImportError):
        # python 2 stdlib module name
        import Queue as queue

try:
    from mercurial import logcmdutil

    makelogtemplater = logcmdutil.maketemplater
except (AttributeError, ImportError):
    try:
        # pre-4.6 location of the helper
        makelogtemplater = cmdutil.makelogtemplater
    except (AttributeError, ImportError):
        makelogtemplater = None
185 185
# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()  # sentinel distinguishing "attribute missing" from None


def safehasattr(thing, attr):
    """Return True if *thing* has attribute *attr* (attr given as bytes)."""
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined


setattr(util, 'safehasattr', safehasattr)

# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == b'nt':
    # time.clock has better resolution than time.time on Windows (py2 era)
    util.timer = time.clock
else:
    util.timer = time.time

# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(
    cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
)

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(
    cmdutil,
    "debugrevlogopts",
    getattr(
        commands,
        "debugrevlogopts",
        [
            (b'c', b'changelog', False, b'open changelog'),
            (b'm', b'manifest', False, b'open manifest'),
            (b'', b'dir', False, b'open directory manifest'),
        ],
    ),
)

# table collecting every @command declaration defined in this extension
cmdtable = {}
237 237
238 238
# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a command declaration like b"name|alias1|alias2" into names."""
    names = cmd.split(b"|")
    return names
244 244
245 245
if safehasattr(registrar, 'command'):
    # modern API (3.7+): registrar-based command registration
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if 'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command

        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                # emulate norepo by appending the aliases to commands.norepo
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)


else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func

        return decorator
277 277
278 278
try:
    import mercurial.registrar
    import mercurial.configitems

    # declare the perf.* config knobs so devel-warnings stay quiet;
    # dynamicdefault because the code below supplies explicit defaults
    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'pre-run',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
except (ImportError, AttributeError):
    # registrar.configitem missing on old Mercurial; safe to skip since
    # all reads below pass explicit defaults
    pass
except TypeError:
    # compatibility fix for a11fd395e83f
    # hg version: 5.2
    # (that version's configitem() rejects the "experimental" keyword,
    # so re-register everything without it)
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'pre-run',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
    )
365 365
366 366
def getlen(ui):
    """Return the length function to use, honouring perf.stub.

    In stub mode every collection counts as a single element so that
    benchmarks complete quickly during testing.
    """
    stubbed = ui.configbool(b"perf", b"stub", False)
    if stubbed:
        return lambda sequence: 1
    return len
371 371
372 372
class noop:
    """Dummy context manager doing nothing on enter or exit."""

    def __enter__(self):
        return None

    def __exit__(self, *args):
        return None


# shared no-op context instance, used e.g. when no profiler is active
NOOPCTX = noop()
384 384
385 385
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter:
            """Minimized composition of baseformatter and plainformatter"""

            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short

            def __nonzero__(self):
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", True)

    # experimental config: perf.run-limits
    # each entry is "<seconds>-<minimum-run-count>"; malformed entries are
    # warned about and skipped rather than aborting the benchmark
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
            continue
        try:
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
    )
    return t, fm
508 508
509 509
def stub_timer(fm, func, setup=None, title=None):
    """Timer replacement for stub mode: run *func* exactly once.

    *setup* is invoked first when provided; *fm* and *title* are accepted
    only for signature compatibility with _timer and are unused.
    """
    if setup is None:
        func()
        return
    setup()
    func()
514 514
515 515
@contextlib.contextmanager
def timeone():
    """Yield a list that receives one (wall, user, sys) sample on exit.

    The caller runs the code to measure inside the ``with`` block; the
    timing tuple is appended to the yielded list when the block ends.
    """
    r = []
    ostart = os.times()
    cstart = util.timer()
    yield r
    cstop = util.timer()
    ostop = os.times()
    a, b = ostart, ostop
    # os.times() indices 0 and 1 are user and system CPU time
    r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
526 526
527 527
# list of stop condition (elapsed time, minimal run count)
# a benchmark keeps iterating until the first pair whose both thresholds
# are met: e.g. stop after 3s if at least 100 runs happened, or after 10s
# if at least 3 runs happened
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)
533 533
534 534
@contextlib.contextmanager
def noop_context():
    """Context manager equivalent of doing nothing (default for _timer)."""
    yield None
538 538
539 539
def _timer(
    fm,
    func,
    setup=None,
    context=noop_context,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
):
    """Run *func* repeatedly, collect timings and report them via *fm*.

    *setup* runs before every invocation (including warm-ups); *context*
    wraps each invocation; *limits* are (elapsed, min-count) stop
    conditions; *prerun* is the number of unmeasured warm-up runs;
    *profiler*, when given, profiles only the first measured run.
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    for i in range(prerun):
        # warm-up iterations: executed but never timed
        if setup is not None:
            setup()
        with context():
            func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with context():
            with profiler:
                with timeone() as item:
                    r = func()
        # only the first measured iteration is profiled
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    # r is the last run's return value; displayed as the "result" field
    formatone(fm, results, title=title, result=r, displayall=displayall)
582 582
583 583
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's timing summary to formatter *fm*.

    *timings* is a list of (wall, user, sys) tuples and is sorted in
    place. Only the best sample is reported unless *displayall* is true,
    in which case max, average and median are emitted as well.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def emit(role, sample):
        # the "best" line carries bare field names; other roles prefix them
        prefix = b'' if role == b'best' else b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', sample[0])
        fm.write(prefix + b'comb', b' comb %f', sample[1] + sample[2])
        fm.write(prefix + b'user', b' user %f', sample[1])
        fm.write(prefix + b'sys', b' sys %f', sample[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    emit(b'best', timings[0])
    if displayall:
        emit(b'max', timings[-1])
        averages = tuple(sum(column) / count for column in zip(*timings))
        emit(b'avg', averages)
        emit(b'median', timings[count // 2])
616 616
617 617
618 618 # utilities for historical portability
619 619
620 620
def getint(ui, section, name, default):
    """Read an integer config value with a default.

    Implemented locally because ui.configint has only been available
    since 1.9 (or fa2b596db182). Raises error.ConfigError if the stored
    value is not a valid integer.
    """
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        value = int(raw)
    except ValueError:
        raise error.ConfigError(
            b"%s.%s is not an integer ('%s')" % (section, name, raw)
        )
    return value
633 633
634 634
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    # 'name' is bytes; util.safehasattr was monkeypatched above to accept it
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort(
            (
                b"missing attribute %s of %s might break assumption"
                b" of performance measurement"
            )
            % (name, obj)
        )

    # capture the current value so restore() can put it back later
    origvalue = getattr(obj, _sysstr(name))

    class attrutil:
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
671 671
672 672
673 673 # utilities to examine each internal API changes
674 674
675 675
def getbranchmapsubsettable():
    """Return the branch-cache "subsettable" from whichever module has it.

    for "historical portability":
    subsettable is defined in:
    - branchmap since 2.9 (or 175c6fd8cacc)
    - repoview since 2.5 (or 59a9f18d4587)
    - repoviewutil since 5.0
    """
    # NOTE(review): the module names come from guarded imports at file
    # top; on very old Mercurial a missing module would raise NameError
    # here — presumably acceptable given the Abort below, but unverified
    for mod in (branchmap, repoview, repoviewutil):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )
694 694
695 695
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store"""
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf);
    # older versions exposed the same thing as repo.sopener
    return getattr(repo, 'svfs', None) or getattr(repo, 'sopener')
705 705
706 706
def getvfs(repo):
    """Return appropriate object to access files under .hg"""
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf);
    # older versions exposed the same thing as repo.opener
    return getattr(repo, 'vfs', None) or getattr(repo, 'opener')
716 716
717 717
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API"""
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if '_tagscache' in vars(repo):
                del repo.__dict__['_tagscache']

        return clearcache

    # 1.4 <= hg < 2.0: plain attribute holding the tags dict
    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    # 0.6 <= hg < 1.4
    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")
746 746
747 747
748 748 # utilities to clear cache
749 749
750 750
def clearfilecache(obj, attrname):
    """Drop *attrname* from obj's filecache so it is recomputed on access.

    Operates on the unfiltered repo when obj supports it, since that is
    where the file cache actually lives.
    """
    if getattr(obj, 'unfiltered', None) is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
758 758
759 759
def clearchangelog(repo):
    """Force the changelog to be reloaded from disk on next access."""
    if repo is not repo.unfiltered():
        # repoview keeps its own (changelog, cache key) pair; reset both
        object.__setattr__(repo, '_clcachekey', None)
        object.__setattr__(repo, '_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')
765 765
766 766
767 767 # perf commands
768 768
769 769
@command(b'perf::walk|perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    """benchmark a dirstate walk (tracked + unknown files, ignored excluded)"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    timer(
        lambda: len(
            list(
                repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
            )
        )
    )
    fm.end()
783 783
784 784
@command(b'perf::annotate|perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    """benchmark annotating file *f* at the working directory parent"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    # annotate(True): follow copies/renames, measuring the full cost
    timer(lambda: len(fc.annotate(True)))
    fm.end()
792 792
793 793
@command(
    b'perf::status|perfstatus',
    [
        (b'u', b'unknown', False, b'ask status to look for unknown files'),
        (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
    ]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call

    The repository data are preserved between each call.

    By default, only the status of the tracked file are requested. If
    `--unknown` is passed, the "unknown" files are also tracked.
    """
    opts = _byteskwargs(opts)
    # m = match.always(repo.root, repo.getcwd())
    # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    # False))))
    timer, fm = gettimer(ui, opts)
    if opts[b'dirstate']:
        # benchmark the low-level dirstate status call instead of
        # the higher-level repo.status
        dirstate = repo.dirstate
        m = scmutil.matchall(repo)
        unknown = opts[b'unknown']

        def status_dirstate():
            s = dirstate.status(
                m, subrepos=[], ignored=False, clean=False, unknown=unknown
            )
            # consume the result so any lazily computed part is included
            sum(map(bool, s))

        if util.safehasattr(dirstate, 'running_status'):
            # newer hg wants dirstate.status run inside this context
            with dirstate.running_status(repo):
                timer(status_dirstate)
                dirstate.invalidate()
        else:
            timer(status_dirstate)
    else:
        timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
835 835
836 836
@command(b'perf::addremove|perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    """benchmark a dry-run addremove over the whole working copy"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    try:
        oldquiet = repo.ui.quiet
        repo.ui.quiet = True  # silence per-file output during the runs
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True  # never actually add or remove anything
        if 'uipathfn' in getargspec(scmutil.addremove).args:
            # scmutil.addremove grew a uipathfn argument in later versions
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
854 854
855 855
def clearcaches(cl):
    """Clear a changelog/revlog's in-memory caches across hg versions."""
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # <= hg-5.2
        from mercurial.node import nullid, nullrev

        # reset the node->rev cache to its pristine single-entry state
        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
866 866
867 867
@command(b'perf::heads|perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def s():
        # setup: drop changelog caches so every run recomputes from scratch
        clearcaches(cl)

    def d():
        len(cl.headrevs())

    timer(d, setup=s)
    fm.end()
883 883
884 884
def _default_clear_on_disk_tags_cache(repo):
    """Fallback used when the tags module has no clear_cache_on_disk."""
    from mercurial import tags

    # remove the per-filter tags cache file from .hg/cache
    repo.cachevfs.tryunlink(tags._filename(repo))
889 889
890 890
def _default_clear_on_disk_tags_fnodes_cache(repo):
    """Fallback used when tags.clear_cache_fnodes is absent or broken."""
    from mercurial import tags

    # _fnodescachefile is used directly as the file name here (unlike
    # _filename(repo) above, it presumably does not depend on the repo
    # filter — confirm against the tags module)
    repo.cachevfs.tryunlink(tags._fnodescachefile)
895 895
896 896
def _default_forget_fnodes(repo, revs):
    """function used by the perf extension to prune some entries from the
    fnodes cache"""
    from mercurial import tags

    # all-0xff payloads are written as the "missing" placeholder values
    # (4-byte and 20-byte fields, matching the fnodes record layout)
    missing_1 = b'\xff' * 4
    missing_2 = b'\xff' * 20
    cache = tags.hgtagsfnodescache(repo.unfiltered())
    for r in revs:
        # each record lives at a fixed offset: rev * record size
        cache._writeentry(r * tags._fnodesrecsize, missing_1, missing_2)
    cache.write()
908 908
909 909
@command(
    b'perf::tags|perftags',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
        (
            b'',
            b'clear-on-disk-cache',
            False,
            b'clear on disk tags cache (DESTRUCTIVE)',
        ),
        (
            b'',
            b'clear-fnode-cache-all',
            False,
            b'clear on disk file node cache (DESTRUCTIVE),',
        ),
        (
            b'',
            b'clear-fnode-cache-rev',
            [],
            b'clear on disk file node cache (DESTRUCTIVE),',
            b'REVS',
        ),
        (
            b'',
            b'update-last',
            b'',
            b'simulate an update over the last N revisions (DESTRUCTIVE),',
            b'N',
        ),
    ],
)
def perftags(ui, repo, **opts):
    """Benchmark tags retrieval in various situation

    The option marked as (DESTRUCTIVE) will alter the on-disk cache, possibly
    altering performance after the command was run. However, it does not
    destroy any stored data.
    """
    from mercurial import tags

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']
    clear_disk = opts[b'clear_on_disk_cache']
    clear_fnode = opts[b'clear_fnode_cache_all']

    clear_fnode_revs = opts[b'clear_fnode_cache_rev']
    update_last_str = opts[b'update_last']
    update_last = None
    if update_last_str:
        try:
            update_last = int(update_last_str)
        except ValueError:
            msg = b'could not parse value for update-last: "%s"'
            msg %= update_last_str
            hint = b'value should be an integer'
            raise error.Abort(msg, hint=hint)

    # prefer the in-tree cache-clearing helpers when this Mercurial
    # provides them, falling back to the local _default_* implementations
    clear_disk_fn = getattr(
        tags,
        "clear_cache_on_disk",
        _default_clear_on_disk_tags_cache,
    )
    if getattr(tags, 'clear_cache_fnodes_is_working', False):
        clear_fnodes_fn = tags.clear_cache_fnodes
    else:
        clear_fnodes_fn = _default_clear_on_disk_tags_fnodes_cache
    clear_fnodes_rev_fn = getattr(
        tags,
        "forget_fnodes",
        _default_forget_fnodes,
    )

    clear_revs = []
    if clear_fnode_revs:
        clear_revs.extend(scmutil.revrange(repo, clear_fnode_revs))

    if update_last:
        revset = b'last(all(), %d)' % update_last
        last_revs = repo.unfiltered().revs(revset)
        clear_revs.extend(last_revs)

        from mercurial import repoview

        # build a repoview filter that hides the last N revisions so we
        # can warm a "pre-update" tags cache file to restore before runs
        rev_filter = {(b'experimental', b'extra-filter-revs'): revset}
        with repo.ui.configoverride(rev_filter, source=b"perf"):
            filter_id = repoview.extrafilter(repo.ui)

        filter_name = b'%s%%%s' % (repo.filtername, filter_id)
        pre_repo = repo.filtered(filter_name)
        pre_repo.tags()  # warm the cache
        old_tags_path = repo.cachevfs.join(tags._filename(pre_repo))
        new_tags_path = repo.cachevfs.join(tags._filename(repo))

    clear_revs = sorted(set(clear_revs))

    def s():
        # setup: restore/clear the selected caches before each timed run
        if update_last:
            util.copyfile(old_tags_path, new_tags_path)
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        if clear_disk:
            clear_disk_fn(repo)
        if clear_fnode:
            clear_fnodes_fn(repo)
        elif clear_revs:
            clear_fnodes_rev_fn(repo, clear_revs)
        repocleartagscache()

    def t():
        len(repo.tags())

    timer(t, setup=s)
    fm.end()
1028 1028
1029 1029
@command(b'perf::ancestors|perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    """benchmark walking all ancestors of the current repository heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    head_revs = repo.changelog.headrevs()

    def run():
        # exhaust the lazy ancestor iterator; the traversal is the work
        for _rev in repo.changelog.ancestors(head_revs):
            pass

    timer(run)
    fm.end()
1042 1042
1043 1043
@command(b'perf::ancestorset|perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    """benchmark membership tests against a lazy ancestor set"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    target_revs = repo.revs(revset)
    head_revs = repo.changelog.headrevs()

    def run():
        # build a fresh lazy ancestor set, then probe it with each target rev
        ancestors = repo.changelog.ancestors(head_revs)
        for candidate in target_revs:
            candidate in ancestors

    timer(run)
    fm.end()
1058 1058
1059 1059
@command(
    b'perf::delta-find',
    revlogopts + formatteropts,
    b'-c|-m|FILE REV',
)
def perf_delta_find(ui, repo, arg_1, arg_2=None, **opts):
    """benchmark the process of finding a valid delta for a revlog revision

    When a revlog receives a new revision (e.g. from a commit, or from an
    incoming bundle), it searches for a suitable delta-base to produce a delta.
    This perf command measures how much time we spend in this process. It
    operates on an already stored revision.

    See `hg help debug-delta-find` for another related command.
    """
    from mercurial import revlogutils
    import mercurial.revlogutils.deltas as deltautil

    opts = _byteskwargs(opts)
    # positional arguments are either "REV" alone or "FILE REV"
    if arg_2 is None:
        file_ = None
        rev = arg_1
    else:
        file_ = arg_1
        rev = arg_2

    repo = repo.unfiltered()

    timer, fm = gettimer(ui, opts)

    rev = int(rev)

    revlog = cmdutil.openrevlog(repo, b'perf::delta-find', file_, opts)

    deltacomputer = deltautil.deltacomputer(revlog)

    # rebuild the revisioninfo object that would have been supplied when the
    # revision was originally added, using the already stored data
    node = revlog.node(rev)
    p1r, p2r = revlog.parentrevs(rev)
    p1 = revlog.node(p1r)
    p2 = revlog.node(p2r)
    full_text = revlog.revision(rev)
    textlen = len(full_text)
    cachedelta = None  # force a full delta search, no cached hint
    flags = revlog.flags(rev)

    revinfo = revlogutils.revisioninfo(
        node,
        p1,
        p2,
        [full_text],  # btext
        textlen,
        cachedelta,
        flags,
    )

    # Note: we should probably purge the potential caches (like the full
    # manifest cache) between runs.
    def find_one():
        with revlog._datafp() as fh:
            deltacomputer.finddeltainfo(revinfo, fh, target_rev=rev)

    timer(find_one)
    fm.end()
1123 1123
1124 1124
@command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path"""
    # repos[1] is (re)filled by the setup function with a fresh peer
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)

    # resolve the path with whatever API this Mercurial version provides,
    # newest first ("historical portability")
    try:
        from mercurial.utils.urlutil import get_unique_pull_path_obj

        path = get_unique_pull_path_obj(b'perfdiscovery', ui, path)
    except ImportError:
        try:
            from mercurial.utils.urlutil import get_unique_pull_path

            path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
        except ImportError:
            path = ui.expandpath(path)

    def s():
        # reconnect before each run so a stale peer state is not measured
        repos[1] = hg.peer(ui, opts, path)

    def d():
        setdiscovery.findcommonheads(ui, *repos)

    timer(d, setup=s)
    fm.end()
1151 1151
1152 1152
@command(
    b'perf::bookmarks|perfbookmarks',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    refresh_revlogs = opts[b'clear_revlogs']

    def prepare():
        # drop caches so every run re-parses the bookmark file from disk
        if refresh_revlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')

    def run():
        repo._bookmarks

    timer(run, setup=prepare)
    fm.end()
1177 1177
1178 1178
@command(
    b'perf::bundle',
    [
        (
            b'r',
            b'rev',
            [],
            b'changesets to bundle',
            b'REV',
        ),
        (
            b't',
            b'type',
            b'none',
            b'bundlespec to use (see `hg help bundlespec`)',
            b'TYPE',
        ),
    ]
    + formatteropts,
    b'REVS',
)
def perfbundle(ui, repo, *revs, **opts):
    """benchmark the creation of a bundle from a repository

    For now, this only supports "none" compression.
    """
    # bundlespec parsing moved between modules over time; support both
    try:
        from mercurial import bundlecaches

        parsebundlespec = bundlecaches.parsebundlespec
    except ImportError:
        from mercurial import exchange

        parsebundlespec = exchange.parsebundlespec

    from mercurial import discovery
    from mercurial import bundle2

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    cl = repo.changelog
    revs = list(revs)
    revs.extend(opts.get(b'rev', ()))
    revs = scmutil.revrange(repo, revs)
    if not revs:
        # fixed grammar of the error message ("not revision specified")
        raise error.Abort(b"no revision specified")
    # make it a consistent set (ie: without topological gaps)
    old_len = len(revs)
    revs = list(repo.revs(b"%ld::%ld", revs, revs))
    if old_len != len(revs):
        new_count = len(revs) - old_len
        msg = b"add %d new revisions to make it a consistent set\n"
        ui.write_err(msg % new_count)

    # compute the outgoing object the bundling code expects
    targets = [cl.node(r) for r in repo.revs(b"heads(::%ld)", revs)]
    bases = [cl.node(r) for r in repo.revs(b"heads(::%ld - %ld)", revs, revs)]
    outgoing = discovery.outgoing(repo, bases, targets)

    bundle_spec = opts.get(b'type')

    bundle_spec = parsebundlespec(repo, bundle_spec, strict=False)

    # map the bundlespec to a changegroup version
    cgversion = bundle_spec.params.get(b"cg.version")
    if cgversion is None:
        if bundle_spec.version == b'v1':
            cgversion = b'01'
        if bundle_spec.version == b'v2':
            cgversion = b'02'
    if cgversion not in changegroup.supportedoutgoingversions(repo):
        err = b"repository does not support bundle version %s"
        raise error.Abort(err % cgversion)

    if cgversion == b'01':  # bundle1
        bversion = b'HG10' + bundle_spec.wirecompression
        bcompression = None
    elif cgversion in (b'02', b'03'):
        bversion = b'HG20'
        bcompression = bundle_spec.wirecompression
    else:
        err = b'perf::bundle: unexpected changegroup version %s'
        raise error.ProgrammingError(err % cgversion)

    if bcompression is None:
        bcompression = b'UN'

    if bcompression != b'UN':
        err = b'perf::bundle: compression currently unsupported: %s'
        raise error.ProgrammingError(err % bcompression)

    def do_bundle():
        # write to os.devnull: only generation time matters, not disk I/O
        bundle2.writenewbundle(
            ui,
            repo,
            b'perf::bundle',
            os.devnull,
            bversion,
            outgoing,
            bundle_spec.params,
        )

    timer(do_bundle)
    fm.end()
1282 1282
1283 1283
@command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    # benchmark factories: each returns a zero-argument callable that
    # re-opens the bundle so every run starts from a cold file handle

    def makebench(fn):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        # read through the bundle abstraction in fixed-size chunks
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # baseline: raw file reads with no bundle layer at all
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # sniff the bundle format once to pick the matching benchmark set
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend(
                [
                    (makebench(deltaiter), b'cg1 deltaiter()'),
                    (makebench(iterchunks), b'cg1 getchunks()'),
                    (makereadnbytes(8192), b'cg1 read(8k)'),
                    (makereadnbytes(16384), b'cg1 read(16k)'),
                    (makereadnbytes(32768), b'cg1 read(32k)'),
                    (makereadnbytes(131072), b'cg1 read(128k)'),
                ]
            )
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend(
                [
                    (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                    (makebench(iterparts), b'bundle2 iterparts()'),
                    (
                        makebench(iterpartsseekable),
                        b'bundle2 iterparts() seekable',
                    ),
                    (makebench(seek), b'bundle2 part seek()'),
                    (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                    (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                    (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                    (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
                ]
            )
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1408 1408
1409 1409
@command(
    b'perf::changegroupchangelog|perfchangegroupchangelog',
    formatteropts
    + [
        (b'', b'cgversion', b'02', b'changegroup version'),
        (b'r', b'rev', b'', b'revisions to add to changegroup'),
    ],
)
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        # consume the generator so the changelog chunks are actually produced
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()
1445 1445
1446 1446
@command(b'perf::dirs|perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark the computation of the dirstate directory map (`_dirs`)"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate  # load the dirstate outside of the timed section

    def d():
        dirstate.hasdir(b'a')
        # drop the directory cache so the next run recomputes it from scratch
        try:
            del dirstate._map._dirs
        except AttributeError:
            # some dirstate map implementations have no such cache attribute
            pass

    timer(d)
    fm.end()
1463 1463
1464 1464
@command(
    b'perf::dirstate|perfdirstate',
    [
        (
            b'',
            b'iteration',
            None,
            b'benchmark a full iteration for the dirstate',
        ),
        (
            b'',
            b'contains',
            None,
            b'benchmark a large amount of `nf in dirstate` calls',
        ),
    ]
    + formatteropts,
)
def perfdirstate(ui, repo, **opts):
    """benchmark the time of various dirstate operations

    By default benchmark the time necessary to load a dirstate from scratch.
    The dirstate is loaded to the point where a "contains" request can be
    answered.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate  # load the dirstate outside of the timed section

    if opts[b'iteration'] and opts[b'contains']:
        msg = b'only specify one of --iteration or --contains'
        raise error.Abort(msg)

    if opts[b'iteration']:
        # time a full iteration over the (already loaded) dirstate
        setup = None
        dirstate = repo.dirstate

        def d():
            for f in dirstate:
                pass

    elif opts[b'contains']:
        # time many membership checks, half hits and half guaranteed misses
        setup = None
        dirstate = repo.dirstate
        allfiles = list(dirstate)
        # also add file path that will be "missing" from the dirstate
        allfiles.extend([f[::-1] for f in allfiles])

        def d():
            for f in allfiles:
                f in dirstate

    else:
        # default: time a from-scratch load, invalidating between runs

        def setup():
            repo.dirstate.invalidate()

        def d():
            b"a" in repo.dirstate

    timer(d, setup=setup)
    fm.end()
1527 1527
1528 1528
@command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark a 'dirstate.hasdir' call from an empty `dirs` cache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo.dirstate.hasdir(b"a")  # warm up outside of the timed section

    def setup():
        # drop the directory cache so each run rebuilds it
        try:
            del repo.dirstate._map._dirs
        except AttributeError:
            pass

    def d():
        repo.dirstate.hasdir(b"a")

    timer(d, setup=setup)
    fm.end()
1547 1547
1548 1548
@command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.filefoldmap.get()` request

    The dirstate filefoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    dirstate._map.filefoldmap.get(b'a')  # warm up outside of the timing

    def setup():
        # drop the (propertycache) foldmap so each run rebuilds it
        del dirstate._map.filefoldmap

    def d():
        dirstate._map.filefoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()
1568 1568
1569 1569
@command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.dirfoldmap.get()` request

    The dirstate dirfoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    dirstate._map.dirfoldmap.get(b'a')  # warm up outside of the timing

    def setup():
        del dirstate._map.dirfoldmap
        # the dirfoldmap is derived from `_dirs`; drop that cache too
        try:
            del dirstate._map._dirs
        except AttributeError:
            pass

    def d():
        dirstate._map.dirfoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()
1593 1593
1594 1594
@command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark the time it takes to write a dirstate on disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    b"a" in ds  # load the dirstate outside of the timed section

    def setup():
        # pretend the dirstate changed so write() actually writes
        ds._dirty = True

    def d():
        ds.write(repo.currenttransaction())

    with repo.wlock():
        timer(d, setup=setup)
    fm.end()
1612 1612
1613 1613
def _getmergerevs(repo, opts):
    """parse command arguments to return the revisions involved in a merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        fromrev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[fromrev]
    else:
        # no explicit base: use the common ancestor of the two sides
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
1635 1635
1636 1636
@command(
    b'perf::mergecalculate|perfmergecalculate',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecalculate(ui, repo, **opts):
    """benchmark the merge action computation (`merge.calculateupdates`)"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(
            repo,
            wctx,
            rctx,
            [ancestor],
            branchmerge=False,
            force=False,
            acceptremote=True,
            followcopies=True,
        )

    timer(d)
    fm.end()
1668 1668
1669 1669
@command(
    b'perf::mergecopies|perfmergecopies',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        # only the copy detection itself is timed; the contexts were
        # resolved above, outside of the benchmark loop
        copies.mergecopies(repo, wctx, rctx, ancestor)

    timer(d)
    fm.end()
1692 1692
1693 1693
@command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    source_ctx = scmutil.revsingle(repo, rev1, rev1)
    dest_ctx = scmutil.revsingle(repo, rev2, rev2)

    def run():
        # only the copy tracing itself is timed; context lookup happened above
        copies.pathcopies(source_ctx, dest_ctx)

    timer(run)
    fm.end()
1707 1707
1708 1708
@command(
    b'perf::phases|perfphases',
    [
        (b'', b'full', False, b'include file reading time too'),
    ],
    b"",
)
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')
    tip_rev = repo.changelog.tiprev()

    def d():
        phases = _phases
        if full:
            # with --full, also drop the filecache entry so the phase roots
            # file is re-read from disk on every run
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.phase(repo, tip_rev)

    timer(d)
    fm.end()
1734 1734
1735 1735
@command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import bin
    from mercurial import (
        exchange,
        hg,
        phases,
    )

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            b'default repository not configured!',
            hint=b"see 'hg help config.paths'",
        )
    # newer Mercurial uses path objects with an explicit push variant
    if util.safehasattr(path, 'main_path'):
        path = path.get_push_variant()
        dest = path.loc
    else:
        dest = path.pushloc or path.loc
    ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(
            b'listkeys', {b'namespace': b'phases'}
        ).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.statusnoi18n(b'publishing: yes\n')
    else:
        ui.statusnoi18n(b'publishing: no\n')

    has_node = getattr(repo.changelog.index, 'has_node', None)
    if has_node is None:
        # historical portability: fall back to the nodemap API
        has_node = repo.changelog.nodemap.__contains__
    nonpublishroots = 0
    # use items() instead of iteritems(): dict.iteritems does not exist on
    # Python 3 and items() behaves correctly on both Python 2 and 3
    for nhex, phase in remotephases.items():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if has_node(node) and int(phase):
            nonpublishroots += 1
    ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
    ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)

    def d():
        phases.remotephasessummary(repo, remotesubset, remotephases)

    timer(d)
    fm.end()
1798 1798
1799 1799
@command(
    b'perf::manifest|perfmanifest',
    [
        (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
        (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ]
    + formatteropts,
    b'REV|NODE',
)
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # REV names a changeset; resolve it to its manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # full hexadecimal node of the manifest itself
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    # historical portability: pre-getstorage Mercurial
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(
                    b'manifest revision must be integer or full node'
                )

    def d():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()

    timer(d)
    fm.end()
1843 1843
1844 1844
@command(b'perf::changeset|perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading and parsing a single changelog entry"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    n = scmutil.revsingle(repo, rev).node()

    def d():
        repo.changelog.read(n)
        # repo.changelog._cache = None

    timer(d)
    fm.end()
1857 1857
1858 1858
@command(b'perf::ignore|perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def prepare():
        # force the ignore matcher to be rebuilt from scratch on each run
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def run():
        dirstate._ignore

    timer(run, setup=prepare, title=b"load")
    fm.end()
1875 1875
1876 1876
@command(
    b'perf::index|perfindex',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'no-lookup', None, b'do not revision lookup post creation'),
    ]
    + formatteropts,
)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:

    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        if opts['rev']:
            raise error.Abort('--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func

    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)

    def d():
        # the timed section: index creation plus the requested lookups
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)

    timer(d, setup=setup)
    fm.end()
1939 1939
1940 1940
@command(
    b'perf::nodemap|perfnodemap',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ]
    + formatteropts,
)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]

    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        cl = makecl(unfi)
        # historical portability: prefer the index API, fall back to nodemap
        if util.safehasattr(cl.index, 'get_rev'):
            nodeget[0] = cl.index.get_rev
        else:
            nodeget[0] = cl.nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:

        def setup():
            setnodeget()

    else:
        setnodeget()
        d()  # prewarm the data structure

    timer(d, setup=setup)
    fm.end()
2011 2011
2012 2012
@command(b'perf::startup|perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark the time to start (and exit) a plain `hg version` process"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def d():
        if os.name != 'nt':
            # neutralize HGRCPATH so user configuration does not skew runs
            os.system(
                b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
            )
        else:
            os.environ['HGRCPATH'] = r' '
            os.system("%s version -q > NUL" % sys.argv[0])

    timer(d)
    fm.end()
2029 2029
2030 2030
def _find_stream_generator(version):
    """find the proper generator function for this stream version

    Returns a callable taking a repo and yielding stream-clone data chunks.
    ``version`` may be ``b"v1"``, ``b"v2"``, ``b"v3-exp"`` or ``b"latest"``
    (the highest non-experimental version this Mercurial provides).

    Raises error.Abort when the requested version is unknown or not
    available in the running Mercurial.
    """
    import mercurial.streamclone

    # map of version identifier -> generator callable
    available = {}

    # try to fetch a v1 generator; it is used directly (no wrapping needed).
    # NOTE: the previous code also defined a dead inner `generate` here that
    # wrongly called generatev2 — it was never used and has been removed.
    generatev1 = getattr(mercurial.streamclone, "generatev1", None)
    if generatev1 is not None:
        available[b'v1'] = generatev1
    # try to fetch a v2 generator
    generatev2 = getattr(mercurial.streamclone, "generatev2", None)
    if generatev2 is not None:

        def generate_v2(repo):
            entries, bytes, data = generatev2(repo, None, None, True)
            return data

        available[b'v2'] = generate_v2
    # try to fetch a v3 generator
    generatev3 = getattr(mercurial.streamclone, "generatev3", None)
    if generatev3 is not None:

        def generate_v3(repo):
            entries, bytes, data = generatev3(repo, None, None, True)
            return data

        available[b'v3-exp'] = generate_v3

    # resolve the request
    if version == b"latest":
        # latest is the highest non experimental version
        latest_key = max(v for v in available if b'-exp' not in v)
        return available[latest_key]
    elif version in available:
        return available[version]
    else:
        msg = b"unknown or unavailable version: %s"
        msg %= version
        hint = b"available versions: %s"
        hint %= b', '.join(sorted(available))
        raise error.Abort(msg, hint=hint)
2078 2078
2079 2079
@command(
    b'perf::stream-locked-section',
    [
        (
            b'',
            b'stream-version',
            b'latest',
            b'stream version to use ("v1", "v2", "v3" or "latest", (the default))',
        ),
    ]
    + formatteropts,
)
def perf_stream_clone_scan(ui, repo, stream_version, **opts):
    """benchmark the initial, repo-locked, section of a stream-clone"""

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    generate = _find_stream_generator(stream_version)

    # hold the produced generator outside the timed section: deleting it may
    # trigger cleanup work that we do not want to measure
    holder = [None]

    def reset():
        holder[0] = None

    def scan():
        # the lock is held for the duration the initialisation
        holder[0] = generate(repo)

    timer(scan, setup=reset, title=b"load")
    fm.end()
2113 2113
2114 2114
@command(
    b'perf::stream-generate',
    [
        (
            b'',
            b'stream-version',
            b'latest',
            # fixed help-string typo ("to us" -> "to use") and documented the
            # v3 value, matching the sibling perf::stream-locked-section flag
            b'stream version to use ("v1", "v2", "v3" or "latest", (the default))',
        ),
    ]
    + formatteropts,
)
def perf_stream_clone_generate(ui, repo, stream_version, **opts):
    """benchmark the full generation of a stream clone

    The whole stream is produced and iterated, but nothing is applied.
    """

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # resolve the generator outside of the timed section
    generate = _find_stream_generator(stream_version)

    def runone():
        # the lock is held for the duration the initialisation
        for chunk in generate(repo):
            pass

    timer(runone, title=b"generate")
    fm.end()
2145 2145
2146 2146
@command(
    b'perf::stream-consume',
    formatteropts,
)
def perf_stream_clone_consume(ui, repo, filename, **opts):
    """benchmark the full application of a stream clone

    This include the creation of the repository
    """
    # try except to appease check code
    msg = b"mercurial too old, missing necessary module: %s"
    try:
        from mercurial import bundle2
    except ImportError as exc:
        msg %= _bytestr(exc)
        raise error.Abort(msg)
    try:
        from mercurial import exchange
    except ImportError as exc:
        msg %= _bytestr(exc)
        raise error.Abort(msg)
    try:
        from mercurial import hg
    except ImportError as exc:
        msg %= _bytestr(exc)
        raise error.Abort(msg)
    try:
        from mercurial import localrepo
    except ImportError as exc:
        msg %= _bytestr(exc)
        raise error.Abort(msg)

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # deletion of the generator may trigger some cleanup that we do not want to
    # measure
    if not (os.path.isfile(filename) and os.access(filename, os.R_OK)):
        raise error.Abort("not a readable file: %s" % filename)

    # run_variables[0] is the open bundle file, run_variables[1] the scratch
    # target directory; both are set up per-run by the context manager below
    run_variables = [None, None]

    @contextlib.contextmanager
    def context():
        # open the bundle and create the throw-away directory outside of the
        # timed section; both are torn down automatically afterwards
        with open(filename, mode='rb') as bundle:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tmp_dir = fsencode(tmp_dir)
                run_variables[0] = bundle
                run_variables[1] = tmp_dir
                yield
                run_variables[0] = None
                run_variables[1] = None

    def runone():
        bundle = run_variables[0]
        tmp_dir = run_variables[1]
        # only pass ui when no srcrepo
        localrepo.createrepository(
            repo.ui, tmp_dir, requirements=repo.requirements
        )
        target = hg.repository(repo.ui, tmp_dir)
        gen = exchange.readbundle(target.ui, bundle, bundle.name)
        # stream v1
        if util.safehasattr(gen, 'apply'):
            gen.apply(target)
        else:
            with target.transaction(b"perf::stream-consume") as tr:
                bundle2.applybundle(
                    target,
                    gen,
                    tr,
                    source=b'unbundle',
                    url=filename,
                )

    timer(runone, context=context, title=b"consume")
    fm.end()
2224 2224
2225 2225
@command(b'perf::parents|perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nodelist = [repo.changelog.node(i) for i in _xrange(count)]

    def fetch_parents():
        for node in nodelist:
            repo.changelog.parents(node)

    timer(fetch_parents)
    fm.end()
2251 2251
2252 2252
@command(b'perf::ctxfiles|perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    # benchmark computing the file list of a single changectx
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)

    def list_files():
        len(repo[rev].files())

    timer(list_files)
    fm.end()
2264 2264
2265 2265
@command(b'perf::rawfiles|perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    # benchmark reading the raw file list straight from a changelog entry
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def read_files():
        # index 3 of the parsed changelog entry is the file list
        len(cl.read(rev)[3])

    timer(read_files)
    fm.end()
2278 2278
2279 2279
@command(b'perf::lookup|perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    # benchmark resolving a revision identifier to its node
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def resolve():
        return len(repo.lookup(rev))

    timer(resolve)
    fm.end()
2286 2286
2287 2287
@command(
    b'perf::linelogedits|perflinelogedits',
    [
        (b'n', b'edits', 10000, b'number of edits'),
        (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
    ],
    norepo=True,
)
def perflinelogedits(ui, **opts):
    """benchmark applying a long series of random edits to a linelog"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed so every run replays the same edit sequence; the randint
    # calls below must stay in exactly this order for reproducibility
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        # (a1, a2) is the replaced range in the current text, (b1, b2) the
        # replacement range; track the running line count so ranges stay valid
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        # replay the pre-computed edits against a fresh linelog
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
2325 2325
2326 2326
@command(b'perf::revrange|perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    # benchmark resolving revset specs through scmutil.revrange
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # bind locally so attribute lookup is not part of the timed call
    revrange = scmutil.revrange

    def resolve():
        return len(revrange(repo, specs))

    timer(resolve)
    fm.end()
2334 2334
2335 2335
@command(b'perf::nodelookup|perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    # benchmark node -> revision lookup on a cold changelog
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog

    # disable lazy parser in old hg
    mercurial.revlog._prereadsize = 2 ** 24
    node = scmutil.revsingle(repo, rev).node()

    # the revlog constructor signature changed over time; try modern first
    try:
        cl = revlog(getsvfs(repo), radix=b"00changelog")
    except TypeError:
        cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")

    def lookup():
        cl.rev(node)
        # drop caches so every iteration pays the full lookup cost
        clearcaches(cl)

    timer(lookup)
    fm.end()
2356 2356
2357 2357
@command(
    b'perf::log|perflog',
    [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
)
def perflog(ui, repo, rev=None, **opts):
    # benchmark a plain `hg log`, optionally with rename following
    opts = _byteskwargs(opts)
    rev = [] if rev is None else rev
    timer, fm = gettimer(ui, opts)
    # buffer the command output so printing does not dominate the timing
    ui.pushbuffer()

    def run_log():
        commands.log(
            ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
        )

    timer(run_log)
    ui.popbuffer()
    fm.end()
2375 2375
2376 2376
@command(b'perf::moonwalk|perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def walk_backwards():
        last = len(repo) - 1
        for rev in repo.changelog.revs(start=last, stop=-1):
            ctx = repo[rev]
            ctx.branch()  # read changelog data (in addition to the index)

    timer(walk_backwards)
    fm.end()
2393 2393
2394 2394
@command(
    b'perf::templating|perftemplating',
    [
        (b'r', b'rev', [], b'revisions to run the template on'),
    ]
    + formatteropts,
)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort(
            b"perftemplating not available with this Mercurial",
            hint=b"use 4.3 or later",
        )

    opts = _byteskwargs(opts)

    # render into a muted UI so output cost is not part of the measurement
    nullui = ui.copy()
    nullui.fout = open(os.devnull, 'wb')
    nullui.disablepager()
    revs = opts.get(b'rev') or [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (
        b'{date|shortdate} [{rev}:{node|short}]'
        b' {author|person}: {desc|firstline}\n'
    )
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)

    def render_all():
        for rev in revs:
            ctx = repo[rev]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(render_all)
    fm.end()
2437 2437
2438 2438
def _displaystats(ui, opts, entries, data):
    """render distribution statistics gathered by the perfhelper-* commands

    ``entries`` is a list of ``(key, title)`` pairs; ``data`` maps each
    ``key`` to a list of ``(value, ...context)`` tuples.  For every key,
    print min / percentiles / max of the measured values.
    """
    # use a second formatter because the data are quite different, not sure
    # how it flies with the templater.
    fm = ui.formatter(b'perf-stats', opts)
    for key, title in entries:
        values = data[key]
        # BUG FIX: percentile indexes must be computed from the number of
        # collected values, not the number of keys in ``data`` (the old
        # ``len(data)`` made every percentile collapse to values[0]).
        nbvalues = len(values)
        values.sort()
        stats = {
            'key': key,
            'title': title,
            'nbitems': len(values),
            'min': values[0][0],
            '10%': values[(nbvalues * 10) // 100][0],
            '25%': values[(nbvalues * 25) // 100][0],
            '50%': values[(nbvalues * 50) // 100][0],
            '75%': values[(nbvalues * 75) // 100][0],
            '80%': values[(nbvalues * 80) // 100][0],
            '85%': values[(nbvalues * 85) // 100][0],
            '90%': values[(nbvalues * 90) // 100][0],
            '95%': values[(nbvalues * 95) // 100][0],
            '99%': values[(nbvalues * 99) // 100][0],
            'max': values[-1][0],
        }
        fm.startitem()
        fm.data(**stats)
        # make node pretty for the human output
        fm.plain('### %s (%d items)\n' % (title, len(values)))
        lines = [
            'min',
            '10%',
            '25%',
            '50%',
            '75%',
            '80%',
            '85%',
            '90%',
            '95%',
            '99%',
            'max',
        ]
        for l in lines:
            fm.plain('%s: %s\n' % (l, stats[l]))
    fm.end()
2483 2483
2484 2484
@command(
    b'perf::helper-mergecopies|perfhelper-mergecopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelpermergecopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perfmergecopies`

    This command find (base, p1, p2) triplet relevant for copytracing
    benchmarking in the context of a merge. It reports values for some of the
    parameters that impact merge copy tracing time during merge.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision triplets are very costly.
    """
    # NOTE: the mutable default for ``revs`` is harmless here — the function
    # only rebinds it below, never mutates it in place.
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # (column header, %-format) pairs describing each output column
    output_template = [
        ("base", "%(base)12s"),
        ("p1", "%(p1.node)12s"),
        ("p2", "%(p2.node)12s"),
        ("p1.nb-revs", "%(p1.nbrevs)12d"),
        ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
        ("p1.renames", "%(p1.renamedfiles)12d"),
        ("p1.time", "%(p1.time)12.3f"),
        ("p2.nb-revs", "%(p2.nbrevs)12d"),
        ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
        ("p2.renames", "%(p2.renamedfiles)12d"),
        ("p2.time", "%(p2.time)12.3f"),
        ("renames", "%(nbrenamedfiles)12d"),
        ("total.time", "%(time)12.3f"),
    ]
    if not dotiming:
        # without --timing, drop the timing and rename columns
        output_template = [
            i
            for i in output_template
            if not ('time' in i[0] or 'renames' in i[0])
        ]
    header_names = [h for (h, v) in output_template]
    output = ' '.join([v for (h, v) in output_template]) + '\n'
    header = ' '.join(['%12s'] * len(header_names)) + '\n'
    fm.plain(header % tuple(header_names))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        # accumulators for the statistics table printed at the end
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['parentnbrenames'] = []
            alldata['totalnbrenames'] = []
            alldata['parenttime'] = []
            alldata['totaltime'] = []

    # only merge revisions yield interesting (base, p1, p2) triplets
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1()
        p2 = ctx.p2()
        bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
        for b in bases:
            b = repo[b]
            p1missing = copies._computeforwardmissing(b, p1)
            p2missing = copies._computeforwardmissing(b, p2)
            data = {
                b'base': b.hex(),
                b'p1.node': p1.hex(),
                b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
                b'p1.nbmissingfiles': len(p1missing),
                b'p2.node': p2.hex(),
                b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
                b'p2.nbmissingfiles': len(p2missing),
            }
            if dostats:
                if p1missing:
                    alldata['nbrevs'].append(
                        (data['p1.nbrevs'], b.hex(), p1.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p1.nbmissingfiles'], b.hex(), p1.hex())
                    )
                if p2missing:
                    alldata['nbrevs'].append(
                        (data['p2.nbrevs'], b.hex(), p2.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p2.nbmissingfiles'], b.hex(), p2.hex())
                    )
            if dotiming:
                begin = util.timer()
                mergedata = copies.mergecopies(repo, p1, p2, b)
                end = util.timer()
                # not very stable timing since we did only one run
                data['time'] = end - begin
                # mergedata contains five dicts: "copy", "movewithdir",
                # "diverge", "renamedelete" and "dirmove".
                # The first 4 are about renamed file so lets count that.
                renames = len(mergedata[0])
                renames += len(mergedata[1])
                renames += len(mergedata[2])
                renames += len(mergedata[3])
                data['nbrenamedfiles'] = renames
                begin = util.timer()
                p1renames = copies.pathcopies(b, p1)
                end = util.timer()
                data['p1.time'] = end - begin
                begin = util.timer()
                p2renames = copies.pathcopies(b, p2)
                end = util.timer()
                data['p2.time'] = end - begin
                data['p1.renamedfiles'] = len(p1renames)
                data['p2.renamedfiles'] = len(p2renames)

                if dostats:
                    if p1missing:
                        alldata['parentnbrenames'].append(
                            (data['p1.renamedfiles'], b.hex(), p1.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p1.time'], b.hex(), p1.hex())
                        )
                    if p2missing:
                        alldata['parentnbrenames'].append(
                            (data['p2.renamedfiles'], b.hex(), p2.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p2.time'], b.hex(), p2.hex())
                        )
                    if p1missing or p2missing:
                        alldata['totalnbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                b.hex(),
                                p1.hex(),
                                p2.hex(),
                            )
                        )
                        alldata['totaltime'].append(
                            (data['time'], b.hex(), p1.hex(), p2.hex())
                        )
            fm.startitem()
            fm.data(**data)
            # make node pretty for the human output
            out = data.copy()
            out['base'] = fm.hexfunc(b.node())
            out['p1.node'] = fm.hexfunc(p1.node())
            out['p2.node'] = fm.hexfunc(p2.node())
            fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(
                ('parentnbrenames', 'rename from one parent to base')
            )
            entries.append(('totalnbrenames', 'total number of renames'))
            entries.append(('parenttime', 'time for one parent'))
            entries.append(('totaltime', 'time for both parents'))
        _displaystats(ui, opts, entries, alldata)
2666 2666
2667 2667
@command(
    b'perf::helper-pathcopies|perfhelper-pathcopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    # NOTE: the mutable default for ``revs`` is harmless — it is only
    # rebound below, never mutated in place.
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # pick the output layout depending on whether timing columns are present
    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d "
            "%(nbrenamedfiles)12d %(time)18.5f\n"
        )
        header_names = (
            "source",
            "destination",
            "nb-revs",
            "nb-files",
            "nb-renames",
            "time",
        )
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d\n"
        )
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        # accumulators for the statistics table printed at the end
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['nbrenames'] = []
            alldata['time'] = []

    # only merge revisions yield interesting (base, parent) pairs
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    # nothing to trace between this base and parent
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
                    b'nbmissingfiles': len(missing),
                }
                if dostats:
                    alldata['nbrevs'].append(
                        (
                            data['nbrevs'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                    alldata['nbmissingfiles'].append(
                        (
                            data['nbmissingfiles'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                    if dostats:
                        alldata['time'].append(
                            (
                                data['time'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                        alldata['nbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                fm.startitem()
                fm.data(**data)
                # make nodes pretty for the human output
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
    if dostats:
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('nbrenames', 'renamed files'))
            entries.append(('time', 'time'))
        _displaystats(ui, opts, entries, alldata)
2806 2806
2807 2807
@command(b'perf::cca|perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    # benchmark constructing a case-collision auditor over the dirstate
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def audit():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(audit)
    fm.end()
2814 2814
2815 2815
@command(b'perf::fncacheload|perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    # benchmark parsing the fncache file from disk
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store

    def load():
        store.fncache._load()

    timer(load)
    fm.end()
2827 2827
2828 2828
@command(b'perf::fncachewrite|perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark writing the fncache file to disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    # writes happen within a transaction, which requires the store lock
    lock = repo.lock()
    s.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    # keep a backup so the benchmark leaves the real fncache untouched
    tr.addbackup(b'fncache')

    def d():
        # force the dirty flag so every run actually rewrites the file
        s.fncache._dirty = True
        s.fncache.write(tr)

    timer(d)
    tr.close()
    lock.release()
    fm.end()
2847 2847
2848 2848
@command(b'perf::fncacheencode|perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    # benchmark path encoding for every entry currently in the fncache
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    store.fncache._load()

    def encode_all():
        for path in store.fncache.entries:
            store.encode(path)

    timer(encode_all)
    fm.end()
2862 2862
2863 2863
def _bdiffworker(q, blocks, xdiff, ready, done):
    """worker loop for the threaded mode of perfbdiff

    Pulls ``(text1, text2)`` pairs from queue ``q`` and diffs them until a
    ``None`` sentinel is seen, then parks on the ``ready`` condition until
    the driver wakes all workers for the next timed run.  ``done`` signals
    final shutdown.  ``blocks``/``xdiff`` select which diff routine to time.
    """
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done()  # for the None one
        with ready:
            ready.wait()
2879 2879
2880 2880
def _manifestrevision(repo, mnode):
    """return the raw manifest text for manifest node ``mnode``."""
    ml = repo.manifestlog
    if util.safehasattr(ml, b'getstorage'):
        # modern manifestlog API
        return ml.getstorage(b'').revision(mnode)
    # older Mercurial exposed the revlog directly
    return ml._revlog.revision(mnode)
2890 2890
2891 2891
@command(
    b'perf::bdiff|perfbdiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
        (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
        (b'', b'blocks', False, b'test computing diffs into blocks'),
        (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],
    b'-c|-m|FILE REV',
)
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        # --alldata implies operating on the changelog
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # with -c/-m the sole positional argument is the revision
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    # pre-load every text pair so only the diffing itself is timed
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:

        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)

    else:
        q = queue()
        # one leading None sentinel per worker so every thread drains it and
        # parks on `ready` before the first timed run starts
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(
                target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
            ).start()
        q.join()

        def d():
            # feed all pairs, terminate each worker's run with a sentinel,
            # wake everyone, then wait for the whole batch to be processed
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # shut the worker threads down: signal completion, unblock their
        # queue reads and release them from the condition wait
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
3007 3007
@command(
    b'perf::unbundle',
    [
        (b'', b'as-push', None, b'pretend the bundle comes from a push'),
    ]
    + formatteropts,
    b'BUNDLE_FILE',
)
def perf_unbundle(ui, repo, fname, **opts):
    """benchmark application of a bundle in a repository.

    This does not include the final transaction processing

    The --as-push option makes the unbundle operation appear as if it comes
    from a client push. It changes some aspects of the processing and the
    associated performance profile.
    """

    from mercurial import exchange
    from mercurial import bundle2
    from mercurial import transaction

    opts = _byteskwargs(opts)

    ### some compatibility hotfix
    #
    # the data attribute is dropped in 63edc384d3b7 a changeset introducing a
    # critical regression that break transaction rollback for files that are
    # de-inlined.
    method = transaction.transaction._addentry
    pre_63edc384d3b7 = "data" in getargspec(method).args
    # the `detailed_exit_code` attribute is introduced in 33c0c25d0b0f
    # a changeset that is a close descendant of 18415fc918a1, the changeset
    # that conclude the fix run for the bug introduced in 63edc384d3b7.
    args = getargspec(error.Abort.__init__).args
    post_18415fc918a1 = "detailed_exit_code" in args

    # pretend to be a push when requested, so the server-side code paths
    # (hooks, permission checks, pushkey parts) are exercised
    unbundle_source = b'perf::unbundle'
    if opts[b'as_push']:
        unbundle_source = b'push'

    old_max_inline = None
    try:
        if not (pre_63edc384d3b7 or post_18415fc918a1):
            # disable inlining
            old_max_inline = mercurial.revlog._maxinline
            # large enough to never happen
            mercurial.revlog._maxinline = 2 ** 50

        with repo.lock():
            bundle = [None, None]
            orig_quiet = repo.ui.quiet
            try:
                repo.ui.quiet = True
                with open(fname, mode="rb") as f:

                    def noop_report(*args, **kwargs):
                        pass

                    def setup():
                        # abort any transaction left over from the previous
                        # run before re-reading the bundle from the start
                        gen, tr = bundle
                        if tr is not None:
                            tr.abort()
                        bundle[:] = [None, None]
                        f.seek(0)
                        bundle[0] = exchange.readbundle(ui, f, fname)
                        bundle[1] = repo.transaction(b'perf::unbundle')
                        # silence the transaction
                        bundle[1]._report = noop_report

                    def apply():
                        gen, tr = bundle
                        bundle2.applybundle(
                            repo,
                            gen,
                            tr,
                            source=unbundle_source,
                            url=fname,
                        )

                    timer, fm = gettimer(ui, opts)
                    timer(apply, setup=setup)
                    fm.end()
            finally:
                # FIX: this used `==` (a no-op comparison), so quiet mode was
                # never restored after the benchmark; it must be an assignment.
                repo.ui.quiet = orig_quiet
                gen, tr = bundle
                if tr is not None:
                    tr.abort()
    finally:
        if old_max_inline is not None:
            mercurial.revlog._maxinline = old_max_inline
3087 3099
3088 3100
@command(
    b'perf::unidiff|perfunidiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ],
    b'-c|-m|FILE REV',
)
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        # --alldata implies operating on the changelog
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # with -c/-m the positional argument is the revision, not a file
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    # (left, right) text pairs to diff; collected up front so only the
    # diffing itself is timed
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                # change holds ((old node, old flags), (new node, new flags));
                # `or -1` maps a missing node to the null revision
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False
            )
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
3167 3179
3168 3180
@command(b'perf::diffwd|perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # map a single-letter diff flag to the matching diff option name
    flag_to_option = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    # benchmark `hg diff` under several whitespace-handling configurations
    for flags in ('', 'w', 'b', 'B', 'wB'):
        diff_kwargs = {flag_to_option[letter]: b'1' for letter in flags}

        def run(diff_kwargs=diff_kwargs):
            # swallow the diff output so only the computation is measured
            ui.pushbuffer()
            commands.diff(ui, repo, **diff_kwargs)
            ui.popbuffer()

        encoded = flags.encode('ascii')
        if encoded:
            title = b'diffopts: %s' % (b'-' + encoded)
        else:
            title = b'diffopts: none'
        timer(run, title=title)
    fm.end()
3192 3204
3193 3205
@command(
    b'perf::revlogindex|perfrevlogindex',
    revlogopts + formatteropts,
    b'-c|-m|FILE',
)
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    # compat with hg <= 5.8
    radix = getattr(rl, 'radix', None)
    indexfile = getattr(rl, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(rl, 'indexfile')
    data = opener.read(indexfile)

    # the first 4 bytes of the index encode flags (high 16 bits) and the
    # revlog version (low 16 bits)
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        inline = header & (1 << 16)
    else:
        raise error.Abort(b'unsupported revlog version: %d' % version)

    parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
    if parse_index_v1 is None:
        # compat with hg versions where parsing went through revlogio
        parse_index_v1 = mercurial.revlog.revlogio().parseindex

    rllen = len(rl)

    # sample nodes spread across the revlog to measure lookup cost at
    # different depths
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        # time instantiating a revlog object from the existing files
        if radix is not None:
            revlog(opener, radix=radix)
        else:
            # hg <= 5.8
            revlog(opener, indexfile=indexfile)

    def read():
        # raw read of the index file, no parsing
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        parse_index_v1(data, inline)

    def getentry(revornode):
        index = parse_index_v1(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = parse_index_v1(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        index = parse_index_v1(data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            # older indexes expose a nodemap instead of a rev() method
            nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        try:
            rev(node)
        except error.RevlogError:
            # missing nodes are expected for the "missing node" benchmark
            pass

    def resolvenodes(nodes, count=1):
        index = parse_index_v1(data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        for i in range(count):
            for node in nodes:
                try:
                    rev(node)
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
        (
            lambda: resolvenodes(allnodesrev, 2),
            b'look up all nodes 2x (reverse)',
        ),
        (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
        (
            lambda: getentries(allrevs, 2),
            b'retrieve all index entries 2x (forward)',
        ),
        (
            lambda: getentries(allrevsrev),
            b'retrieve all index entries (reverse)',
        ),
        (
            lambda: getentries(allrevsrev, 2),
            b'retrieve all index entries 2x (reverse)',
        ),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3339 3351
3340 3352
@command(
    b'perf::revlogrevisions|perfrevlogrevisions',
    revlogopts
    + formatteropts
    + [
        (b'd', b'dist', 100, b'distance between the revisions'),
        (b's', b'startrev', 0, b'revision to start reading at'),
        (b'', b'reverse', False, b'read in reverse'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogrevisions(
    ui, repo, file_=None, startrev=0, reverse=False, **opts
):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # a negative start revision counts from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev

    def d():
        # drop any cached chunks so each run measures cold reads
        rl.clearcaches()

        beginrev = startrev
        endrev = rllen
        dist = opts[b'dist']

        if reverse:
            # walk from tip down to the start revision instead
            beginrev, endrev = endrev - 1, beginrev - 1
            dist = -1 * dist

        for x in _xrange(beginrev, endrev, dist):
            # Old revisions don't support passing int.
            n = rl.node(x)
            rl.revision(n)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
3389 3401
3390 3402
@command(
    b'perf::revlogwrite|perfrevlogwrite',
    revlogopts
    + formatteropts
    + [
        (b's', b'startrev', 1000, b'revision to start writing at'),
        (b'', b'stoprev', -1, b'last revision to write'),
        (b'', b'count', 3, b'number of passes to perform'),
        (b'', b'details', False, b'print timing for every revisions tested'),
        (b'', b'source', b'full', b'the kind of data feed in the revlog'),
        (b'', b'lazydeltabase', True, b'try the provided delta first'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
    (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    Note: This performance command measures performance in a custom way. As a
    result some of the global configuration of the 'perf' command does not
    apply to it:

    * ``pre-run``: disabled

    * ``profile-benchmark``: disabled

    * ``run-limits``: disabled use --count instead
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative revisions count from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (
        b'full',
        b'parent-1',
        b'parent-2',
        b'parent-smallest',
        b'storage',
    )
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        # FIX: error message previously read "invalide run count"
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(
            ui,
            rl,
            source,
            startrev,
            stoprev,
            c + 1,
            lazydeltabase=lazydeltabase,
            clearcaches=clearcaches,
        )
        allresults.append(timing)

    ### consolidate the results in a single list
    # results: list of (rev, [timing-per-pass]) with one timing per --count run
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", True)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # FIX: the 50th percentile was previously computed with a 70% index
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append(
            (
                sum(x[1][0] for x in item),
                sum(x[1][1] for x in item),
                sum(x[1][2] for x in item),
            )
        )
    formatone(
        fm,
        totaltime,
        title="total time (%d revs)" % resultcount,
        displayall=displayall,
    )
    fm.end()
3532 3544
3533 3545
3534 3546 class _faketr:
3535 3547 def add(s, x, y, z=None):
3536 3548 return None
3537 3549
3538 3550
def _timeonewrite(
    ui,
    orig,
    source,
    startrev,
    stoprev,
    runidx=None,
    lazydeltabase=True,
    clearcaches=True,
):
    """Perform one full write pass and return [(rev, timing), ...].

    Revisions ``startrev..stoprev`` of the ``orig`` revlog are re-added, one
    by one, to a truncated temporary copy (see ``_temprevlog``), timing each
    ``addrawrevision`` call individually.  ``source`` selects how the input
    data is fed (see ``_getrevisionseed``).
    """
    timings = []
    # no real transaction is needed: the destination revlog is a throwaway copy
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        if hasattr(dest, "delta_config"):
            dest.delta_config.lazy_delta_base = lazydeltabase
        else:
            # compat: older revlogs expose the flag directly
            dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)

            def updateprogress(pos):
                progress.update(pos)

            def completeprogress():
                progress.complete()

        else:

            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)

            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                # drop caches so every timed write starts cold
                dest.index.clearcaches()
                dest.clearcaches()
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            # r[0] holds the timing captured by the timeone context manager
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
3591 3603
3592 3604
def _getrevisionseed(orig, rev, tr, source):
    """Build the (args, kwargs) for ``addrawrevision`` for one revision.

    Depending on ``source``, the revision content is provided either as a
    full text or as a cached delta against a chosen base (p1, p2, smallest
    parent delta, or the delta already stored in ``orig``).
    """
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        baserev = orig.rev(p1)
        cachedelta = (baserev, orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to p1 when there is no second parent
        parent = p2
        if p2 == nullid:
            parent = p1
        baserev = orig.rev(parent)
        cachedelta = (baserev, orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        # pick whichever parent produces the shorter delta
        p1diff = orig.revdiff(p1, rev)
        parent = p1
        diff = p1diff
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(p1diff) > len(p2diff):
                parent = p2
                diff = p2diff
        baserev = orig.rev(parent)
        cachedelta = (baserev, diff)
    elif source == b'storage':
        # reuse the delta base already chosen by the revlog on disk
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return (
        (text, tr, linkrev, p1, p2),
        {'node': node, 'flags': flags, 'cachedelta': cachedelta},
    )
3633 3645
3634 3646
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Yield a temporary copy of revlog ``orig`` truncated at ``truncaterev``.

    The index and data files are copied into a temp directory, truncated so
    that revisions >= ``truncaterev`` are absent, and a fresh revlog is opened
    on the copies.  The temp directory is removed on exit.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    k = 'upperboundcomp'
    if util.safehasattr(orig, k):
        revlogkwargs[k] = getattr(orig, k)

    indexfile = getattr(orig, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(orig, 'indexfile')
    origindexpath = orig.opener.join(indexfile)

    datafile = getattr(orig, '_datafile', getattr(orig, 'datafile'))
    origdatapath = orig.opener.join(datafile)
    radix = b'revlog'
    indexname = b'revlog.i'
    dataname = b'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            # each index entry is fixed-size, so the cut point is a multiple
            # of the entry size
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        try:
            dest = revlog(vfs, radix=radix, **revlogkwargs)
        except TypeError:
            # compat: older revlog constructors take explicit file names
            dest = revlog(
                vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
            )
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
3695 3707
3696 3708
@command(
    b'perf::revlogchunks|perfrevlogchunks',
    revlogopts
    + formatteropts
    + [
        (b'e', b'engines', b'', b'compression engines to use'),
        (b's', b'startrev', 0, b'revision to start at'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # - _chunkraw was renamed to _getsegmentforrevs
    # - _getsegmentforrevs was moved on the inner object
    try:
        segmentforrevs = rl._inner.get_segment_for_revs
    except AttributeError:
        try:
            segmentforrevs = rl._getsegmentforrevs
        except AttributeError:
            segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = {e.strip() for e in engines.split(b',')}
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    # only keep engines that can actually compress revlog data
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    @contextlib.contextmanager
    def reading(rl):
        if getattr(rl, 'reading', None) is not None:
            with rl.reading():
                yield None
        elif rl._inline:
            indexfile = getattr(rl, '_indexfile', None)
            if indexfile is None:
                # compatibility with <= hg-5.8
                indexfile = getattr(rl, 'indexfile')
            yield getsvfs(repo)(indexfile)
        else:
            # FIX: the first lookup previously used 'datafile' twice, making
            # the fallback dead; modern revlogs name the attribute '_datafile'
            # (same compat dance as in _temprevlog).
            datafile = getattr(rl, '_datafile', getattr(rl, 'datafile'))
            yield getsvfs(repo)(datafile)

    if getattr(rl, 'reading', None) is not None:

        @contextlib.contextmanager
        def lazy_reading(rl):
            with rl.reading():
                yield

    else:

        @contextlib.contextmanager
        def lazy_reading(rl):
            yield

    def doread():
        rl.clearcaches()
        for rev in revs:
            with lazy_reading(rl):
                segmentforrevs(rev, rev)

    def doreadcachedfh():
        rl.clearcaches()
        with reading(rl) as fh:
            if fh is not None:
                for rev in revs:
                    segmentforrevs(rev, rev, df=fh)
            else:
                for rev in revs:
                    segmentforrevs(rev, rev)

    def doreadbatch():
        rl.clearcaches()
        with lazy_reading(rl):
            segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        with reading(rl) as fh:
            if fh is not None:
                segmentforrevs(revs[0], revs[-1], df=fh)
            else:
                segmentforrevs(revs[0], revs[-1])

    def dochunk():
        rl.clearcaches()
        # chunk used to be available directly on the revlog
        _chunk = getattr(rl, '_inner', rl)._chunk
        with reading(rl) as fh:
            if fh is not None:
                for rev in revs:
                    _chunk(rev, df=fh)
            else:
                for rev in revs:
                    _chunk(rev)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        _chunks = getattr(rl, '_inner', rl)._chunks
        with reading(rl) as fh:
            if fh is not None:
                # Save chunks as a side-effect.
                chunks[0] = _chunks(revs, df=fh)
            else:
                # Save chunks as a side-effect.
                chunks[0] = _chunks(revs)

    def docompress(compressor):
        rl.clearcaches()

        compressor_holder = getattr(rl, '_inner', rl)

        try:
            # Swap in the requested compression engine.
            oldcompressor = compressor_holder._compressor
            compressor_holder._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            compressor_holder._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append(
            (
                functools.partial(docompress, compressor),
                b'compress w/ %s' % engine,
            )
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3872 3884
3873 3885
@command(
    b'perf::revlogrevision|perfrevlogrevision',
    revlogopts
    + formatteropts
    + [(b'', b'cache', False, b'use caches instead of clearing')],
    b'-c|-m|FILE REV',
)
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    # With -c/-m the positional argument is the revision, not a file path.
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    # Probe from the newest to the oldest API for "historical portability".
    try:
        segmentforrevs = r._inner.get_segment_for_revs
    except AttributeError:
        try:
            segmentforrevs = r._getsegmentforrevs
        except AttributeError:
            segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    # Recent revlogs need an explicit reading context around segment access;
    # fall back to a no-op context manager when `reading()` does not exist.
    if getattr(r, 'reading', None) is not None:

        @contextlib.contextmanager
        def lazy_reading(r):
            with r.reading():
                yield

    else:

        @contextlib.contextmanager
        def lazy_reading(r):
            yield

    def getrawchunks(data, chain):
        # Re-slice the already-read segments into one buffer per revision,
        # without copying (util.buffer is a zero-copy view).
        start = r.start
        length = r.length
        inline = r._inline
        try:
            iosize = r.index.entry_size
        except AttributeError:
            # older versions expose the entry size on the io object
            iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    # Each do*() closure below isolates one phase of revision retrieval;
    # unless --cache is given, caches are cleared so each run is cold.
    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            with lazy_reading(r):
                segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        # pre-module-split location; may be None on very old versions
        slicechunk = getattr(revlog, '_slicechunk', None)

    size = r.length(rev)
    chain = r._deltachain(rev)[0]

    with_sparse_read = False
    if hasattr(r, 'data_config'):
        with_sparse_read = r.data_config.with_sparse_read
    elif hasattr(r, '_withsparseread'):
        with_sparse_read = r._withsparseread
    if with_sparse_read:
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    # Pre-compute every intermediate product once, so each benchmark below
    # measures only its own phase and not the preceding ones.
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._inner._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if with_sparse_read:
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend(
        [
            (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
            (lambda: dodecompress(rawchunks), b'decompress'),
            (lambda: dopatch(text, bins), b'patch'),
            (lambda: dohash(text), b'hash'),
        ]
    )

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
4041 4053
4042 4054
4043 4055 @command(
4044 4056 b'perf::revset|perfrevset',
4045 4057 [
4046 4058 (b'C', b'clear', False, b'clear volatile cache between each call.'),
4047 4059 (b'', b'contexts', False, b'obtain changectx for each revision'),
4048 4060 ]
4049 4061 + formatteropts,
4050 4062 b"REVSET",
4051 4063 )
4052 4064 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
4053 4065 """benchmark the execution time of a revset
4054 4066
4055 4067 Use the --clean option if need to evaluate the impact of build volatile
4056 4068 revisions set cache on the revset execution. Volatile cache hold filtered
4057 4069 and obsolete related cache."""
4058 4070 opts = _byteskwargs(opts)
4059 4071
4060 4072 timer, fm = gettimer(ui, opts)
4061 4073
4062 4074 def d():
4063 4075 if clear:
4064 4076 repo.invalidatevolatilesets()
4065 4077 if contexts:
4066 4078 for ctx in repo.set(expr):
4067 4079 pass
4068 4080 else:
4069 4081 for r in repo.revs(expr):
4070 4082 pass
4071 4083
4072 4084 timer(d)
4073 4085 fm.end()
4074 4086
4075 4087
4076 4088 @command(
4077 4089 b'perf::volatilesets|perfvolatilesets',
4078 4090 [
4079 4091 (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
4080 4092 ]
4081 4093 + formatteropts,
4082 4094 )
4083 4095 def perfvolatilesets(ui, repo, *names, **opts):
4084 4096 """benchmark the computation of various volatile set
4085 4097
4086 4098 Volatile set computes element related to filtering and obsolescence."""
4087 4099 opts = _byteskwargs(opts)
4088 4100 timer, fm = gettimer(ui, opts)
4089 4101 repo = repo.unfiltered()
4090 4102
4091 4103 def getobs(name):
4092 4104 def d():
4093 4105 repo.invalidatevolatilesets()
4094 4106 if opts[b'clear_obsstore']:
4095 4107 clearfilecache(repo, b'obsstore')
4096 4108 obsolete.getrevs(repo, name)
4097 4109
4098 4110 return d
4099 4111
4100 4112 allobs = sorted(obsolete.cachefuncs)
4101 4113 if names:
4102 4114 allobs = [n for n in allobs if n in names]
4103 4115
4104 4116 for name in allobs:
4105 4117 timer(getobs(name), title=name)
4106 4118
4107 4119 def getfiltered(name):
4108 4120 def d():
4109 4121 repo.invalidatevolatilesets()
4110 4122 if opts[b'clear_obsstore']:
4111 4123 clearfilecache(repo, b'obsstore')
4112 4124 repoview.filterrevs(repo, name)
4113 4125
4114 4126 return d
4115 4127
4116 4128 allfilter = sorted(repoview.filtertable)
4117 4129 if names:
4118 4130 allfilter = [n for n in allfilter if n in names]
4119 4131
4120 4132 for name in allfilter:
4121 4133 timer(getfiltered(name), title=name)
4122 4134 fm.end()
4123 4135
4124 4136
4125 4137 @command(
4126 4138 b'perf::branchmap|perfbranchmap',
4127 4139 [
4128 4140 (b'f', b'full', False, b'Includes build time of subset'),
4129 4141 (
4130 4142 b'',
4131 4143 b'clear-revbranch',
4132 4144 False,
4133 4145 b'purge the revbranch cache between computation',
4134 4146 ),
4135 4147 ]
4136 4148 + formatteropts,
4137 4149 )
4138 4150 def perfbranchmap(ui, repo, *filternames, **opts):
4139 4151 """benchmark the update of a branchmap
4140 4152
4141 4153 This benchmarks the full repo.branchmap() call with read and write disabled
4142 4154 """
4143 4155 opts = _byteskwargs(opts)
4144 4156 full = opts.get(b"full", False)
4145 4157 clear_revbranch = opts.get(b"clear_revbranch", False)
4146 4158 timer, fm = gettimer(ui, opts)
4147 4159
4148 4160 def getbranchmap(filtername):
4149 4161 """generate a benchmark function for the filtername"""
4150 4162 if filtername is None:
4151 4163 view = repo
4152 4164 else:
4153 4165 view = repo.filtered(filtername)
4154 4166 if util.safehasattr(view._branchcaches, '_per_filter'):
4155 4167 filtered = view._branchcaches._per_filter
4156 4168 else:
4157 4169 # older versions
4158 4170 filtered = view._branchcaches
4159 4171
4160 4172 def d():
4161 4173 if clear_revbranch:
4162 4174 repo.revbranchcache()._clear()
4163 4175 if full:
4164 4176 view._branchcaches.clear()
4165 4177 else:
4166 4178 filtered.pop(filtername, None)
4167 4179 view.branchmap()
4168 4180
4169 4181 return d
4170 4182
4171 4183 # add filter in smaller subset to bigger subset
4172 4184 possiblefilters = set(repoview.filtertable)
4173 4185 if filternames:
4174 4186 possiblefilters &= set(filternames)
4175 4187 subsettable = getbranchmapsubsettable()
4176 4188 allfilters = []
4177 4189 while possiblefilters:
4178 4190 for name in possiblefilters:
4179 4191 subset = subsettable.get(name)
4180 4192 if subset not in possiblefilters:
4181 4193 break
4182 4194 else:
4183 4195 assert False, b'subset cycle %s!' % possiblefilters
4184 4196 allfilters.append(name)
4185 4197 possiblefilters.remove(name)
4186 4198
4187 4199 # warm the cache
4188 4200 if not full:
4189 4201 for name in allfilters:
4190 4202 repo.filtered(name).branchmap()
4191 4203 if not filternames or b'unfiltered' in filternames:
4192 4204 # add unfiltered
4193 4205 allfilters.append(None)
4194 4206
4195 4207 if util.safehasattr(branchmap.branchcache, 'fromfile'):
4196 4208 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
4197 4209 branchcacheread.set(classmethod(lambda *args: None))
4198 4210 else:
4199 4211 # older versions
4200 4212 branchcacheread = safeattrsetter(branchmap, b'read')
4201 4213 branchcacheread.set(lambda *args: None)
4202 4214 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
4203 4215 branchcachewrite.set(lambda *args: None)
4204 4216 try:
4205 4217 for name in allfilters:
4206 4218 printname = name
4207 4219 if name is None:
4208 4220 printname = b'unfiltered'
4209 4221 timer(getbranchmap(name), title=printname)
4210 4222 finally:
4211 4223 branchcacheread.restore()
4212 4224 branchcachewrite.restore()
4213 4225 fm.end()
4214 4226
4215 4227
4216 4228 @command(
4217 4229 b'perf::branchmapupdate|perfbranchmapupdate',
4218 4230 [
4219 4231 (b'', b'base', [], b'subset of revision to start from'),
4220 4232 (b'', b'target', [], b'subset of revision to end with'),
4221 4233 (b'', b'clear-caches', False, b'clear cache between each runs'),
4222 4234 ]
4223 4235 + formatteropts,
4224 4236 )
4225 4237 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
4226 4238 """benchmark branchmap update from for <base> revs to <target> revs
4227 4239
4228 4240 If `--clear-caches` is passed, the following items will be reset before
4229 4241 each update:
4230 4242 * the changelog instance and associated indexes
4231 4243 * the rev-branch-cache instance
4232 4244
4233 4245 Examples:
4234 4246
4235 4247 # update for the one last revision
4236 4248 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
4237 4249
4238 4250 $ update for change coming with a new branch
4239 4251 $ hg perfbranchmapupdate --base 'stable' --target 'default'
4240 4252 """
4241 4253 from mercurial import branchmap
4242 4254 from mercurial import repoview
4243 4255
4244 4256 opts = _byteskwargs(opts)
4245 4257 timer, fm = gettimer(ui, opts)
4246 4258 clearcaches = opts[b'clear_caches']
4247 4259 unfi = repo.unfiltered()
4248 4260 x = [None] # used to pass data between closure
4249 4261
4250 4262 # we use a `list` here to avoid possible side effect from smartset
4251 4263 baserevs = list(scmutil.revrange(repo, base))
4252 4264 targetrevs = list(scmutil.revrange(repo, target))
4253 4265 if not baserevs:
4254 4266 raise error.Abort(b'no revisions selected for --base')
4255 4267 if not targetrevs:
4256 4268 raise error.Abort(b'no revisions selected for --target')
4257 4269
4258 4270 # make sure the target branchmap also contains the one in the base
4259 4271 targetrevs = list(set(baserevs) | set(targetrevs))
4260 4272 targetrevs.sort()
4261 4273
4262 4274 cl = repo.changelog
4263 4275 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
4264 4276 allbaserevs.sort()
4265 4277 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
4266 4278
4267 4279 newrevs = list(alltargetrevs.difference(allbaserevs))
4268 4280 newrevs.sort()
4269 4281
4270 4282 allrevs = frozenset(unfi.changelog.revs())
4271 4283 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
4272 4284 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
4273 4285
4274 4286 def basefilter(repo, visibilityexceptions=None):
4275 4287 return basefilterrevs
4276 4288
4277 4289 def targetfilter(repo, visibilityexceptions=None):
4278 4290 return targetfilterrevs
4279 4291
4280 4292 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
4281 4293 ui.status(msg % (len(allbaserevs), len(newrevs)))
4282 4294 if targetfilterrevs:
4283 4295 msg = b'(%d revisions still filtered)\n'
4284 4296 ui.status(msg % len(targetfilterrevs))
4285 4297
4286 4298 try:
4287 4299 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
4288 4300 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
4289 4301
4290 4302 baserepo = repo.filtered(b'__perf_branchmap_update_base')
4291 4303 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
4292 4304
4293 4305 # try to find an existing branchmap to reuse
4294 4306 subsettable = getbranchmapsubsettable()
4295 4307 candidatefilter = subsettable.get(None)
4296 4308 while candidatefilter is not None:
4297 4309 candidatebm = repo.filtered(candidatefilter).branchmap()
4298 4310 if candidatebm.validfor(baserepo):
4299 4311 filtered = repoview.filterrevs(repo, candidatefilter)
4300 4312 missing = [r for r in allbaserevs if r in filtered]
4301 4313 base = candidatebm.copy()
4302 4314 base.update(baserepo, missing)
4303 4315 break
4304 4316 candidatefilter = subsettable.get(candidatefilter)
4305 4317 else:
4306 4318 # no suitable subset where found
4307 4319 base = branchmap.branchcache()
4308 4320 base.update(baserepo, allbaserevs)
4309 4321
4310 4322 def setup():
4311 4323 x[0] = base.copy()
4312 4324 if clearcaches:
4313 4325 unfi._revbranchcache = None
4314 4326 clearchangelog(repo)
4315 4327
4316 4328 def bench():
4317 4329 x[0].update(targetrepo, newrevs)
4318 4330
4319 4331 timer(bench, setup=setup)
4320 4332 fm.end()
4321 4333 finally:
4322 4334 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
4323 4335 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
4324 4336
4325 4337
4326 4338 @command(
4327 4339 b'perf::branchmapload|perfbranchmapload',
4328 4340 [
4329 4341 (b'f', b'filter', b'', b'Specify repoview filter'),
4330 4342 (b'', b'list', False, b'List brachmap filter caches'),
4331 4343 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
4332 4344 ]
4333 4345 + formatteropts,
4334 4346 )
4335 4347 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
4336 4348 """benchmark reading the branchmap"""
4337 4349 opts = _byteskwargs(opts)
4338 4350 clearrevlogs = opts[b'clear_revlogs']
4339 4351
4340 4352 if list:
4341 4353 for name, kind, st in repo.cachevfs.readdir(stat=True):
4342 4354 if name.startswith(b'branch2'):
4343 4355 filtername = name.partition(b'-')[2] or b'unfiltered'
4344 4356 ui.status(
4345 4357 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
4346 4358 )
4347 4359 return
4348 4360 if not filter:
4349 4361 filter = None
4350 4362 subsettable = getbranchmapsubsettable()
4351 4363 if filter is None:
4352 4364 repo = repo.unfiltered()
4353 4365 else:
4354 4366 repo = repoview.repoview(repo, filter)
4355 4367
4356 4368 repo.branchmap() # make sure we have a relevant, up to date branchmap
4357 4369
4358 4370 try:
4359 4371 fromfile = branchmap.branchcache.fromfile
4360 4372 except AttributeError:
4361 4373 # older versions
4362 4374 fromfile = branchmap.read
4363 4375
4364 4376 currentfilter = filter
4365 4377 # try once without timer, the filter may not be cached
4366 4378 while fromfile(repo) is None:
4367 4379 currentfilter = subsettable.get(currentfilter)
4368 4380 if currentfilter is None:
4369 4381 raise error.Abort(
4370 4382 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
4371 4383 )
4372 4384 repo = repo.filtered(currentfilter)
4373 4385 timer, fm = gettimer(ui, opts)
4374 4386
4375 4387 def setup():
4376 4388 if clearrevlogs:
4377 4389 clearchangelog(repo)
4378 4390
4379 4391 def bench():
4380 4392 fromfile(repo)
4381 4393
4382 4394 timer(bench, setup=setup)
4383 4395 fm.end()
4384 4396
4385 4397
@command(b'perf::loadmarkers|perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)
    # instantiating obsstore parses every marker from the store vfs
    timer(lambda: len(obsolete.obsstore(repo, svfs)))
    fm.end()
4395 4407
4396 4408
4397 4409 @command(
4398 4410 b'perf::lrucachedict|perflrucachedict',
4399 4411 formatteropts
4400 4412 + [
4401 4413 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
4402 4414 (b'', b'mincost', 0, b'smallest cost of items in cache'),
4403 4415 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
4404 4416 (b'', b'size', 4, b'size of cache'),
4405 4417 (b'', b'gets', 10000, b'number of key lookups'),
4406 4418 (b'', b'sets', 10000, b'number of key sets'),
4407 4419 (b'', b'mixed', 10000, b'number of mixed mode operations'),
4408 4420 (
4409 4421 b'',
4410 4422 b'mixedgetfreq',
4411 4423 50,
4412 4424 b'frequency of get vs set ops in mixed mode',
4413 4425 ),
4414 4426 ],
4415 4427 norepo=True,
4416 4428 )
4417 4429 def perflrucache(
4418 4430 ui,
4419 4431 mincost=0,
4420 4432 maxcost=100,
4421 4433 costlimit=0,
4422 4434 size=4,
4423 4435 gets=10000,
4424 4436 sets=10000,
4425 4437 mixed=10000,
4426 4438 mixedgetfreq=50,
4427 4439 **opts
4428 4440 ):
4429 4441 opts = _byteskwargs(opts)
4430 4442
4431 4443 def doinit():
4432 4444 for i in _xrange(10000):
4433 4445 util.lrucachedict(size)
4434 4446
4435 4447 costrange = list(range(mincost, maxcost + 1))
4436 4448
4437 4449 values = []
4438 4450 for i in _xrange(size):
4439 4451 values.append(random.randint(0, _maxint))
4440 4452
4441 4453 # Get mode fills the cache and tests raw lookup performance with no
4442 4454 # eviction.
4443 4455 getseq = []
4444 4456 for i in _xrange(gets):
4445 4457 getseq.append(random.choice(values))
4446 4458
4447 4459 def dogets():
4448 4460 d = util.lrucachedict(size)
4449 4461 for v in values:
4450 4462 d[v] = v
4451 4463 for key in getseq:
4452 4464 value = d[key]
4453 4465 value # silence pyflakes warning
4454 4466
4455 4467 def dogetscost():
4456 4468 d = util.lrucachedict(size, maxcost=costlimit)
4457 4469 for i, v in enumerate(values):
4458 4470 d.insert(v, v, cost=costs[i])
4459 4471 for key in getseq:
4460 4472 try:
4461 4473 value = d[key]
4462 4474 value # silence pyflakes warning
4463 4475 except KeyError:
4464 4476 pass
4465 4477
4466 4478 # Set mode tests insertion speed with cache eviction.
4467 4479 setseq = []
4468 4480 costs = []
4469 4481 for i in _xrange(sets):
4470 4482 setseq.append(random.randint(0, _maxint))
4471 4483 costs.append(random.choice(costrange))
4472 4484
4473 4485 def doinserts():
4474 4486 d = util.lrucachedict(size)
4475 4487 for v in setseq:
4476 4488 d.insert(v, v)
4477 4489
4478 4490 def doinsertscost():
4479 4491 d = util.lrucachedict(size, maxcost=costlimit)
4480 4492 for i, v in enumerate(setseq):
4481 4493 d.insert(v, v, cost=costs[i])
4482 4494
4483 4495 def dosets():
4484 4496 d = util.lrucachedict(size)
4485 4497 for v in setseq:
4486 4498 d[v] = v
4487 4499
4488 4500 # Mixed mode randomly performs gets and sets with eviction.
4489 4501 mixedops = []
4490 4502 for i in _xrange(mixed):
4491 4503 r = random.randint(0, 100)
4492 4504 if r < mixedgetfreq:
4493 4505 op = 0
4494 4506 else:
4495 4507 op = 1
4496 4508
4497 4509 mixedops.append(
4498 4510 (op, random.randint(0, size * 2), random.choice(costrange))
4499 4511 )
4500 4512
4501 4513 def domixed():
4502 4514 d = util.lrucachedict(size)
4503 4515
4504 4516 for op, v, cost in mixedops:
4505 4517 if op == 0:
4506 4518 try:
4507 4519 d[v]
4508 4520 except KeyError:
4509 4521 pass
4510 4522 else:
4511 4523 d[v] = v
4512 4524
4513 4525 def domixedcost():
4514 4526 d = util.lrucachedict(size, maxcost=costlimit)
4515 4527
4516 4528 for op, v, cost in mixedops:
4517 4529 if op == 0:
4518 4530 try:
4519 4531 d[v]
4520 4532 except KeyError:
4521 4533 pass
4522 4534 else:
4523 4535 d.insert(v, v, cost=cost)
4524 4536
4525 4537 benches = [
4526 4538 (doinit, b'init'),
4527 4539 ]
4528 4540
4529 4541 if costlimit:
4530 4542 benches.extend(
4531 4543 [
4532 4544 (dogetscost, b'gets w/ cost limit'),
4533 4545 (doinsertscost, b'inserts w/ cost limit'),
4534 4546 (domixedcost, b'mixed w/ cost limit'),
4535 4547 ]
4536 4548 )
4537 4549 else:
4538 4550 benches.extend(
4539 4551 [
4540 4552 (dogets, b'gets'),
4541 4553 (doinserts, b'inserts'),
4542 4554 (dosets, b'sets'),
4543 4555 (domixed, b'mixed'),
4544 4556 ]
4545 4557 )
4546 4558
4547 4559 for fn, title in benches:
4548 4560 timer, fm = gettimer(ui, opts)
4549 4561 timer(fn, title=title)
4550 4562 fm.end()
4551 4563
4552 4564
4553 4565 @command(
4554 4566 b'perf::write|perfwrite',
4555 4567 formatteropts
4556 4568 + [
4557 4569 (b'', b'write-method', b'write', b'ui write method'),
4558 4570 (b'', b'nlines', 100, b'number of lines'),
4559 4571 (b'', b'nitems', 100, b'number of items (per line)'),
4560 4572 (b'', b'item', b'x', b'item that is written'),
4561 4573 (b'', b'batch-line', None, b'pass whole line to write method at once'),
4562 4574 (b'', b'flush-line', None, b'flush after each line'),
4563 4575 ],
4564 4576 )
4565 4577 def perfwrite(ui, repo, **opts):
4566 4578 """microbenchmark ui.write (and others)"""
4567 4579 opts = _byteskwargs(opts)
4568 4580
4569 4581 write = getattr(ui, _sysstr(opts[b'write_method']))
4570 4582 nlines = int(opts[b'nlines'])
4571 4583 nitems = int(opts[b'nitems'])
4572 4584 item = opts[b'item']
4573 4585 batch_line = opts.get(b'batch_line')
4574 4586 flush_line = opts.get(b'flush_line')
4575 4587
4576 4588 if batch_line:
4577 4589 line = item * nitems + b'\n'
4578 4590
4579 4591 def benchmark():
4580 4592 for i in pycompat.xrange(nlines):
4581 4593 if batch_line:
4582 4594 write(line)
4583 4595 else:
4584 4596 for i in pycompat.xrange(nitems):
4585 4597 write(item)
4586 4598 write(b'\n')
4587 4599 if flush_line:
4588 4600 ui.flush()
4589 4601 ui.flush()
4590 4602
4591 4603 timer, fm = gettimer(ui, opts)
4592 4604 timer(benchmark)
4593 4605 fm.end()
4594 4606
4595 4607
def uisetup(ui):
    """Extension setup hook.

    On old Mercurial versions, wrap cmdutil.openrevlog so that the
    unsupported --dir option fails with a clear message.
    """
    if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
        commands, b'debugrevlogopts'
    ):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(
                    b"This version doesn't support --dir option",
                    hint=b"use 3.5 or later",
                )
            return orig(repo, cmd, file_, opts)

        name = _sysstr(b'openrevlog')
        extensions.wrapfunction(cmdutil, name, openrevlog)
4615 4627
4616 4628
@command(
    b'perf::progress|perfprogress',
    formatteropts
    + [
        (b'', b'topic', b'topic', b'topic for progress messages'),
        (b'c', b'total', 1000000, b'total value we are progressing to'),
    ],
    norepo=True,
)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def doprogress():
        # drive a full progress bar from 0 to `total`, one step at a time
        with ui.makeprogress(topic, total=total) as progress:
            for i in _xrange(total):
                progress.increment()

    timer(doprogress)
    fm.end()
General Comments 0
You need to be logged in to leave comments. Login now