##// END OF EJS Templates
perf: add a --update-last flag to perf::tags...
marmoute -
r51833:98a7f325 stable
parent child Browse files
Show More
@@ -1,4530 +1,4568 b''
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance
3 3
4 4 Configurations
5 5 ==============
6 6
7 7 ``perf``
8 8 --------
9 9
10 10 ``all-timing``
11 11 When set, additional statistics will be reported for each benchmark: best,
12 12 worst, median average. If not set only the best timing is reported
13 13 (default: off).
14 14
15 15 ``presleep``
16 16 number of seconds to wait before any group of runs (default: 1)
17 17
18 18 ``pre-run``
19 19 number of runs to perform before starting measurement.
20 20
21 21 ``profile-benchmark``
22 22 Enable profiling for the benchmarked section.
23 23 (The first iteration is benchmarked)
24 24
25 25 ``run-limits``
26 26 Control the number of runs each benchmark will perform. The option value
27 27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 28 conditions are considered in order with the following logic:
29 29
30 30 If benchmark has been running for <time> seconds, and we have performed
31 31 <numberofrun> iterations, stop the benchmark,
32 32
33 33 The default value is: `3.0-100, 10.0-3`
34 34
35 35 ``stub``
36 36 When set, benchmarks will only be run once, useful for testing
37 37 (default: off)
38 38 '''
39 39
40 40 # "historical portability" policy of perf.py:
41 41 #
42 42 # We have to do:
43 43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 46 # - make historical perf command work correctly with as wide Mercurial
47 47 # version as possible
48 48 #
49 49 # We have to do, if possible with reasonable cost:
50 50 # - make recent perf command for historical feature work correctly
51 51 # with early Mercurial
52 52 #
53 53 # We don't have to do:
54 54 # - make perf command for recent feature work correctly with early
55 55 # Mercurial
56 56
57 57 import contextlib
58 58 import functools
59 59 import gc
60 60 import os
61 61 import random
62 62 import shutil
63 63 import struct
64 64 import sys
65 65 import tempfile
66 66 import threading
67 67 import time
68 68
69 69 import mercurial.revlog
70 70 from mercurial import (
71 71 changegroup,
72 72 cmdutil,
73 73 commands,
74 74 copies,
75 75 error,
76 76 extensions,
77 77 hg,
78 78 mdiff,
79 79 merge,
80 80 util,
81 81 )
82 82
83 83 # for "historical portability":
84 84 # try to import modules separately (in dict order), and ignore
85 85 # failure, because these aren't available with early Mercurial
86 86 try:
87 87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 88 except ImportError:
89 89 pass
90 90 try:
91 91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 92 except ImportError:
93 93 pass
94 94 try:
95 95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96 96
97 97 dir(registrar) # forcibly load it
98 98 except ImportError:
99 99 registrar = None
100 100 try:
101 101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 102 except ImportError:
103 103 pass
104 104 try:
105 105 from mercurial.utils import repoviewutil # since 5.0
106 106 except ImportError:
107 107 repoviewutil = None
108 108 try:
109 109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 110 except ImportError:
111 111 pass
112 112 try:
113 113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 114 except ImportError:
115 115 pass
116 116
117 117 try:
118 118 from mercurial import profiling
119 119 except ImportError:
120 120 profiling = None
121 121
122 122 try:
123 123 from mercurial.revlogutils import constants as revlog_constants
124 124
125 125 perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')
126 126
127 127 def revlog(opener, *args, **kwargs):
128 128 return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)
129 129
130 130
131 131 except (ImportError, AttributeError):
132 132 perf_rl_kind = None
133 133
134 134 def revlog(opener, *args, **kwargs):
135 135 return mercurial.revlog.revlog(opener, *args, **kwargs)
136 136
137 137
138 138 def identity(a):
139 139 return a
140 140
141 141
142 142 try:
143 143 from mercurial import pycompat
144 144
145 145 getargspec = pycompat.getargspec # added to module after 4.5
146 146 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
147 147 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
148 148 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
149 149 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
150 150 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
151 151 if pycompat.ispy3:
152 152 _maxint = sys.maxsize # per py3 docs for replacing maxint
153 153 else:
154 154 _maxint = sys.maxint
155 155 except (NameError, ImportError, AttributeError):
156 156 import inspect
157 157
158 158 getargspec = inspect.getargspec
159 159 _byteskwargs = identity
160 160 _bytestr = str
161 161 fsencode = identity # no py3 support
162 162 _maxint = sys.maxint # no py3 support
163 163 _sysstr = lambda x: x # no py3 support
164 164 _xrange = xrange
165 165
166 166 try:
167 167 # 4.7+
168 168 queue = pycompat.queue.Queue
169 169 except (NameError, AttributeError, ImportError):
170 170 # <4.7.
171 171 try:
172 172 queue = pycompat.queue
173 173 except (NameError, AttributeError, ImportError):
174 174 import Queue as queue
175 175
176 176 try:
177 177 from mercurial import logcmdutil
178 178
179 179 makelogtemplater = logcmdutil.maketemplater
180 180 except (AttributeError, ImportError):
181 181 try:
182 182 makelogtemplater = cmdutil.makelogtemplater
183 183 except (AttributeError, ImportError):
184 184 makelogtemplater = None
185 185
186 186 # for "historical portability":
187 187 # define util.safehasattr forcibly, because util.safehasattr has been
188 188 # available since 1.9.3 (or 94b200a11cf7)
189 189 _undefined = object()
190 190
191 191
192 192 def safehasattr(thing, attr):
193 193 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
194 194
195 195
196 196 setattr(util, 'safehasattr', safehasattr)
197 197
198 198 # for "historical portability":
199 199 # define util.timer forcibly, because util.timer has been available
200 200 # since ae5d60bb70c9
201 201 if safehasattr(time, 'perf_counter'):
202 202 util.timer = time.perf_counter
203 203 elif os.name == b'nt':
204 204 util.timer = time.clock
205 205 else:
206 206 util.timer = time.time
207 207
208 208 # for "historical portability":
209 209 # use locally defined empty option list, if formatteropts isn't
210 210 # available, because commands.formatteropts has been available since
211 211 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
212 212 # available since 2.2 (or ae5f92e154d3)
213 213 formatteropts = getattr(
214 214 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
215 215 )
216 216
217 217 # for "historical portability":
218 218 # use locally defined option list, if debugrevlogopts isn't available,
219 219 # because commands.debugrevlogopts has been available since 3.7 (or
220 220 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
221 221 # since 1.9 (or a79fea6b3e77).
222 222 revlogopts = getattr(
223 223 cmdutil,
224 224 "debugrevlogopts",
225 225 getattr(
226 226 commands,
227 227 "debugrevlogopts",
228 228 [
229 229 (b'c', b'changelog', False, b'open changelog'),
230 230 (b'm', b'manifest', False, b'open manifest'),
231 231 (b'', b'dir', False, b'open directory manifest'),
232 232 ],
233 233 ),
234 234 )
235 235
236 236 cmdtable = {}
237 237
238 238
239 239 # for "historical portability":
240 240 # define parsealiases locally, because cmdutil.parsealiases has been
241 241 # available since 1.5 (or 6252852b4332)
242 242 def parsealiases(cmd):
243 243 return cmd.split(b"|")
244 244
245 245
246 246 if safehasattr(registrar, 'command'):
247 247 command = registrar.command(cmdtable)
248 248 elif safehasattr(cmdutil, 'command'):
249 249 command = cmdutil.command(cmdtable)
250 250 if 'norepo' not in getargspec(command).args:
251 251 # for "historical portability":
252 252 # wrap original cmdutil.command, because "norepo" option has
253 253 # been available since 3.1 (or 75a96326cecb)
254 254 _command = command
255 255
256 256 def command(name, options=(), synopsis=None, norepo=False):
257 257 if norepo:
258 258 commands.norepo += b' %s' % b' '.join(parsealiases(name))
259 259 return _command(name, list(options), synopsis)
260 260
261 261
262 262 else:
263 263 # for "historical portability":
264 264 # define "@command" annotation locally, because cmdutil.command
265 265 # has been available since 1.9 (or 2daa5179e73f)
266 266 def command(name, options=(), synopsis=None, norepo=False):
267 267 def decorator(func):
268 268 if synopsis:
269 269 cmdtable[name] = func, list(options), synopsis
270 270 else:
271 271 cmdtable[name] = func, list(options)
272 272 if norepo:
273 273 commands.norepo += b' %s' % b' '.join(parsealiases(name))
274 274 return func
275 275
276 276 return decorator
277 277
278 278
279 279 try:
280 280 import mercurial.registrar
281 281 import mercurial.configitems
282 282
283 283 configtable = {}
284 284 configitem = mercurial.registrar.configitem(configtable)
285 285 configitem(
286 286 b'perf',
287 287 b'presleep',
288 288 default=mercurial.configitems.dynamicdefault,
289 289 experimental=True,
290 290 )
291 291 configitem(
292 292 b'perf',
293 293 b'stub',
294 294 default=mercurial.configitems.dynamicdefault,
295 295 experimental=True,
296 296 )
297 297 configitem(
298 298 b'perf',
299 299 b'parentscount',
300 300 default=mercurial.configitems.dynamicdefault,
301 301 experimental=True,
302 302 )
303 303 configitem(
304 304 b'perf',
305 305 b'all-timing',
306 306 default=mercurial.configitems.dynamicdefault,
307 307 experimental=True,
308 308 )
309 309 configitem(
310 310 b'perf',
311 311 b'pre-run',
312 312 default=mercurial.configitems.dynamicdefault,
313 313 )
314 314 configitem(
315 315 b'perf',
316 316 b'profile-benchmark',
317 317 default=mercurial.configitems.dynamicdefault,
318 318 )
319 319 configitem(
320 320 b'perf',
321 321 b'run-limits',
322 322 default=mercurial.configitems.dynamicdefault,
323 323 experimental=True,
324 324 )
325 325 except (ImportError, AttributeError):
326 326 pass
327 327 except TypeError:
328 328 # compatibility fix for a11fd395e83f
329 329 # hg version: 5.2
330 330 configitem(
331 331 b'perf',
332 332 b'presleep',
333 333 default=mercurial.configitems.dynamicdefault,
334 334 )
335 335 configitem(
336 336 b'perf',
337 337 b'stub',
338 338 default=mercurial.configitems.dynamicdefault,
339 339 )
340 340 configitem(
341 341 b'perf',
342 342 b'parentscount',
343 343 default=mercurial.configitems.dynamicdefault,
344 344 )
345 345 configitem(
346 346 b'perf',
347 347 b'all-timing',
348 348 default=mercurial.configitems.dynamicdefault,
349 349 )
350 350 configitem(
351 351 b'perf',
352 352 b'pre-run',
353 353 default=mercurial.configitems.dynamicdefault,
354 354 )
355 355 configitem(
356 356 b'perf',
357 357 b'profile-benchmark',
358 358 default=mercurial.configitems.dynamicdefault,
359 359 )
360 360 configitem(
361 361 b'perf',
362 362 b'run-limits',
363 363 default=mercurial.configitems.dynamicdefault,
364 364 )
365 365
366 366
367 367 def getlen(ui):
368 368 if ui.configbool(b"perf", b"stub", False):
369 369 return lambda x: 1
370 370 return len
371 371
372 372
373 373 class noop:
374 374 """dummy context manager"""
375 375
376 376 def __enter__(self):
377 377 pass
378 378
379 379 def __exit__(self, *args):
380 380 pass
381 381
382 382
383 383 NOOPCTX = noop()
384 384
385 385
386 386 def gettimer(ui, opts=None):
387 387 """return a timer function and formatter: (timer, formatter)
388 388
389 389 This function exists to gather the creation of formatter in a single
390 390 place instead of duplicating it in all performance commands."""
391 391
392 392 # enforce an idle period before execution to counteract power management
393 393 # experimental config: perf.presleep
394 394 time.sleep(getint(ui, b"perf", b"presleep", 1))
395 395
396 396 if opts is None:
397 397 opts = {}
398 398 # redirect all to stderr unless buffer api is in use
399 399 if not ui._buffers:
400 400 ui = ui.copy()
401 401 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
402 402 if uifout:
403 403 # for "historical portability":
404 404 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
405 405 uifout.set(ui.ferr)
406 406
407 407 # get a formatter
408 408 uiformatter = getattr(ui, 'formatter', None)
409 409 if uiformatter:
410 410 fm = uiformatter(b'perf', opts)
411 411 else:
412 412 # for "historical portability":
413 413 # define formatter locally, because ui.formatter has been
414 414 # available since 2.2 (or ae5f92e154d3)
415 415 from mercurial import node
416 416
417 417 class defaultformatter:
418 418 """Minimized composition of baseformatter and plainformatter"""
419 419
420 420 def __init__(self, ui, topic, opts):
421 421 self._ui = ui
422 422 if ui.debugflag:
423 423 self.hexfunc = node.hex
424 424 else:
425 425 self.hexfunc = node.short
426 426
427 427 def __nonzero__(self):
428 428 return False
429 429
430 430 __bool__ = __nonzero__
431 431
432 432 def startitem(self):
433 433 pass
434 434
435 435 def data(self, **data):
436 436 pass
437 437
438 438 def write(self, fields, deftext, *fielddata, **opts):
439 439 self._ui.write(deftext % fielddata, **opts)
440 440
441 441 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
442 442 if cond:
443 443 self._ui.write(deftext % fielddata, **opts)
444 444
445 445 def plain(self, text, **opts):
446 446 self._ui.write(text, **opts)
447 447
448 448 def end(self):
449 449 pass
450 450
451 451 fm = defaultformatter(ui, b'perf', opts)
452 452
453 453 # stub function, runs code only once instead of in a loop
454 454 # experimental config: perf.stub
455 455 if ui.configbool(b"perf", b"stub", False):
456 456 return functools.partial(stub_timer, fm), fm
457 457
458 458 # experimental config: perf.all-timing
459 459 displayall = ui.configbool(b"perf", b"all-timing", False)
460 460
461 461 # experimental config: perf.run-limits
462 462 limitspec = ui.configlist(b"perf", b"run-limits", [])
463 463 limits = []
464 464 for item in limitspec:
465 465 parts = item.split(b'-', 1)
466 466 if len(parts) < 2:
467 467 ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
468 468 continue
469 469 try:
470 470 time_limit = float(_sysstr(parts[0]))
471 471 except ValueError as e:
472 472 ui.warn(
473 473 (
474 474 b'malformatted run limit entry, %s: %s\n'
475 475 % (_bytestr(e), item)
476 476 )
477 477 )
478 478 continue
479 479 try:
480 480 run_limit = int(_sysstr(parts[1]))
481 481 except ValueError as e:
482 482 ui.warn(
483 483 (
484 484 b'malformatted run limit entry, %s: %s\n'
485 485 % (_bytestr(e), item)
486 486 )
487 487 )
488 488 continue
489 489 limits.append((time_limit, run_limit))
490 490 if not limits:
491 491 limits = DEFAULTLIMITS
492 492
493 493 profiler = None
494 494 if profiling is not None:
495 495 if ui.configbool(b"perf", b"profile-benchmark", False):
496 496 profiler = profiling.profile(ui)
497 497
498 498 prerun = getint(ui, b"perf", b"pre-run", 0)
499 499 t = functools.partial(
500 500 _timer,
501 501 fm,
502 502 displayall=displayall,
503 503 limits=limits,
504 504 prerun=prerun,
505 505 profiler=profiler,
506 506 )
507 507 return t, fm
508 508
509 509
510 510 def stub_timer(fm, func, setup=None, title=None):
511 511 if setup is not None:
512 512 setup()
513 513 func()
514 514
515 515
516 516 @contextlib.contextmanager
517 517 def timeone():
518 518 r = []
519 519 ostart = os.times()
520 520 cstart = util.timer()
521 521 yield r
522 522 cstop = util.timer()
523 523 ostop = os.times()
524 524 a, b = ostart, ostop
525 525 r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
526 526
527 527
528 528 # list of stop condition (elapsed time, minimal run count)
529 529 DEFAULTLIMITS = (
530 530 (3.0, 100),
531 531 (10.0, 3),
532 532 )
533 533
534 534
535 535 @contextlib.contextmanager
536 536 def noop_context():
537 537 yield
538 538
539 539
540 540 def _timer(
541 541 fm,
542 542 func,
543 543 setup=None,
544 544 context=noop_context,
545 545 title=None,
546 546 displayall=False,
547 547 limits=DEFAULTLIMITS,
548 548 prerun=0,
549 549 profiler=None,
550 550 ):
551 551 gc.collect()
552 552 results = []
553 553 begin = util.timer()
554 554 count = 0
555 555 if profiler is None:
556 556 profiler = NOOPCTX
557 557 for i in range(prerun):
558 558 if setup is not None:
559 559 setup()
560 560 with context():
561 561 func()
562 562 keepgoing = True
563 563 while keepgoing:
564 564 if setup is not None:
565 565 setup()
566 566 with context():
567 567 with profiler:
568 568 with timeone() as item:
569 569 r = func()
570 570 profiler = NOOPCTX
571 571 count += 1
572 572 results.append(item[0])
573 573 cstop = util.timer()
574 574 # Look for a stop condition.
575 575 elapsed = cstop - begin
576 576 for t, mincount in limits:
577 577 if elapsed >= t and count >= mincount:
578 578 keepgoing = False
579 579 break
580 580
581 581 formatone(fm, results, title=title, result=r, displayall=displayall)
582 582
583 583
584 584 def formatone(fm, timings, title=None, result=None, displayall=False):
585 585 count = len(timings)
586 586
587 587 fm.startitem()
588 588
589 589 if title:
590 590 fm.write(b'title', b'! %s\n', title)
591 591 if result:
592 592 fm.write(b'result', b'! result: %s\n', result)
593 593
594 594 def display(role, entry):
595 595 prefix = b''
596 596 if role != b'best':
597 597 prefix = b'%s.' % role
598 598 fm.plain(b'!')
599 599 fm.write(prefix + b'wall', b' wall %f', entry[0])
600 600 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
601 601 fm.write(prefix + b'user', b' user %f', entry[1])
602 602 fm.write(prefix + b'sys', b' sys %f', entry[2])
603 603 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
604 604 fm.plain(b'\n')
605 605
606 606 timings.sort()
607 607 min_val = timings[0]
608 608 display(b'best', min_val)
609 609 if displayall:
610 610 max_val = timings[-1]
611 611 display(b'max', max_val)
612 612 avg = tuple([sum(x) / count for x in zip(*timings)])
613 613 display(b'avg', avg)
614 614 median = timings[len(timings) // 2]
615 615 display(b'median', median)
616 616
617 617
618 618 # utilities for historical portability
619 619
620 620
621 621 def getint(ui, section, name, default):
622 622 # for "historical portability":
623 623 # ui.configint has been available since 1.9 (or fa2b596db182)
624 624 v = ui.config(section, name, None)
625 625 if v is None:
626 626 return default
627 627 try:
628 628 return int(v)
629 629 except ValueError:
630 630 raise error.ConfigError(
631 631 b"%s.%s is not an integer ('%s')" % (section, name, v)
632 632 )
633 633
634 634
635 635 def safeattrsetter(obj, name, ignoremissing=False):
636 636 """Ensure that 'obj' has 'name' attribute before subsequent setattr
637 637
638 638 This function is aborted, if 'obj' doesn't have 'name' attribute
639 639 at runtime. This avoids overlooking removal of an attribute, which
640 640 breaks assumption of performance measurement, in the future.
641 641
642 642 This function returns the object to (1) assign a new value, and
643 643 (2) restore an original value to the attribute.
644 644
645 645 If 'ignoremissing' is true, missing 'name' attribute doesn't cause
646 646 abortion, and this function returns None. This is useful to
647 647 examine an attribute, which isn't ensured in all Mercurial
648 648 versions.
649 649 """
650 650 if not util.safehasattr(obj, name):
651 651 if ignoremissing:
652 652 return None
653 653 raise error.Abort(
654 654 (
655 655 b"missing attribute %s of %s might break assumption"
656 656 b" of performance measurement"
657 657 )
658 658 % (name, obj)
659 659 )
660 660
661 661 origvalue = getattr(obj, _sysstr(name))
662 662
663 663 class attrutil:
664 664 def set(self, newvalue):
665 665 setattr(obj, _sysstr(name), newvalue)
666 666
667 667 def restore(self):
668 668 setattr(obj, _sysstr(name), origvalue)
669 669
670 670 return attrutil()
671 671
672 672
673 673 # utilities to examine each internal API changes
674 674
675 675
676 676 def getbranchmapsubsettable():
677 677 # for "historical portability":
678 678 # subsettable is defined in:
679 679 # - branchmap since 2.9 (or 175c6fd8cacc)
680 680 # - repoview since 2.5 (or 59a9f18d4587)
681 681 # - repoviewutil since 5.0
682 682 for mod in (branchmap, repoview, repoviewutil):
683 683 subsettable = getattr(mod, 'subsettable', None)
684 684 if subsettable:
685 685 return subsettable
686 686
687 687 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
688 688 # branchmap and repoview modules exist, but subsettable attribute
689 689 # doesn't)
690 690 raise error.Abort(
691 691 b"perfbranchmap not available with this Mercurial",
692 692 hint=b"use 2.5 or later",
693 693 )
694 694
695 695
696 696 def getsvfs(repo):
697 697 """Return appropriate object to access files under .hg/store"""
698 698 # for "historical portability":
699 699 # repo.svfs has been available since 2.3 (or 7034365089bf)
700 700 svfs = getattr(repo, 'svfs', None)
701 701 if svfs:
702 702 return svfs
703 703 else:
704 704 return getattr(repo, 'sopener')
705 705
706 706
707 707 def getvfs(repo):
708 708 """Return appropriate object to access files under .hg"""
709 709 # for "historical portability":
710 710 # repo.vfs has been available since 2.3 (or 7034365089bf)
711 711 vfs = getattr(repo, 'vfs', None)
712 712 if vfs:
713 713 return vfs
714 714 else:
715 715 return getattr(repo, 'opener')
716 716
717 717
718 718 def repocleartagscachefunc(repo):
719 719 """Return the function to clear tags cache according to repo internal API"""
720 720 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
721 721 # in this case, setattr(repo, '_tagscache', None) or so isn't
722 722 # correct way to clear tags cache, because existing code paths
723 723 # expect _tagscache to be a structured object.
724 724 def clearcache():
725 725 # _tagscache has been filteredpropertycache since 2.5 (or
726 726 # 98c867ac1330), and delattr() can't work in such case
727 727 if '_tagscache' in vars(repo):
728 728 del repo.__dict__['_tagscache']
729 729
730 730 return clearcache
731 731
732 732 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
733 733 if repotags: # since 1.4 (or 5614a628d173)
734 734 return lambda: repotags.set(None)
735 735
736 736 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
737 737 if repotagscache: # since 0.6 (or d7df759d0e97)
738 738 return lambda: repotagscache.set(None)
739 739
740 740 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
741 741 # this point, but it isn't so problematic, because:
742 742 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
743 743 # in perftags() causes failure soon
744 744 # - perf.py itself has been available since 1.1 (or eb240755386d)
745 745 raise error.Abort(b"tags API of this hg command is unknown")
746 746
747 747
748 748 # utilities to clear cache
749 749
750 750
751 751 def clearfilecache(obj, attrname):
752 752 unfiltered = getattr(obj, 'unfiltered', None)
753 753 if unfiltered is not None:
754 754 obj = obj.unfiltered()
755 755 if attrname in vars(obj):
756 756 delattr(obj, attrname)
757 757 obj._filecache.pop(attrname, None)
758 758
759 759
760 760 def clearchangelog(repo):
761 761 if repo is not repo.unfiltered():
762 762 object.__setattr__(repo, '_clcachekey', None)
763 763 object.__setattr__(repo, '_clcache', None)
764 764 clearfilecache(repo.unfiltered(), 'changelog')
765 765
766 766
767 767 # perf commands
768 768
769 769
770 770 @command(b'perf::walk|perfwalk', formatteropts)
771 771 def perfwalk(ui, repo, *pats, **opts):
772 772 opts = _byteskwargs(opts)
773 773 timer, fm = gettimer(ui, opts)
774 774 m = scmutil.match(repo[None], pats, {})
775 775 timer(
776 776 lambda: len(
777 777 list(
778 778 repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
779 779 )
780 780 )
781 781 )
782 782 fm.end()
783 783
784 784
785 785 @command(b'perf::annotate|perfannotate', formatteropts)
786 786 def perfannotate(ui, repo, f, **opts):
787 787 opts = _byteskwargs(opts)
788 788 timer, fm = gettimer(ui, opts)
789 789 fc = repo[b'.'][f]
790 790 timer(lambda: len(fc.annotate(True)))
791 791 fm.end()
792 792
793 793
794 794 @command(
795 795 b'perf::status|perfstatus',
796 796 [
797 797 (b'u', b'unknown', False, b'ask status to look for unknown files'),
798 798 (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
799 799 ]
800 800 + formatteropts,
801 801 )
802 802 def perfstatus(ui, repo, **opts):
803 803 """benchmark the performance of a single status call
804 804
805 805 The repository data are preserved between each call.
806 806
807 807 By default, only the status of the tracked file are requested. If
808 808 `--unknown` is passed, the "unknown" files are also tracked.
809 809 """
810 810 opts = _byteskwargs(opts)
811 811 # m = match.always(repo.root, repo.getcwd())
812 812 # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
813 813 # False))))
814 814 timer, fm = gettimer(ui, opts)
815 815 if opts[b'dirstate']:
816 816 dirstate = repo.dirstate
817 817 m = scmutil.matchall(repo)
818 818 unknown = opts[b'unknown']
819 819
820 820 def status_dirstate():
821 821 s = dirstate.status(
822 822 m, subrepos=[], ignored=False, clean=False, unknown=unknown
823 823 )
824 824 sum(map(bool, s))
825 825
826 826 if util.safehasattr(dirstate, 'running_status'):
827 827 with dirstate.running_status(repo):
828 828 timer(status_dirstate)
829 829 dirstate.invalidate()
830 830 else:
831 831 timer(status_dirstate)
832 832 else:
833 833 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
834 834 fm.end()
835 835
836 836
837 837 @command(b'perf::addremove|perfaddremove', formatteropts)
838 838 def perfaddremove(ui, repo, **opts):
839 839 opts = _byteskwargs(opts)
840 840 timer, fm = gettimer(ui, opts)
841 841 try:
842 842 oldquiet = repo.ui.quiet
843 843 repo.ui.quiet = True
844 844 matcher = scmutil.match(repo[None])
845 845 opts[b'dry_run'] = True
846 846 if 'uipathfn' in getargspec(scmutil.addremove).args:
847 847 uipathfn = scmutil.getuipathfn(repo)
848 848 timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
849 849 else:
850 850 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
851 851 finally:
852 852 repo.ui.quiet = oldquiet
853 853 fm.end()
854 854
855 855
856 856 def clearcaches(cl):
857 857 # behave somewhat consistently across internal API changes
858 858 if util.safehasattr(cl, b'clearcaches'):
859 859 cl.clearcaches()
860 860 elif util.safehasattr(cl, b'_nodecache'):
861 861 # <= hg-5.2
862 862 from mercurial.node import nullid, nullrev
863 863
864 864 cl._nodecache = {nullid: nullrev}
865 865 cl._nodepos = None
866 866
867 867
868 868 @command(b'perf::heads|perfheads', formatteropts)
869 869 def perfheads(ui, repo, **opts):
870 870 """benchmark the computation of a changelog heads"""
871 871 opts = _byteskwargs(opts)
872 872 timer, fm = gettimer(ui, opts)
873 873 cl = repo.changelog
874 874
875 875 def s():
876 876 clearcaches(cl)
877 877
878 878 def d():
879 879 len(cl.headrevs())
880 880
881 881 timer(d, setup=s)
882 882 fm.end()
883 883
884 884
885 885 def _default_clear_on_disk_tags_cache(repo):
886 886 from mercurial import tags
887 887
888 888 repo.cachevfs.tryunlink(tags._filename(repo))
889 889
890 890
891 891 def _default_clear_on_disk_tags_fnodes_cache(repo):
892 892 from mercurial import tags
893 893
894 894 repo.cachevfs.tryunlink(tags._fnodescachefile)
895 895
896 896
897 897 def _default_forget_fnodes(repo, revs):
898 898 """function used by the perf extension to prune some entries from the
899 899 fnodes cache"""
900 900 from mercurial import tags
901 901
902 902 missing_1 = b'\xff' * 4
903 903 missing_2 = b'\xff' * 20
904 904 cache = tags.hgtagsfnodescache(repo.unfiltered())
905 905 for r in revs:
906 906 cache._writeentry(r * tags._fnodesrecsize, missing_1, missing_2)
907 907 cache.write()
908 908
909 909
910 910 @command(
911 911 b'perf::tags|perftags',
912 912 formatteropts
913 913 + [
914 914 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
915 915 (
916 916 b'',
917 917 b'clear-on-disk-cache',
918 918 False,
919 919 b'clear on disk tags cache (DESTRUCTIVE)',
920 920 ),
921 921 (
922 922 b'',
923 923 b'clear-fnode-cache-all',
924 924 False,
925 925 b'clear on disk file node cache (DESTRUCTIVE),',
926 926 ),
927 927 (
928 928 b'',
929 929 b'clear-fnode-cache-rev',
930 930 [],
931 931 b'clear on disk file node cache (DESTRUCTIVE),',
932 932 b'REVS',
933 933 ),
934 (
935 b'',
936 b'update-last',
937 b'',
938 b'simulate an update over the last N revisions (DESTRUCTIVE),',
939 b'N',
940 ),
934 941 ],
935 942 )
936 943 def perftags(ui, repo, **opts):
937 944 """Benchmark tags retrieval in various situation
938 945
939 946 The option marked as (DESTRUCTIVE) will alter the on-disk cache, possibly
940 947 altering performance after the command was run. However, it does not
941 948 destroy any stored data.
942 949 """
943 950 from mercurial import tags
944 951
945 952 opts = _byteskwargs(opts)
946 953 timer, fm = gettimer(ui, opts)
947 954 repocleartagscache = repocleartagscachefunc(repo)
948 955 clearrevlogs = opts[b'clear_revlogs']
949 956 clear_disk = opts[b'clear_on_disk_cache']
950 957 clear_fnode = opts[b'clear_fnode_cache_all']
951 958
952 959 clear_fnode_revs = opts[b'clear_fnode_cache_rev']
960 update_last_str = opts[b'update_last']
961 update_last = None
962 if update_last_str:
963 try:
964 update_last = int(update_last_str)
965 except ValueError:
966 msg = b'could not parse value for update-last: "%s"'
967 msg %= update_last_str
968 hint = b'value should be an integer'
969 raise error.Abort(msg, hint=hint)
953 970
954 971 clear_disk_fn = getattr(
955 972 tags,
956 973 "clear_cache_on_disk",
957 974 _default_clear_on_disk_tags_cache,
958 975 )
959 976 clear_fnodes_fn = getattr(
960 977 tags,
961 978 "clear_cache_fnodes",
962 979 _default_clear_on_disk_tags_fnodes_cache,
963 980 )
964 981 clear_fnodes_rev_fn = getattr(
965 982 tags,
966 983 "forget_fnodes",
967 984 _default_forget_fnodes,
968 985 )
969 986
970 clear_revs = None
987 clear_revs = []
971 988 if clear_fnode_revs:
972 clear_revs = scmutil.revrange(repo, clear_fnode_revs)
989 clear_revs.extend(scmutil.revrange(repo, clear_fnode_revs))
990
991 if update_last:
992 revset = b'last(all(), %d)' % update_last
993 last_revs = repo.unfiltered().revs(revset)
994 clear_revs.extend(last_revs)
995
996 from mercurial import repoview
997
998 rev_filter = {(b'experimental', b'extra-filter-revs'): revset}
999 with repo.ui.configoverride(rev_filter, source=b"perf"):
1000 filter_id = repoview.extrafilter(repo.ui)
1001
1002 filter_name = b'%s%%%s' % (repo.filtername, filter_id)
1003 pre_repo = repo.filtered(filter_name)
1004 pre_repo.tags() # warm the cache
1005 old_tags_path = repo.cachevfs.join(tags._filename(pre_repo))
1006 new_tags_path = repo.cachevfs.join(tags._filename(repo))
1007
1008 clear_revs = sorted(set(clear_revs))
973 1009
974 1010 def s():
1011 if update_last:
1012 util.copyfile(old_tags_path, new_tags_path)
975 1013 if clearrevlogs:
976 1014 clearchangelog(repo)
977 1015 clearfilecache(repo.unfiltered(), 'manifest')
978 1016 if clear_disk:
979 1017 clear_disk_fn(repo)
980 1018 if clear_fnode:
981 1019 clear_fnodes_fn(repo)
982 elif clear_revs is not None:
1020 elif clear_revs:
983 1021 clear_fnodes_rev_fn(repo, clear_revs)
984 1022 repocleartagscache()
985 1023
986 1024 def t():
987 1025 len(repo.tags())
988 1026
989 1027 timer(t, setup=s)
990 1028 fm.end()
991 1029
992 1030
@command(b'perf::ancestors|perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    """benchmark walking the ancestors of all current changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    head_revs = repo.changelog.headrevs()

    def run():
        # exhaust the ancestor iterator; the yielded revisions are unused
        for _rev in repo.changelog.ancestors(head_revs):
            pass

    timer(run)
    fm.end()
1005 1043
1006 1044
@command(b'perf::ancestorset|perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    """benchmark membership tests of REVSET revisions in a lazy ancestor set"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    target_revs = repo.revs(revset)
    head_revs = repo.changelog.headrevs()

    def run():
        ancestors = repo.changelog.ancestors(head_revs)
        for rev in target_revs:
            # the membership probe itself is what we measure; result ignored
            rev in ancestors

    timer(run)
    fm.end()
1021 1059
1022 1060
@command(
    b'perf::delta-find',
    revlogopts + formatteropts,
    b'-c|-m|FILE REV',
)
def perf_delta_find(ui, repo, arg_1, arg_2=None, **opts):
    """benchmark the process of finding a valid delta for a revlog revision

    When a revlog receives a new revision (e.g. from a commit, or from an
    incoming bundle), it searches for a suitable delta-base to produce a delta.
    This perf command measures how much time we spend in this process. It
    operates on an already stored revision.

    See `hg help debug-delta-find` for another related command.
    """
    from mercurial import revlogutils
    import mercurial.revlogutils.deltas as deltautil

    opts = _byteskwargs(opts)
    # one positional argument means REV only; two mean FILE REV
    if arg_2 is None:
        file_ = None
        rev = arg_1
    else:
        file_ = arg_1
        rev = arg_2

    repo = repo.unfiltered()

    timer, fm = gettimer(ui, opts)

    rev = int(rev)

    revlog = cmdutil.openrevlog(repo, b'perf::delta-find', file_, opts)

    deltacomputer = deltautil.deltacomputer(revlog)

    # rebuild the revisioninfo that the store would have been handed when
    # this (already stored) revision was originally added
    node = revlog.node(rev)
    p1r, p2r = revlog.parentrevs(rev)
    p1 = revlog.node(p1r)
    p2 = revlog.node(p2r)
    full_text = revlog.revision(rev)
    textlen = len(full_text)
    cachedelta = None  # force a real delta search, no precomputed delta
    flags = revlog.flags(rev)

    revinfo = revlogutils.revisioninfo(
        node,
        p1,
        p2,
        [full_text],  # btext
        textlen,
        cachedelta,
        flags,
    )

    # Note: we should probably purge the potential caches (like the full
    # manifest cache) between runs.
    def find_one():
        with revlog._datafp() as fh:
            deltacomputer.finddeltainfo(revinfo, fh, target_rev=rev)

    timer(find_one)
    fm.end()
1086 1124
1087 1125
@command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path"""
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)

    # resolve `path`, accommodating several generations of the urlutil API
    try:
        from mercurial.utils.urlutil import get_unique_pull_path_obj

        path = get_unique_pull_path_obj(b'perfdiscovery', ui, path)
    except ImportError:
        try:
            from mercurial.utils.urlutil import get_unique_pull_path

            path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
        except ImportError:
            path = ui.expandpath(path)

    def setup():
        # connect a fresh peer before every measurement round
        repos[1] = hg.peer(ui, opts, path)

    def run():
        setdiscovery.findcommonheads(ui, *repos)

    timer(run, setup=setup)
    fm.end()
1114 1152
1115 1153
@command(
    b'perf::bookmarks|perfbookmarks',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']

    def setup():
        if clearrevlogs:
            clearchangelog(repo)
        # drop the cached bookmark store so the run re-reads it from disk
        clearfilecache(repo, b'_bookmarks')

    def run():
        repo._bookmarks

    timer(run, setup=setup)
    fm.end()
1140 1178
1141 1179
@command(
    b'perf::bundle',
    [
        (
            b'r',
            b'rev',
            [],
            b'changesets to bundle',
            b'REV',
        ),
        (
            b't',
            b'type',
            b'none',
            b'bundlespec to use (see `hg help bundlespec`)',
            b'TYPE',
        ),
    ]
    + formatteropts,
    b'REVS',
)
def perfbundle(ui, repo, *revs, **opts):
    """benchmark the creation of a bundle from a repository

    For now, this only supports "none" compression.
    """
    # `parsebundlespec` moved between modules over time; try the modern
    # location first, then fall back for older Mercurial versions.
    try:
        from mercurial import bundlecaches

        parsebundlespec = bundlecaches.parsebundlespec
    except ImportError:
        from mercurial import exchange

        parsebundlespec = exchange.parsebundlespec

    from mercurial import discovery
    from mercurial import bundle2

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    cl = repo.changelog
    # revisions may come from positional args and/or repeated --rev
    revs = list(revs)
    revs.extend(opts.get(b'rev', ()))
    revs = scmutil.revrange(repo, revs)
    if not revs:
        raise error.Abort(b"not revision specified")
    # make it a consistent set (ie: without topological gaps)
    old_len = len(revs)
    revs = list(repo.revs(b"%ld::%ld", revs, revs))
    if old_len != len(revs):
        new_count = len(revs) - old_len
        msg = b"add %d new revisions to make it a consistent set\n"
        ui.write_err(msg % new_count)

    # heads of the selected set become the changegroup heads; heads of the
    # ancestors outside the set act as the common (already-known) bases
    targets = [cl.node(r) for r in repo.revs(b"heads(::%ld)", revs)]
    bases = [cl.node(r) for r in repo.revs(b"heads(::%ld - %ld)", revs, revs)]
    outgoing = discovery.outgoing(repo, bases, targets)

    bundle_spec = opts.get(b'type')

    bundle_spec = parsebundlespec(repo, bundle_spec, strict=False)

    # derive the changegroup version, defaulting from the bundle format
    # when the spec does not state one explicitly
    cgversion = bundle_spec.params.get(b"cg.version")
    if cgversion is None:
        if bundle_spec.version == b'v1':
            cgversion = b'01'
        if bundle_spec.version == b'v2':
            cgversion = b'02'
    if cgversion not in changegroup.supportedoutgoingversions(repo):
        err = b"repository does not support bundle version %s"
        raise error.Abort(err % cgversion)

    if cgversion == b'01':  # bundle1
        bversion = b'HG10' + bundle_spec.wirecompression
        bcompression = None
    elif cgversion in (b'02', b'03'):
        bversion = b'HG20'
        bcompression = bundle_spec.wirecompression
    else:
        err = b'perf::bundle: unexpected changegroup version %s'
        raise error.ProgrammingError(err % cgversion)

    # only uncompressed bundles are supported for now (see docstring)
    if bcompression is None:
        bcompression = b'UN'

    if bcompression != b'UN':
        err = b'perf::bundle: compression currently unsupported: %s'
        raise error.ProgrammingError(err % bcompression)

    def do_bundle():
        # write to os.devnull: we time bundle generation, not disk I/O
        bundle2.writenewbundle(
            ui,
            repo,
            b'perf::bundle',
            os.devnull,
            bversion,
            outgoing,
            bundle_spec.params,
        )

    timer(do_bundle)
    fm.end()
1245 1283
1246 1284
@command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    # run `fn` against a freshly re-opened bundle each time
    def makebench(fn):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    # read the bundle payload in `size`-byte chunks through the unpacker
    def makereadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    # baseline: raw file reads with no bundle machinery at all
    def makestdioread(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    # read each bundle2 part's payload in `size`-byte chunks
    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # open once to detect the bundle flavor and pick matching benchmarks
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend(
                [
                    (makebench(deltaiter), b'cg1 deltaiter()'),
                    (makebench(iterchunks), b'cg1 getchunks()'),
                    (makereadnbytes(8192), b'cg1 read(8k)'),
                    (makereadnbytes(16384), b'cg1 read(16k)'),
                    (makereadnbytes(32768), b'cg1 read(32k)'),
                    (makereadnbytes(131072), b'cg1 read(128k)'),
                ]
            )
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend(
                [
                    (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                    (makebench(iterparts), b'bundle2 iterparts()'),
                    (
                        makebench(iterpartsseekable),
                        b'bundle2 iterparts() seekable',
                    ),
                    (makebench(seek), b'bundle2 part seek()'),
                    (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                    (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                    (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                    (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
                ]
            )
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1371 1409
1372 1410
@command(
    b'perf::changegroupchangelog|perfchangegroupchangelog',
    formatteropts
    + [
        (b'', b'cgversion', b'02', b'changegroup version'),
        (b'r', b'rev', b'', b'revisions to add to changegroup'),
    ],
)
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def run():
        # generate and drain the changelog chunks; the chunk bytes are unused
        state, chunks = bundler._generatechangelog(cl, nodes)
        for _chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(run)

    fm.end()
1408 1446
1409 1447
@command(b'perf::dirs|perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark `dirstate.hasdir` with a cold directory cache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    b'a' in dirstate  # make sure the dirstate is loaded before timing

    def run():
        dirstate.hasdir(b'a')
        # drop the cached directory structure so the next call recomputes it
        try:
            del dirstate._map._dirs
        except AttributeError:
            pass

    timer(run)
    fm.end()
1426 1464
1427 1465
@command(
    b'perf::dirstate|perfdirstate',
    [
        (
            b'',
            b'iteration',
            None,
            b'benchmark a full iteration for the dirstate',
        ),
        (
            b'',
            b'contains',
            None,
            b'benchmark a large amount of `nf in dirstate` calls',
        ),
    ]
    + formatteropts,
)
def perfdirstate(ui, repo, **opts):
    """benchmark the time of various dirstate operations

    By default benchmark the time necessary to load a dirstate from scratch.
    The dirstate is loaded to the point were a "contains" request can be
    answered.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    b"a" in repo.dirstate  # prime the dirstate before picking a benchmark

    if opts[b'iteration'] and opts[b'contains']:
        msg = b'only specify one of --iteration or --contains'
        raise error.Abort(msg)

    if opts[b'iteration']:
        # time a full iteration over every tracked file
        setup = None
        dirstate = repo.dirstate

        def run():
            for _fname in dirstate:
                pass

    elif opts[b'contains']:
        # time membership checks for both present and missing paths
        setup = None
        dirstate = repo.dirstate
        allfiles = list(dirstate)
        # also add file path that will be "missing" from the dirstate
        allfiles.extend([f[::-1] for f in allfiles])

        def run():
            for fname in allfiles:
                fname in dirstate

    else:
        # default: time a cold load up to the first membership check

        def setup():
            repo.dirstate.invalidate()

        def run():
            b"a" in repo.dirstate

    timer(run, setup=setup)
    fm.end()
1490 1528
1491 1529
@command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark a 'dirstate.hasdir' call from an empty `dirs` cache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo.dirstate.hasdir(b"a")  # prime the dirstate itself

    def setup():
        # throw away the cached directory structure between runs
        try:
            del repo.dirstate._map._dirs
        except AttributeError:
            pass

    def run():
        repo.dirstate.hasdir(b"a")

    timer(run, setup=setup)
    fm.end()
1510 1548
1511 1549
@command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.filefoldmap.get()` request

    The dirstate filefoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    dirstate._map.filefoldmap.get(b'a')  # prime the map once up front

    def setup():
        del dirstate._map.filefoldmap

    def run():
        dirstate._map.filefoldmap.get(b'a')

    timer(run, setup=setup)
    fm.end()
1531 1569
1532 1570
@command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.dirfoldmap.get()` request

    The dirstate dirfoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    dirstate._map.dirfoldmap.get(b'a')  # prime the map once up front

    def setup():
        # drop both the fold map and the directory cache it is built from
        del dirstate._map.dirfoldmap
        try:
            del dirstate._map._dirs
        except AttributeError:
            pass

    def run():
        dirstate._map.dirfoldmap.get(b'a')

    timer(run, setup=setup)
    fm.end()
1556 1594
1557 1595
@command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark the time it takes to write a dirstate on disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    b"a" in ds  # make sure the dirstate is loaded

    def setup():
        # mark the dirstate dirty so write() actually hits the disk
        ds._dirty = True

    def run():
        ds.write(repo.currenttransaction())

    # hold the working-copy lock for the whole measurement
    with repo.wlock():
        timer(run, setup=setup)
    fm.end()
1575 1613
1576 1614
def _getmergerevs(repo, opts):
    """parse command arguments into the revisions involved in a merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        wctx = repo[scmutil.revsingle(repo, opts[b'from'])]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        ancestor = repo[scmutil.revsingle(repo, opts[b'base'])]
    else:
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
1598 1636
1599 1637
@command(
    b'perf::mergecalculate|perfmergecalculate',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecalculate(ui, repo, **opts):
    """benchmark `merge.calculateupdates` between two revisions"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def run():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(
            repo,
            wctx,
            rctx,
            [ancestor],
            branchmerge=False,
            force=False,
            acceptremote=True,
            followcopies=True,
        )

    timer(run)
    fm.end()
1631 1669
1632 1670
@command(
    b'perf::mergecopies|perfmergecopies',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def run():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        copies.mergecopies(repo, wctx, rctx, ancestor)

    timer(run)
    fm.end()
1655 1693
1656 1694
@command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    src_ctx = scmutil.revsingle(repo, rev1, rev1)
    dst_ctx = scmutil.revsingle(repo, rev2, rev2)

    def run():
        copies.pathcopies(src_ctx, dst_ctx)

    timer(run)
    fm.end()
1670 1708
1671 1709
@command(
    b'perf::phases|perfphases',
    [
        (b'', b'full', False, b'include file reading time too'),
    ],
    b"",
)
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')

    def run():
        phases = _phases
        if full:
            # also account for re-reading the phase roots from disk
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)

    timer(run)
    fm.end()
1696 1734
1697 1735
@command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import bin
    from mercurial import (
        exchange,
        hg,
        phases,
    )

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            b'default repository not configured!',
            hint=b"see 'hg help config.paths'",
        )
    # newer Mercurial exposes a path object; older ones a pushloc attribute
    if util.safehasattr(path, 'main_path'):
        path = path.get_push_variant()
        dest = path.loc
    else:
        dest = path.pushloc or path.loc
    ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    # fetch the remote phase roots once, outside the timed section
    with other.commandexecutor() as e:
        remotephases = e.callcommand(
            b'listkeys', {b'namespace': b'phases'}
        ).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.statusnoi18n(b'publishing: yes\n')
    else:
        ui.statusnoi18n(b'publishing: no\n')

    # count how many remote non-public roots are known locally, for context
    has_node = getattr(repo.changelog.index, 'has_node', None)
    if has_node is None:
        has_node = repo.changelog.nodemap.__contains__
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if has_node(node) and int(phase):
            nonpublishroots += 1
    ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
    ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)

    def d():
        phases.remotephasessummary(repo, remotesubset, remotephases)

    timer(d)
    fm.end()
1760 1798
1761 1799
@command(
    b'perf::manifest|perfmanifest',
    [
        (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
        (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ]
    + formatteropts,
    b'REV|NODE',
)
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # REV names a changeset; use its manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        node = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # full hex node of the manifest itself
            node = bin(rev)
        else:
            # integer revision into the manifest revlog
            try:
                rev = int(rev)

                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    node = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    node = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(
                    b'manifest revision must be integer or full node'
                )

    def run():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[node].read()

    timer(run)
    fm.end()
1805 1843
1806 1844
@command(b'perf::changeset|perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading a single changeset entry from the changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    node = scmutil.revsingle(repo, rev).node()

    def run():
        repo.changelog.read(node)
        # repo.changelog._cache = None

    timer(run)
    fm.end()
1819 1857
1820 1858
@command(b'perf::ignore|perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def setupone():
        # drop the cached ignore matcher so each run rebuilds it
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def runone():
        dirstate._ignore

    timer(runone, setup=setupone, title=b"load")
    fm.end()
1837 1875
1838 1876
@command(
    b'perf::index|perfindex',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'no-lookup', None, b'do not revision lookup post creation'),
    ]
    + formatteropts,
)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:

    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg

    # determine which nodes (if any) get looked up after index creation
    if opts[b'no_lookup']:
        if opts['rev']:
            raise error.Abort('--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        cl = repo.changelog
        nodes = [cl.node(r) for r in scmutil.revrange(repo, opts[b'rev'])]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func

    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)

    def run():
        cl = makecl(unfi)
        for node in nodes:
            cl.rev(node)

    timer(run, setup=setup)
    fm.end()
1901 1939
1902 1940
@command(
    b'perf::nodemap|perfnodemap',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ]
    + formatteropts,
)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    node_getter = [None]

    def refresh_getter():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        cl = makecl(unfi)
        if util.safehasattr(cl.index, 'get_rev'):
            node_getter[0] = cl.index.get_rev
        else:
            node_getter[0] = cl.nodemap.get

    def run():
        get = node_getter[0]
        for node in nodes:
            get(node)

    setup = None
    if clearcaches:

        def setup():
            refresh_getter()

    else:
        refresh_getter()
        run()  # prewarm the data structure
    timer(run, setup=setup)
    fm.end()
1973 2011
1974 2012
@command(b'perf::startup|perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark the startup time of a bare `hg version` invocation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def run():
        # spawn the same hg binary with a neutralized HGRCPATH and discard
        # its output; the shell redirection target differs per platform
        if os.name != 'nt':
            os.system(
                b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
            )
        else:
            os.environ['HGRCPATH'] = r' '
            os.system("%s version -q > NUL" % sys.argv[0])

    timer(run)
    fm.end()
1991 2029
1992 2030
def _find_stream_generator(version):
    """find the proper generator function for this stream version

    Returns a `generate(repo)` callable producing the raw stream data for
    the requested `version`. `version` may also be b"latest", which resolves
    to the highest non-experimental version this Mercurial provides.

    Raises error.Abort when the requested version is unknown or unavailable.
    """
    import mercurial.streamclone

    available = {}

    # try to fetch a v1 generator
    generatev1 = getattr(mercurial.streamclone, "generatev1", None)
    if generatev1 is not None:
        # Fix: the previous code defined a dead `generate` wrapper here that
        # wrongly called generatev2 (before it was even fetched) and was never
        # used; generatev1 is registered directly, as before.
        available[b'v1'] = generatev1
    # try to fetch a v2 generator
    generatev2 = getattr(mercurial.streamclone, "generatev2", None)
    if generatev2 is not None:

        def generate_v2(repo):
            # drop the entry and byte counts, keep only the data stream
            entries, bytes, data = generatev2(repo, None, None, True)
            return data

        available[b'v2'] = generate_v2
    # try to fetch a v3 generator
    generatev3 = getattr(mercurial.streamclone, "generatev3", None)
    if generatev3 is not None:

        def generate_v3(repo):
            entries, bytes, data = generatev3(repo, None, None, True)
            return data

        available[b'v3-exp'] = generate_v3

    # resolve the request
    if version == b"latest":
        # latest is the highest non experimental version
        latest_key = max(v for v in available if b'-exp' not in v)
        return available[latest_key]
    elif version in available:
        return available[version]
    else:
        # Fix: typo "unkown" -> "unknown" in the abort message
        msg = b"unknown or unavailable version: %s"
        msg %= version
        hint = b"available versions: %s"
        hint %= b', '.join(sorted(available))
        raise error.Abort(msg, hint=hint)
2040 2078
2041 2079
@command(
    b'perf::stream-locked-section',
    [
        (
            b'',
            b'stream-version',
            b'latest',
            b'stream version to use ("v1", "v2", "v3" or "latest", (the default))',
        ),
    ]
    + formatteropts,
)
def perf_stream_clone_scan(ui, repo, stream_version, **opts):
    """benchmark the initial, repo-locked, section of a stream-clone"""

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    generate = _find_stream_generator(stream_version)

    # keep a reference to the produced generator between runs: deleting it
    # may trigger cleanup work that we do not want to measure
    holder = [None]

    def reset():
        holder[0] = None

    def measured():
        # the lock is held for the duration the initialisation
        holder[0] = generate(repo)

    timer(measured, setup=reset, title=b"load")
    fm.end()
2075 2113
2076 2114
@command(
    b'perf::stream-generate',
    [
        (
            b'',
            b'stream-version',
            b'latest',
            b'stream version to use ("v1", "v2" or "latest", (the default))',
        ),
    ]
    + formatteropts,
)
def perf_stream_clone_generate(ui, repo, stream_version, **opts):
    """benchmark the full generation of a stream clone"""

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # resolve the generator up front so version lookup is not part of the
    # measured run
    generate = _find_stream_generator(stream_version)

    def runone():
        # the lock is held for the duration of the initialisation; drain the
        # full stream so the whole generation cost is measured
        for chunk in generate(repo):
            pass

    timer(runone, title=b"generate")
    fm.end()
2107 2145
2108 2146
@command(
    b'perf::stream-consume',
    formatteropts,
)
def perf_stream_clone_consume(ui, repo, filename, **opts):
    """benchmark the full application of a stream clone

    This include the creation of the repository
    """
    # try except to appease check code
    msg = b"mercurial too old, missing necessary module: %s"
    try:
        from mercurial import bundle2
    except ImportError as exc:
        msg %= _bytestr(exc)
        raise error.Abort(msg)
    try:
        from mercurial import exchange
    except ImportError as exc:
        msg %= _bytestr(exc)
        raise error.Abort(msg)
    try:
        from mercurial import hg
    except ImportError as exc:
        msg %= _bytestr(exc)
        raise error.Abort(msg)
    try:
        from mercurial import localrepo
    except ImportError as exc:
        msg %= _bytestr(exc)
        raise error.Abort(msg)

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # deletion of the generator may trigger some cleanup that we do not want to
    # measure
    if not (os.path.isfile(filename) and os.access(filename, os.R_OK)):
        raise error.Abort("not a readable file: %s" % filename)

    # (open bundle file, target temp directory) for the current run; filled
    # by `context` below and consumed by `runone`
    run_variables = [None, None]

    @contextlib.contextmanager
    def context():
        # open the bundle and create a fresh target directory around each
        # timed run so that file opening and tmpdir setup are not measured
        with open(filename, mode='rb') as bundle:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tmp_dir = fsencode(tmp_dir)
                run_variables[0] = bundle
                run_variables[1] = tmp_dir
                yield
                run_variables[0] = None
                run_variables[1] = None

    def runone():
        bundle = run_variables[0]
        tmp_dir = run_variables[1]
        # only pass ui when no srcrepo
        localrepo.createrepository(
            repo.ui, tmp_dir, requirements=repo.requirements
        )
        target = hg.repository(repo.ui, tmp_dir)
        gen = exchange.readbundle(target.ui, bundle, bundle.name)
        # stream v1
        if util.safehasattr(gen, 'apply'):
            gen.apply(target)
        else:
            with target.transaction(b"perf::stream-consume") as tr:
                bundle2.applybundle(
                    target,
                    gen,
                    tr,
                    source=b'unbundle',
                    url=filename,
                )

    timer(runone, context=context, title=b"consume")
    fm.end()
2186 2224
2187 2225
@command(b'perf::parents|perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nodes = [repo.changelog.node(rev) for rev in _xrange(count)]

    def lookup_parents():
        # go through the full attribute chain on purpose so every object
        # layer is part of the measurement
        for node in nodes:
            repo.changelog.parents(node)

    timer(lookup_parents)
    fm.end()
2213 2251
2214 2252
@command(b'perf::ctxfiles|perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark listing the files touched by one changeset via its context."""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)

    def run():
        len(repo[rev].files())

    timer(run)
    fm.end()
2226 2264
2227 2265
@command(b'perf::rawfiles|perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the touched-file list straight from the changelog."""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def run():
        # index 3 of a changelog entry is the list of touched files
        len(cl.read(rev)[3])

    timer(run)
    fm.end()
2240 2278
2241 2279
@command(b'perf::lookup|perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision specifier to a node."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def run():
        return len(repo.lookup(rev))

    timer(run)
    fm.end()
2248 2286
2249 2287
@command(
    b'perf::linelogedits|perflinelogedits',
    [
        (b'n', b'edits', 10000, b'number of edits'),
        (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
    ],
    norepo=True,
)
def perflinelogedits(ui, **opts):
    """benchmark replaying a long pseudo-random edit script on a linelog."""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed: every invocation generates the identical edit script
    random.seed(0)
    randint = random.randint
    nlines = 0
    editargs = []
    for rev in _xrange(edits):
        # a source range inside the current file content...
        a1 = randint(0, nlines)
        a2 = randint(a1, min(nlines, a1 + maxhunklines))
        # ...replaced by an arbitrary target range
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        nlines += (b2 - b1) - (a2 - a1)
        editargs.append((rev, a1, a2, b1, b2))

    def replay():
        ll = linelog.linelog()
        for args in editargs:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(replay)
    fm.end()
2287 2325
2288 2326
@command(b'perf::revrange|perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revset specifications."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revrange = scmutil.revrange

    def run():
        return len(revrange(repo, specs))

    timer(run)
    fm.end()
2296 2334
2297 2335
@command(b'perf::nodelookup|perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark resolving a node to its revision number with cold caches."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog

    mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()

    # open the changelog revlog directly; the constructor switched from an
    # `indexfile` argument to a `radix` argument in newer Mercurial
    try:
        cl = revlog(getsvfs(repo), radix=b"00changelog")
    except TypeError:
        cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")

    def d():
        cl.rev(n)
        # drop the revlog caches so each run performs a real lookup
        clearcaches(cl)

    timer(d)
    fm.end()
2318 2356
2319 2357
@command(
    b'perf::log|perflog',
    [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
)
def perflog(ui, repo, rev=None, **opts):
    """benchmark a plain `hg log` run, swallowing its output."""
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    # buffer the command output so terminal I/O is not measured
    ui.pushbuffer()

    def run():
        commands.log(
            ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
        )

    timer(run)
    ui.popbuffer()
    fm.end()
2337 2375
2338 2376
@command(b'perf::moonwalk|perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def moonwalk():
        # walk from tip down to the null revision; ctx.branch() forces a read
        # of the changelog data, not just the index
        for rev in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            ctx = repo[rev]
            ctx.branch()

    timer(moonwalk)
    fm.end()
2355 2393
2356 2394
@command(
    b'perf::templating|perftemplating',
    [
        (b'r', b'rev', [], b'revisions to run the template on'),
    ]
    + formatteropts,
)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort(
            b"perftemplating not available with this Mercurial",
            hint=b"use 4.3 or later",
        )

    opts = _byteskwargs(opts)

    # render into a throw-away ui so pager/terminal costs are excluded
    nullui = ui.copy()
    nullui.fout = open(os.devnull, 'wb')
    nullui.disablepager()
    revs = opts.get(b'rev') or [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    if testedtemplate is None:
        # a representative one-line log template
        testedtemplate = (
            b'{date|shortdate} [{rev}:{node|short}]'
            b' {author|person}: {desc|firstline}\n'
        )
    displayer = makelogtemplater(nullui, repo, testedtemplate)

    def render_all():
        for rev in revs:
            ctx = repo[rev]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(render_all)
    fm.end()
2399 2437
2400 2438
2401 2439 def _displaystats(ui, opts, entries, data):
2402 2440 # use a second formatter because the data are quite different, not sure
2403 2441 # how it flies with the templater.
2404 2442 fm = ui.formatter(b'perf-stats', opts)
2405 2443 for key, title in entries:
2406 2444 values = data[key]
2407 2445 nbvalues = len(data)
2408 2446 values.sort()
2409 2447 stats = {
2410 2448 'key': key,
2411 2449 'title': title,
2412 2450 'nbitems': len(values),
2413 2451 'min': values[0][0],
2414 2452 '10%': values[(nbvalues * 10) // 100][0],
2415 2453 '25%': values[(nbvalues * 25) // 100][0],
2416 2454 '50%': values[(nbvalues * 50) // 100][0],
2417 2455 '75%': values[(nbvalues * 75) // 100][0],
2418 2456 '80%': values[(nbvalues * 80) // 100][0],
2419 2457 '85%': values[(nbvalues * 85) // 100][0],
2420 2458 '90%': values[(nbvalues * 90) // 100][0],
2421 2459 '95%': values[(nbvalues * 95) // 100][0],
2422 2460 '99%': values[(nbvalues * 99) // 100][0],
2423 2461 'max': values[-1][0],
2424 2462 }
2425 2463 fm.startitem()
2426 2464 fm.data(**stats)
2427 2465 # make node pretty for the human output
2428 2466 fm.plain('### %s (%d items)\n' % (title, len(values)))
2429 2467 lines = [
2430 2468 'min',
2431 2469 '10%',
2432 2470 '25%',
2433 2471 '50%',
2434 2472 '75%',
2435 2473 '80%',
2436 2474 '85%',
2437 2475 '90%',
2438 2476 '95%',
2439 2477 '99%',
2440 2478 'max',
2441 2479 ]
2442 2480 for l in lines:
2443 2481 fm.plain('%s: %s\n' % (l, stats[l]))
2444 2482 fm.end()
2445 2483
2446 2484
@command(
    b'perf::helper-mergecopies|perfhelper-mergecopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelpermergecopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perfmergecopies`

    This command find (base, p1, p2) triplet relevant for copytracing
    benchmarking in the context of a merge. It reports values for some of the
    parameters that impact merge copy tracing time during merge.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision triplets are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # (column title, %-format for one row) pairs, in display order
    output_template = [
        ("base", "%(base)12s"),
        ("p1", "%(p1.node)12s"),
        ("p2", "%(p2.node)12s"),
        ("p1.nb-revs", "%(p1.nbrevs)12d"),
        ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
        ("p1.renames", "%(p1.renamedfiles)12d"),
        ("p1.time", "%(p1.time)12.3f"),
        ("p2.nb-revs", "%(p2.nbrevs)12d"),
        ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
        ("p2.renames", "%(p2.renamedfiles)12d"),
        ("p2.time", "%(p2.time)12.3f"),
        ("renames", "%(nbrenamedfiles)12d"),
        ("total.time", "%(time)12.3f"),
    ]
    if not dotiming:
        # drop the columns that are only filled when --timing is used
        output_template = [
            i
            for i in output_template
            if not ('time' in i[0] or 'renames' in i[0])
        ]
    header_names = [h for (h, v) in output_template]
    output = ' '.join([v for (h, v) in output_template]) + '\n'
    header = ' '.join(['%12s'] * len(header_names)) + '\n'
    fm.plain(header % tuple(header_names))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        # value lists for _displaystats, keyed by statistic name
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['parentnbrenames'] = []
            alldata['totalnbrenames'] = []
            alldata['parenttime'] = []
            alldata['totaltime'] = []

    # only merge revisions provide (base, p1, p2) triplets of interest
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1()
        p2 = ctx.p2()
        bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
        for b in bases:
            b = repo[b]
            p1missing = copies._computeforwardmissing(b, p1)
            p2missing = copies._computeforwardmissing(b, p2)
            data = {
                b'base': b.hex(),
                b'p1.node': p1.hex(),
                b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
                b'p1.nbmissingfiles': len(p1missing),
                b'p2.node': p2.hex(),
                b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
                b'p2.nbmissingfiles': len(p2missing),
            }
            if dostats:
                if p1missing:
                    alldata['nbrevs'].append(
                        (data['p1.nbrevs'], b.hex(), p1.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p1.nbmissingfiles'], b.hex(), p1.hex())
                    )
                if p2missing:
                    alldata['nbrevs'].append(
                        (data['p2.nbrevs'], b.hex(), p2.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p2.nbmissingfiles'], b.hex(), p2.hex())
                    )
            if dotiming:
                begin = util.timer()
                mergedata = copies.mergecopies(repo, p1, p2, b)
                end = util.timer()
                # not very stable timing since we did only one run
                data['time'] = end - begin
                # mergedata contains five dicts: "copy", "movewithdir",
                # "diverge", "renamedelete" and "dirmove".
                # The first 4 are about renamed file so lets count that.
                renames = len(mergedata[0])
                renames += len(mergedata[1])
                renames += len(mergedata[2])
                renames += len(mergedata[3])
                data['nbrenamedfiles'] = renames
                begin = util.timer()
                p1renames = copies.pathcopies(b, p1)
                end = util.timer()
                data['p1.time'] = end - begin
                begin = util.timer()
                p2renames = copies.pathcopies(b, p2)
                end = util.timer()
                data['p2.time'] = end - begin
                data['p1.renamedfiles'] = len(p1renames)
                data['p2.renamedfiles'] = len(p2renames)

                if dostats:
                    if p1missing:
                        alldata['parentnbrenames'].append(
                            (data['p1.renamedfiles'], b.hex(), p1.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p1.time'], b.hex(), p1.hex())
                        )
                    if p2missing:
                        alldata['parentnbrenames'].append(
                            (data['p2.renamedfiles'], b.hex(), p2.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p2.time'], b.hex(), p2.hex())
                        )
                    if p1missing or p2missing:
                        alldata['totalnbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                b.hex(),
                                p1.hex(),
                                p2.hex(),
                            )
                        )
                        alldata['totaltime'].append(
                            (data['time'], b.hex(), p1.hex(), p2.hex())
                        )
            fm.startitem()
            fm.data(**data)
            # make node pretty for the human output
            out = data.copy()
            out['base'] = fm.hexfunc(b.node())
            out['p1.node'] = fm.hexfunc(p1.node())
            out['p2.node'] = fm.hexfunc(p2.node())
            fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(
                ('parentnbrenames', 'rename from one parent to base')
            )
            entries.append(('totalnbrenames', 'total number of renames'))
            entries.append(('parenttime', 'time for one parent'))
            entries.append(('totaltime', 'time for both parents'))
        _displaystats(ui, opts, entries, alldata)
2628 2666
2629 2667
@command(
    b'perf::helper-pathcopies|perfhelper-pathcopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # with --timing two extra columns (renames, time) are emitted
    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d "
            "%(nbrenamedfiles)12d %(time)18.5f\n"
        )
        header_names = (
            "source",
            "destination",
            "nb-revs",
            "nb-files",
            "nb-renames",
            "time",
        )
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d\n"
        )
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        # value lists for _displaystats, keyed by statistic name
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['nbrenames'] = []
            alldata['time'] = []

    # only merge revisions provide interesting (base, parent) pairs
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    # nothing to trace between this base and parent
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
                    b'nbmissingfiles': len(missing),
                }
                if dostats:
                    alldata['nbrevs'].append(
                        (
                            data['nbrevs'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                    alldata['nbmissingfiles'].append(
                        (
                            data['nbmissingfiles'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                    if dostats:
                        alldata['time'].append(
                            (
                                data['time'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                        alldata['nbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                fm.startitem()
                fm.data(**data)
                # make node pretty for the human output
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
    if dostats:
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('nbrenames', 'renamed files'))
            entries.append(('time', 'time'))
        _displaystats(ui, opts, entries, alldata)
2768 2806
2769 2807
@command(b'perf::cca|perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark the creation of a case-collision auditor."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def run():
        return scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(run)
    fm.end()
2776 2814
2777 2815
@command(b'perf::fncacheload|perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache file from disk."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store

    def load():
        store.fncache._load()

    timer(load)
    fm.end()
2789 2827
2790 2828
@command(b'perf::fncachewrite|perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark rewriting the fncache file inside a transaction."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    # writing requires the repo lock and an open transaction; take them once
    # around all timed runs
    lock = repo.lock()
    s.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    tr.addbackup(b'fncache')

    def d():
        # force the dirty flag so write() does not short-circuit
        s.fncache._dirty = True
        s.fncache.write(tr)

    timer(d)
    tr.close()
    lock.release()
    fm.end()
2809 2847
2810 2848
@command(b'perf::fncacheencode|perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark encoding every path currently listed in the fncache."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    store.fncache._load()

    def encode_all():
        for path in store.fncache.entries:
            store.encode(path)

    timer(encode_all)
    fm.end()
2824 2862
2825 2863
def _bdiffworker(q, blocks, xdiff, ready, done):
    """worker loop used by the threaded variant of perfbdiff

    Pulls text pairs from `q` and diffs them until a None sentinel is
    received, then parks on the `ready` condition until the main thread wakes
    all workers for the next timing run (or sets `done` to shut down).
    The diff flavor is selected by the `xdiff` / `blocks` booleans.
    """
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done()  # for the None one
        with ready:
            ready.wait()
2841 2879
2842 2880
def _manifestrevision(repo, mnode):
    """return the raw manifest text for node `mnode`, across hg versions."""
    ml = repo.manifestlog
    # modern Mercurial exposes storage through getstorage(); older versions
    # kept a private _revlog attribute
    if util.safehasattr(ml, b'getstorage'):
        return ml.getstorage(b'').revision(mnode)
    return ml._revlog.revision(mnode)
2852 2890
2853 2891
@command(
    b'perf::bdiff|perfbdiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
        (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
        (b'', b'blocks', False, b'test computing diffs into blocks'),
        (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],
    b'-c|-m|FILE REV',
)
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        # --alldata implies working on the changelog
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # without a file argument, the first positional is the revision
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    # (old text, new text) pairs gathered up front so only the diffing
    # itself is timed
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:

        def d():
            # single-threaded: diff every pair inline
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)

    else:
        # threaded: start the worker pool once, then feed it the pairs on
        # every timed run; workers park on `ready` between runs
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(
                target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
            ).start()
        q.join()

        def d():
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                # one sentinel per worker ends the inner consume loop
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # shut the worker pool down
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
2968 3006
2969 3007
@command(
    b'perf::unbundle',
    formatteropts,
    b'BUNDLE_FILE',
)
def perf_unbundle(ui, repo, fname, **opts):
    """benchmark application of a bundle in a repository.

    This does not include the final transaction processing

    The bundle is re-read and a fresh transaction is opened before each
    timed run; the transaction is aborted afterwards so the repository is
    left unchanged.
    """

    from mercurial import exchange
    from mercurial import bundle2
    from mercurial import transaction

    opts = _byteskwargs(opts)

    ### some compatibility hotfix
    #
    # the data attribute is dropped in 63edc384d3b7 a changeset introducing a
    # critical regression that break transaction rollback for files that are
    # de-inlined.
    method = transaction.transaction._addentry
    pre_63edc384d3b7 = "data" in getargspec(method).args
    # the `detailed_exit_code` attribute is introduced in 33c0c25d0b0f
    # a changeset that is a close descendant of 18415fc918a1, the changeset
    # that conclude the fix run for the bug introduced in 63edc384d3b7.
    args = getargspec(error.Abort.__init__).args
    post_18415fc918a1 = "detailed_exit_code" in args

    old_max_inline = None
    try:
        if not (pre_63edc384d3b7 or post_18415fc918a1):
            # disable inlining
            old_max_inline = mercurial.revlog._maxinline
            # large enough to never happen
            mercurial.revlog._maxinline = 2 ** 50

        with repo.lock():
            bundle = [None, None]
            orig_quiet = repo.ui.quiet
            try:
                repo.ui.quiet = True
                with open(fname, mode="rb") as f:

                    def noop_report(*args, **kwargs):
                        pass

                    def setup():
                        # abort any transaction left over from the previous
                        # run, then re-read the bundle from the start
                        gen, tr = bundle
                        if tr is not None:
                            tr.abort()
                        bundle[:] = [None, None]
                        f.seek(0)
                        bundle[0] = exchange.readbundle(ui, f, fname)
                        bundle[1] = repo.transaction(b'perf::unbundle')
                        # silence the transaction
                        bundle[1]._report = noop_report

                    def apply():
                        gen, tr = bundle
                        bundle2.applybundle(
                            repo,
                            gen,
                            tr,
                            source=b'perf::unbundle',
                            url=fname,
                        )

                    timer, fm = gettimer(ui, opts)
                    timer(apply, setup=setup)
                    fm.end()
            finally:
                # fix: this previously read `repo.ui.quiet == orig_quiet`,
                # a no-op comparison that left the ui permanently quiet
                repo.ui.quiet = orig_quiet
                gen, tr = bundle
                if tr is not None:
                    tr.abort()
    finally:
        if old_max_inline is not None:
            mercurial.revlog._maxinline = old_max_inline
3049 3087
3050 3088
@command(
    b'perf::unidiff|perfunidiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ],
    b'-c|-m|FILE REV',
)
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        # --alldata implies reading from the changelog
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # -c/-m take no FILE argument; the first positional is the revision
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    # gather all (left, right) text pairs up front so the timed function
    # only measures diff computation, not revision retrieval
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            # diff each revision against its delta parent
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False
            )
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
3129 3167
3130 3168
@command(b'perf::diffwd|perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # map each single-letter diff flag to the matching diff option name
    flagnames = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    # benchmark a plain diff plus each whitespace-handling variant
    for flags in ('', 'w', 'b', 'B', 'wB'):
        diffkwargs = {flagnames[letter]: b'1' for letter in flags}

        def run(kwargs=diffkwargs):
            # swallow the diff output; only the computation is of interest
            ui.pushbuffer()
            commands.diff(ui, repo, **kwargs)
            ui.popbuffer()

        encoded = flags.encode('ascii')
        if encoded:
            title = b'diffopts: %s' % (b'-' + encoded)
        else:
            title = b'diffopts: none'
        timer(run, title=title)
    fm.end()
3154 3192
3155 3193
@command(
    b'perf::revlogindex|perfrevlogindex',
    revlogopts + formatteropts,
    b'-c|-m|FILE',
)
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    # compat with hg <= 5.8
    radix = getattr(rl, 'radix', None)
    indexfile = getattr(rl, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(rl, 'indexfile')
    data = opener.read(indexfile)

    # revlog header: low 16 bits hold the format version; for v1, bit 16
    # is the inline-data flag
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        inline = header & (1 << 16)
    else:
        raise error.Abort(b'unsupported revlog version: %d' % version)

    parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
    if parse_index_v1 is None:
        # older hg exposed index parsing through revlogio
        parse_index_v1 = mercurial.revlog.revlogio().parseindex

    rllen = len(rl)

    # sample nodes at fixed fractions of the revlog so lookup cost can be
    # measured at several positions
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        # time instantiating a revlog object over the existing data
        if radix is not None:
            revlog(opener, radix=radix)
        else:
            # hg <= 5.8
            revlog(opener, indexfile=indexfile)

    def read():
        # raw index file read, no parsing
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        parse_index_v1(data, inline)

    def getentry(revornode):
        index = parse_index_v1(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        # parse once, then fetch every requested entry `count` times
        index = parse_index_v1(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        # node -> rev lookup; newer indexes expose rev(), older C indexes
        # only expose a nodemap
        index = parse_index_v1(data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        try:
            rev(node)
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        index = parse_index_v1(data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        for i in range(count):
            for node in nodes:
                try:
                    rev(node)
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
        (
            lambda: resolvenodes(allnodesrev, 2),
            b'look up all nodes 2x (reverse)',
        ),
        (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
        (
            lambda: getentries(allrevs, 2),
            b'retrieve all index entries 2x (forward)',
        ),
        (
            lambda: getentries(allrevsrev),
            b'retrieve all index entries (reverse)',
        ),
        (
            lambda: getentries(allrevsrev, 2),
            b'retrieve all index entries 2x (reverse)',
        ),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3301 3339
3302 3340
@command(
    b'perf::revlogrevisions|perfrevlogrevisions',
    revlogopts
    + formatteropts
    + [
        (b'd', b'dist', 100, b'distance between the revisions'),
        (b's', b'startrev', 0, b'revision to start reading at'),
        (b'', b'reverse', False, b'read in reverse'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogrevisions(
    ui, repo, file_=None, startrev=0, reverse=False, **opts
):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # a negative start revision counts back from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev

    def d():
        rl.clearcaches()

        step = opts[b'dist']
        if reverse:
            # walk from tip down to (and including) startrev
            first, last, step = rllen - 1, startrev - 1, -step
        else:
            first, last = startrev, rllen

        for x in _xrange(first, last, step):
            # Old revisions don't support passing int.
            rl.revision(rl.node(x))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
3351 3389
3352 3390
@command(
    b'perf::revlogwrite|perfrevlogwrite',
    revlogopts
    + formatteropts
    + [
        (b's', b'startrev', 1000, b'revision to start writing at'),
        (b'', b'stoprev', -1, b'last revision to write'),
        (b'', b'count', 3, b'number of passes to perform'),
        (b'', b'details', False, b'print timing for every revisions tested'),
        (b'', b'source', b'full', b'the kind of data feed in the revlog'),
        (b'', b'lazydeltabase', True, b'try the provided delta first'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
    (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    Note: This performance command measures performance in a custom way. As a
    result some of the global configuration of the 'perf' command does not
    apply to it:

    * ``pre-run``: disabled

    * ``profile-benchmark``: disabled

    * ``run-limits``: disabled use --count instead
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative revisions count back from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (
        b'full',
        b'parent-1',
        b'parent-2',
        b'parent-smallest',
        b'storage',
    )
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        # fix: error message previously read "invalide"
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(
            ui,
            rl,
            source,
            startrev,
            stoprev,
            c + 1,
            lazydeltabase=lazydeltabase,
            clearcaches=clearcaches,
        )
        allresults.append(timing)

    ### consolidate the results in a single list
    # each entry becomes (rev, [timing-from-pass-1, timing-from-pass-2, ...])
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # fix: the 50th percentile previously used a `* 70` multiplier,
        # reporting the 70th percentile under the "50%" label
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append(
            (
                sum(x[1][0] for x in item),
                sum(x[1][1] for x in item),
                sum(x[1][2] for x in item),
            )
        )
    formatone(
        fm,
        totaltime,
        title="total time (%d revs)" % resultcount,
        displayall=displayall,
    )
    fm.end()
3494 3532
3495 3533
3496 3534 class _faketr:
3497 3535 def add(s, x, y, z=None):
3498 3536 return None
3499 3537
3500 3538
def _timeonewrite(
    ui,
    orig,
    source,
    startrev,
    stoprev,
    runidx=None,
    lazydeltabase=True,
    clearcaches=True,
):
    """Replay revisions [startrev, stoprev] of ``orig`` into a temporary
    revlog, timing each ``addrawrevision`` call.

    ``source`` selects how the revision data is fed (see perfrevlogwrite).
    ``runidx``, when set, is only used to label the progress topic for
    multi-pass runs. Returns a list of ``(rev, timing)`` pairs.
    """
    timings = []
    # journaling is irrelevant to what we measure, so use a no-op transaction
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)

            def updateprogress(pos):
                progress.update(pos)

            def completeprogress():
                progress.complete()

        else:

            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)

            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            # build the addrawrevision arguments outside the timed section
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                dest.index.clearcaches()
                dest.clearcaches()
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
3550 3588
3551 3589
def _getrevisionseed(orig, rev, tr, source):
    """Return ``(args, kwargs)`` for ``addrawrevision`` replaying ``rev``.

    ``source`` picks where the revision data comes from: a full text, a
    delta against one of the parents, or the delta already stored in
    ``orig``.
    """
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        cachedelta = (orig.rev(p1), orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to p1 when there is no second parent
        parent = p1 if p2 == nullid else p2
        cachedelta = (orig.rev(parent), orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        # start from the p1 delta, switch to p2 only if strictly smaller
        parent, diff = p1, orig.revdiff(p1, rev)
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(p2diff) < len(diff):
                parent, diff = p2, p2diff
        cachedelta = (orig.rev(parent), diff)
    elif source == b'storage':
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return (
        (text, tr, linkrev, p1, p2),
        {'node': node, 'flags': flags, 'cachedelta': cachedelta},
    )
3592 3630
3593 3631
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Yield a writable copy of ``orig`` truncated just before ``truncaterev``.

    The revlog files are copied into a temporary directory, then truncated
    so that revisions >= ``truncaterev`` can be re-added by the caller.
    The directory is removed when the context exits. Inline revlogs are
    not supported.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    k = 'upperboundcomp'
    # carry over the compression upper-bound setting when the revlog has one
    if util.safehasattr(orig, k):
        revlogkwargs[k] = getattr(orig, k)

    indexfile = getattr(orig, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(orig, 'indexfile')
    origindexpath = orig.opener.join(indexfile)

    datafile = getattr(orig, '_datafile', getattr(orig, 'datafile'))
    origdatapath = orig.opener.join(datafile)
    radix = b'revlog'
    indexname = b'revlog.i'
    dataname = b'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            # each index entry is fixed-size, so this drops revs >= truncaterev
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        try:
            dest = revlog(vfs, radix=radix, **revlogkwargs)
        except TypeError:
            # hg <= 5.8 constructor signature
            dest = revlog(
                vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
            )
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
3654 3692
3655 3693
@command(
    b'perf::revlogchunks|perfrevlogchunks',
    revlogopts
    + formatteropts
    + [
        (b'e', b'engines', b'', b'compression engines to use'),
        (b's', b'startrev', 0, b'revision to start at'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = {e.strip() for e in engines.split(b',')}
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # default to every available engine that can actually compress
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # open a file handle on whichever file actually holds the chunk data
        if rl._inline:
            indexfile = getattr(rl, '_indexfile', None)
            if indexfile is None:
                # compatibility with <= hg-5.8
                indexfile = getattr(rl, 'indexfile')
            return getsvfs(repo)(indexfile)
        else:
            # fix: the first lookup previously used 'datafile' twice, making
            # the fallback dead and breaking on revlogs that only have the
            # modern `_datafile` attribute
            datafile = getattr(rl, '_datafile', getattr(rl, 'datafile'))
            return getsvfs(repo)(datafile)

    def doread():
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append(
            (
                functools.partial(docompress, compressor),
                b'compress w/ %s' % engine,
            )
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3788 3826
3789 3827
@command(
    b'perf::revlogrevision|perfrevlogrevision',
    revlogopts
    + formatteropts
    + [(b'', b'cache', False, b'use caches instead of clearing')],
    b'-c|-m|FILE REV',
)
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # -c/-m take no FILE argument; the first positional is the revision
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # slice each read segment back into per-revision compressed chunks
        start = r.start
        length = r.length
        inline = r._inline
        try:
            iosize = r.index.entry_size
        except AttributeError:
            # hg <= 5.8 spelling of the index entry size
            iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        # step 1: delta chain computation
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        # step 3 (I/O only): read the raw segments for the chain
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        # step 2: chain slicing (sparse-read revlogs only)
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        # step 3 (in-memory): split segments into per-revision chunks
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        # step 4: decompression
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        # step 5: binary patch application
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        # step 6: fulltext hash verification
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        # all steps combined
        if not cache:
            r.clearcaches()
        r.revision(node)

    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    # precompute inputs for each phase so every benchmark only measures
    # its own step
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend(
        [
            (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
            (lambda: dodecompress(rawchunks), b'decompress'),
            (lambda: dopatch(text, bins), b'patch'),
            (lambda: dohash(text), b'hash'),
        ]
    )

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
3934 3972
3935 3973
@command(
    b'perf::revset|perfrevset',
    [
        (b'C', b'clear', False, b'clear volatile cache between each call.'),
        (b'', b'contexts', False, b'obtain changectx for each revision'),
    ]
    + formatteropts,
    b"REVSET",
)
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of building
    the volatile revisions set cache on the revset execution. Volatile
    cache holds the filtered and obsolete related caches."""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def d():
        if clear:
            # drop hidden/obsolete caches so every run recomputes them
            repo.invalidatevolatilesets()
        if contexts:
            # force changectx creation for each matched revision
            for ctx in repo.set(expr):
                pass
        else:
            # plain revision-number iteration, no context objects
            for r in repo.revs(expr):
                pass

    timer(d)
    fm.end()
3967 4005
3968 4006
@command(
    b'perf::volatilesets|perfvolatilesets',
    [
        (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
    ]
    + formatteropts,
)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def make_obs_bench(setname):
        """build a timed callable recomputing one obsolescence set"""

        def run():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            obsolete.getrevs(repo, setname)

        return run

    def make_filter_bench(filtername):
        """build a timed callable recomputing one repoview filter"""

        def run():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            repoview.filterrevs(repo, filtername)

        return run

    # benchmark the obsolescence sets first, then the repoview filters,
    # restricting both lists to the names given on the command line (if any)
    obs_sets = sorted(obsolete.cachefuncs)
    if names:
        obs_sets = [n for n in obs_sets if n in names]
    for setname in obs_sets:
        timer(make_obs_bench(setname), title=setname)

    filters = sorted(repoview.filtertable)
    if names:
        filters = [n for n in filters if n in names]
    for filtername in filters:
        timer(make_filter_bench(filtername), title=filtername)
    fm.end()
4016 4054
4017 4055
@command(
    b'perf::branchmap|perfbranchmap',
    [
        (b'f', b'full', False, b'Includes build time of subset'),
        (
            b'',
            b'clear-revbranch',
            False,
            b'purge the revbranch cache between computation',
        ),
    ]
    + formatteropts,
)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)

    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches

        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # drop every cached branchmap, forcing a full rebuild
                view._branchcaches.clear()
            else:
                # only drop this filter's entry so the subset is reused
                filtered.pop(filtername, None)
            view.branchmap()

        return d

    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        # pick a filter whose subset is not still pending, so benchmarks
        # run from the smallest view to the largest
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable on-disk branchmap reads and writes for the duration of the
    # benchmark so only in-memory computation is measured
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=printname)
    finally:
        # always restore the patched read/write entry points
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
4107 4145
4108 4146
@command(
    b'perf::branchmapupdate|perfbranchmapupdate',
    [
        (b'', b'base', [], b'subset of revision to start from'),
        (b'', b'target', [], b'subset of revision to end with'),
        (b'', b'clear-caches', False, b'clear cache between each runs'),
    ]
    + formatteropts,
)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    # update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None]  # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    # the revisions the benchmarked update will have to add
    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    # temporary repoview filters exposing exactly the base/target subsets
    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        # register the synthetic filters; removed again in the finally block
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset was found
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            # each run starts from a fresh copy of the base branchmap
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
4217 4255
4218 4256
@command(
    b'perf::branchmapload|perfbranchmapload',
    [
        (b'f', b'filter', b'', b'Specify repoview filter'),
        # typo fix: was b'List brachmap filter caches'
        (b'', b'list', False, b'List branchmap filter caches'),
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ]
    + formatteropts,
)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap

    With --list, only print the name and on-disk size of every cached
    branchmap and return. Otherwise, time reading the branchmap cache
    file for the requested repoview filter (falling back to the nearest
    cached subset when that filter has no cache of its own).
    """
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # enumerate the on-disk branchmap cache files and their sizes
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(
                    b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
                )
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap()  # make sure we have a relevant, up to date branchmap

    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached
    while fromfile(repo) is None:
        # walk up the subset chain until a filter with an on-disk cache
        # is found; abort when the chain is exhausted
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(
                b'No branchmap cached for %s repo' % (filter or b'unfiltered')
            )
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)

    def setup():
        if clearrevlogs:
            clearchangelog(repo)

    def bench():
        fromfile(repo)

    timer(bench, setup=setup)
    fm.end()
4277 4315
4278 4316
@command(b'perf::loadmarkers|perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    store_vfs = getsvfs(repo)

    def parse_markers():
        # instantiating obsstore parses every on-disk marker; its length
        # is the marker count reported as the benchmark result
        return len(obsolete.obsstore(repo, store_vfs))

    timer(parse_markers)
    fm.end()
4288 4326
4289 4327
@command(
    b'perf::lrucachedict|perflrucachedict',
    formatteropts
    + [
        (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
        (b'', b'mincost', 0, b'smallest cost of items in cache'),
        (b'', b'maxcost', 100, b'maximum cost of items in cache'),
        (b'', b'size', 4, b'size of cache'),
        (b'', b'gets', 10000, b'number of key lookups'),
        (b'', b'sets', 10000, b'number of key sets'),
        (b'', b'mixed', 10000, b'number of mixed mode operations'),
        (
            b'',
            b'mixedgetfreq',
            50,
            b'frequency of get vs set ops in mixed mode',
        ),
    ],
    norepo=True,
)
def perflrucache(
    ui,
    mincost=0,
    maxcost=100,
    costlimit=0,
    size=4,
    gets=10000,
    sets=10000,
    mixed=10000,
    mixedgetfreq=50,
    **opts
):
    """benchmark `util.lrucachedict` operations

    Random workloads (gets, inserts, mixed) are generated up front, then
    each operation mode is timed separately.  With --costlimit, the
    cost-aware insert/get variants are benchmarked instead.
    """
    opts = _byteskwargs(opts)

    def doinit():
        # benchmark: bare construction cost of the cache object
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value  # silence pyflakes warning

    def dogetscost():
        # `costs` is bound later in this function body; it exists by the
        # time the timer actually calls this closure
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value  # silence pyflakes warning
            except KeyError:
                # cost-based eviction may have dropped the key
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0  # get
        else:
            op = 1  # set

        mixedops.append(
            (op, random.randint(0, size * 2), random.choice(costrange))
        )

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    # cost-aware and plain variants are mutually exclusive benchmark sets
    if costlimit:
        benches.extend(
            [
                (dogetscost, b'gets w/ cost limit'),
                (doinsertscost, b'inserts w/ cost limit'),
                (domixedcost, b'mixed w/ cost limit'),
            ]
        )
    else:
        benches.extend(
            [
                (dogets, b'gets'),
                (doinserts, b'inserts'),
                (dosets, b'sets'),
                (domixed, b'mixed'),
            ]
        )

    # one formatter/timer pair per benchmark so each reports separately
    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
4444 4482
4445 4483
@command(
    b'perf::write|perfwrite',
    formatteropts
    + [
        (b'', b'write-method', b'write', b'ui write method'),
        (b'', b'nlines', 100, b'number of lines'),
        (b'', b'nitems', 100, b'number of items (per line)'),
        (b'', b'item', b'x', b'item that is written'),
        (b'', b'batch-line', None, b'pass whole line to write method at once'),
        (b'', b'flush-line', None, b'flush after each line'),
    ],
)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write (and others)"""
    opts = _byteskwargs(opts)

    # resolve the requested ui output method (write, write_err, ...)
    write = getattr(ui, _sysstr(opts[b'write_method']))
    nlines = int(opts[b'nlines'])
    nitems = int(opts[b'nitems'])
    item = opts[b'item']
    batch_line = opts.get(b'batch_line')
    flush_line = opts.get(b'flush_line')

    # in batch mode each line is pre-built and emitted with a single call
    full_line = item * nitems + b'\n' if batch_line else None

    def benchmark():
        for _line in pycompat.xrange(nlines):
            if batch_line:
                write(full_line)
            else:
                for _item in pycompat.xrange(nitems):
                    write(item)
                write(b'\n')
            if flush_line:
                ui.flush()
        ui.flush()

    timer, fm = gettimer(ui, opts)
    timer(benchmark)
    fm.end()
4487 4525
4488 4526
def uisetup(ui):
    """extension setup hook: patch very old Mercurial for portability

    On Mercurial versions that have cmdutil.openrevlog but not
    commands.debugrevlogopts, wrap openrevlog so that the unsupported
    --dir option fails with a clear message instead of misbehaving.
    """
    if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
        commands, b'debugrevlogopts'
    ):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            # `dirlog` only exists on repos new enough to support --dir
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(
                    b"This version doesn't support --dir option",
                    hint=b"use 3.5 or later",
                )
            return orig(repo, cmd, file_, opts)

        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
4507 4545
4508 4546
@command(
    b'perf::progress|perfprogress',
    formatteropts
    + [
        (b'', b'topic', b'topic', b'topic for progress messages'),
        (b'c', b'total', 1000000, b'total value we are progressing to'),
    ],
    norepo=True,
)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def tick_progress():
        # drive one complete progress bar from 0 to `total`, one
        # increment per step
        progress = ui.makeprogress(topic, total=total)
        with progress:
            for _ in _xrange(total):
                progress.increment()

    timer(tick_progress)
    fm.end()
General Comments 0
You need to be logged in to leave comments. Login now