##// END OF EJS Templates
perf: make perf::bundle compatible down to 5.2...
marmoute -
r50369:d513ae93 default
parent child Browse files
Show More
@@ -1,4195 +1,4203 b''
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance
3 3
4 4 Configurations
5 5 ==============
6 6
7 7 ``perf``
8 8 --------
9 9
10 10 ``all-timing``
11 11 When set, additional statistics will be reported for each benchmark: best,
  worst, median, and average. If not set, only the best timing is reported
13 13 (default: off).
14 14
15 15 ``presleep``
  number of seconds to wait before any group of runs (default: 1)
17 17
18 18 ``pre-run``
  number of runs to perform before starting measurement.
20 20
21 21 ``profile-benchmark``
22 22 Enable profiling for the benchmarked section.
23 23 (The first iteration is benchmarked)
24 24
25 25 ``run-limits``
26 26 Control the number of runs each benchmark will perform. The option value
27 27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 28 conditions are considered in order with the following logic:
29 29
30 30 If benchmark has been running for <time> seconds, and we have performed
31 31 <numberofrun> iterations, stop the benchmark,
32 32
33 33 The default value is: `3.0-100, 10.0-3`
34 34
35 35 ``stub``
36 36 When set, benchmarks will only be run once, useful for testing
37 37 (default: off)
38 38 '''
39 39
40 40 # "historical portability" policy of perf.py:
41 41 #
42 42 # We have to do:
43 43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 46 # - make historical perf command work correctly with as wide Mercurial
47 47 # version as possible
48 48 #
49 49 # We have to do, if possible with reasonable cost:
50 50 # - make recent perf command for historical feature work correctly
51 51 # with early Mercurial
52 52 #
53 53 # We don't have to do:
54 54 # - make perf command for recent feature work correctly with early
55 55 # Mercurial
56 56
57 57 import contextlib
58 58 import functools
59 59 import gc
60 60 import os
61 61 import random
62 62 import shutil
63 63 import struct
64 64 import sys
65 65 import tempfile
66 66 import threading
67 67 import time
68 68
69 69 import mercurial.revlog
70 70 from mercurial import (
71 71 changegroup,
72 72 cmdutil,
73 73 commands,
74 74 copies,
75 75 error,
76 76 extensions,
77 77 hg,
78 78 mdiff,
79 79 merge,
80 80 util,
81 81 )
82 82
# for "historical portability":
# try to import modules separately (in dict order), and ignore
# failure, because these aren't available with early Mercurial
try:
    from mercurial import branchmap  # since 2.5 (or bcee63733aad)
except ImportError:
    pass
try:
    from mercurial import obsolete  # since 2.3 (or ad0d6c2b3279)
except ImportError:
    pass
try:
    from mercurial import registrar  # since 3.7 (or 37d50250b696)

    dir(registrar)  # forcibly load it
except ImportError:
    registrar = None
try:
    from mercurial import repoview  # since 2.5 (or 3a6ddacb7198)
except ImportError:
    pass
try:
    from mercurial.utils import repoviewutil  # since 5.0
except ImportError:
    repoviewutil = None
try:
    from mercurial import scmutil  # since 1.9 (or 8b252e826c68)
except ImportError:
    pass
try:
    from mercurial import setdiscovery  # since 1.9 (or cb98fed52495)
except ImportError:
    pass

try:
    from mercurial import profiling
except ImportError:
    # profiling is optional; gettimer() checks for None before using it
    profiling = None

try:
    from mercurial.revlogutils import constants as revlog_constants

    perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')

    # modern revlog constructors require a "kind" argument; wrap it so the
    # rest of this file can call revlog() uniformly on any hg version
    def revlog(opener, *args, **kwargs):
        return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)


except (ImportError, AttributeError):
    perf_rl_kind = None

    # older Mercurial: revlog() takes no "kind" argument
    def revlog(opener, *args, **kwargs):
        return mercurial.revlog.revlog(opener, *args, **kwargs)
136 136
137 137
def identity(a):
    """Return *a* unchanged; used as a no-op stand-in for pycompat helpers."""
    return a
140 140
141 141
# for "historical portability": bind py2/py3 compatibility helpers from
# pycompat when available, falling back to py2-only equivalents otherwise
try:
    from mercurial import pycompat

    getargspec = pycompat.getargspec  # added to module after 4.5
    _byteskwargs = pycompat.byteskwargs  # since 4.1 (or fbc3f73dc802)
    _sysstr = pycompat.sysstr  # since 4.0 (or 2219f4f82ede)
    _bytestr = pycompat.bytestr  # since 4.2 (or b70407bd84d5)
    _xrange = pycompat.xrange  # since 4.8 (or 7eba8f83129b)
    fsencode = pycompat.fsencode  # since 3.9 (or f4a5e0e86a7e)
    if pycompat.ispy3:
        _maxint = sys.maxsize  # per py3 docs for replacing maxint
    else:
        _maxint = sys.maxint
except (NameError, ImportError, AttributeError):
    import inspect

    getargspec = inspect.getargspec
    _byteskwargs = identity
    _bytestr = str
    fsencode = identity  # no py3 support
    _maxint = sys.maxint  # no py3 support
    _sysstr = lambda x: x  # no py3 support
    _xrange = xrange

try:
    # 4.7+
    queue = pycompat.queue.Queue
except (NameError, AttributeError, ImportError):
    # <4.7.
    try:
        queue = pycompat.queue
    except (NameError, AttributeError, ImportError):
        import Queue as queue

# for "historical portability": the templater factory moved between modules
try:
    from mercurial import logcmdutil

    makelogtemplater = logcmdutil.maketemplater
except (AttributeError, ImportError):
    try:
        makelogtemplater = cmdutil.makelogtemplater
    except (AttributeError, ImportError):
        makelogtemplater = None
185 185
# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
# sentinel distinguishing "attribute absent" from "attribute is None"
_undefined = object()


def safehasattr(thing, attr):
    """Return True if *thing* has attribute *attr* (bytes or str name)."""
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined


setattr(util, 'safehasattr', safehasattr)

# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == b'nt':
    util.timer = time.clock
else:
    util.timer = time.time

# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(
    cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
)
216 216
# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(
    cmdutil,
    "debugrevlogopts",
    getattr(
        commands,
        "debugrevlogopts",
        [
            (b'c', b'changelog', False, b'open changelog'),
            (b'm', b'manifest', False, b'open manifest'),
            (b'', b'dir', False, b'open directory manifest'),
        ],
    ),
)

# table of all perf commands, filled in by the @command decorator below
cmdtable = {}
237 237
238 238 # for "historical portability":
239 239 # define parsealiases locally, because cmdutil.parsealiases has been
240 240 # available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a b'name|alias1|...' command spec into its list of names."""
    separator = b"|"
    return cmd.split(separator)
243 243
244 244
# pick the newest available @command decorator, shimming missing features
if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if 'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command

        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)


else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func

        return decorator
276 276
277 277
# declare the perf.* config knobs so devel-warnings don't fire on modern hg;
# silently skipped on versions predating the registrar/configitems API
try:
    import mercurial.registrar
    import mercurial.configitems

    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'pre-run',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
except (ImportError, AttributeError):
    pass
except TypeError:
    # compatibility fix for a11fd395e83f
    # hg version: 5.2
    # (configitem() there does not accept the "experimental" keyword, so
    # re-register everything without it)
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'pre-run',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
    )
364 364
365 365
def getlen(ui):
    """Return a length function; constant 1 when perf.stub is set (testing)."""
    if ui.configbool(b"perf", b"stub", False):
        def fake_len(sequence):
            return 1

        return fake_len
    return len
370 370
371 371
class noop:
    """A context manager that does nothing (stand-in for a disabled profiler)."""

    def __enter__(self):
        return None

    def __exit__(self, *exc_details):
        return None


NOOPCTX = noop()
383 383
384 384
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter:
            """Minimized composition of baseformatter and plainformatter"""

            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short

            def __nonzero__(self):
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    # each entry is b"<seconds>-<runcount>"; malformed entries are warned
    # about and skipped rather than aborting the benchmark
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
            continue
        try:
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
    )
    return t, fm
507 507
508 508
def stub_timer(fm, func, setup=None, title=None):
    """Execute *func* exactly once, running *setup* first when provided.

    Used instead of _timer when the perf.stub config knob is set; the
    *fm* and *title* arguments are accepted for interface parity only.
    """
    if setup is not None:
        setup()
    func()
513 513
514 514
@contextlib.contextmanager
def timeone():
    """Time the enclosed block; on exit the yielded list receives one
    (wall-clock, user-cpu, system-cpu) delta tuple."""
    measurements = []
    os_before = os.times()
    wall_before = util.timer()
    yield measurements
    wall_after = util.timer()
    os_after = os.times()
    measurements.append(
        (
            wall_after - wall_before,
            os_after[0] - os_before[0],
            os_after[1] - os_before[1],
        )
    )
525 525
526 526
# list of stop condition (elapsed time, minimal run count)
# _timer stops at the first pair whose thresholds are both met: after 3s
# once 100 iterations are done, or after 10s once 3 iterations are done
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)
532 532
533 533
def _timer(
    fm,
    func,
    setup=None,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
):
    """Repeatedly run *func* (preceded by *setup* each time) until one of
    *limits* is hit, then report the timings through formatone().

    Only the first measured iteration runs under *profiler*; *prerun*
    warm-up iterations are executed unmeasured first.
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    for i in range(prerun):
        if setup is not None:
            setup()
        func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with profiler:
            with timeone() as item:
                r = func()
        # only profile the first iteration; subsequent ones use the no-op ctx
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    formatone(fm, results, title=title, result=r, displayall=displayall)
573 573
574 574
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's timings to formatter *fm*.

    *timings* is a list of (wall, user, sys) tuples; it is sorted in
    place. Only the best entry is shown unless *displayall* is set, in
    which case max, average and median are reported as well.
    """

    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def display(role, entry):
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    min_val = timings[0]
    display(b'best', min_val)
    if displayall:
        max_val = timings[-1]
        display(b'max', max_val)
        avg = tuple([sum(x) / count for x in zip(*timings)])
        display(b'avg', avg)
        median = timings[len(timings) // 2]
        display(b'median', median)
608 608
609 609
610 610 # utilities for historical portability
611 611
612 612
def getint(ui, section, name, default):
    """Read config value ``section.name`` as an integer.

    Returns *default* when unset; raises ConfigError on a non-integer
    value. (Used instead of ui.configint, which only exists since
    Mercurial 1.9 / fa2b596db182.)
    """
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError(
            b"%s.%s is not an integer ('%s')" % (section, name, raw)
        )
625 625
626 626
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort(
            (
                b"missing attribute %s of %s might break assumption"
                b" of performance measurement"
            )
            % (name, obj)
        )

    # captured here so restore() can put the original value back
    origvalue = getattr(obj, _sysstr(name))

    class attrutil:
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
663 663
664 664
665 665 # utilities to examine each internal API changes
666 666
667 667
def getbranchmapsubsettable():
    """Return the branchmap 'subsettable' mapping, wherever it lives.

    Aborts if no known module provides it (possible when bisecting very
    old revisions).
    """
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    # - repoviewutil since 5.0
    for mod in (branchmap, repoview, repoviewutil):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )
686 686
687 687
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store"""
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf)
    svfs = getattr(repo, 'svfs', None)
    return svfs if svfs else getattr(repo, 'sopener')
697 697
698 698
def getvfs(repo):
    """Return appropriate object to access files under .hg"""
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf)
    vfs = getattr(repo, 'vfs', None)
    return vfs if vfs else getattr(repo, 'opener')
708 708
709 709
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API"""
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if '_tagscache' in vars(repo):
                del repo.__dict__['_tagscache']

        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")
738 738
739 739
740 740 # utilities to clear cache
741 741
742 742
def clearfilecache(obj, attrname):
    """Drop the cached value of a @filecache'd attribute on *obj*.

    Operates on the unfiltered object when *obj* supports unfiltered().
    """
    unfilter = getattr(obj, 'unfiltered', None)
    if unfilter is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
750 750
751 751
def clearchangelog(repo):
    """Force the changelog to be reloaded on next access."""
    if repo is not repo.unfiltered():
        # NOTE(review): object.__setattr__ presumably bypasses the repoview
        # attribute proxying so the filtered view's cache keys are really
        # reset — confirm against repoview internals
        object.__setattr__(repo, '_clcachekey', None)
        object.__setattr__(repo, '_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')
757 757
758 758
759 759 # perf commands
760 760
761 761
@command(b'perf::walk|perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    """benchmark a dirstate walk over the working copy matching *pats*"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    # list() forces full traversal of the walk generator so the whole
    # walk is what gets timed
    timer(
        lambda: len(
            list(
                repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
            )
        )
    )
    fm.end()
775 775
776 776
@command(b'perf::annotate|perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    """benchmark annotating file *f* at the working directory parent"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    timer(lambda: len(fc.annotate(True)))
    fm.end()
784 784
785 785
@command(
    b'perf::status|perfstatus',
    [
        (b'u', b'unknown', False, b'ask status to look for unknown files'),
        (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
    ]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call

    The repository data are preserved between each call.

    By default, only the status of the tracked file are requested. If
    `--unknown` is passed, the "unknown" files are also tracked.
    """
    opts = _byteskwargs(opts)
    # m = match.always(repo.root, repo.getcwd())
    # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    # False))))
    timer, fm = gettimer(ui, opts)
    if opts[b'dirstate']:
        dirstate = repo.dirstate
        m = scmutil.matchall(repo)
        unknown = opts[b'unknown']

        def status_dirstate():
            s = dirstate.status(
                m, subrepos=[], ignored=False, clean=False, unknown=unknown
            )
            # consume the status result so lazy work is included in the timing
            sum(map(bool, s))

        timer(status_dirstate)
    else:
        timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
822 822
823 823
@command(b'perf::addremove|perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    """benchmark a dry-run addremove over the working copy"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # read the original value BEFORE entering the try block: if the read
    # itself failed inside the try, the finally clause would hit an
    # UnboundLocalError on `oldquiet`
    oldquiet = repo.ui.quiet
    try:
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        if 'uipathfn' in getargspec(scmutil.addremove).args:
            # the uipathfn parameter was added to scmutil.addremove in 5.0
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
841 841
842 842
def clearcaches(cl):
    """Clear a changelog/revlog's lookup caches across hg versions."""
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # <= hg-5.2
        from mercurial.node import nullid, nullrev

        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
853 853
854 854
@command(b'perf::heads|perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    # caches are cleared in setup so each run computes heads from scratch

    def s():
        clearcaches(cl)

    def d():
        len(cl.headrevs())

    timer(d, setup=s)
    fm.end()
870 870
871 871
@command(
    b'perf::tags|perftags',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perftags(ui, repo, **opts):
    """benchmark the computation of repo.tags()"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']

    def s():
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()

    def t():
        return len(repo.tags())

    timer(t, setup=s)
    fm.end()
896 896
897 897
@command(b'perf::ancestors|perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    """benchmark iterating all ancestors of the changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()

    def d():
        for a in repo.changelog.ancestors(heads):
            pass

    timer(d)
    fm.end()
910 910
911 911
@command(b'perf::ancestorset|perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    """benchmark membership tests of *revset* revs in the heads' ancestors"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()

    def d():
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            # the membership test itself is the benchmarked operation;
            # its result is intentionally discarded
            rev in s

    timer(d)
    fm.end()
926 926
927 927
@command(
    b'perf::delta-find',
    revlogopts + formatteropts,
    b'-c|-m|FILE REV',
)
def perf_delta_find(ui, repo, arg_1, arg_2=None, **opts):
    """benchmark the process of finding a valid delta for a revlog revision

    When a revlog receives a new revision (e.g. from a commit, or from an
    incoming bundle), it searches for a suitable delta-base to produce a delta.
    This perf command measures how much time we spend in this process. It
    operates on an already stored revision.

    See `hg help debug-delta-find` for another related command.
    """
    from mercurial import revlogutils
    import mercurial.revlogutils.deltas as deltautil

    opts = _byteskwargs(opts)
    # with a single positional argument, it is the revision and the revlog
    # is selected through -c/-m; with two, the first names the file
    if arg_2 is None:
        file_ = None
        rev = arg_1
    else:
        file_ = arg_1
        rev = arg_2

    repo = repo.unfiltered()

    timer, fm = gettimer(ui, opts)

    rev = int(rev)

    revlog = cmdutil.openrevlog(repo, b'perf::delta-find', file_, opts)

    deltacomputer = deltautil.deltacomputer(revlog)

    # rebuild the revisioninfo that would accompany a fresh addition of
    # this (already stored) revision, with no cached delta available
    node = revlog.node(rev)
    p1r, p2r = revlog.parentrevs(rev)
    p1 = revlog.node(p1r)
    p2 = revlog.node(p2r)
    full_text = revlog.revision(rev)
    textlen = len(full_text)
    cachedelta = None
    flags = revlog.flags(rev)

    revinfo = revlogutils.revisioninfo(
        node,
        p1,
        p2,
        [full_text],  # btext
        textlen,
        cachedelta,
        flags,
    )

    # Note: we should probably purge the potential caches (like the full
    # manifest cache) between runs.
    def find_one():
        with revlog._datafp() as fh:
            deltacomputer.finddeltainfo(revinfo, fh, target_rev=rev)

    timer(find_one)
    fm.end()
991 991
992 992
@command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path"""
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)

    try:
        from mercurial.utils.urlutil import get_unique_pull_path

        # modern API (6.0+): resolve the path through urlutil
        path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
    except ImportError:
        path = ui.expandpath(path)

    # a fresh peer is opened in setup so connection cost is excluded from
    # the measured discovery run
    def s():
        repos[1] = hg.peer(ui, opts, path)

    def d():
        setdiscovery.findcommonheads(ui, *repos)

    timer(d, setup=s)
    fm.end()
1014 1014
1015 1015
@command(
    b'perf::bookmarks|perfbookmarks',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']

    def s():
        if clearrevlogs:
            clearchangelog(repo)
        # drop the cached property so each run re-parses the bookmark file
        clearfilecache(repo, b'_bookmarks')

    def d():
        repo._bookmarks

    timer(d, setup=s)
    fm.end()
1040 1040
1041 1041
@command(
    b'perf::bundle',
    [
        (
            b'r',
            b'rev',
            [],
            b'changesets to bundle',
            b'REV',
        ),
        (
            b't',
            b'type',
            b'none',
            b'bundlespec to use (see `hg help bundlespec`)',
            b'TYPE',
        ),
    ]
    + formatteropts,
    b'REVS',
)
def perfbundle(ui, repo, *revs, **opts):
    """benchmark the creation of a bundle from a repository

    For now, this only supports "none" compression.
    """
    # `parsebundlespec` lives in `bundlecaches` on recent Mercurial; fall
    # back to its previous home in `exchange` so this command keeps working
    # on older versions (down to 5.2).
    try:
        from mercurial import bundlecaches

        parsebundlespec = bundlecaches.parsebundlespec
    except ImportError:
        from mercurial import exchange

        parsebundlespec = exchange.parsebundlespec

    from mercurial import discovery
    from mercurial import bundle2

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    cl = repo.changelog
    revs = list(revs)
    revs.extend(opts.get(b'rev', ()))
    revs = scmutil.revrange(repo, revs)
    if not revs:
        # fixed wording of the error message ("not" -> "no")
        raise error.Abort(b"no revision specified")
    # make it a consistent set (ie: without topological gaps)
    old_len = len(revs)
    revs = list(repo.revs(b"%ld::%ld", revs, revs))
    if old_len != len(revs):
        new_count = len(revs) - old_len
        msg = b"add %d new revisions to make it a consistent set\n"
        ui.write_err(msg % new_count)

    # heads of the bundled set, and heads of what is assumed already present
    targets = [cl.node(r) for r in repo.revs(b"heads(::%ld)", revs)]
    bases = [cl.node(r) for r in repo.revs(b"heads(::%ld - %ld)", revs, revs)]
    outgoing = discovery.outgoing(repo, bases, targets)

    bundle_spec = opts.get(b'type')

    bundle_spec = parsebundlespec(repo, bundle_spec, strict=False)

    cgversion = bundle_spec.params.get(b"cg.version")
    if cgversion is None:
        if bundle_spec.version == b'v1':
            cgversion = b'01'
        if bundle_spec.version == b'v2':
            cgversion = b'02'
    if cgversion not in changegroup.supportedoutgoingversions(repo):
        err = b"repository does not support bundle version %s"
        raise error.Abort(err % cgversion)

    if cgversion == b'01':  # bundle1
        bversion = b'HG10' + bundle_spec.wirecompression
        bcompression = None
    elif cgversion in (b'02', b'03'):
        bversion = b'HG20'
        bcompression = bundle_spec.wirecompression
    else:
        err = b'perf::bundle: unexpected changegroup version %s'
        raise error.ProgrammingError(err % cgversion)

    if bcompression is None:
        bcompression = b'UN'

    # only uncompressed bundles are supported by this benchmark for now
    if bcompression != b'UN':
        err = b'perf::bundle: compression currently unsupported: %s'
        raise error.ProgrammingError(err % bcompression)

    def do_bundle():
        # write to os.devnull: we benchmark generation, not disk I/O
        bundle2.writenewbundle(
            ui,
            repo,
            b'perf::bundle',
            os.devnull,
            bversion,
            outgoing,
            bundle_spec.params,
        )

    timer(do_bundle)
    fm.end()
1137 1145
1138 1146
@command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    # each factory below returns a zero-argument callable suitable for
    # timer(); the bundle file is re-opened on every run so the OS page
    # cache is the only shared state between iterations

    def makebench(fn):
        # benchmark `fn` applied to a freshly opened bundle object
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        # benchmark reading the bundle payload in `size`-byte chunks
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # baseline: raw file reads without any bundle layer on top
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        # benchmark reading each bundle2 part in `size`-byte chunks
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    # raw-read baselines run for every bundle type
    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # open once just to detect the bundle format and pick the bench set
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend(
                [
                    (makebench(deltaiter), b'cg1 deltaiter()'),
                    (makebench(iterchunks), b'cg1 getchunks()'),
                    (makereadnbytes(8192), b'cg1 read(8k)'),
                    (makereadnbytes(16384), b'cg1 read(16k)'),
                    (makereadnbytes(32768), b'cg1 read(32k)'),
                    (makereadnbytes(131072), b'cg1 read(128k)'),
                ]
            )
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend(
                [
                    (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                    (makebench(iterparts), b'bundle2 iterparts()'),
                    (
                        makebench(iterpartsseekable),
                        b'bundle2 iterparts() seekable',
                    ),
                    (makebench(seek), b'bundle2 part seek()'),
                    (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                    (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                    (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                    (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
                ]
            )
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    # one formatter/timer pair per benchmark so results are reported per title
    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1263 1271
1264 1272
@command(
    b'perf::changegroupchangelog|perfchangegroupchangelog',
    formatteropts
    + [
        (b'', b'cgversion', b'02', b'changegroup version'),
        (b'r', b'rev', b'', b'revisions to add to changegroup'),
    ],
)
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def produce():
        # generate and exhaust the changelog chunk stream
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(produce)

    fm.end()
1300 1308
1301 1309
@command(b'perf::dirs|perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # make sure the dirstate itself is loaded before timing starts
    b'a' in dirstate

    def run():
        dirstate.hasdir(b'a')
        # drop the directory cache so the next run recomputes it
        try:
            del dirstate._map._dirs
        except AttributeError:
            pass

    timer(run)
    fm.end()
1318 1326
1319 1327
@command(
    b'perf::dirstate|perfdirstate',
    [
        (
            b'',
            b'iteration',
            None,
            b'benchmark a full iteration for the dirstate',
        ),
        (
            b'',
            b'contains',
            None,
            b'benchmark a large amount of `nf in dirstate` calls',
        ),
    ]
    + formatteropts,
)
def perfdirstate(ui, repo, **opts):
    """benchmark the time of various dirstate operations

    By default benchmark the time necessary to load a dirstate from scratch.
    The dirstate is loaded to the point were a "contains" request can be
    answered.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # make sure the dirstate is loaded at least once before timing
    b"a" in repo.dirstate

    if opts[b'iteration'] and opts[b'contains']:
        msg = b'only specify one of --iteration or --contains'
        raise error.Abort(msg)

    if opts[b'iteration']:
        setup = None
        dirstate = repo.dirstate

        def run():
            for entry in dirstate:
                pass

    elif opts[b'contains']:
        setup = None
        dirstate = repo.dirstate
        allfiles = list(dirstate)
        # also add file path that will be "missing" from the dirstate
        allfiles.extend([f[::-1] for f in allfiles])

        def run():
            for candidate in allfiles:
                candidate in dirstate

    else:

        def setup():
            # force a full reload for each run
            repo.dirstate.invalidate()

        def run():
            b"a" in repo.dirstate

    timer(run, setup=setup)
    fm.end()
1382 1390
1383 1391
@command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark a 'dirstate.hasdir' call from an empty `dirs` cache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # warm everything but the `_dirs` cache, which setup() drops per-run
    repo.dirstate.hasdir(b"a")

    def setup():
        try:
            del repo.dirstate._map._dirs
        except AttributeError:
            pass

    def run():
        repo.dirstate.hasdir(b"a")

    timer(run, setup=setup)
    fm.end()
1402 1410
1403 1411
@command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.filefoldmap.get()` request

    The dirstate filefoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime everything once outside the timed section
    dirstate._map.filefoldmap.get(b'a')

    def setup():
        del dirstate._map.filefoldmap

    def run():
        dirstate._map.filefoldmap.get(b'a')

    timer(run, setup=setup)
    fm.end()
1423 1431
1424 1432
@command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.dirfoldmap.get()` request

    The dirstate dirfoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime everything once outside the timed section
    dirstate._map.dirfoldmap.get(b'a')

    def setup():
        # drop both the fold map and the underlying directory cache
        del dirstate._map.dirfoldmap
        try:
            del dirstate._map._dirs
        except AttributeError:
            pass

    def run():
        dirstate._map.dirfoldmap.get(b'a')

    timer(run, setup=setup)
    fm.end()
1448 1456
1449 1457
@command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark the time it take to write a dirstate on disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # make sure the dirstate is loaded before the timed runs
    b"a" in ds

    def setup():
        # mark the dirstate dirty so write() actually writes
        ds._dirty = True

    def run():
        ds.write(repo.currenttransaction())

    timer(run, setup=setup)
    fm.end()
1466 1474
1467 1475
def _getmergerevs(repo, opts):
    """parse command arguments and return the revisions involved in a merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        wctx = repo[scmutil.revsingle(repo, opts[b'from'])]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        ancestor = repo[scmutil.revsingle(repo, opts[b'base'])]
    else:
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
1489 1497
1490 1498
@command(
    b'perf::mergecalculate|perfmergecalculate',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecalculate(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def run():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(
            repo,
            wctx,
            rctx,
            [ancestor],
            branchmerge=False,
            force=False,
            acceptremote=True,
            followcopies=True,
        )

    timer(run)
    fm.end()
1522 1530
1523 1531
@command(
    b'perf::mergecopies|perfmergecopies',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def run():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        copies.mergecopies(repo, wctx, rctx, ancestor)

    timer(run)
    fm.end()
1546 1554
1547 1555
@command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # resolve both revisions once, outside of the timed section
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)

    def run():
        copies.pathcopies(ctx1, ctx2)

    timer(run)
    fm.end()
1561 1569
1562 1570
@command(
    b'perf::phases|perfphases',
    [
        (b'', b'full', False, b'include file reading time too'),
    ],
    b"",
)
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')

    def run():
        phases = _phases
        if full:
            # with --full, also drop the file cache so the on-disk phase
            # data is re-read as part of the measurement
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)

    timer(run)
    fm.end()
1587 1595
1588 1596
@command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import bin
    from mercurial import (
        exchange,
        hg,
        phases,
    )

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            b'default repository not configured!',
            hint=b"see 'hg help config.paths'",
        )
    dest = path.pushloc or path.loc
    ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    # fetch the remote phase data once, before timing starts
    with other.commandexecutor() as e:
        remotephases = e.callcommand(
            b'listkeys', {b'namespace': b'phases'}
        ).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.statusnoi18n(b'publishing: yes\n')
    else:
        ui.statusnoi18n(b'publishing: no\n')

    # `has_node` is only available on recent index implementations;
    # fall back to nodemap containment on older Mercurial
    has_node = getattr(repo.changelog.index, 'has_node', None)
    if has_node is None:
        has_node = repo.changelog.nodemap.__contains__
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if has_node(node) and int(phase):
            nonpublishroots += 1
    ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
    ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)

    def d():
        phases.remotephasessummary(repo, remotesubset, remotephases)

    timer(d)
    fm.end()
1647 1655
1648 1656
@command(
    b'perf::manifest|perfmanifest',
    [
        (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
        (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ]
    + formatteropts,
    b'REV|NODE',
)
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # `rev` is a changeset revision: derive the manifest node from it
        ctx = scmutil.revsingle(repo, rev, rev)
        mnode = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # full hexadecimal node
            mnode = bin(rev)
        else:
            try:
                rev = int(rev)

                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    mnode = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    mnode = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(
                    b'manifest revision must be integer or full node'
                )

    def run():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[mnode].read()

    timer(run)
    fm.end()
1692 1700
1693 1701
@command(b'perf::changeset|perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    node = scmutil.revsingle(repo, rev).node()

    def run():
        repo.changelog.read(node)
        # repo.changelog._cache = None

    timer(run)
    fm.end()
1706 1714
1707 1715
@command(b'perf::ignore|perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def setupone():
        # start each run from a dirstate without a cached ignore matcher
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def runone():
        dirstate._ignore

    timer(runone, setup=setupone, title=b"load")
    fm.end()
1724 1732
1725 1733
@command(
    b'perf::index|perfindex',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'no-lookup', None, b'do not revision lookup post creation'),
    ]
    + formatteropts,
)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:

    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        # NOTE: `_byteskwargs` converted all keys to bytes, so the option
        # must be read with a bytes key; `opts['rev']` raised KeyError here.
        # The Abort message is bytes for consistency with the rest of the
        # extension.
        if opts[b'rev']:
            raise error.Abort(b'--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func

    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)

    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)

    timer(d, setup=setup)
    fm.end()
1788 1796
1789 1797
@command(
    b'perf::nodemap|perfnodemap',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ]
    + formatteropts,
)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]

    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        cl = makecl(unfi)
        # recent index implementations expose `get_rev`; older ones only
        # have the `nodemap` mapping
        if util.safehasattr(cl.index, 'get_rev'):
            nodeget[0] = cl.index.get_rev
        else:
            nodeget[0] = cl.nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:

        def setup():
            setnodeget()

    else:
        setnodeget()
        d()  # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
1860 1868
1861 1869
@command(b'perf::startup|perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def run():
        # re-run the current hg executable with a trivial command and a
        # neutralized HGRCPATH, discarding its output
        hgcmd = sys.argv[0]
        if os.name != 'nt':
            os.system(
                b"HGRCPATH= %s version -q > /dev/null" % fsencode(hgcmd)
            )
        else:
            os.environ['HGRCPATH'] = r' '
            os.system("%s version -q > NUL" % hgcmd)

    timer(run)
    fm.end()
1878 1886
1879 1887
@command(b'perf::parents|perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nodes = [repo.changelog.node(i) for i in _xrange(count)]

    def run():
        for node in nodes:
            repo.changelog.parents(node)

    timer(run)
    fm.end()
1905 1913
1906 1914
@command(b'perf::ctxfiles|perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)

    def run():
        len(repo[x].files())

    timer(run)
    fm.end()
1918 1926
1919 1927
@command(b'perf::rawfiles|perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def run():
        # index 3 of the parsed changelog entry is the file list
        len(cl.read(x)[3])

    timer(run)
    fm.end()
1932 1940
1933 1941
@command(b'perf::lookup|perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def run():
        return len(repo.lookup(rev))

    timer(run)
    fm.end()
1940 1948
1941 1949
@command(
    b'perf::linelogedits|perflinelogedits',
    [
        (b'n', b'edits', 10000, b'number of edits'),
        (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
    ],
    norepo=True,
)
def perflinelogedits(ui, **opts):
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # generate a deterministic pseudo-random stream of edits up front so
    # only replacelines() is measured
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def run():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(run)
    fm.end()
1979 1987
1980 1988
@command(b'perf::revrange|perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def run():
        return len(scmutil.revrange(repo, specs))

    timer(run)
    fm.end()
1988 1996
1989 1997
@command(b'perf::nodelookup|perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog

    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()

    # the revlog constructor arguments differ across versions: try the
    # `radix` form first and fall back to `indexfile`
    try:
        cl = revlog(getsvfs(repo), radix=b"00changelog")
    except TypeError:
        cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")

    def run():
        cl.rev(n)
        clearcaches(cl)

    timer(run)
    fm.end()
2010 2018
2011 2019
@command(
    b'perf::log|perflog',
    [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
)
def perflog(ui, repo, rev=None, **opts):
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    # capture the command output so printing does not pollute the timing
    ui.pushbuffer()

    def run():
        commands.log(
            ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
        )

    timer(run)
    ui.popbuffer()
    fm.end()
2029 2037
2030 2038
@command(b'perf::moonwalk|perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def moonwalk():
        # iterate from tip down to (and excluding) the null revision
        for rev in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            # branch() forces the changelog entry to be read, not just
            # the index
            repo[rev].branch()

    timer(moonwalk)
    fm.end()
2047 2055
2048 2056
@command(
    b'perf::templating|perftemplating',
    [
        (b'r', b'rev', [], b'revisions to run the template on'),
    ]
    + formatteropts,
)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort(
            b"perftemplating not available with this Mercurial",
            hint=b"use 4.3 or later",
        )

    opts = _byteskwargs(opts)

    # render into a ui whose output goes to /dev/null
    nullui = ui.copy()
    nullui.fout = open(os.devnull, 'wb')
    nullui.disablepager()
    revs = opts.get(b'rev') or [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (
        b'{date|shortdate} [{rev}:{node|short}]'
        b' {author|person}: {desc|firstline}\n'
    )
    testedtemplate = (
        defaulttemplate if testedtemplate is None else testedtemplate
    )
    displayer = makelogtemplater(nullui, repo, testedtemplate)

    def render_all():
        for rev in revs:
            ctx = repo[rev]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(render_all)
    fm.end()
2091 2099
2092 2100
def _displaystats(ui, opts, entries, data):
    """display percentile statistics collected by the perfhelper* commands

    ``entries`` is a list of ``(key, title)`` pairs and ``data`` maps each
    key to a list of tuples whose first element is the measured value.
    """
    # use a second formatter because the data are quite different, not sure
    # how it flies with the templater.
    fm = ui.formatter(b'perf-stats', opts)
    for key, title in entries:
        values = data[key]
        if not values:
            # nothing collected for this entry; the percentile lookups
            # below would raise IndexError on an empty list
            continue
        # fix: percentile indexes must be based on the number of collected
        # values, not on the number of keys in the `data` dict (which
        # previously collapsed every percentile to values[0])
        nbvalues = len(values)
        values.sort()
        stats = {
            'key': key,
            'title': title,
            'nbitems': len(values),
            'min': values[0][0],
            '10%': values[(nbvalues * 10) // 100][0],
            '25%': values[(nbvalues * 25) // 100][0],
            '50%': values[(nbvalues * 50) // 100][0],
            '75%': values[(nbvalues * 75) // 100][0],
            '80%': values[(nbvalues * 80) // 100][0],
            '85%': values[(nbvalues * 85) // 100][0],
            '90%': values[(nbvalues * 90) // 100][0],
            '95%': values[(nbvalues * 95) // 100][0],
            '99%': values[(nbvalues * 99) // 100][0],
            'max': values[-1][0],
        }
        fm.startitem()
        fm.data(**stats)
        # make node pretty for the human output
        fm.plain('### %s (%d items)\n' % (title, len(values)))
        lines = [
            'min',
            '10%',
            '25%',
            '50%',
            '75%',
            '80%',
            '85%',
            '90%',
            '95%',
            '99%',
            'max',
        ]
        for l in lines:
            fm.plain('%s: %s\n' % (l, stats[l]))
    fm.end()
2137 2145
2138 2146
@command(
    b'perf::helper-mergecopies|perfhelper-mergecopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelpermergecopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perfmergecopies`

    This command find (base, p1, p2) triplet relevant for copytracing
    benchmarking in the context of a merge. It reports values for some of the
    parameters that impact merge copy tracing time during merge.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision triplets are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # one (column title, %-format) pair per output column
    output_template = [
        ("base", "%(base)12s"),
        ("p1", "%(p1.node)12s"),
        ("p2", "%(p2.node)12s"),
        ("p1.nb-revs", "%(p1.nbrevs)12d"),
        ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
        ("p1.renames", "%(p1.renamedfiles)12d"),
        ("p1.time", "%(p1.time)12.3f"),
        ("p2.nb-revs", "%(p2.nbrevs)12d"),
        ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
        ("p2.renames", "%(p2.renamedfiles)12d"),
        ("p2.time", "%(p2.time)12.3f"),
        ("renames", "%(nbrenamedfiles)12d"),
        ("total.time", "%(time)12.3f"),
    ]
    if not dotiming:
        # rename/timing columns are only produced with --timing
        output_template = [
            i
            for i in output_template
            if not ('time' in i[0] or 'renames' in i[0])
        ]
    header_names = [h for (h, v) in output_template]
    output = ' '.join([v for (h, v) in output_template]) + '\n'
    header = ' '.join(['%12s'] * len(header_names)) + '\n'
    fm.plain(header % tuple(header_names))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        # accumulators consumed by _displaystats at the end
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['parentnbrenames'] = []
            alldata['totalnbrenames'] = []
            alldata['parenttime'] = []
            alldata['totaltime'] = []

    # only merge commits within the requested set are relevant
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1()
        p2 = ctx.p2()
        bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
        for b in bases:
            b = repo[b]
            p1missing = copies._computeforwardmissing(b, p1)
            p2missing = copies._computeforwardmissing(b, p2)
            data = {
                b'base': b.hex(),
                b'p1.node': p1.hex(),
                b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
                b'p1.nbmissingfiles': len(p1missing),
                b'p2.node': p2.hex(),
                b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
                b'p2.nbmissingfiles': len(p2missing),
            }
            if dostats:
                if p1missing:
                    alldata['nbrevs'].append(
                        (data['p1.nbrevs'], b.hex(), p1.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p1.nbmissingfiles'], b.hex(), p1.hex())
                    )
                if p2missing:
                    alldata['nbrevs'].append(
                        (data['p2.nbrevs'], b.hex(), p2.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p2.nbmissingfiles'], b.hex(), p2.hex())
                    )
            if dotiming:
                begin = util.timer()
                mergedata = copies.mergecopies(repo, p1, p2, b)
                end = util.timer()
                # not very stable timing since we did only one run
                data['time'] = end - begin
                # mergedata contains five dicts: "copy", "movewithdir",
                # "diverge", "renamedelete" and "dirmove".
                # The first 4 are about renamed file so lets count that.
                renames = len(mergedata[0])
                renames += len(mergedata[1])
                renames += len(mergedata[2])
                renames += len(mergedata[3])
                data['nbrenamedfiles'] = renames
                begin = util.timer()
                p1renames = copies.pathcopies(b, p1)
                end = util.timer()
                data['p1.time'] = end - begin
                begin = util.timer()
                p2renames = copies.pathcopies(b, p2)
                end = util.timer()
                data['p2.time'] = end - begin
                data['p1.renamedfiles'] = len(p1renames)
                data['p2.renamedfiles'] = len(p2renames)

                if dostats:
                    if p1missing:
                        alldata['parentnbrenames'].append(
                            (data['p1.renamedfiles'], b.hex(), p1.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p1.time'], b.hex(), p1.hex())
                        )
                    if p2missing:
                        alldata['parentnbrenames'].append(
                            (data['p2.renamedfiles'], b.hex(), p2.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p2.time'], b.hex(), p2.hex())
                        )
                    if p1missing or p2missing:
                        alldata['totalnbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                b.hex(),
                                p1.hex(),
                                p2.hex(),
                            )
                        )
                        alldata['totaltime'].append(
                            (data['time'], b.hex(), p1.hex(), p2.hex())
                        )
            fm.startitem()
            fm.data(**data)
            # make node pretty for the human output
            out = data.copy()
            out['base'] = fm.hexfunc(b.node())
            out['p1.node'] = fm.hexfunc(p1.node())
            out['p2.node'] = fm.hexfunc(p2.node())
            fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(
                ('parentnbrenames', 'rename from one parent to base')
            )
            entries.append(('totalnbrenames', 'total number of renames'))
            entries.append(('parenttime', 'time for one parent'))
            entries.append(('totaltime', 'time for both parents'))
        _displaystats(ui, opts, entries, alldata)
2320 2328
2321 2329
@command(
    b'perf::helper-pathcopies|perfhelper-pathcopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # the output grows two extra columns when --timing is requested
    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d "
            "%(nbrenamedfiles)12d %(time)18.5f\n"
        )
        header_names = (
            "source",
            "destination",
            "nb-revs",
            "nb-files",
            "nb-renames",
            "time",
        )
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d\n"
        )
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        # accumulators consumed by _displaystats at the end
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['nbrenames'] = []
            alldata['time'] = []

    # only merge commits within the requested set are relevant
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
                    b'nbmissingfiles': len(missing),
                }
                if dostats:
                    alldata['nbrevs'].append(
                        (
                            data['nbrevs'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                    alldata['nbmissingfiles'].append(
                        (
                            data['nbmissingfiles'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                    if dostats:
                        alldata['time'].append(
                            (
                                data['time'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                        alldata['nbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                fm.startitem()
                fm.data(**data)
                # make node pretty for the human output
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
    if dostats:
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('nbrenames', 'renamed files'))
            entries.append(('time', 'time'))
        _displaystats(ui, opts, entries, alldata)
2460 2468
2461 2469
@command(b'perf::cca|perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    # Benchmark construction of a case-collision auditor over the dirstate.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def build_auditor():
        return scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(build_auditor)
    fm.end()
2468 2476
2469 2477
@command(b'perf::fncacheload|perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    # Benchmark loading the fncache file from disk.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    timer(lambda: store.fncache._load())
    fm.end()
2481 2489
2482 2490
@command(b'perf::fncachewrite|perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    # Benchmark writing the fncache inside a throw-away transaction.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    lock = repo.lock()
    store.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    tr.addbackup(b'fncache')

    def write_fncache():
        # mark dirty so a write actually happens each run
        store.fncache._dirty = True
        store.fncache.write(tr)

    timer(write_fncache)
    tr.close()
    lock.release()
    fm.end()
2501 2509
2502 2510
@command(b'perf::fncacheencode|perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    # Benchmark path-encoding every entry currently in the fncache.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    store.fncache._load()

    def encode_all():
        for path in store.fncache.entries:
            store.encode(path)

    timer(encode_all)
    fm.end()
2516 2524
2517 2525
def _bdiffworker(q, blocks, xdiff, ready, done):
    # Worker loop for threaded perfbdiff runs.
    #
    # Drains text pairs from queue ``q`` and diffs each one with the
    # algorithm selected by the ``xdiff``/``blocks`` flags. A ``None``
    # item marks the end of a batch: the worker acknowledges it and then
    # parks on the ``ready`` condition until the main thread wakes it for
    # the next batch (or sets ``done`` to shut the worker down).
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done()  # for the None one
        with ready:
            ready.wait()
2533 2541
2534 2542
def _manifestrevision(repo, mnode):
    """return the raw manifest text for manifest node ``mnode``"""
    ml = repo.manifestlog

    # modern Mercurial exposes storage via getstorage(); older versions
    # kept a private revlog attribute instead
    if util.safehasattr(ml, b'getstorage'):
        return ml.getstorage(b'').revision(mnode)
    return ml._revlog.revision(mnode)
2544 2552
2545 2553
@command(
    b'perf::bdiff|perfbdiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
        (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
        (b'', b'blocks', False, b'test computing diffs into blocks'),
        (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],
    b'-c|-m|FILE REV',
)
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    # with -c/-m, the positional argument is the revision, not a file
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    # collect all (old, new) text pairs up front so only the diffing
    # itself is timed
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:

        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)

    else:
        # start the worker pool; each worker consumes one initial None so
        # the q.join() below returns once all workers are parked
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(
                target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
            ).start()
        q.join()

        def d():
            # feed one batch to the workers and wait for completion
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # shut the worker pool down
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
2660 2668
2661 2669
@command(
    b'perf::unbundle',
    formatteropts,
    b'BUNDLE_FILE',
)
def perf_unbundle(ui, repo, fname, **opts):
    """benchmark application of a bundle in a repository.

    This does not include the final transaction processing"""
    from mercurial import exchange
    from mercurial import bundle2

    opts = _byteskwargs(opts)

    with repo.lock():
        # [bundle generator, transaction], rebuilt by setup() before each run
        bundle = [None, None]
        orig_quiet = repo.ui.quiet
        try:
            repo.ui.quiet = True
            with open(fname, mode="rb") as f:

                def noop_report(*args, **kwargs):
                    pass

                def setup():
                    # roll back whatever the previous run applied
                    gen, tr = bundle
                    if tr is not None:
                        tr.abort()
                    bundle[:] = [None, None]
                    f.seek(0)
                    bundle[0] = exchange.readbundle(ui, f, fname)
                    bundle[1] = repo.transaction(b'perf::unbundle')
                    bundle[1]._report = noop_report  # silence the transaction

                def apply():
                    gen, tr = bundle
                    bundle2.applybundle(
                        repo,
                        gen,
                        tr,
                        source=b'perf::unbundle',
                        url=fname,
                    )

                timer, fm = gettimer(ui, opts)
                timer(apply, setup=setup)
                fm.end()
        finally:
            # fix: this was `==` (a no-op comparison) — actually restore
            # the original quiet level
            repo.ui.quiet = orig_quiet
            # abort the transaction left over from the last run
            gen, tr = bundle
            if tr is not None:
                tr.abort()
2714 2722
2715 2723
@command(
    b'perf::unidiff|perfunidiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ],
    b'-c|-m|FILE REV',
)
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    # with -c/-m, the positional argument is the revision, not a file
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    # collect all (left, right) text pairs up front so only the diffing
    # itself is timed
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False
            )
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
2794 2802
2795 2803
@command(b'perf::diffwd|perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    # benchmark each whitespace-handling flag combination separately
    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        diffkwargs = {options[flag]: b'1' for flag in diffopt}

        def run_diff(kwargs=diffkwargs):
            ui.pushbuffer()
            commands.diff(ui, repo, **kwargs)
            ui.popbuffer()

        flags = diffopt.encode('ascii')
        if flags:
            title = b'diffopts: -%s' % flags
        else:
            title = b'diffopts: none'
        timer(run_diff, title=title)
    fm.end()
2819 2827
2820 2828
@command(
    b'perf::revlogindex|perfrevlogindex',
    revlogopts + formatteropts,
    b'-c|-m|FILE',
)
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    # compat with hg <= 5.8
    radix = getattr(rl, 'radix', None)
    indexfile = getattr(rl, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(rl, 'indexfile')
    data = opener.read(indexfile)

    # decode the revlog header: low 16 bits are the version, bit 16 the
    # inline-data flag
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        inline = header & (1 << 16)
    else:
        raise error.Abort(b'unsupported revlog version: %d' % version)

    parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
    if parse_index_v1 is None:
        # hg <= 5.x exposed the parser through revlogio
        parse_index_v1 = mercurial.revlog.revlogio().parseindex

    rllen = len(rl)

    # sample nodes at various depths of the index
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        if radix is not None:
            revlog(opener, radix=radix)
        else:
            # hg <= 5.8
            revlog(opener, indexfile=indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        parse_index_v1(data, inline)

    def getentry(revornode):
        index = parse_index_v1(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = parse_index_v1(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        index = parse_index_v1(data, inline)[0]
        # modern indexes expose rev(); older ones expose a nodemap
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        try:
            rev(node)
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        index = parse_index_v1(data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        for i in range(count):
            for node in nodes:
                try:
                    rev(node)
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
        (
            lambda: resolvenodes(allnodesrev, 2),
            b'look up all nodes 2x (reverse)',
        ),
        (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
        (
            lambda: getentries(allrevs, 2),
            b'retrieve all index entries 2x (forward)',
        ),
        (
            lambda: getentries(allrevsrev),
            b'retrieve all index entries (reverse)',
        ),
        (
            lambda: getentries(allrevsrev, 2),
            b'retrieve all index entries 2x (reverse)',
        ),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2966 2974
2967 2975
@command(
    b'perf::revlogrevisions|perfrevlogrevisions',
    revlogopts
    + formatteropts
    + [
        (b'd', b'dist', 100, b'distance between the revisions'),
        (b's', b'startrev', 0, b'revision to start reading at'),
        (b'', b'reverse', False, b'read in reverse'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogrevisions(
    ui, repo, file_=None, startrev=0, reverse=False, **opts
):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # a negative start revision counts from the end
    if startrev < 0:
        startrev = rllen + startrev

    def read_series():
        rl.clearcaches()

        first = startrev
        last = rllen
        step = opts[b'dist']
        if reverse:
            first, last = last - 1, first - 1
            step = -step

        for x in _xrange(first, last, step):
            # Old revisions don't support passing int.
            rl.revision(rl.node(x))

    timer, fm = gettimer(ui, opts)
    timer(read_series)
    fm.end()
3016 3024
3017 3025
@command(
    b'perf::revlogwrite|perfrevlogwrite',
    revlogopts
    + formatteropts
    + [
        (b's', b'startrev', 1000, b'revision to start writing at'),
        (b'', b'stoprev', -1, b'last revision to write'),
        (b'', b'count', 3, b'number of passes to perform'),
        (b'', b'details', False, b'print timing for every revisions tested'),
        (b'', b'source', b'full', b'the kind of data feed in the revlog'),
        (b'', b'lazydeltabase', True, b'try the provided delta first'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
    (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    Note: This performance command measures performance in a custom way. As a
    result some of the global configuration of the 'perf' command does not
    apply to it:

    * ``pre-run``: disabled

    * ``profile-benchmark``: disabled

    * ``run-limits``: disabled use --count instead
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative revisions count from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (
        b'full',
        b'parent-1',
        b'parent-2',
        b'parent-smallest',
        b'storage',
    )
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(
            ui,
            rl,
            source,
            startrev,
            stoprev,
            c + 1,
            lazydeltabase=lazydeltabase,
            clearcaches=clearcaches,
        )
        allresults.append(timing)

    ### consolidate the results in a single list
    # every pass timed the same revisions in the same order, so the timings
    # can be merged positionally into (rev, [t_pass1, t_pass2, ...]) entries
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # fix: the median row was previously computed with `* 70 // 100`,
        # reporting the 70th percentile entry under the "50%" label
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append(
            (
                sum(x[1][0] for x in item),
                sum(x[1][1] for x in item),
                sum(x[1][2] for x in item),
            )
        )
    formatone(
        fm,
        totaltime,
        title="total time (%d revs)" % resultcount,
        displayall=displayall,
    )
    fm.end()
3159 3167
3160 3168
3161 3169 class _faketr:
3162 3170 def add(s, x, y, z=None):
3163 3171 return None
3164 3172
3165 3173
def _timeonewrite(
    ui,
    orig,
    source,
    startrev,
    stoprev,
    runidx=None,
    lazydeltabase=True,
    clearcaches=True,
):
    """Replay revisions of ``orig`` into a truncated temporary copy, timing
    each ``addrawrevision`` call.

    Returns a list of ``(rev, timing)`` pairs, one per replayed revision.
    """
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)
            updateprogress = progress.update

            def completeprogress():
                progress.complete()

        else:

            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)

            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                # time cold-cache behavior for every revision
                dest.index.clearcaches()
                dest.clearcaches()
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
3215 3223
3216 3224
def _getrevisionseed(orig, rev, tr, source):
    """Build the ``(args, kwargs)`` pair to feed ``addrawrevision`` for
    revision ``rev`` of ``orig``.

    ``source`` selects how the data is supplied: as a full text, as a
    cached delta against one of the parents, or as the delta already
    stored in ``orig``.
    """
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        cachedelta = (orig.rev(p1), orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to the first parent when there is no second one
        base = p1 if p2 == nullid else p2
        cachedelta = (orig.rev(base), orig.revdiff(base, rev))
    elif source == b'parent-smallest':
        base = p1
        diff = orig.revdiff(p1, rev)
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            # pick p2 only when its delta is strictly smaller
            if len(p2diff) < len(diff):
                base = p2
                diff = p2diff
        cachedelta = (orig.rev(base), diff)
    elif source == b'storage':
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return (
        (text, tr, linkrev, p1, p2),
        {'node': node, 'flags': flags, 'cachedelta': cachedelta},
    )
3257 3265
3258 3266
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Yield a writable copy of revlog ``orig`` truncated to ``truncaterev``.

    The copy lives in a temporary directory that is deleted on exit, so a
    benchmark can re-add revisions >= ``truncaterev`` without touching the
    original repository.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    k = 'upperboundcomp'
    if util.safehasattr(orig, k):
        # forward the compression upper-bound knob when this version has it
        revlogkwargs[k] = getattr(orig, k)

    indexfile = getattr(orig, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(orig, 'indexfile')
    origindexpath = orig.opener.join(indexfile)

    # attribute was renamed `datafile` -> `_datafile`; try new name first
    datafile = getattr(orig, '_datafile', getattr(orig, 'datafile'))
    origdatapath = orig.opener.join(datafile)
    radix = b'revlog'
    indexname = b'revlog.i'
    dataname = b'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        # append mode keeps existing content; seek(0) + truncate(n) cuts the
        # file to the first `truncaterev` index entries / data bytes
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        try:
            # modern API: files are derived from a radix
            dest = revlog(vfs, radix=radix, **revlogkwargs)
        except TypeError:
            # older API: explicit index/data file names
            dest = revlog(
                vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
            )
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
3319 3327
3320 3328
@command(
    b'perf::revlogchunks|perfrevlogchunks',
    revlogopts
    + formatteropts
    + [
        (b'e', b'engines', b'', b'compression engines to use'),
        (b's', b'startrev', 0, b'revision to start at'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = {e.strip() for e in engines.split(b',')}
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # default to every available engine that can actually compress
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        """Open a raw file handle on the revlog's backing file."""
        if rl._inline:
            indexfile = getattr(rl, '_indexfile', None)
            if indexfile is None:
                # compatibility with <= hg-5.8
                indexfile = getattr(rl, 'indexfile')
            return getsvfs(repo)(indexfile)
        else:
            # fix: the first lookup previously used 'datafile' twice, so the
            # modern '_datafile' attribute was never consulted
            datafile = getattr(rl, '_datafile', getattr(rl, 'datafile'))
            return getsvfs(repo)(datafile)

    def doread():
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append(
            (
                functools.partial(docompress, compressor),
                b'compress w/ %s' % engine,
            )
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3453 3461
3454 3462
@command(
    b'perf::revlogrevision|perfrevlogrevision',
    revlogopts
    + formatteropts
    + [(b'', b'cache', False, b'use caches instead of clearing')],
    b'-c|-m|FILE REV',
)
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # with -c/-m there is no FILE argument: the positional is the rev
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # hoist attribute lookups out of the hot loop
        start = r.start
        length = r.length
        inline = r._inline
        try:
            iosize = r.index.entry_size
        except AttributeError:
            # older revlog API
            iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        # older versions kept the helper on the revlog module
        slicechunk = getattr(revlog, '_slicechunk', None)

    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    # pre-compute the inputs so each benchmark measures only its own phase
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend(
        [
            (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
            (lambda: dodecompress(rawchunks), b'decompress'),
            (lambda: dopatch(text, bins), b'patch'),
            (lambda: dohash(text), b'hash'),
        ]
    )

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
3599 3607
3600 3608
@command(
    b'perf::revset|perfrevset',
    [
        (b'C', b'clear', False, b'clear volatile cache between each call.'),
        (b'', b'contexts', False, b'obtain changectx for each revision'),
    ]
    + formatteropts,
    b"REVSET",
)
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if need to evaluate the impact of build volatile
    revisions set cache on the revset execution. Volatile cache hold filtered
    and obsolete related cache."""
    # NOTE: the docstring previously referenced a nonexistent `--clean`
    # option; the declared flag is `-C/--clear`.
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def d():
        if clear:
            repo.invalidatevolatilesets()
        if contexts:
            # materialize a full changectx per revision
            for ctx in repo.set(expr):
                pass
        else:
            # only resolve revision numbers
            for r in repo.revs(expr):
                pass

    timer(d)
    fm.end()
3632 3640
3633 3641
@command(
    b'perf::volatilesets|perfvolatilesets',
    [
        (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
    ]
    + formatteropts,
)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def obsbench(setname):
        """benchmark closure recomputing one obsolescence-related set"""

        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            obsolete.getrevs(repo, setname)

        return d

    allobs = sorted(obsolete.cachefuncs)
    if names:
        allobs = [setname for setname in allobs if setname in names]

    for setname in allobs:
        timer(obsbench(setname), title=setname)

    def filterbench(filtername):
        """benchmark closure recomputing one repoview filter"""

        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            repoview.filterrevs(repo, filtername)

        return d

    allfilter = sorted(repoview.filtertable)
    if names:
        allfilter = [f for f in allfilter if f in names]

    for filtername in allfilter:
        timer(filterbench(filtername), title=filtername)
    fm.end()
3681 3689
3682 3690
@command(
    b'perf::branchmap|perfbranchmap',
    [
        (b'f', b'full', False, b'Includes build time of subset'),
        (
            b'',
            b'clear-revbranch',
            False,
            b'purge the revbranch cache between computation',
        ),
    ]
    + formatteropts,
)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)

    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches

        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # drop every cached branchmap so subset build time is included
                view._branchcaches.clear()
            else:
                # only drop the branchmap of the filter being measured
                filtered.pop(filtername, None)
            view.branchmap()

        return d

    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        # pick a filter whose subset is not itself pending, so subsets are
        # always benchmarked (and warmed) before their supersets
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable on-disk branchmap reads/writes so only computation is timed
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=printname)
    finally:
        # always restore the patched branchmap I/O entry points
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
3772 3780
3773 3781
@command(
    b'perf::branchmapupdate|perfbranchmapupdate',
    [
        (b'', b'base', [], b'subset of revision to start from'),
        (b'', b'target', [], b'subset of revision to end with'),
        (b'', b'clear-caches', False, b'clear cache between each runs'),
    ]
    + formatteropts,
)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from for <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    # update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None]  # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    # the revisions the benchmarked update will have to process
    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    def basefilter(repo, visibilityexceptions=None):
        # repoview filter hiding everything outside the base set
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        # repoview filter hiding everything outside the target set
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        # temporarily register the two synthetic filters above
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset where found
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            # each run starts from a fresh copy of the base branchmap
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        # always unregister the synthetic filters
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3882 3890
3883 3891
@command(
    b'perf::branchmapload|perfbranchmapload',
    [
        (b'f', b'filter', b'', b'Specify repoview filter'),
        # fix: help string typo "brachmap" -> "branchmap"
        (b'', b'list', False, b'List branchmap filter caches'),
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ]
    + formatteropts,
)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # just enumerate the cached branchmap files and their sizes
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(
                    b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
                )
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap()  # make sure we have a relevant, up to date branchmap

    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached
    while fromfile(repo) is None:
        # walk up the subset chain until a cached branchmap is found
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(
                b'No branchmap cached for %s repo' % (filter or b'unfiltered')
            )
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)

    def setup():
        if clearrevlogs:
            clearchangelog(repo)

    def bench():
        fromfile(repo)

    timer(bench, setup=setup)
    fm.end()
3942 3950
3943 3951
@command(b'perf::loadmarkers|perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    store_vfs = getsvfs(repo)

    def count_markers():
        # parsing happens inside obsstore(); len() reports how many were read
        return len(obsolete.obsstore(repo, store_vfs))

    timer, fm = gettimer(ui)
    timer(count_markers)
    fm.end()
3953 3961
3954 3962
3955 3963 @command(
3956 3964 b'perf::lrucachedict|perflrucachedict',
3957 3965 formatteropts
3958 3966 + [
3959 3967 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
3960 3968 (b'', b'mincost', 0, b'smallest cost of items in cache'),
3961 3969 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
3962 3970 (b'', b'size', 4, b'size of cache'),
3963 3971 (b'', b'gets', 10000, b'number of key lookups'),
3964 3972 (b'', b'sets', 10000, b'number of key sets'),
3965 3973 (b'', b'mixed', 10000, b'number of mixed mode operations'),
3966 3974 (
3967 3975 b'',
3968 3976 b'mixedgetfreq',
3969 3977 50,
3970 3978 b'frequency of get vs set ops in mixed mode',
3971 3979 ),
3972 3980 ],
3973 3981 norepo=True,
3974 3982 )
3975 3983 def perflrucache(
3976 3984 ui,
3977 3985 mincost=0,
3978 3986 maxcost=100,
3979 3987 costlimit=0,
3980 3988 size=4,
3981 3989 gets=10000,
3982 3990 sets=10000,
3983 3991 mixed=10000,
3984 3992 mixedgetfreq=50,
3985 3993 **opts
3986 3994 ):
3987 3995 opts = _byteskwargs(opts)
3988 3996
3989 3997 def doinit():
3990 3998 for i in _xrange(10000):
3991 3999 util.lrucachedict(size)
3992 4000
3993 4001 costrange = list(range(mincost, maxcost + 1))
3994 4002
3995 4003 values = []
3996 4004 for i in _xrange(size):
3997 4005 values.append(random.randint(0, _maxint))
3998 4006
3999 4007 # Get mode fills the cache and tests raw lookup performance with no
4000 4008 # eviction.
4001 4009 getseq = []
4002 4010 for i in _xrange(gets):
4003 4011 getseq.append(random.choice(values))
4004 4012
4005 4013 def dogets():
4006 4014 d = util.lrucachedict(size)
4007 4015 for v in values:
4008 4016 d[v] = v
4009 4017 for key in getseq:
4010 4018 value = d[key]
4011 4019 value # silence pyflakes warning
4012 4020
4013 4021 def dogetscost():
4014 4022 d = util.lrucachedict(size, maxcost=costlimit)
4015 4023 for i, v in enumerate(values):
4016 4024 d.insert(v, v, cost=costs[i])
4017 4025 for key in getseq:
4018 4026 try:
4019 4027 value = d[key]
4020 4028 value # silence pyflakes warning
4021 4029 except KeyError:
4022 4030 pass
4023 4031
4024 4032 # Set mode tests insertion speed with cache eviction.
4025 4033 setseq = []
4026 4034 costs = []
4027 4035 for i in _xrange(sets):
4028 4036 setseq.append(random.randint(0, _maxint))
4029 4037 costs.append(random.choice(costrange))
4030 4038
4031 4039 def doinserts():
4032 4040 d = util.lrucachedict(size)
4033 4041 for v in setseq:
4034 4042 d.insert(v, v)
4035 4043
4036 4044 def doinsertscost():
4037 4045 d = util.lrucachedict(size, maxcost=costlimit)
4038 4046 for i, v in enumerate(setseq):
4039 4047 d.insert(v, v, cost=costs[i])
4040 4048
4041 4049 def dosets():
4042 4050 d = util.lrucachedict(size)
4043 4051 for v in setseq:
4044 4052 d[v] = v
4045 4053
4046 4054 # Mixed mode randomly performs gets and sets with eviction.
4047 4055 mixedops = []
4048 4056 for i in _xrange(mixed):
4049 4057 r = random.randint(0, 100)
4050 4058 if r < mixedgetfreq:
4051 4059 op = 0
4052 4060 else:
4053 4061 op = 1
4054 4062
4055 4063 mixedops.append(
4056 4064 (op, random.randint(0, size * 2), random.choice(costrange))
4057 4065 )
4058 4066
4059 4067 def domixed():
4060 4068 d = util.lrucachedict(size)
4061 4069
4062 4070 for op, v, cost in mixedops:
4063 4071 if op == 0:
4064 4072 try:
4065 4073 d[v]
4066 4074 except KeyError:
4067 4075 pass
4068 4076 else:
4069 4077 d[v] = v
4070 4078
4071 4079 def domixedcost():
4072 4080 d = util.lrucachedict(size, maxcost=costlimit)
4073 4081
4074 4082 for op, v, cost in mixedops:
4075 4083 if op == 0:
4076 4084 try:
4077 4085 d[v]
4078 4086 except KeyError:
4079 4087 pass
4080 4088 else:
4081 4089 d.insert(v, v, cost=cost)
4082 4090
4083 4091 benches = [
4084 4092 (doinit, b'init'),
4085 4093 ]
4086 4094
4087 4095 if costlimit:
4088 4096 benches.extend(
4089 4097 [
4090 4098 (dogetscost, b'gets w/ cost limit'),
4091 4099 (doinsertscost, b'inserts w/ cost limit'),
4092 4100 (domixedcost, b'mixed w/ cost limit'),
4093 4101 ]
4094 4102 )
4095 4103 else:
4096 4104 benches.extend(
4097 4105 [
4098 4106 (dogets, b'gets'),
4099 4107 (doinserts, b'inserts'),
4100 4108 (dosets, b'sets'),
4101 4109 (domixed, b'mixed'),
4102 4110 ]
4103 4111 )
4104 4112
4105 4113 for fn, title in benches:
4106 4114 timer, fm = gettimer(ui, opts)
4107 4115 timer(fn, title=title)
4108 4116 fm.end()
4109 4117
4110 4118
@command(
    b'perf::write|perfwrite',
    formatteropts
    + [
        (b'', b'write-method', b'write', b'ui write method'),
        (b'', b'nlines', 100, b'number of lines'),
        (b'', b'nitems', 100, b'number of items (per line)'),
        (b'', b'item', b'x', b'item that is written'),
        (b'', b'batch-line', None, b'pass whole line to write method at once'),
        (b'', b'flush-line', None, b'flush after each line'),
    ],
)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write (and others)"""
    opts = _byteskwargs(opts)

    write = getattr(ui, _sysstr(opts[b'write_method']))
    nlines = int(opts[b'nlines'])
    nitems = int(opts[b'nitems'])
    item = opts[b'item']
    batch_line = opts.get(b'batch_line')
    flush_line = opts.get(b'flush_line')

    # pre-build the whole line once when it is emitted in a single call
    full_line = item * nitems + b'\n' if batch_line else None

    def benchmark():
        for _line_idx in pycompat.xrange(nlines):
            if batch_line:
                write(full_line)
            else:
                # one write call per item, then the terminating newline
                for _item_idx in pycompat.xrange(nitems):
                    write(item)
                write(b'\n')
            if flush_line:
                ui.flush()
        ui.flush()

    timer, fm = gettimer(ui, opts)
    timer(benchmark)
    fm.end()
4152 4160
4153 4161
def uisetup(ui):
    """extension setup hook

    For "historical portability": on Mercurial versions old enough to still
    have cmdutil.openrevlog but not commands.debugrevlogopts, wrap
    openrevlog() so that passing --dir aborts with a clear message instead
    of failing obscurely.
    """
    if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
        commands, b'debugrevlogopts'
    ):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            # repo.dirlog only exists where --dir is actually supported
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(
                    b"This version doesn't support --dir option",
                    hint=b"use 3.5 or later",
                )
            return orig(repo, cmd, file_, opts)

        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
4172 4180
4173 4181
@command(
    b'perf::progress|perfprogress',
    formatteropts
    + [
        (b'', b'topic', b'topic', b'topic for progress messages'),
        (b'c', b'total', 1000000, b'total value we are progressing to'),
    ],
    norepo=True,
)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    def run_progress():
        # drive one full progress bar from 0 up to ``total``
        with ui.makeprogress(topic, total=total) as progress:
            for _step in _xrange(total):
                progress.increment()

    timer, fm = gettimer(ui, opts)
    timer(run_progress)
    fm.end()
General Comments 0
You need to be logged in to leave comments. Login now