##// END OF EJS Templates
perf: make perf::bundle compatible before 61ba04693d65...
marmoute -
r50368:a7a5740b default
parent child Browse files
Show More
@@ -1,4190 +1,4195 b''
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance
3 3
4 4 Configurations
5 5 ==============
6 6
7 7 ``perf``
8 8 --------
9 9
10 10 ``all-timing``
11 11 When set, additional statistics will be reported for each benchmark: best,
12 12 worst, median average. If not set only the best timing is reported
13 13 (default: off).
14 14
15 15 ``presleep``
16 16 number of second to wait before any group of runs (default: 1)
17 17
18 18 ``pre-run``
19 19 number of run to perform before starting measurement.
20 20
21 21 ``profile-benchmark``
22 22 Enable profiling for the benchmarked section.
23 23 (The first iteration is benchmarked)
24 24
25 25 ``run-limits``
26 26 Control the number of runs each benchmark will perform. The option value
27 27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 28 conditions are considered in order with the following logic:
29 29
30 30 If benchmark has been running for <time> seconds, and we have performed
31 31 <numberofrun> iterations, stop the benchmark,
32 32
33 33 The default value is: `3.0-100, 10.0-3`
34 34
35 35 ``stub``
36 36 When set, benchmarks will only be run once, useful for testing
37 37 (default: off)
38 38 '''
39 39
40 40 # "historical portability" policy of perf.py:
41 41 #
42 42 # We have to do:
43 43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 46 # - make historical perf command work correctly with as wide Mercurial
47 47 # version as possible
48 48 #
49 49 # We have to do, if possible with reasonable cost:
50 50 # - make recent perf command for historical feature work correctly
51 51 # with early Mercurial
52 52 #
53 53 # We don't have to do:
54 54 # - make perf command for recent feature work correctly with early
55 55 # Mercurial
56 56
57 57 import contextlib
58 58 import functools
59 59 import gc
60 60 import os
61 61 import random
62 62 import shutil
63 63 import struct
64 64 import sys
65 65 import tempfile
66 66 import threading
67 67 import time
68 68
69 69 import mercurial.revlog
70 70 from mercurial import (
71 71 changegroup,
72 72 cmdutil,
73 73 commands,
74 74 copies,
75 75 error,
76 76 extensions,
77 77 hg,
78 78 mdiff,
79 79 merge,
80 80 util,
81 81 )
82 82
83 83 # for "historical portability":
84 84 # try to import modules separately (in dict order), and ignore
85 85 # failure, because these aren't available with early Mercurial
86 86 try:
87 87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 88 except ImportError:
89 89 pass
90 90 try:
91 91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 92 except ImportError:
93 93 pass
94 94 try:
95 95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96 96
97 97 dir(registrar) # forcibly load it
98 98 except ImportError:
99 99 registrar = None
100 100 try:
101 101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 102 except ImportError:
103 103 pass
104 104 try:
105 105 from mercurial.utils import repoviewutil # since 5.0
106 106 except ImportError:
107 107 repoviewutil = None
108 108 try:
109 109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 110 except ImportError:
111 111 pass
112 112 try:
113 113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 114 except ImportError:
115 115 pass
116 116
117 117 try:
118 118 from mercurial import profiling
119 119 except ImportError:
120 120 profiling = None
121 121
122 122 try:
123 123 from mercurial.revlogutils import constants as revlog_constants
124 124
125 125 perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')
126 126
127 127 def revlog(opener, *args, **kwargs):
128 128 return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)
129 129
130 130
131 131 except (ImportError, AttributeError):
132 132 perf_rl_kind = None
133 133
134 134 def revlog(opener, *args, **kwargs):
135 135 return mercurial.revlog.revlog(opener, *args, **kwargs)
136 136
137 137
def identity(a):
    """Return *a* unchanged (fallback when a pycompat helper is missing)."""
    return a
140 140
141 141
142 142 try:
143 143 from mercurial import pycompat
144 144
145 145 getargspec = pycompat.getargspec # added to module after 4.5
146 146 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
147 147 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
148 148 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
149 149 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
150 150 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
151 151 if pycompat.ispy3:
152 152 _maxint = sys.maxsize # per py3 docs for replacing maxint
153 153 else:
154 154 _maxint = sys.maxint
155 155 except (NameError, ImportError, AttributeError):
156 156 import inspect
157 157
158 158 getargspec = inspect.getargspec
159 159 _byteskwargs = identity
160 160 _bytestr = str
161 161 fsencode = identity # no py3 support
162 162 _maxint = sys.maxint # no py3 support
163 163 _sysstr = lambda x: x # no py3 support
164 164 _xrange = xrange
165 165
166 166 try:
167 167 # 4.7+
168 168 queue = pycompat.queue.Queue
169 169 except (NameError, AttributeError, ImportError):
170 170 # <4.7.
171 171 try:
172 172 queue = pycompat.queue
173 173 except (NameError, AttributeError, ImportError):
174 174 import Queue as queue
175 175
176 176 try:
177 177 from mercurial import logcmdutil
178 178
179 179 makelogtemplater = logcmdutil.maketemplater
180 180 except (AttributeError, ImportError):
181 181 try:
182 182 makelogtemplater = cmdutil.makelogtemplater
183 183 except (AttributeError, ImportError):
184 184 makelogtemplater = None
185 185
186 186 # for "historical portability":
187 187 # define util.safehasattr forcibly, because util.safehasattr has been
188 188 # available since 1.9.3 (or 94b200a11cf7)
189 189 _undefined = object()
190 190
191 191
def safehasattr(thing, attr):
    """hasattr() replacement that accepts a bytes attribute name."""
    sentinel = _undefined
    return getattr(thing, _sysstr(attr), sentinel) is not sentinel
194 194
195 195
196 196 setattr(util, 'safehasattr', safehasattr)
197 197
198 198 # for "historical portability":
199 199 # define util.timer forcibly, because util.timer has been available
200 200 # since ae5d60bb70c9
201 201 if safehasattr(time, 'perf_counter'):
202 202 util.timer = time.perf_counter
203 203 elif os.name == b'nt':
204 204 util.timer = time.clock
205 205 else:
206 206 util.timer = time.time
207 207
208 208 # for "historical portability":
209 209 # use locally defined empty option list, if formatteropts isn't
210 210 # available, because commands.formatteropts has been available since
211 211 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
212 212 # available since 2.2 (or ae5f92e154d3)
213 213 formatteropts = getattr(
214 214 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
215 215 )
216 216
217 217 # for "historical portability":
218 218 # use locally defined option list, if debugrevlogopts isn't available,
219 219 # because commands.debugrevlogopts has been available since 3.7 (or
220 220 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
221 221 # since 1.9 (or a79fea6b3e77).
222 222 revlogopts = getattr(
223 223 cmdutil,
224 224 "debugrevlogopts",
225 225 getattr(
226 226 commands,
227 227 "debugrevlogopts",
228 228 [
229 229 (b'c', b'changelog', False, b'open changelog'),
230 230 (b'm', b'manifest', False, b'open manifest'),
231 231 (b'', b'dir', False, b'open directory manifest'),
232 232 ],
233 233 ),
234 234 )
235 235
236 236 cmdtable = {}
237 237
238 238 # for "historical portability":
239 239 # define parsealiases locally, because cmdutil.parsealiases has been
240 240 # available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a b"name|alias1|..." command spec into its list of aliases."""
    return list(cmd.split(b"|"))
243 243
244 244
# pick the newest available way to register a "@command" decorator,
# falling back to progressively older Mercurial internals
if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if 'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command

        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)


else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func

        return decorator
276 276
277 277
278 278 try:
279 279 import mercurial.registrar
280 280 import mercurial.configitems
281 281
282 282 configtable = {}
283 283 configitem = mercurial.registrar.configitem(configtable)
284 284 configitem(
285 285 b'perf',
286 286 b'presleep',
287 287 default=mercurial.configitems.dynamicdefault,
288 288 experimental=True,
289 289 )
290 290 configitem(
291 291 b'perf',
292 292 b'stub',
293 293 default=mercurial.configitems.dynamicdefault,
294 294 experimental=True,
295 295 )
296 296 configitem(
297 297 b'perf',
298 298 b'parentscount',
299 299 default=mercurial.configitems.dynamicdefault,
300 300 experimental=True,
301 301 )
302 302 configitem(
303 303 b'perf',
304 304 b'all-timing',
305 305 default=mercurial.configitems.dynamicdefault,
306 306 experimental=True,
307 307 )
308 308 configitem(
309 309 b'perf',
310 310 b'pre-run',
311 311 default=mercurial.configitems.dynamicdefault,
312 312 )
313 313 configitem(
314 314 b'perf',
315 315 b'profile-benchmark',
316 316 default=mercurial.configitems.dynamicdefault,
317 317 )
318 318 configitem(
319 319 b'perf',
320 320 b'run-limits',
321 321 default=mercurial.configitems.dynamicdefault,
322 322 experimental=True,
323 323 )
324 324 except (ImportError, AttributeError):
325 325 pass
326 326 except TypeError:
327 327 # compatibility fix for a11fd395e83f
328 328 # hg version: 5.2
329 329 configitem(
330 330 b'perf',
331 331 b'presleep',
332 332 default=mercurial.configitems.dynamicdefault,
333 333 )
334 334 configitem(
335 335 b'perf',
336 336 b'stub',
337 337 default=mercurial.configitems.dynamicdefault,
338 338 )
339 339 configitem(
340 340 b'perf',
341 341 b'parentscount',
342 342 default=mercurial.configitems.dynamicdefault,
343 343 )
344 344 configitem(
345 345 b'perf',
346 346 b'all-timing',
347 347 default=mercurial.configitems.dynamicdefault,
348 348 )
349 349 configitem(
350 350 b'perf',
351 351 b'pre-run',
352 352 default=mercurial.configitems.dynamicdefault,
353 353 )
354 354 configitem(
355 355 b'perf',
356 356 b'profile-benchmark',
357 357 default=mercurial.configitems.dynamicdefault,
358 358 )
359 359 configitem(
360 360 b'perf',
361 361 b'run-limits',
362 362 default=mercurial.configitems.dynamicdefault,
363 363 )
364 364
365 365
def getlen(ui):
    """Return len(), or a stub that always reports 1 when perf.stub is set."""
    if ui.configbool(b"perf", b"stub", False):
        return lambda _seq: 1
    return len
370 370
371 371
class noop:
    """No-op context manager (used when profiling is disabled)."""

    def __enter__(self):
        return None

    def __exit__(self, *exc_info):
        return None
380 380
381 381
382 382 NOOPCTX = noop()
383 383
384 384
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter:
            """Minimized composition of baseformatter and plainformatter"""

            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short

            def __nonzero__(self):
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    # each entry is "<seconds>-<minimum-run-count>"; malformed entries are
    # warned about and skipped rather than aborting the benchmark
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
            continue
        try:
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    # experimental config: perf.profile-benchmark (profiles first iteration)
    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
    )
    return t, fm
507 507
508 508
def stub_timer(fm, func, setup=None, title=None):
    """Run *func* exactly once (perf.stub mode); no timing is reported."""
    if setup:
        setup()
    func()
513 513
514 514
@contextlib.contextmanager
def timeone():
    """Time one run; the yielded list receives (wall, user, sys) on exit."""
    measurements = []
    os_before = os.times()
    wall_before = util.timer()
    yield measurements
    wall_after = util.timer()
    os_after = os.times()
    measurements.append(
        (
            wall_after - wall_before,
            os_after[0] - os_before[0],
            os_after[1] - os_before[1],
        )
    )
525 525
526 526
527 527 # list of stop condition (elapsed time, minimal run count)
528 528 DEFAULTLIMITS = (
529 529 (3.0, 100),
530 530 (10.0, 3),
531 531 )
532 532
533 533
def _timer(
    fm,
    func,
    setup=None,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
):
    """Call *func* until a (elapsed, min-count) limit is met, then report.

    *setup* (if any) runs before every call, including the *prerun*
    untimed warm-up calls.  Only the first measured iteration runs under
    *profiler*; afterwards the profiler is swapped for the no-op context.
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    for i in range(prerun):
        # warm-up iterations: executed but never timed or recorded
        if setup is not None:
            setup()
        func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with profiler:
            with timeone() as item:
                r = func()
        # profile only the first iteration
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    formatone(fm, results, title=title, result=r, displayall=displayall)
573 573
574 574
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Emit timing statistics for one benchmark through formatter *fm*.

    Always reports the best run; with *displayall*, also max, avg and
    median.  *timings* is a list of (wall, user, sys) tuples.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def _show(role, entry):
        # only non-"best" rows carry a "<role>." prefix on field names
        prefix = b'' if role == b'best' else b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    _show(b'best', timings[0])
    if displayall:
        _show(b'max', timings[-1])
        _show(b'avg', tuple(sum(col) / count for col in zip(*timings)))
        _show(b'median', timings[len(timings) // 2])
608 608
609 609
610 610 # utilities for historical portability
611 611
612 612
def getint(ui, section, name, default):
    """Read int config *section.name*; *default* when unset.

    Local replacement because ui.configint only exists since 1.9
    (or fa2b596db182).  Raises ConfigError on a non-integer value.
    """
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError(
            b"%s.%s is not an integer ('%s')" % (section, name, raw)
        )
625 625
626 626
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort(
            (
                b"missing attribute %s of %s might break assumption"
                b" of performance measurement"
            )
            % (name, obj)
        )

    # capture the current value so restore() can put it back later
    origvalue = getattr(obj, _sysstr(name))

    class attrutil:
        # small handle exposing set()/restore() over the captured attribute
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
663 663
664 664
665 665 # utilities to examine each internal API changes
666 666
667 667
def getbranchmapsubsettable():
    """Return the subsettable mapping, wherever this hg version keeps it."""
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    # - repoviewutil since 5.0
    for mod in (branchmap, repoview, repoviewutil):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )
686 686
687 687
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store"""
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf);
    # fall back to the older sopener attribute otherwise
    return getattr(repo, 'svfs', None) or getattr(repo, 'sopener')
697 697
698 698
def getvfs(repo):
    """Return appropriate object to access files under .hg"""
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf);
    # fall back to the older opener attribute otherwise
    return getattr(repo, 'vfs', None) or getattr(repo, 'opener')
708 708
709 709
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API"""
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if '_tagscache' in vars(repo):
                del repo.__dict__['_tagscache']

        return clearcache

    # older APIs: plain attributes that can simply be reset to None
    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")
738 738
739 739
740 740 # utilities to clear cache
741 741
742 742
def clearfilecache(obj, attrname):
    """Drop *attrname* from *obj*'s filecache (unfiltered view for repos)."""
    if getattr(obj, 'unfiltered', None) is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
750 750
751 751
def clearchangelog(repo):
    """Drop cached changelog state so the next access reloads it."""
    # on a filtered repo, reset the _clcache/_clcachekey slots; use
    # object.__setattr__ to bypass any attribute interception on repo
    if repo is not repo.unfiltered():
        object.__setattr__(repo, '_clcachekey', None)
        object.__setattr__(repo, '_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')
757 757
758 758
759 759 # perf commands
760 760
761 761
@command(b'perf::walk|perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    """benchmark walking the dirstate for the given file patterns"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    # materialize the walk and count entries so the whole walk is measured
    timer(
        lambda: len(
            list(
                repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
            )
        )
    )
    fm.end()
775 775
776 776
@command(b'perf::annotate|perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    """benchmark annotating file *f* at the working directory parent"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    timer(lambda: len(fc.annotate(True)))
    fm.end()
784 784
785 785
@command(
    b'perf::status|perfstatus',
    [
        (b'u', b'unknown', False, b'ask status to look for unknown files'),
        (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
    ]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call

    The repository data are preserved between each call.

    By default, only the status of the tracked file are requested. If
    `--unknown` is passed, the "unknown" files are also tracked.
    """
    opts = _byteskwargs(opts)
    # m = match.always(repo.root, repo.getcwd())
    # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    #                                                False))))
    timer, fm = gettimer(ui, opts)
    if opts[b'dirstate']:
        # benchmark the low-level dirstate.status call directly
        dirstate = repo.dirstate
        m = scmutil.matchall(repo)
        unknown = opts[b'unknown']

        def status_dirstate():
            s = dirstate.status(
                m, subrepos=[], ignored=False, clean=False, unknown=unknown
            )
            # consume the result so the work isn't optimized away lazily
            sum(map(bool, s))

        timer(status_dirstate)
    else:
        timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
822 822
823 823
@command(b'perf::addremove|perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    """benchmark a dry-run addremove over the working copy"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # read the quiet flag BEFORE entering the try block: the original read
    # it as the first statement inside `try`, so a failure there would have
    # raised NameError on `oldquiet` in the finally clause
    oldquiet = repo.ui.quiet
    try:
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        if 'uipathfn' in getargspec(scmutil.addremove).args:
            # newer API (addremove takes a uipathfn argument)
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
841 841
842 842
def clearcaches(cl):
    """Clear a changelog/revlog's internal caches, across hg versions."""
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # <= hg-5.2
        from mercurial.node import nullid, nullrev

        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
853 853
854 854
@command(b'perf::heads|perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    # setup: drop changelog caches so each run recomputes from scratch
    def s():
        clearcaches(cl)

    def d():
        len(cl.headrevs())

    timer(d, setup=s)
    fm.end()
870 870
871 871
@command(
    b'perf::tags|perftags',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perftags(ui, repo, **opts):
    """benchmark the computation of repository tags"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']

    # setup: optionally reload revlogs; always drop the tags cache
    def s():
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()

    def t():
        return len(repo.tags())

    timer(t, setup=s)
    fm.end()
896 896
897 897
@command(b'perf::ancestors|perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    """benchmark walking all ancestors of the changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()

    def d():
        # fully consume the ancestor iterator
        for a in repo.changelog.ancestors(heads):
            pass

    timer(d)
    fm.end()
910 910
911 911
@command(b'perf::ancestorset|perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    """benchmark membership tests of REVSET revs in the heads' ancestors"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()

    def d():
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            # membership test only; the result is deliberately discarded
            rev in s

    timer(d)
    fm.end()
926 926
927 927
@command(
    b'perf::delta-find',
    revlogopts + formatteropts,
    b'-c|-m|FILE REV',
)
def perf_delta_find(ui, repo, arg_1, arg_2=None, **opts):
    """benchmark the process of finding a valid delta for a revlog revision

    When a revlog receives a new revision (e.g. from a commit, or from an
    incoming bundle), it searches for a suitable delta-base to produce a delta.
    This perf command measures how much time we spend in this process. It
    operates on an already stored revision.

    See `hg help debug-delta-find` for another related command.
    """
    from mercurial import revlogutils
    import mercurial.revlogutils.deltas as deltautil

    opts = _byteskwargs(opts)
    # one positional argument means REV only; two mean FILE REV
    if arg_2 is None:
        file_ = None
        rev = arg_1
    else:
        file_ = arg_1
        rev = arg_2

    repo = repo.unfiltered()

    timer, fm = gettimer(ui, opts)

    rev = int(rev)

    # NOTE: this local deliberately shadows the module-level revlog() helper
    revlog = cmdutil.openrevlog(repo, b'perf::delta-find', file_, opts)

    deltacomputer = deltautil.deltacomputer(revlog)

    # gather everything finddeltainfo needs about the stored revision
    node = revlog.node(rev)
    p1r, p2r = revlog.parentrevs(rev)
    p1 = revlog.node(p1r)
    p2 = revlog.node(p2r)
    full_text = revlog.revision(rev)
    textlen = len(full_text)
    cachedelta = None
    flags = revlog.flags(rev)

    revinfo = revlogutils.revisioninfo(
        node,
        p1,
        p2,
        [full_text],  # btext
        textlen,
        cachedelta,
        flags,
    )

    # Note: we should probably purge the potential caches (like the full
    # manifest cache) between runs.
    def find_one():
        with revlog._datafp() as fh:
            deltacomputer.finddeltainfo(revinfo, fh, target_rev=rev)

    timer(find_one)
    fm.end()
991 991
992 992
@command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path"""
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)

    try:
        from mercurial.utils.urlutil import get_unique_pull_path

        path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
    except ImportError:
        # older hg without urlutil: fall back to ui.expandpath
        path = ui.expandpath(path)

    # setup: (re)open the peer before each measured run
    def s():
        repos[1] = hg.peer(ui, opts, path)

    def d():
        setdiscovery.findcommonheads(ui, *repos)

    timer(d, setup=s)
    fm.end()
1014 1014
1015 1015
@command(
    b'perf::bookmarks|perfbookmarks',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']

    # setup: drop the cached bookmark store (and optionally revlogs)
    def s():
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')

    def d():
        # property access triggers the bookmark parsing being measured
        repo._bookmarks

    timer(d, setup=s)
    fm.end()
1040 1040
1041 1041
1042 1042 @command(
1043 1043 b'perf::bundle',
1044 1044 [
1045 1045 (
1046 1046 b'r',
1047 1047 b'rev',
1048 1048 [],
1049 1049 b'changesets to bundle',
1050 1050 b'REV',
1051 1051 ),
1052 1052 (
1053 1053 b't',
1054 1054 b'type',
1055 1055 b'none',
1056 1056 b'bundlespec to use (see `hg help bundlespec`)',
1057 1057 b'TYPE',
1058 1058 ),
1059 1059 ]
1060 1060 + formatteropts,
1061 1061 b'REVS',
1062 1062 )
1063 1063 def perfbundle(ui, repo, *revs, **opts):
1064 1064 """benchmark the creation of a bundle from a repository
1065 1065
1066 1066 For now, this only supports "none" compression.
1067 1067 """
1068 1068 from mercurial import bundlecaches
1069 1069 from mercurial import discovery
1070 1070 from mercurial import bundle2
1071 1071
1072 1072 opts = _byteskwargs(opts)
1073 1073 timer, fm = gettimer(ui, opts)
1074 1074
1075 1075 cl = repo.changelog
1076 1076 revs = list(revs)
1077 1077 revs.extend(opts.get(b'rev', ()))
1078 1078 revs = scmutil.revrange(repo, revs)
1079 1079 if not revs:
1080 1080 raise error.Abort(b"not revision specified")
1081 1081 # make it a consistent set (ie: without topological gaps)
1082 1082 old_len = len(revs)
1083 1083 revs = list(repo.revs(b"%ld::%ld", revs, revs))
1084 1084 if old_len != len(revs):
1085 1085 new_count = len(revs) - old_len
1086 1086 msg = b"add %d new revisions to make it a consistent set\n"
1087 1087 ui.write_err(msg % new_count)
1088 1088
1089 1089 targets = [cl.node(r) for r in repo.revs(b"heads(::%ld)", revs)]
1090 1090 bases = [cl.node(r) for r in repo.revs(b"heads(::%ld - %ld)", revs, revs)]
1091 1091 outgoing = discovery.outgoing(repo, bases, targets)
1092 1092
1093 1093 bundle_spec = opts.get(b'type')
1094 1094
1095 bundle_spec = bundlecaches.parsebundlespec(repo, bundle_spec, strict=False)
1096
1097 cgversion = bundle_spec.params[b"cg.version"]
1095 bundle_spec = parsebundlespec(repo, bundle_spec, strict=False)
1096
1097 cgversion = bundle_spec.params.get(b"cg.version")
1098 if cgversion is None:
1099 if bundle_spec.version == b'v1':
1100 cgversion = b'01'
1101 if bundle_spec.version == b'v2':
1102 cgversion = b'02'
1098 1103 if cgversion not in changegroup.supportedoutgoingversions(repo):
1099 1104 err = b"repository does not support bundle version %s"
1100 1105 raise error.Abort(err % cgversion)
1101 1106
1102 1107 if cgversion == b'01': # bundle1
1103 1108 bversion = b'HG10' + bundle_spec.wirecompression
1104 1109 bcompression = None
1105 1110 elif cgversion in (b'02', b'03'):
1106 1111 bversion = b'HG20'
1107 1112 bcompression = bundle_spec.wirecompression
1108 1113 else:
1109 1114 err = b'perf::bundle: unexpected changegroup version %s'
1110 1115 raise error.ProgrammingError(err % cgversion)
1111 1116
1112 1117 if bcompression is None:
1113 1118 bcompression = b'UN'
1114 1119
1115 1120 if bcompression != b'UN':
1116 1121 err = b'perf::bundle: compression currently unsupported: %s'
1117 1122 raise error.ProgrammingError(err % bcompression)
1118 1123
1119 1124 def do_bundle():
1120 1125 bundle2.writenewbundle(
1121 1126 ui,
1122 1127 repo,
1123 1128 b'perf::bundle',
1124 1129 os.devnull,
1125 1130 bversion,
1126 1131 outgoing,
1127 1132 bundle_spec.params,
1128 1133 )
1129 1134
1130 1135 timer(do_bundle)
1131 1136 fm.end()
1132 1137
1133 1138
1134 1139 @command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
1135 1140 def perfbundleread(ui, repo, bundlepath, **opts):
1136 1141 """Benchmark reading of bundle files.
1137 1142
1138 1143 This command is meant to isolate the I/O part of bundle reading as
1139 1144 much as possible.
1140 1145 """
1141 1146 from mercurial import (
1142 1147 bundle2,
1143 1148 exchange,
1144 1149 streamclone,
1145 1150 )
1146 1151
1147 1152 opts = _byteskwargs(opts)
1148 1153
1149 1154 def makebench(fn):
1150 1155 def run():
1151 1156 with open(bundlepath, b'rb') as fh:
1152 1157 bundle = exchange.readbundle(ui, fh, bundlepath)
1153 1158 fn(bundle)
1154 1159
1155 1160 return run
1156 1161
1157 1162 def makereadnbytes(size):
1158 1163 def run():
1159 1164 with open(bundlepath, b'rb') as fh:
1160 1165 bundle = exchange.readbundle(ui, fh, bundlepath)
1161 1166 while bundle.read(size):
1162 1167 pass
1163 1168
1164 1169 return run
1165 1170
1166 1171 def makestdioread(size):
1167 1172 def run():
1168 1173 with open(bundlepath, b'rb') as fh:
1169 1174 while fh.read(size):
1170 1175 pass
1171 1176
1172 1177 return run
1173 1178
1174 1179 # bundle1
1175 1180
1176 1181 def deltaiter(bundle):
1177 1182 for delta in bundle.deltaiter():
1178 1183 pass
1179 1184
1180 1185 def iterchunks(bundle):
1181 1186 for chunk in bundle.getchunks():
1182 1187 pass
1183 1188
1184 1189 # bundle2
1185 1190
1186 1191 def forwardchunks(bundle):
1187 1192 for chunk in bundle._forwardchunks():
1188 1193 pass
1189 1194
1190 1195 def iterparts(bundle):
1191 1196 for part in bundle.iterparts():
1192 1197 pass
1193 1198
1194 1199 def iterpartsseekable(bundle):
1195 1200 for part in bundle.iterparts(seekable=True):
1196 1201 pass
1197 1202
1198 1203 def seek(bundle):
1199 1204 for part in bundle.iterparts(seekable=True):
1200 1205 part.seek(0, os.SEEK_END)
1201 1206
1202 1207 def makepartreadnbytes(size):
1203 1208 def run():
1204 1209 with open(bundlepath, b'rb') as fh:
1205 1210 bundle = exchange.readbundle(ui, fh, bundlepath)
1206 1211 for part in bundle.iterparts():
1207 1212 while part.read(size):
1208 1213 pass
1209 1214
1210 1215 return run
1211 1216
1212 1217 benches = [
1213 1218 (makestdioread(8192), b'read(8k)'),
1214 1219 (makestdioread(16384), b'read(16k)'),
1215 1220 (makestdioread(32768), b'read(32k)'),
1216 1221 (makestdioread(131072), b'read(128k)'),
1217 1222 ]
1218 1223
1219 1224 with open(bundlepath, b'rb') as fh:
1220 1225 bundle = exchange.readbundle(ui, fh, bundlepath)
1221 1226
1222 1227 if isinstance(bundle, changegroup.cg1unpacker):
1223 1228 benches.extend(
1224 1229 [
1225 1230 (makebench(deltaiter), b'cg1 deltaiter()'),
1226 1231 (makebench(iterchunks), b'cg1 getchunks()'),
1227 1232 (makereadnbytes(8192), b'cg1 read(8k)'),
1228 1233 (makereadnbytes(16384), b'cg1 read(16k)'),
1229 1234 (makereadnbytes(32768), b'cg1 read(32k)'),
1230 1235 (makereadnbytes(131072), b'cg1 read(128k)'),
1231 1236 ]
1232 1237 )
1233 1238 elif isinstance(bundle, bundle2.unbundle20):
1234 1239 benches.extend(
1235 1240 [
1236 1241 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
1237 1242 (makebench(iterparts), b'bundle2 iterparts()'),
1238 1243 (
1239 1244 makebench(iterpartsseekable),
1240 1245 b'bundle2 iterparts() seekable',
1241 1246 ),
1242 1247 (makebench(seek), b'bundle2 part seek()'),
1243 1248 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
1244 1249 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
1245 1250 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
1246 1251 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
1247 1252 ]
1248 1253 )
1249 1254 elif isinstance(bundle, streamclone.streamcloneapplier):
1250 1255 raise error.Abort(b'stream clone bundles not supported')
1251 1256 else:
1252 1257 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
1253 1258
1254 1259 for fn, title in benches:
1255 1260 timer, fm = gettimer(ui, opts)
1256 1261 timer(fn, title=title)
1257 1262 fm.end()
1258 1263
1259 1264
1260 1265 @command(
1261 1266 b'perf::changegroupchangelog|perfchangegroupchangelog',
1262 1267 formatteropts
1263 1268 + [
1264 1269 (b'', b'cgversion', b'02', b'changegroup version'),
1265 1270 (b'r', b'rev', b'', b'revisions to add to changegroup'),
1266 1271 ],
1267 1272 )
1268 1273 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
1269 1274 """Benchmark producing a changelog group for a changegroup.
1270 1275
1271 1276 This measures the time spent processing the changelog during a
1272 1277 bundle operation. This occurs during `hg bundle` and on a server
1273 1278 processing a `getbundle` wire protocol request (handles clones
1274 1279 and pull requests).
1275 1280
1276 1281 By default, all revisions are added to the changegroup.
1277 1282 """
1278 1283 opts = _byteskwargs(opts)
1279 1284 cl = repo.changelog
1280 1285 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
1281 1286 bundler = changegroup.getbundler(cgversion, repo)
1282 1287
1283 1288 def d():
1284 1289 state, chunks = bundler._generatechangelog(cl, nodes)
1285 1290 for chunk in chunks:
1286 1291 pass
1287 1292
1288 1293 timer, fm = gettimer(ui, opts)
1289 1294
1290 1295 # Terminal printing can interfere with timing. So disable it.
1291 1296 with ui.configoverride({(b'progress', b'disable'): True}):
1292 1297 timer(d)
1293 1298
1294 1299 fm.end()
1295 1300
1296 1301
1297 1302 @command(b'perf::dirs|perfdirs', formatteropts)
1298 1303 def perfdirs(ui, repo, **opts):
1299 1304 opts = _byteskwargs(opts)
1300 1305 timer, fm = gettimer(ui, opts)
1301 1306 dirstate = repo.dirstate
1302 1307 b'a' in dirstate
1303 1308
1304 1309 def d():
1305 1310 dirstate.hasdir(b'a')
1306 1311 try:
1307 1312 del dirstate._map._dirs
1308 1313 except AttributeError:
1309 1314 pass
1310 1315
1311 1316 timer(d)
1312 1317 fm.end()
1313 1318
1314 1319
1315 1320 @command(
1316 1321 b'perf::dirstate|perfdirstate',
1317 1322 [
1318 1323 (
1319 1324 b'',
1320 1325 b'iteration',
1321 1326 None,
1322 1327 b'benchmark a full iteration for the dirstate',
1323 1328 ),
1324 1329 (
1325 1330 b'',
1326 1331 b'contains',
1327 1332 None,
1328 1333 b'benchmark a large amount of `nf in dirstate` calls',
1329 1334 ),
1330 1335 ]
1331 1336 + formatteropts,
1332 1337 )
1333 1338 def perfdirstate(ui, repo, **opts):
1334 1339 """benchmap the time of various distate operations
1335 1340
1336 1341 By default benchmark the time necessary to load a dirstate from scratch.
1337 1342 The dirstate is loaded to the point were a "contains" request can be
1338 1343 answered.
1339 1344 """
1340 1345 opts = _byteskwargs(opts)
1341 1346 timer, fm = gettimer(ui, opts)
1342 1347 b"a" in repo.dirstate
1343 1348
1344 1349 if opts[b'iteration'] and opts[b'contains']:
1345 1350 msg = b'only specify one of --iteration or --contains'
1346 1351 raise error.Abort(msg)
1347 1352
1348 1353 if opts[b'iteration']:
1349 1354 setup = None
1350 1355 dirstate = repo.dirstate
1351 1356
1352 1357 def d():
1353 1358 for f in dirstate:
1354 1359 pass
1355 1360
1356 1361 elif opts[b'contains']:
1357 1362 setup = None
1358 1363 dirstate = repo.dirstate
1359 1364 allfiles = list(dirstate)
1360 1365 # also add file path that will be "missing" from the dirstate
1361 1366 allfiles.extend([f[::-1] for f in allfiles])
1362 1367
1363 1368 def d():
1364 1369 for f in allfiles:
1365 1370 f in dirstate
1366 1371
1367 1372 else:
1368 1373
1369 1374 def setup():
1370 1375 repo.dirstate.invalidate()
1371 1376
1372 1377 def d():
1373 1378 b"a" in repo.dirstate
1374 1379
1375 1380 timer(d, setup=setup)
1376 1381 fm.end()
1377 1382
1378 1383
1379 1384 @command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
1380 1385 def perfdirstatedirs(ui, repo, **opts):
1381 1386 """benchmap a 'dirstate.hasdir' call from an empty `dirs` cache"""
1382 1387 opts = _byteskwargs(opts)
1383 1388 timer, fm = gettimer(ui, opts)
1384 1389 repo.dirstate.hasdir(b"a")
1385 1390
1386 1391 def setup():
1387 1392 try:
1388 1393 del repo.dirstate._map._dirs
1389 1394 except AttributeError:
1390 1395 pass
1391 1396
1392 1397 def d():
1393 1398 repo.dirstate.hasdir(b"a")
1394 1399
1395 1400 timer(d, setup=setup)
1396 1401 fm.end()
1397 1402
1398 1403
1399 1404 @command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
1400 1405 def perfdirstatefoldmap(ui, repo, **opts):
1401 1406 """benchmap a `dirstate._map.filefoldmap.get()` request
1402 1407
1403 1408 The dirstate filefoldmap cache is dropped between every request.
1404 1409 """
1405 1410 opts = _byteskwargs(opts)
1406 1411 timer, fm = gettimer(ui, opts)
1407 1412 dirstate = repo.dirstate
1408 1413 dirstate._map.filefoldmap.get(b'a')
1409 1414
1410 1415 def setup():
1411 1416 del dirstate._map.filefoldmap
1412 1417
1413 1418 def d():
1414 1419 dirstate._map.filefoldmap.get(b'a')
1415 1420
1416 1421 timer(d, setup=setup)
1417 1422 fm.end()
1418 1423
1419 1424
1420 1425 @command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
1421 1426 def perfdirfoldmap(ui, repo, **opts):
1422 1427 """benchmap a `dirstate._map.dirfoldmap.get()` request
1423 1428
1424 1429 The dirstate dirfoldmap cache is dropped between every request.
1425 1430 """
1426 1431 opts = _byteskwargs(opts)
1427 1432 timer, fm = gettimer(ui, opts)
1428 1433 dirstate = repo.dirstate
1429 1434 dirstate._map.dirfoldmap.get(b'a')
1430 1435
1431 1436 def setup():
1432 1437 del dirstate._map.dirfoldmap
1433 1438 try:
1434 1439 del dirstate._map._dirs
1435 1440 except AttributeError:
1436 1441 pass
1437 1442
1438 1443 def d():
1439 1444 dirstate._map.dirfoldmap.get(b'a')
1440 1445
1441 1446 timer(d, setup=setup)
1442 1447 fm.end()
1443 1448
1444 1449
1445 1450 @command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
1446 1451 def perfdirstatewrite(ui, repo, **opts):
1447 1452 """benchmap the time it take to write a dirstate on disk"""
1448 1453 opts = _byteskwargs(opts)
1449 1454 timer, fm = gettimer(ui, opts)
1450 1455 ds = repo.dirstate
1451 1456 b"a" in ds
1452 1457
1453 1458 def setup():
1454 1459 ds._dirty = True
1455 1460
1456 1461 def d():
1457 1462 ds.write(repo.currenttransaction())
1458 1463
1459 1464 timer(d, setup=setup)
1460 1465 fm.end()
1461 1466
1462 1467
1463 1468 def _getmergerevs(repo, opts):
1464 1469 """parse command argument to return rev involved in merge
1465 1470
1466 1471 input: options dictionnary with `rev`, `from` and `bse`
1467 1472 output: (localctx, otherctx, basectx)
1468 1473 """
1469 1474 if opts[b'from']:
1470 1475 fromrev = scmutil.revsingle(repo, opts[b'from'])
1471 1476 wctx = repo[fromrev]
1472 1477 else:
1473 1478 wctx = repo[None]
1474 1479 # we don't want working dir files to be stat'd in the benchmark, so
1475 1480 # prime that cache
1476 1481 wctx.dirty()
1477 1482 rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
1478 1483 if opts[b'base']:
1479 1484 fromrev = scmutil.revsingle(repo, opts[b'base'])
1480 1485 ancestor = repo[fromrev]
1481 1486 else:
1482 1487 ancestor = wctx.ancestor(rctx)
1483 1488 return (wctx, rctx, ancestor)
1484 1489
1485 1490
1486 1491 @command(
1487 1492 b'perf::mergecalculate|perfmergecalculate',
1488 1493 [
1489 1494 (b'r', b'rev', b'.', b'rev to merge against'),
1490 1495 (b'', b'from', b'', b'rev to merge from'),
1491 1496 (b'', b'base', b'', b'the revision to use as base'),
1492 1497 ]
1493 1498 + formatteropts,
1494 1499 )
1495 1500 def perfmergecalculate(ui, repo, **opts):
1496 1501 opts = _byteskwargs(opts)
1497 1502 timer, fm = gettimer(ui, opts)
1498 1503
1499 1504 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1500 1505
1501 1506 def d():
1502 1507 # acceptremote is True because we don't want prompts in the middle of
1503 1508 # our benchmark
1504 1509 merge.calculateupdates(
1505 1510 repo,
1506 1511 wctx,
1507 1512 rctx,
1508 1513 [ancestor],
1509 1514 branchmerge=False,
1510 1515 force=False,
1511 1516 acceptremote=True,
1512 1517 followcopies=True,
1513 1518 )
1514 1519
1515 1520 timer(d)
1516 1521 fm.end()
1517 1522
1518 1523
1519 1524 @command(
1520 1525 b'perf::mergecopies|perfmergecopies',
1521 1526 [
1522 1527 (b'r', b'rev', b'.', b'rev to merge against'),
1523 1528 (b'', b'from', b'', b'rev to merge from'),
1524 1529 (b'', b'base', b'', b'the revision to use as base'),
1525 1530 ]
1526 1531 + formatteropts,
1527 1532 )
1528 1533 def perfmergecopies(ui, repo, **opts):
1529 1534 """measure runtime of `copies.mergecopies`"""
1530 1535 opts = _byteskwargs(opts)
1531 1536 timer, fm = gettimer(ui, opts)
1532 1537 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1533 1538
1534 1539 def d():
1535 1540 # acceptremote is True because we don't want prompts in the middle of
1536 1541 # our benchmark
1537 1542 copies.mergecopies(repo, wctx, rctx, ancestor)
1538 1543
1539 1544 timer(d)
1540 1545 fm.end()
1541 1546
1542 1547
1543 1548 @command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
1544 1549 def perfpathcopies(ui, repo, rev1, rev2, **opts):
1545 1550 """benchmark the copy tracing logic"""
1546 1551 opts = _byteskwargs(opts)
1547 1552 timer, fm = gettimer(ui, opts)
1548 1553 ctx1 = scmutil.revsingle(repo, rev1, rev1)
1549 1554 ctx2 = scmutil.revsingle(repo, rev2, rev2)
1550 1555
1551 1556 def d():
1552 1557 copies.pathcopies(ctx1, ctx2)
1553 1558
1554 1559 timer(d)
1555 1560 fm.end()
1556 1561
1557 1562
1558 1563 @command(
1559 1564 b'perf::phases|perfphases',
1560 1565 [
1561 1566 (b'', b'full', False, b'include file reading time too'),
1562 1567 ],
1563 1568 b"",
1564 1569 )
1565 1570 def perfphases(ui, repo, **opts):
1566 1571 """benchmark phasesets computation"""
1567 1572 opts = _byteskwargs(opts)
1568 1573 timer, fm = gettimer(ui, opts)
1569 1574 _phases = repo._phasecache
1570 1575 full = opts.get(b'full')
1571 1576
1572 1577 def d():
1573 1578 phases = _phases
1574 1579 if full:
1575 1580 clearfilecache(repo, b'_phasecache')
1576 1581 phases = repo._phasecache
1577 1582 phases.invalidate()
1578 1583 phases.loadphaserevs(repo)
1579 1584
1580 1585 timer(d)
1581 1586 fm.end()
1582 1587
1583 1588
1584 1589 @command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
1585 1590 def perfphasesremote(ui, repo, dest=None, **opts):
1586 1591 """benchmark time needed to analyse phases of the remote server"""
1587 1592 from mercurial.node import bin
1588 1593 from mercurial import (
1589 1594 exchange,
1590 1595 hg,
1591 1596 phases,
1592 1597 )
1593 1598
1594 1599 opts = _byteskwargs(opts)
1595 1600 timer, fm = gettimer(ui, opts)
1596 1601
1597 1602 path = ui.getpath(dest, default=(b'default-push', b'default'))
1598 1603 if not path:
1599 1604 raise error.Abort(
1600 1605 b'default repository not configured!',
1601 1606 hint=b"see 'hg help config.paths'",
1602 1607 )
1603 1608 dest = path.pushloc or path.loc
1604 1609 ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
1605 1610 other = hg.peer(repo, opts, dest)
1606 1611
1607 1612 # easier to perform discovery through the operation
1608 1613 op = exchange.pushoperation(repo, other)
1609 1614 exchange._pushdiscoverychangeset(op)
1610 1615
1611 1616 remotesubset = op.fallbackheads
1612 1617
1613 1618 with other.commandexecutor() as e:
1614 1619 remotephases = e.callcommand(
1615 1620 b'listkeys', {b'namespace': b'phases'}
1616 1621 ).result()
1617 1622 del other
1618 1623 publishing = remotephases.get(b'publishing', False)
1619 1624 if publishing:
1620 1625 ui.statusnoi18n(b'publishing: yes\n')
1621 1626 else:
1622 1627 ui.statusnoi18n(b'publishing: no\n')
1623 1628
1624 1629 has_node = getattr(repo.changelog.index, 'has_node', None)
1625 1630 if has_node is None:
1626 1631 has_node = repo.changelog.nodemap.__contains__
1627 1632 nonpublishroots = 0
1628 1633 for nhex, phase in remotephases.iteritems():
1629 1634 if nhex == b'publishing': # ignore data related to publish option
1630 1635 continue
1631 1636 node = bin(nhex)
1632 1637 if has_node(node) and int(phase):
1633 1638 nonpublishroots += 1
1634 1639 ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
1635 1640 ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)
1636 1641
1637 1642 def d():
1638 1643 phases.remotephasessummary(repo, remotesubset, remotephases)
1639 1644
1640 1645 timer(d)
1641 1646 fm.end()
1642 1647
1643 1648
1644 1649 @command(
1645 1650 b'perf::manifest|perfmanifest',
1646 1651 [
1647 1652 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
1648 1653 (b'', b'clear-disk', False, b'clear on-disk caches too'),
1649 1654 ]
1650 1655 + formatteropts,
1651 1656 b'REV|NODE',
1652 1657 )
1653 1658 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
1654 1659 """benchmark the time to read a manifest from disk and return a usable
1655 1660 dict-like object
1656 1661
1657 1662 Manifest caches are cleared before retrieval."""
1658 1663 opts = _byteskwargs(opts)
1659 1664 timer, fm = gettimer(ui, opts)
1660 1665 if not manifest_rev:
1661 1666 ctx = scmutil.revsingle(repo, rev, rev)
1662 1667 t = ctx.manifestnode()
1663 1668 else:
1664 1669 from mercurial.node import bin
1665 1670
1666 1671 if len(rev) == 40:
1667 1672 t = bin(rev)
1668 1673 else:
1669 1674 try:
1670 1675 rev = int(rev)
1671 1676
1672 1677 if util.safehasattr(repo.manifestlog, b'getstorage'):
1673 1678 t = repo.manifestlog.getstorage(b'').node(rev)
1674 1679 else:
1675 1680 t = repo.manifestlog._revlog.lookup(rev)
1676 1681 except ValueError:
1677 1682 raise error.Abort(
1678 1683 b'manifest revision must be integer or full node'
1679 1684 )
1680 1685
1681 1686 def d():
1682 1687 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
1683 1688 repo.manifestlog[t].read()
1684 1689
1685 1690 timer(d)
1686 1691 fm.end()
1687 1692
1688 1693
1689 1694 @command(b'perf::changeset|perfchangeset', formatteropts)
1690 1695 def perfchangeset(ui, repo, rev, **opts):
1691 1696 opts = _byteskwargs(opts)
1692 1697 timer, fm = gettimer(ui, opts)
1693 1698 n = scmutil.revsingle(repo, rev).node()
1694 1699
1695 1700 def d():
1696 1701 repo.changelog.read(n)
1697 1702 # repo.changelog._cache = None
1698 1703
1699 1704 timer(d)
1700 1705 fm.end()
1701 1706
1702 1707
1703 1708 @command(b'perf::ignore|perfignore', formatteropts)
1704 1709 def perfignore(ui, repo, **opts):
1705 1710 """benchmark operation related to computing ignore"""
1706 1711 opts = _byteskwargs(opts)
1707 1712 timer, fm = gettimer(ui, opts)
1708 1713 dirstate = repo.dirstate
1709 1714
1710 1715 def setupone():
1711 1716 dirstate.invalidate()
1712 1717 clearfilecache(dirstate, b'_ignore')
1713 1718
1714 1719 def runone():
1715 1720 dirstate._ignore
1716 1721
1717 1722 timer(runone, setup=setupone, title=b"load")
1718 1723 fm.end()
1719 1724
1720 1725
1721 1726 @command(
1722 1727 b'perf::index|perfindex',
1723 1728 [
1724 1729 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1725 1730 (b'', b'no-lookup', None, b'do not revision lookup post creation'),
1726 1731 ]
1727 1732 + formatteropts,
1728 1733 )
1729 1734 def perfindex(ui, repo, **opts):
1730 1735 """benchmark index creation time followed by a lookup
1731 1736
1732 1737 The default is to look `tip` up. Depending on the index implementation,
1733 1738 the revision looked up can matters. For example, an implementation
1734 1739 scanning the index will have a faster lookup time for `--rev tip` than for
1735 1740 `--rev 0`. The number of looked up revisions and their order can also
1736 1741 matters.
1737 1742
1738 1743 Example of useful set to test:
1739 1744
1740 1745 * tip
1741 1746 * 0
1742 1747 * -10:
1743 1748 * :10
1744 1749 * -10: + :10
1745 1750 * :10: + -10:
1746 1751 * -10000:
1747 1752 * -10000: + 0
1748 1753
1749 1754 It is not currently possible to check for lookup of a missing node. For
1750 1755 deeper lookup benchmarking, checkout the `perfnodemap` command."""
1751 1756 import mercurial.revlog
1752 1757
1753 1758 opts = _byteskwargs(opts)
1754 1759 timer, fm = gettimer(ui, opts)
1755 1760 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1756 1761 if opts[b'no_lookup']:
1757 1762 if opts['rev']:
1758 1763 raise error.Abort('--no-lookup and --rev are mutually exclusive')
1759 1764 nodes = []
1760 1765 elif not opts[b'rev']:
1761 1766 nodes = [repo[b"tip"].node()]
1762 1767 else:
1763 1768 revs = scmutil.revrange(repo, opts[b'rev'])
1764 1769 cl = repo.changelog
1765 1770 nodes = [cl.node(r) for r in revs]
1766 1771
1767 1772 unfi = repo.unfiltered()
1768 1773 # find the filecache func directly
1769 1774 # This avoid polluting the benchmark with the filecache logic
1770 1775 makecl = unfi.__class__.changelog.func
1771 1776
1772 1777 def setup():
1773 1778 # probably not necessary, but for good measure
1774 1779 clearchangelog(unfi)
1775 1780
1776 1781 def d():
1777 1782 cl = makecl(unfi)
1778 1783 for n in nodes:
1779 1784 cl.rev(n)
1780 1785
1781 1786 timer(d, setup=setup)
1782 1787 fm.end()
1783 1788
1784 1789
1785 1790 @command(
1786 1791 b'perf::nodemap|perfnodemap',
1787 1792 [
1788 1793 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1789 1794 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1790 1795 ]
1791 1796 + formatteropts,
1792 1797 )
1793 1798 def perfnodemap(ui, repo, **opts):
1794 1799 """benchmark the time necessary to look up revision from a cold nodemap
1795 1800
1796 1801 Depending on the implementation, the amount and order of revision we look
1797 1802 up can varies. Example of useful set to test:
1798 1803 * tip
1799 1804 * 0
1800 1805 * -10:
1801 1806 * :10
1802 1807 * -10: + :10
1803 1808 * :10: + -10:
1804 1809 * -10000:
1805 1810 * -10000: + 0
1806 1811
1807 1812 The command currently focus on valid binary lookup. Benchmarking for
1808 1813 hexlookup, prefix lookup and missing lookup would also be valuable.
1809 1814 """
1810 1815 import mercurial.revlog
1811 1816
1812 1817 opts = _byteskwargs(opts)
1813 1818 timer, fm = gettimer(ui, opts)
1814 1819 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1815 1820
1816 1821 unfi = repo.unfiltered()
1817 1822 clearcaches = opts[b'clear_caches']
1818 1823 # find the filecache func directly
1819 1824 # This avoid polluting the benchmark with the filecache logic
1820 1825 makecl = unfi.__class__.changelog.func
1821 1826 if not opts[b'rev']:
1822 1827 raise error.Abort(b'use --rev to specify revisions to look up')
1823 1828 revs = scmutil.revrange(repo, opts[b'rev'])
1824 1829 cl = repo.changelog
1825 1830 nodes = [cl.node(r) for r in revs]
1826 1831
1827 1832 # use a list to pass reference to a nodemap from one closure to the next
1828 1833 nodeget = [None]
1829 1834
1830 1835 def setnodeget():
1831 1836 # probably not necessary, but for good measure
1832 1837 clearchangelog(unfi)
1833 1838 cl = makecl(unfi)
1834 1839 if util.safehasattr(cl.index, 'get_rev'):
1835 1840 nodeget[0] = cl.index.get_rev
1836 1841 else:
1837 1842 nodeget[0] = cl.nodemap.get
1838 1843
1839 1844 def d():
1840 1845 get = nodeget[0]
1841 1846 for n in nodes:
1842 1847 get(n)
1843 1848
1844 1849 setup = None
1845 1850 if clearcaches:
1846 1851
1847 1852 def setup():
1848 1853 setnodeget()
1849 1854
1850 1855 else:
1851 1856 setnodeget()
1852 1857 d() # prewarm the data structure
1853 1858 timer(d, setup=setup)
1854 1859 fm.end()
1855 1860
1856 1861
1857 1862 @command(b'perf::startup|perfstartup', formatteropts)
1858 1863 def perfstartup(ui, repo, **opts):
1859 1864 opts = _byteskwargs(opts)
1860 1865 timer, fm = gettimer(ui, opts)
1861 1866
1862 1867 def d():
1863 1868 if os.name != 'nt':
1864 1869 os.system(
1865 1870 b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
1866 1871 )
1867 1872 else:
1868 1873 os.environ['HGRCPATH'] = r' '
1869 1874 os.system("%s version -q > NUL" % sys.argv[0])
1870 1875
1871 1876 timer(d)
1872 1877 fm.end()
1873 1878
1874 1879
1875 1880 @command(b'perf::parents|perfparents', formatteropts)
1876 1881 def perfparents(ui, repo, **opts):
1877 1882 """benchmark the time necessary to fetch one changeset's parents.
1878 1883
1879 1884 The fetch is done using the `node identifier`, traversing all object layers
1880 1885 from the repository object. The first N revisions will be used for this
1881 1886 benchmark. N is controlled by the ``perf.parentscount`` config option
1882 1887 (default: 1000).
1883 1888 """
1884 1889 opts = _byteskwargs(opts)
1885 1890 timer, fm = gettimer(ui, opts)
1886 1891 # control the number of commits perfparents iterates over
1887 1892 # experimental config: perf.parentscount
1888 1893 count = getint(ui, b"perf", b"parentscount", 1000)
1889 1894 if len(repo.changelog) < count:
1890 1895 raise error.Abort(b"repo needs %d commits for this test" % count)
1891 1896 repo = repo.unfiltered()
1892 1897 nl = [repo.changelog.node(i) for i in _xrange(count)]
1893 1898
1894 1899 def d():
1895 1900 for n in nl:
1896 1901 repo.changelog.parents(n)
1897 1902
1898 1903 timer(d)
1899 1904 fm.end()
1900 1905
1901 1906
1902 1907 @command(b'perf::ctxfiles|perfctxfiles', formatteropts)
1903 1908 def perfctxfiles(ui, repo, x, **opts):
1904 1909 opts = _byteskwargs(opts)
1905 1910 x = int(x)
1906 1911 timer, fm = gettimer(ui, opts)
1907 1912
1908 1913 def d():
1909 1914 len(repo[x].files())
1910 1915
1911 1916 timer(d)
1912 1917 fm.end()
1913 1918
1914 1919
1915 1920 @command(b'perf::rawfiles|perfrawfiles', formatteropts)
1916 1921 def perfrawfiles(ui, repo, x, **opts):
1917 1922 opts = _byteskwargs(opts)
1918 1923 x = int(x)
1919 1924 timer, fm = gettimer(ui, opts)
1920 1925 cl = repo.changelog
1921 1926
1922 1927 def d():
1923 1928 len(cl.read(x)[3])
1924 1929
1925 1930 timer(d)
1926 1931 fm.end()
1927 1932
1928 1933
1929 1934 @command(b'perf::lookup|perflookup', formatteropts)
1930 1935 def perflookup(ui, repo, rev, **opts):
1931 1936 opts = _byteskwargs(opts)
1932 1937 timer, fm = gettimer(ui, opts)
1933 1938 timer(lambda: len(repo.lookup(rev)))
1934 1939 fm.end()
1935 1940
1936 1941
1937 1942 @command(
1938 1943 b'perf::linelogedits|perflinelogedits',
1939 1944 [
1940 1945 (b'n', b'edits', 10000, b'number of edits'),
1941 1946 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
1942 1947 ],
1943 1948 norepo=True,
1944 1949 )
1945 1950 def perflinelogedits(ui, **opts):
1946 1951 from mercurial import linelog
1947 1952
1948 1953 opts = _byteskwargs(opts)
1949 1954
1950 1955 edits = opts[b'edits']
1951 1956 maxhunklines = opts[b'max_hunk_lines']
1952 1957
1953 1958 maxb1 = 100000
1954 1959 random.seed(0)
1955 1960 randint = random.randint
1956 1961 currentlines = 0
1957 1962 arglist = []
1958 1963 for rev in _xrange(edits):
1959 1964 a1 = randint(0, currentlines)
1960 1965 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
1961 1966 b1 = randint(0, maxb1)
1962 1967 b2 = randint(b1, b1 + maxhunklines)
1963 1968 currentlines += (b2 - b1) - (a2 - a1)
1964 1969 arglist.append((rev, a1, a2, b1, b2))
1965 1970
1966 1971 def d():
1967 1972 ll = linelog.linelog()
1968 1973 for args in arglist:
1969 1974 ll.replacelines(*args)
1970 1975
1971 1976 timer, fm = gettimer(ui, opts)
1972 1977 timer(d)
1973 1978 fm.end()
1974 1979
1975 1980
1976 1981 @command(b'perf::revrange|perfrevrange', formatteropts)
1977 1982 def perfrevrange(ui, repo, *specs, **opts):
1978 1983 opts = _byteskwargs(opts)
1979 1984 timer, fm = gettimer(ui, opts)
1980 1985 revrange = scmutil.revrange
1981 1986 timer(lambda: len(revrange(repo, specs)))
1982 1987 fm.end()
1983 1988
1984 1989
1985 1990 @command(b'perf::nodelookup|perfnodelookup', formatteropts)
1986 1991 def perfnodelookup(ui, repo, rev, **opts):
1987 1992 opts = _byteskwargs(opts)
1988 1993 timer, fm = gettimer(ui, opts)
1989 1994 import mercurial.revlog
1990 1995
1991 1996 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1992 1997 n = scmutil.revsingle(repo, rev).node()
1993 1998
1994 1999 try:
1995 2000 cl = revlog(getsvfs(repo), radix=b"00changelog")
1996 2001 except TypeError:
1997 2002 cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")
1998 2003
1999 2004 def d():
2000 2005 cl.rev(n)
2001 2006 clearcaches(cl)
2002 2007
2003 2008 timer(d)
2004 2009 fm.end()
2005 2010
2006 2011
2007 2012 @command(
2008 2013 b'perf::log|perflog',
2009 2014 [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
2010 2015 )
2011 2016 def perflog(ui, repo, rev=None, **opts):
2012 2017 opts = _byteskwargs(opts)
2013 2018 if rev is None:
2014 2019 rev = []
2015 2020 timer, fm = gettimer(ui, opts)
2016 2021 ui.pushbuffer()
2017 2022 timer(
2018 2023 lambda: commands.log(
2019 2024 ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
2020 2025 )
2021 2026 )
2022 2027 ui.popbuffer()
2023 2028 fm.end()
2024 2029
2025 2030
@command(b'perf::moonwalk|perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def moonwalk():
        last = len(repo) - 1
        for rev in repo.changelog.revs(start=last, stop=-1):
            # read changelog data (in addition to the index)
            repo[rev].branch()

    timer(moonwalk)
    fm.end()
2042 2047
2043 2048
@command(
    b'perf::templating|perftemplating',
    [
        (b'r', b'rev', [], b'revisions to run the template on'),
    ]
    + formatteropts,
)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort(
            b"perftemplating not available with this Mercurial",
            hint=b"use 4.3 or later",
        )

    opts = _byteskwargs(opts)

    # render into a throw-away ui writing to /dev/null so I/O and pager
    # costs are excluded from the measurement
    nullui = ui.copy()
    nullui.fout = open(os.devnull, 'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (
        b'{date|shortdate} [{rev}:{node|short}]'
        b' {author|person}: {desc|firstline}\n'
    )
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)

    def format():
        # render every requested revision once per timed run
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()
2086 2091
2087 2092
def _displaystats(ui, opts, entries, data):
    """display percentile statistics gathered by the perfhelper-* commands

    ``entries`` is a list of ``(key, title)`` pairs and ``data`` maps each
    ``key`` to a list of tuples whose first element is the measured value.
    For each key the sorted values are summarized (min, percentiles, max),
    both through the formatter and as plain text.
    """
    # use a second formatter because the data are quite different, not sure
    # how it flies with the templater.
    fm = ui.formatter(b'perf-stats', opts)
    for key, title in entries:
        values = data[key]
        # fixed: percentile indices must be based on the number of values
        # recorded for this key; the previous ``len(data)`` (number of keys)
        # collapsed every percentile onto the first few entries
        nbvalues = len(values)
        values.sort()
        stats = {
            'key': key,
            'title': title,
            'nbitems': len(values),
            'min': values[0][0],
            '10%': values[(nbvalues * 10) // 100][0],
            '25%': values[(nbvalues * 25) // 100][0],
            '50%': values[(nbvalues * 50) // 100][0],
            '75%': values[(nbvalues * 75) // 100][0],
            '80%': values[(nbvalues * 80) // 100][0],
            '85%': values[(nbvalues * 85) // 100][0],
            '90%': values[(nbvalues * 90) // 100][0],
            '95%': values[(nbvalues * 95) // 100][0],
            '99%': values[(nbvalues * 99) // 100][0],
            'max': values[-1][0],
        }
        fm.startitem()
        fm.data(**stats)
        # make node pretty for the human output
        fm.plain('### %s (%d items)\n' % (title, len(values)))
        lines = [
            'min',
            '10%',
            '25%',
            '50%',
            '75%',
            '80%',
            '85%',
            '90%',
            '95%',
            '99%',
            'max',
        ]
        for l in lines:
            fm.plain('%s: %s\n' % (l, stats[l]))
    fm.end()
2132 2137
2133 2138
@command(
    b'perf::helper-mergecopies|perfhelper-mergecopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelpermergecopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perfmergecopies`

    This command find (base, p1, p2) triplet relevant for copytracing
    benchmarking in the context of a merge. It reports values for some of the
    parameters that impact merge copy tracing time during merge.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision triplets are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # column layout for the plain-text table; the timing/rename columns are
    # stripped below when --timing is not requested
    output_template = [
        ("base", "%(base)12s"),
        ("p1", "%(p1.node)12s"),
        ("p2", "%(p2.node)12s"),
        ("p1.nb-revs", "%(p1.nbrevs)12d"),
        ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
        ("p1.renames", "%(p1.renamedfiles)12d"),
        ("p1.time", "%(p1.time)12.3f"),
        ("p2.nb-revs", "%(p2.nbrevs)12d"),
        ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
        ("p2.renames", "%(p2.renamedfiles)12d"),
        ("p2.time", "%(p2.time)12.3f"),
        ("renames", "%(nbrenamedfiles)12d"),
        ("total.time", "%(time)12.3f"),
    ]
    if not dotiming:
        output_template = [
            i
            for i in output_template
            if not ('time' in i[0] or 'renames' in i[0])
        ]
    header_names = [h for (h, v) in output_template]
    output = ' '.join([v for (h, v) in output_template]) + '\n'
    header = ' '.join(['%12s'] * len(header_names)) + '\n'
    fm.plain(header % tuple(header_names))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['parentnbrenames'] = []
            alldata['totalnbrenames'] = []
            alldata['parenttime'] = []
            alldata['totaltime'] = []

    # only merge commits are relevant to merge copy tracing
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1()
        p2 = ctx.p2()
        bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
        for b in bases:
            b = repo[b]
            p1missing = copies._computeforwardmissing(b, p1)
            p2missing = copies._computeforwardmissing(b, p2)
            data = {
                b'base': b.hex(),
                b'p1.node': p1.hex(),
                b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
                b'p1.nbmissingfiles': len(p1missing),
                b'p2.node': p2.hex(),
                b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
                b'p2.nbmissingfiles': len(p2missing),
            }
            if dostats:
                if p1missing:
                    alldata['nbrevs'].append(
                        (data['p1.nbrevs'], b.hex(), p1.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p1.nbmissingfiles'], b.hex(), p1.hex())
                    )
                if p2missing:
                    alldata['nbrevs'].append(
                        (data['p2.nbrevs'], b.hex(), p2.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p2.nbmissingfiles'], b.hex(), p2.hex())
                    )
            if dotiming:
                begin = util.timer()
                mergedata = copies.mergecopies(repo, p1, p2, b)
                end = util.timer()
                # not very stable timing since we did only one run
                data['time'] = end - begin
                # mergedata contains five dicts: "copy", "movewithdir",
                # "diverge", "renamedelete" and "dirmove".
                # The first 4 are about renamed file so lets count that.
                renames = len(mergedata[0])
                renames += len(mergedata[1])
                renames += len(mergedata[2])
                renames += len(mergedata[3])
                data['nbrenamedfiles'] = renames
                begin = util.timer()
                p1renames = copies.pathcopies(b, p1)
                end = util.timer()
                data['p1.time'] = end - begin
                begin = util.timer()
                p2renames = copies.pathcopies(b, p2)
                end = util.timer()
                data['p2.time'] = end - begin
                data['p1.renamedfiles'] = len(p1renames)
                data['p2.renamedfiles'] = len(p2renames)

                if dostats:
                    if p1missing:
                        alldata['parentnbrenames'].append(
                            (data['p1.renamedfiles'], b.hex(), p1.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p1.time'], b.hex(), p1.hex())
                        )
                    if p2missing:
                        alldata['parentnbrenames'].append(
                            (data['p2.renamedfiles'], b.hex(), p2.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p2.time'], b.hex(), p2.hex())
                        )
                    if p1missing or p2missing:
                        alldata['totalnbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                b.hex(),
                                p1.hex(),
                                p2.hex(),
                            )
                        )
                        alldata['totaltime'].append(
                            (data['time'], b.hex(), p1.hex(), p2.hex())
                        )
            fm.startitem()
            fm.data(**data)
            # make node pretty for the human output
            out = data.copy()
            out['base'] = fm.hexfunc(b.node())
            out['p1.node'] = fm.hexfunc(p1.node())
            out['p2.node'] = fm.hexfunc(p2.node())
            fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(
                ('parentnbrenames', 'rename from one parent to base')
            )
            entries.append(('totalnbrenames', 'total number of renames'))
            entries.append(('parenttime', 'time for one parent'))
            entries.append(('totaltime', 'time for both parents'))
        _displaystats(ui, opts, entries, alldata)
2315 2320
2316 2321
@command(
    b'perf::helper-pathcopies|perfhelper-pathcopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # the plain-text table gains rename/time columns only with --timing
    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d "
            "%(nbrenamedfiles)12d %(time)18.5f\n"
        )
        header_names = (
            "source",
            "destination",
            "nb-revs",
            "nb-files",
            "nb-renames",
            "time",
        )
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d\n"
        )
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['nbrenames'] = []
            alldata['time'] = []

    # only merge commits provide interesting (base, parent) pairs
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
                    b'nbmissingfiles': len(missing),
                }
                if dostats:
                    alldata['nbrevs'].append(
                        (
                            data['nbrevs'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                    alldata['nbmissingfiles'].append(
                        (
                            data['nbmissingfiles'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                    if dostats:
                        alldata['time'].append(
                            (
                                data['time'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                        alldata['nbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                fm.startitem()
                fm.data(**data)
                # make nodes pretty for the human output
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
    if dostats:
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('nbrenames', 'renamed files'))
            entries.append(('time', 'time'))
        _displaystats(ui, opts, entries, alldata)
2455 2460
2456 2461
@command(b'perf::cca|perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark constructing a case-collision auditor for the dirstate"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def d():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(d)
    fm.end()
2463 2468
2464 2469
@command(b'perf::fncacheload|perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache file from the store"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store

    def load():
        store.fncache._load()

    timer(load)
    fm.end()
2476 2481
2477 2482
@command(b'perf::fncachewrite|perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark writing the fncache file back to the store"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    lock = repo.lock()
    store.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    tr.addbackup(b'fncache')

    def write():
        # force the dirty flag so each run performs a real write
        store.fncache._dirty = True
        store.fncache.write(tr)

    timer(write)
    tr.close()
    lock.release()
    fm.end()
2496 2501
2497 2502
@command(b'perf::fncacheencode|perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark encoding every path currently stored in the fncache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    store.fncache._load()

    def encodeall():
        for path in store.fncache.entries:
            store.encode(path)

    timer(encodeall)
    fm.end()
2511 2516
2512 2517
def _bdiffworker(q, blocks, xdiff, ready, done):
    # Worker loop for the threaded mode of perfbdiff: pull text pairs from
    # ``q`` and diff them until a ``None`` sentinel is seen, then park on
    # the ``ready`` condition so the driver can release all workers at once
    # for the next timed run.  ``done`` ends the loop at teardown.
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done()  # for the None one
        with ready:
            ready.wait()
2528 2533
2529 2534
def _manifestrevision(repo, mnode):
    """return the raw manifest text for node ``mnode``

    Handles both the modern ``getstorage`` manifest-log API and the older
    ``_revlog`` attribute used by historical Mercurial versions.
    """
    ml = repo.manifestlog
    if util.safehasattr(ml, b'getstorage'):
        return ml.getstorage(b'').revision(mnode)
    # older hg: access the backing revlog directly
    return ml._revlog.revision(mnode)
2539 2544
2540 2545
@command(
    b'perf::bdiff|perfbdiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
        (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
        (b'', b'blocks', False, b'test computing diffs into blocks'),
        (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],
    b'-c|-m|FILE REV',
)
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    # with -c/-m the positional argument is the revision, not a file
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    # gather all (old, new) text pairs up front so only diffing is timed
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:

        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)

    else:
        # threaded mode: workers consume pairs from ``q``; a ``None``
        # sentinel per worker marks the end of one timed run (see
        # _bdiffworker for the consumer side of this protocol)
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(
                target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
            ).start()
        q.join()

        def d():
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # wake the parked workers one last time so they observe ``done``
        # and exit cleanly
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
2655 2660
2656 2661
@command(
    b'perf::unbundle',
    formatteropts,
    b'BUNDLE_FILE',
)
def perf_unbundle(ui, repo, fname, **opts):
    """benchmark application of a bundle in a repository.

    This does not include the final transaction processing

    Each timed run gets a fresh transaction created in ``setup()``; the
    previous run's transaction is aborted first so every application starts
    from the same repository state.
    """
    from mercurial import exchange
    from mercurial import bundle2

    opts = _byteskwargs(opts)

    with repo.lock():
        # holds the current [bundle-generator, transaction] pair so that
        # setup() and the finally clause can abort a pending transaction
        bundle = [None, None]
        orig_quiet = repo.ui.quiet
        try:
            repo.ui.quiet = True
            with open(fname, mode="rb") as f:

                def noop_report(*args, **kwargs):
                    pass

                def setup():
                    gen, tr = bundle
                    if tr is not None:
                        tr.abort()
                    bundle[:] = [None, None]
                    f.seek(0)
                    bundle[0] = exchange.readbundle(ui, f, fname)
                    bundle[1] = repo.transaction(b'perf::unbundle')
                    bundle[1]._report = noop_report  # silence the transaction

                def apply():
                    gen, tr = bundle
                    bundle2.applybundle(
                        repo,
                        gen,
                        tr,
                        source=b'perf::unbundle',
                        url=fname,
                    )

                timer, fm = gettimer(ui, opts)
                timer(apply, setup=setup)
                fm.end()
        finally:
            # fixed: this was ``repo.ui.quiet == orig_quiet`` — a no-op
            # comparison that left the ui permanently quiet after the run
            repo.ui.quiet = orig_quiet
            gen, tr = bundle
            if tr is not None:
                tr.abort()
2709 2714
2710 2715
@command(
    b'perf::unidiff|perfunidiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ],
    b'-c|-m|FILE REV',
)
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    # with -c/-m the positional argument is the revision, not a file
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    # gather all (old, new) text pairs up front so only diffing is timed
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False
            )
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
2789 2794
2790 2795
@command(b'perf::diffwd|perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    flagnames = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    for flags in ('', 'w', 'b', 'B', 'wB'):
        diffopts = {flagnames[flag]: b'1' for flag in flags}

        def d(diffopts=diffopts):
            # discard the diff output; only the computation is measured
            ui.pushbuffer()
            commands.diff(ui, repo, **diffopts)
            ui.popbuffer()

        encoded = flags.encode('ascii')
        if encoded:
            title = b'diffopts: -%s' % encoded
        else:
            title = b'diffopts: none'
        timer(d, title=title)
    fm.end()
2814 2819
2815 2820
@command(
    b'perf::revlogindex|perfrevlogindex',
    revlogopts + formatteropts,
    b'-c|-m|FILE',
)
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    # compat with hg <= 5.8
    radix = getattr(rl, 'radix', None)
    indexfile = getattr(rl, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(rl, 'indexfile')
    data = opener.read(indexfile)

    # first 4 bytes of the index encode flags (high 16 bits) and version
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        inline = header & (1 << 16)
    else:
        raise error.Abort(b'unsupported revlog version: %d' % version)

    parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
    if parse_index_v1 is None:
        # hg <= 4.9 parsed the index through the revlogio class
        parse_index_v1 = mercurial.revlog.revlogio().parseindex

    rllen = len(rl)

    # sample nodes at several positions to exercise lookup at various depths
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        if radix is not None:
            revlog(opener, radix=radix)
        else:
            # hg <= 5.8
            revlog(opener, indexfile=indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        parse_index_v1(data, inline)

    def getentry(revornode):
        index = parse_index_v1(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = parse_index_v1(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        index = parse_index_v1(data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        try:
            rev(node)
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        index = parse_index_v1(data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        for i in range(count):
            for node in nodes:
                try:
                    rev(node)
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
        (
            lambda: resolvenodes(allnodesrev, 2),
            b'look up all nodes 2x (reverse)',
        ),
        (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
        (
            lambda: getentries(allrevs, 2),
            b'retrieve all index entries 2x (forward)',
        ),
        (
            lambda: getentries(allrevsrev),
            b'retrieve all index entries (reverse)',
        ),
        (
            lambda: getentries(allrevsrev, 2),
            b'retrieve all index entries 2x (reverse)',
        ),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2961 2966
2962 2967
@command(
    b'perf::revlogrevisions|perfrevlogrevisions',
    revlogopts
    + formatteropts
    + [
        (b'd', b'dist', 100, b'distance between the revisions'),
        (b's', b'startrev', 0, b'revision to start reading at'),
        (b'', b'reverse', False, b'read in reverse'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogrevisions(
    ui, repo, file_=None, startrev=0, reverse=False, **opts
):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # a negative start revision counts backwards from the end
    if startrev < 0:
        startrev = rllen + startrev

    def d():
        # drop caches so every run reads the revisions for real
        rl.clearcaches()

        beginrev = startrev
        endrev = rllen
        dist = opts[b'dist']

        if reverse:
            beginrev, endrev = endrev - 1, beginrev - 1
            dist = -1 * dist

        for x in _xrange(beginrev, endrev, dist):
            # Old revisions don't support passing int.
            n = rl.node(x)
            rl.revision(n)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
3011 3016
3012 3017
@command(
    b'perf::revlogwrite|perfrevlogwrite',
    revlogopts
    + formatteropts
    + [
        (b's', b'startrev', 1000, b'revision to start writing at'),
        (b'', b'stoprev', -1, b'last revision to write'),
        (b'', b'count', 3, b'number of passes to perform'),
        (b'', b'details', False, b'print timing for every revisions tested'),
        (b'', b'source', b'full', b'the kind of data feed in the revlog'),
        (b'', b'lazydeltabase', True, b'try the provided delta first'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
      (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    Note: This performance command measures performance in a custom way. As a
    result some of the global configuration of the 'perf' command does not
    apply to it:

    * ``pre-run``: disabled

    * ``profile-benchmark``: disabled

    * ``run-limits``: disabled use --count instead
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative boundaries count from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (
        b'full',
        b'parent-1',
        b'parent-2',
        b'parent-smallest',
        b'storage',
    )
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        raise error.Abort('invalide run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(
            ui,
            rl,
            source,
            startrev,
            stoprev,
            c + 1,
            lazydeltabase=lazydeltabase,
            clearcaches=clearcaches,
        )
        allresults.append(timing)

    ### consolidate the results in a single list
    # results[i] becomes (rev, [timing-from-pass-1, timing-from-pass-2, ...])
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # fixed: this row previously indexed 70% of the results
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append(
            (
                sum(x[1][0] for x in item),
                sum(x[1][1] for x in item),
                sum(x[1][2] for x in item),
            )
        )
    formatone(
        fm,
        totaltime,
        title="total time (%d revs)" % resultcount,
        displayall=displayall,
    )
    fm.end()
3154 3159
3155 3160
class _faketr:
    """Minimal transaction stand-in that accepts and discards journal entries."""

    def add(s, x, y, z=None):
        # benchmarks need no journaling; silently drop the entry
        pass
3159 3164
3160 3165
def _timeonewrite(
    ui,
    orig,
    source,
    startrev,
    stoprev,
    runidx=None,
    lazydeltabase=True,
    clearcaches=True,
):
    """Re-add revisions [startrev, stoprev] of ``orig`` into a temporary
    revlog copy, timing each ``addrawrevision`` call individually.

    ``source`` selects how the revision data is fed in (see
    ``_getrevisionseed``).  Returns a list of ``(rev, timing)`` pairs where
    ``timing`` is the result tuple produced by ``timeone()``.
    """
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)

            def updateprogress(pos):
                progress.update(pos)

            def completeprogress():
                progress.complete()

        else:

            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)

            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                # clearing happens outside the timed section on purpose
                dest.index.clearcaches()
                dest.clearcaches()
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
3210 3215
3211 3216
def _getrevisionseed(orig, rev, tr, source):
    """Build the ``(args, kwargs)`` needed to re-add ``rev`` of ``orig``
    through ``addrawrevision``.

    ``source`` selects how the revision data is provided: as a full text
    (b'full'), as a delta against a parent (b'parent-1', b'parent-2',
    b'parent-smallest') or as the delta already stored in the revlog
    (b'storage').
    """
    from mercurial.node import nullid

    node = orig.node(rev)
    linkrev = orig.linkrev(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    text = None
    cachedelta = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        cachedelta = (orig.rev(p1), orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to the first parent when there is no second one
        parent = p1 if p2 == nullid else p2
        cachedelta = (orig.rev(parent), orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        parent, diff = p1, orig.revdiff(p1, rev)
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            # ties go to the first parent
            if len(diff) > len(p2diff):
                parent, diff = p2, p2diff
        cachedelta = (orig.rev(parent), diff)
    elif source == b'storage':
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return (
        (text, tr, linkrev, p1, p2),
        {'node': node, 'flags': flags, 'cachedelta': cachedelta},
    )
3252 3257
3253 3258
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Yield a writable copy of ``orig`` truncated to ``truncaterev``,
    backed by files in a temporary directory.

    The temporary directory (and the copy) is removed on exit.  Inline
    revlogs are not supported.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    k = 'upperboundcomp'
    if util.safehasattr(orig, k):
        # forward the compression upper bound when the revlog supports it
        revlogkwargs[k] = getattr(orig, k)

    indexfile = getattr(orig, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(orig, 'indexfile')
    origindexpath = orig.opener.join(indexfile)

    datafile = getattr(orig, '_datafile', getattr(orig, 'datafile'))
    origdatapath = orig.opener.join(datafile)
    radix = b'revlog'
    indexname = b'revlog.i'
    dataname = b'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        try:
            # recent revlogs are constructed from a file-name radix
            dest = revlog(vfs, radix=radix, **revlogkwargs)
        except TypeError:
            # older revlogs take explicit index/data file names
            dest = revlog(
                vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
            )
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
3314 3319
3315 3320
@command(
    b'perf::revlogchunks|perfrevlogchunks',
    revlogopts
    + formatteropts
    + [
        (b'e', b'engines', b'', b'compression engines to use'),
        (b's', b'startrev', 0, b'revision to start at'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = {e.strip() for e in engines.split(b',')}
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # default to every engine that is available and actually works
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # return a file handle on the file holding the chunk data: the
        # index file for inline revlogs, the data file otherwise
        if rl._inline:
            indexfile = getattr(rl, '_indexfile', None)
            if indexfile is None:
                # compatibility with <= hg-5.8
                indexfile = getattr(rl, 'indexfile')
            return getsvfs(repo)(indexfile)
        else:
            # modern revlogs name it `_datafile`; fall back to the pre-5.9
            # attribute (the previous code looked up 'datafile' twice and
            # therefore broke on modern revlogs)
            datafile = getattr(rl, '_datafile', getattr(rl, 'datafile'))
            return getsvfs(repo)(datafile)

    def doread():
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append(
            (
                functools.partial(docompress, compressor),
                b'compress w/ %s' % engine,
            )
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3448 3453
3449 3454
@command(
    b'perf::revlogrevision|perfrevlogrevision',
    revlogopts
    + formatteropts
    + [(b'', b'cache', False, b'use caches instead of clearing')],
    b'-c|-m|FILE REV',
)
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    # with -c/-m, the positional argument holds the revision, not a file
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # slice each raw segment into the per-revision compressed chunks
        start = r.start
        length = r.length
        inline = r._inline
        try:
            iosize = r.index.entry_size
        except AttributeError:
            # older revlogs expose the entry size on the _io object
            iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        # compatibility with older Mercurial versions
        slicechunk = getattr(revlog, '_slicechunk', None)

    # precompute the intermediate inputs shared by the per-phase benchmarks
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    # slicing is only exercised when sparse-read is enabled
    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend(
        [
            (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
            (lambda: dodecompress(rawchunks), b'decompress'),
            (lambda: dopatch(text, bins), b'patch'),
            (lambda: dohash(text), b'hash'),
        ]
    )

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
3594 3599
3595 3600
@command(
    b'perf::revset|perfrevset',
    [
        (b'C', b'clear', False, b'clear volatile cache between each call.'),
        (b'', b'contexts', False, b'obtain changectx for each revision'),
    ]
    + formatteropts,
    b"REVSET",
)
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option to evaluate the impact of building the volatile
    revision-set caches on revset execution. Those volatile caches hold the
    filtered and obsolescence related data."""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def bench():
        if clear:
            # drop filtered/obsolescence caches so they are rebuilt
            repo.invalidatevolatilesets()
        # iterate fully to force evaluation of the (lazy) revset
        iterable = repo.set(expr) if contexts else repo.revs(expr)
        for _unused in iterable:
            pass

    timer(bench)
    fm.end()
3627 3632
3628 3633
@command(
    b'perf::volatilesets|perfvolatilesets',
    [
        (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
    ]
    + formatteropts,
)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def makebench(func, name):
        # each run starts from scratch: volatile sets are invalidated and,
        # on request, the obsstore file cache is dropped as well
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            func(repo, name)

        return d

    wanted = set(names)

    # obsolescence related sets first, in sorted order
    for name in sorted(obsolete.cachefuncs):
        if names and name not in wanted:
            continue
        timer(makebench(obsolete.getrevs, name), title=name)

    # then the repoview filter sets, also sorted
    for name in sorted(repoview.filtertable):
        if names and name not in wanted:
            continue
        timer(makebench(repoview.filterrevs, name), title=name)
    fm.end()
3676 3681
3677 3682
@command(
    b'perf::branchmap|perfbranchmap',
    [
        (b'f', b'full', False, b'Includes build time of subset'),
        (
            b'',
            b'clear-revbranch',
            False,
            b'purge the revbranch cache between computation',
        ),
    ]
    + formatteropts,
)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)

    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches

        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # drop everything so subset build time is included
                view._branchcaches.clear()
            else:
                # only drop this filter's cache so subsets stay warm
                filtered.pop(filtername, None)
            view.branchmap()

        return d

    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        # pick a filter whose subset is not itself pending, so that
        # allfilters ends up ordered from smaller to bigger subsets
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable on-disk branchmap reading and writing so only the in-memory
    # update is measured
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=printname)
    finally:
        # always restore the patched read/write entry points
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
3767 3772
3768 3773
@command(
    b'perf::branchmapupdate|perfbranchmapupdate',
    [
        (b'', b'base', [], b'subset of revision to start from'),
        (b'', b'target', [], b'subset of revision to end with'),
        (b'', b'clear-caches', False, b'clear cache between each runs'),
    ]
    + formatteropts,
)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from for <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    # update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None]  # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    def basefilter(repo, visibilityexceptions=None):
        # repoview filter hiding everything but the base revisions
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        # repoview filter hiding everything but the target revisions
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        # register temporary repoview filters matching base and target
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset where found
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            # each run starts from a fresh copy of the base branchmap
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        # always drop the temporary filters registered above
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3877 3882
3878 3883
@command(
    b'perf::branchmapload|perfbranchmapload',
    [
        (b'f', b'filter', b'', b'Specify repoview filter'),
        # fixed typo in the user-visible help text: "brachmap" -> "branchmap"
        (b'', b'list', False, b'List branchmap filter caches'),
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ]
    + formatteropts,
)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # only list the on-disk branchmap caches; no benchmarking
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(
                    b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
                )
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap()  # make sure we have a relevant, up to date branchmap

    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached
    while fromfile(repo) is None:
        # walk up the subset chain until we find a cached branchmap
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(
                b'No branchmap cached for %s repo' % (filter or b'unfiltered')
            )
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)

    def setup():
        if clearrevlogs:
            clearchangelog(repo)

    def bench():
        fromfile(repo)

    timer(bench, setup=setup)
    fm.end()
3937 3942
3938 3943
@command(b'perf::loadmarkers|perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    svfs = getsvfs(repo)
    timer, fm = gettimer(ui)

    def parsemarkers():
        # parsing happens inside obsstore construction; the length is
        # what the timer reports as the benchmark result
        return len(obsolete.obsstore(repo, svfs))

    timer(parsemarkers)
    fm.end()
3948 3953
3949 3954
@command(
    b'perf::lrucachedict|perflrucachedict',
    formatteropts
    + [
        (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
        (b'', b'mincost', 0, b'smallest cost of items in cache'),
        (b'', b'maxcost', 100, b'maximum cost of items in cache'),
        (b'', b'size', 4, b'size of cache'),
        (b'', b'gets', 10000, b'number of key lookups'),
        (b'', b'sets', 10000, b'number of key sets'),
        (b'', b'mixed', 10000, b'number of mixed mode operations'),
        (
            b'',
            b'mixedgetfreq',
            50,
            b'frequency of get vs set ops in mixed mode',
        ),
    ],
    norepo=True,
)
def perflrucache(
    ui,
    mincost=0,
    maxcost=100,
    costlimit=0,
    size=4,
    gets=10000,
    sets=10000,
    mixed=10000,
    mixedgetfreq=50,
    **opts
):
    """benchmark ``util.lrucachedict`` operations

    Times cache construction, pure lookups, pure insertions/sets and a
    mixed get/set workload.  When ``--costlimit`` is non-zero, the
    cost-aware variants (``insert(..., cost=...)`` with ``maxcost``) are
    benchmarked instead of the plain ones.
    """
    opts = _byteskwargs(opts)

    def doinit():
        # raw construction cost of the cache object itself
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    # keys stored in the cache; random ints so no key distribution bias
    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value # silence pyflakes warning

    def dogetscost():
        # NOTE: ``costs`` is built below; as a closure it is resolved
        # lazily, after the list exists, when the benchmark runs.
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value # silence pyflakes warning
            except KeyError:
                # entries may have been evicted to honor the cost limit
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0  # get
        else:
            op = 1  # set

        mixedops.append(
            (op, random.randint(0, size * 2), random.choice(costrange))
        )

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    # the cost-aware and plain benchmarks are mutually exclusive
    if costlimit:
        benches.extend(
            [
                (dogetscost, b'gets w/ cost limit'),
                (doinsertscost, b'inserts w/ cost limit'),
                (domixedcost, b'mixed w/ cost limit'),
            ]
        )
    else:
        benches.extend(
            [
                (dogets, b'gets'),
                (doinserts, b'inserts'),
                (dosets, b'sets'),
                (domixed, b'mixed'),
            ]
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
4104 4109
4105 4110
@command(
    b'perf::write|perfwrite',
    formatteropts
    + [
        (b'', b'write-method', b'write', b'ui write method'),
        (b'', b'nlines', 100, b'number of lines'),
        (b'', b'nitems', 100, b'number of items (per line)'),
        (b'', b'item', b'x', b'item that is written'),
        (b'', b'batch-line', None, b'pass whole line to write method at once'),
        (b'', b'flush-line', None, b'flush after each line'),
    ],
)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write (and others)"""
    opts = _byteskwargs(opts)

    # resolve the ui method being benchmarked (write, warn, ...)
    write = getattr(ui, _sysstr(opts[b'write_method']))
    item = opts[b'item']
    nitems = int(opts[b'nitems'])
    nlines = int(opts[b'nlines'])
    batch_line = opts.get(b'batch_line')
    flush_line = opts.get(b'flush_line')

    if batch_line:
        # pre-build the full line once so only the write call is timed
        line = item * nitems + b'\n'

    def benchmark():
        for _row in pycompat.xrange(nlines):
            if batch_line:
                write(line)
            else:
                for _col in pycompat.xrange(nitems):
                    write(item)
                write(b'\n')
            if flush_line:
                ui.flush()
        ui.flush()

    timer, fm = gettimer(ui, opts)
    timer(benchmark)
    fm.end()
4147 4152
4148 4153
def uisetup(ui):
    # for "historical portability":
    # Mercurial 1.9 (a79fea6b3e77) through 3.7 (5606f7d0d063) provide
    # cmdutil.openrevlog but lack commands.debugrevlogopts.  On those
    # versions the '--dir' option must cause an explicit failure,
    # because it has only been available since 3.5 (49c583ca48c4).
    has_openrevlog = util.safehasattr(cmdutil, b'openrevlog')
    has_debugrevlogopts = util.safehasattr(commands, b'debugrevlogopts')
    if has_openrevlog and not has_debugrevlogopts:

        def checkedopenrevlog(orig, repo, cmd, file_, opts):
            # a repo without dirlog cannot honor --dir
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(
                    b"This version doesn't support --dir option",
                    hint=b"use 3.5 or later",
                )
            return orig(repo, cmd, file_, opts)

        extensions.wrapfunction(cmdutil, b'openrevlog', checkedopenrevlog)
4167 4172
4168 4173
@command(
    b'perf::progress|perfprogress',
    formatteropts
    + [
        (b'', b'topic', b'topic', b'topic for progress messages'),
        (b'c', b'total', 1000000, b'total value we are progressing to'),
    ],
    norepo=True,
)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def doprogress():
        # drive one full progress bar, a single increment per step
        with ui.makeprogress(topic, total=total) as progress:
            for _step in _xrange(total):
                progress.increment()

    timer(doprogress)
    fm.end()
General Comments 0
You need to be logged in to leave comments. Login now