perf-stream-consume: use the source repository config when applying...
marmoute - r52449:f1512dbf default
@@ -1,4677 +1,4681 b''
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance
3 3
4 4 Configurations
5 5 ==============
6 6
7 7 ``perf``
8 8 --------
9 9
10 10 ``all-timing``
11 11 When set, additional statistics will be reported for each benchmark: best,
12 12 worst, average, and median. If not set, only the best timing is reported
13 13 (default: off).
14 14
15 15 ``presleep``
16 16 number of seconds to wait before each group of runs (default: 1)
17 17
18 18 ``pre-run``
19 19 number of runs to perform before starting measurement.
20 20
21 21 ``profile-benchmark``
22 22 Enable profiling for the benchmarked section.
23 23 (Only the first iteration is profiled.)
24 24
25 25 ``run-limits``
26 26 Control the number of runs each benchmark will perform. The option value
27 27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 28 conditions are considered in order with the following logic:
29 29
30 30 If the benchmark has been running for <time> seconds, and we have
31 31 performed <numberofrun> iterations, stop the benchmark.
32 32
33 33 The default value is: `3.0-100, 10.0-3`
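
For example, setting `run-limits` to `1.0-50, 10.0-3` stops a fast
benchmark once at least one second has elapsed and 50 iterations were
made, and a slow one once ten seconds have elapsed and at least 3
iterations were made.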
34 34
35 35 ``stub``
36 36 When set, benchmarks will only be run once; this is useful for testing
37 37 (default: off)
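
Example of an illustrative `hgrc` snippet using these options::

  [perf]
  all-timing = yes
  pre-run = 3
  run-limits = 5.0-100, 15.0-3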
38 38 '''
39 39
40 40 # "historical portability" policy of perf.py:
41 41 #
42 42 # We have to do:
43 43 # - make perf.py "loadable" with as wide a range of Mercurial versions as possible
44 44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 46 # - make historical perf commands work correctly with as wide a range
47 47 # of Mercurial versions as possible
48 48 #
49 49 # We have to do, if possible with reasonable cost:
50 50 # - make recent perf commands for historical features work correctly
51 51 # with early Mercurial
52 52 #
53 53 # We don't have to do:
54 54 # - make perf commands for recent features work correctly with early
55 55 # Mercurial
56 56
57 57 import contextlib
58 58 import functools
59 59 import gc
60 60 import os
61 61 import random
62 62 import shutil
63 63 import struct
64 64 import sys
65 65 import tempfile
66 66 import threading
67 67 import time
68 68
69 69 import mercurial.revlog
70 70 from mercurial import (
71 71 changegroup,
72 72 cmdutil,
73 73 commands,
74 74 copies,
75 75 error,
76 76 extensions,
77 77 hg,
78 78 mdiff,
79 79 merge,
80 80 util,
81 81 )
82 82
83 83 # for "historical portability":
84 84 # try to import modules separately (in dict order), and ignore
85 85 # failure, because these aren't available with early Mercurial
86 86 try:
87 87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 88 except ImportError:
89 89 pass
90 90 try:
91 91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 92 except ImportError:
93 93 pass
94 94 try:
95 95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96 96
97 97 dir(registrar) # forcibly load it
98 98 except ImportError:
99 99 registrar = None
100 100 try:
101 101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 102 except ImportError:
103 103 pass
104 104 try:
105 105 from mercurial.utils import repoviewutil # since 5.0
106 106 except ImportError:
107 107 repoviewutil = None
108 108 try:
109 109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 110 except ImportError:
111 111 pass
112 112 try:
113 113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 114 except ImportError:
115 115 pass
116 116
117 117 try:
118 118 from mercurial import profiling
119 119 except ImportError:
120 120 profiling = None
121 121
122 122 try:
123 123 from mercurial.revlogutils import constants as revlog_constants
124 124
125 125 perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')
126 126
127 127 def revlog(opener, *args, **kwargs):
128 128 return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)
129 129
130 130
131 131 except (ImportError, AttributeError):
132 132 perf_rl_kind = None
133 133
134 134 def revlog(opener, *args, **kwargs):
135 135 return mercurial.revlog.revlog(opener, *args, **kwargs)
136 136
137 137
138 138 def identity(a):
139 139 return a
140 140
141 141
142 142 try:
143 143 from mercurial import pycompat
144 144
145 145 getargspec = pycompat.getargspec # added to module after 4.5
146 146 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
147 147 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
148 148 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
149 149 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
150 150 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
151 151 if pycompat.ispy3:
152 152 _maxint = sys.maxsize # per py3 docs for replacing maxint
153 153 else:
154 154 _maxint = sys.maxint
155 155 except (NameError, ImportError, AttributeError):
156 156 import inspect
157 157
158 158 getargspec = inspect.getargspec
159 159 _byteskwargs = identity
160 160 _bytestr = str
161 161 fsencode = identity # no py3 support
162 162 _maxint = sys.maxint # no py3 support
163 163 _sysstr = lambda x: x # no py3 support
164 164 _xrange = xrange
165 165
166 166 try:
167 167 # 4.7+
168 168 queue = pycompat.queue.Queue
169 169 except (NameError, AttributeError, ImportError):
170 170 # <4.7.
171 171 try:
172 172 queue = pycompat.queue
173 173 except (NameError, AttributeError, ImportError):
174 174 import Queue as queue
175 175
176 176 try:
177 177 from mercurial import logcmdutil
178 178
179 179 makelogtemplater = logcmdutil.maketemplater
180 180 except (AttributeError, ImportError):
181 181 try:
182 182 makelogtemplater = cmdutil.makelogtemplater
183 183 except (AttributeError, ImportError):
184 184 makelogtemplater = None
185 185
186 186 # for "historical portability":
187 187 # define util.safehasattr forcibly, because util.safehasattr has been
188 188 # available since 1.9.3 (or 94b200a11cf7)
189 189 _undefined = object()
190 190
191 191
192 192 def safehasattr(thing, attr):
193 193 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
194 194
195 195
196 196 setattr(util, 'safehasattr', safehasattr)
197 197
198 198 # for "historical portability":
199 199 # define util.timer forcibly, because util.timer has been available
200 200 # since ae5d60bb70c9
201 201 if safehasattr(time, 'perf_counter'):
202 202 util.timer = time.perf_counter
203 203 elif os.name == b'nt':
204 204 util.timer = time.clock
205 205 else:
206 206 util.timer = time.time
207 207
208 208 # for "historical portability":
209 209 # use locally defined empty option list, if formatteropts isn't
210 210 # available, because commands.formatteropts has been available since
211 211 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
212 212 # available since 2.2 (or ae5f92e154d3)
213 213 formatteropts = getattr(
214 214 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
215 215 )
216 216
217 217 # for "historical portability":
218 218 # use locally defined option list, if debugrevlogopts isn't available,
219 219 # because commands.debugrevlogopts has been available since 3.7 (or
220 220 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
221 221 # since 1.9 (or a79fea6b3e77).
222 222 revlogopts = getattr(
223 223 cmdutil,
224 224 "debugrevlogopts",
225 225 getattr(
226 226 commands,
227 227 "debugrevlogopts",
228 228 [
229 229 (b'c', b'changelog', False, b'open changelog'),
230 230 (b'm', b'manifest', False, b'open manifest'),
231 231 (b'', b'dir', False, b'open directory manifest'),
232 232 ],
233 233 ),
234 234 )
235 235
236 236 cmdtable = {}
237 237
238 238
239 239 # for "historical portability":
240 240 # define parsealiases locally, because cmdutil.parsealiases has been
241 241 # available since 1.5 (or 6252852b4332)
242 242 def parsealiases(cmd):
243 243 return cmd.split(b"|")
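# e.g. parsealiases(b'perf::tags|perftags') -> [b'perf::tags', b'perftags']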
244 244
245 245
246 246 if safehasattr(registrar, 'command'):
247 247 command = registrar.command(cmdtable)
248 248 elif safehasattr(cmdutil, 'command'):
249 249 command = cmdutil.command(cmdtable)
250 250 if 'norepo' not in getargspec(command).args:
251 251 # for "historical portability":
252 252 # wrap original cmdutil.command, because "norepo" option has
253 253 # been available since 3.1 (or 75a96326cecb)
254 254 _command = command
255 255
256 256 def command(name, options=(), synopsis=None, norepo=False):
257 257 if norepo:
258 258 commands.norepo += b' %s' % b' '.join(parsealiases(name))
259 259 return _command(name, list(options), synopsis)
260 260
261 261
262 262 else:
263 263 # for "historical portability":
264 264 # define "@command" annotation locally, because cmdutil.command
265 265 # has been available since 1.9 (or 2daa5179e73f)
266 266 def command(name, options=(), synopsis=None, norepo=False):
267 267 def decorator(func):
268 268 if synopsis:
269 269 cmdtable[name] = func, list(options), synopsis
270 270 else:
271 271 cmdtable[name] = func, list(options)
272 272 if norepo:
273 273 commands.norepo += b' %s' % b' '.join(parsealiases(name))
274 274 return func
275 275
276 276 return decorator
277 277
278 278
279 279 try:
280 280 import mercurial.registrar
281 281 import mercurial.configitems
282 282
283 283 configtable = {}
284 284 configitem = mercurial.registrar.configitem(configtable)
285 285 configitem(
286 286 b'perf',
287 287 b'presleep',
288 288 default=mercurial.configitems.dynamicdefault,
289 289 experimental=True,
290 290 )
291 291 configitem(
292 292 b'perf',
293 293 b'stub',
294 294 default=mercurial.configitems.dynamicdefault,
295 295 experimental=True,
296 296 )
297 297 configitem(
298 298 b'perf',
299 299 b'parentscount',
300 300 default=mercurial.configitems.dynamicdefault,
301 301 experimental=True,
302 302 )
303 303 configitem(
304 304 b'perf',
305 305 b'all-timing',
306 306 default=mercurial.configitems.dynamicdefault,
307 307 experimental=True,
308 308 )
309 309 configitem(
310 310 b'perf',
311 311 b'pre-run',
312 312 default=mercurial.configitems.dynamicdefault,
313 313 )
314 314 configitem(
315 315 b'perf',
316 316 b'profile-benchmark',
317 317 default=mercurial.configitems.dynamicdefault,
318 318 )
319 319 configitem(
320 320 b'perf',
321 321 b'run-limits',
322 322 default=mercurial.configitems.dynamicdefault,
323 323 experimental=True,
324 324 )
325 325 except (ImportError, AttributeError):
326 326 pass
327 327 except TypeError:
328 328 # compatibility fix for a11fd395e83f
329 329 # hg version: 5.2
330 330 configitem(
331 331 b'perf',
332 332 b'presleep',
333 333 default=mercurial.configitems.dynamicdefault,
334 334 )
335 335 configitem(
336 336 b'perf',
337 337 b'stub',
338 338 default=mercurial.configitems.dynamicdefault,
339 339 )
340 340 configitem(
341 341 b'perf',
342 342 b'parentscount',
343 343 default=mercurial.configitems.dynamicdefault,
344 344 )
345 345 configitem(
346 346 b'perf',
347 347 b'all-timing',
348 348 default=mercurial.configitems.dynamicdefault,
349 349 )
350 350 configitem(
351 351 b'perf',
352 352 b'pre-run',
353 353 default=mercurial.configitems.dynamicdefault,
354 354 )
355 355 configitem(
356 356 b'perf',
357 357 b'profile-benchmark',
358 358 default=mercurial.configitems.dynamicdefault,
359 359 )
360 360 configitem(
361 361 b'perf',
362 362 b'run-limits',
363 363 default=mercurial.configitems.dynamicdefault,
364 364 )
365 365
366 366
367 367 def getlen(ui):
368 368 if ui.configbool(b"perf", b"stub", False):
369 369 return lambda x: 1
370 370 return len
371 371
372 372
373 373 class noop:
374 374 """dummy context manager"""
375 375
376 376 def __enter__(self):
377 377 pass
378 378
379 379 def __exit__(self, *args):
380 380 pass
381 381
382 382
383 383 NOOPCTX = noop()
384 384
385 385
386 386 def gettimer(ui, opts=None):
387 387 """return a timer function and formatter: (timer, formatter)
388 388
389 389 This function exists to gather the creation of the formatter in a single
390 390 place instead of duplicating it in all performance commands.
391 391
392 392 # enforce an idle period before execution to counteract power management
393 393 # experimental config: perf.presleep
394 394 time.sleep(getint(ui, b"perf", b"presleep", 1))
395 395
396 396 if opts is None:
397 397 opts = {}
398 398 # redirect all to stderr unless buffer api is in use
399 399 if not ui._buffers:
400 400 ui = ui.copy()
401 401 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
402 402 if uifout:
403 403 # for "historical portability":
404 404 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
405 405 uifout.set(ui.ferr)
406 406
407 407 # get a formatter
408 408 uiformatter = getattr(ui, 'formatter', None)
409 409 if uiformatter:
410 410 fm = uiformatter(b'perf', opts)
411 411 else:
412 412 # for "historical portability":
413 413 # define formatter locally, because ui.formatter has been
414 414 # available since 2.2 (or ae5f92e154d3)
415 415 from mercurial import node
416 416
417 417 class defaultformatter:
418 418 """Minimized composition of baseformatter and plainformatter"""
419 419
420 420 def __init__(self, ui, topic, opts):
421 421 self._ui = ui
422 422 if ui.debugflag:
423 423 self.hexfunc = node.hex
424 424 else:
425 425 self.hexfunc = node.short
426 426
427 427 def __nonzero__(self):
428 428 return False
429 429
430 430 __bool__ = __nonzero__
431 431
432 432 def startitem(self):
433 433 pass
434 434
435 435 def data(self, **data):
436 436 pass
437 437
438 438 def write(self, fields, deftext, *fielddata, **opts):
439 439 self._ui.write(deftext % fielddata, **opts)
440 440
441 441 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
442 442 if cond:
443 443 self._ui.write(deftext % fielddata, **opts)
444 444
445 445 def plain(self, text, **opts):
446 446 self._ui.write(text, **opts)
447 447
448 448 def end(self):
449 449 pass
450 450
451 451 fm = defaultformatter(ui, b'perf', opts)
452 452
453 453 # stub function, runs code only once instead of in a loop
454 454 # experimental config: perf.stub
455 455 if ui.configbool(b"perf", b"stub", False):
456 456 return functools.partial(stub_timer, fm), fm
457 457
458 458 # experimental config: perf.all-timing
459 459 displayall = ui.configbool(b"perf", b"all-timing", True)
460 460
461 461 # experimental config: perf.run-limits
462 462 limitspec = ui.configlist(b"perf", b"run-limits", [])
463 463 limits = []
464 464 for item in limitspec:
465 465 parts = item.split(b'-', 1)
466 466 if len(parts) < 2:
467 467 ui.warn((b'malformed run limit entry, missing "-": %s\n' % item))
468 468 continue
469 469 try:
470 470 time_limit = float(_sysstr(parts[0]))
471 471 except ValueError as e:
472 472 ui.warn(
473 473 (
474 474 b'malformed run limit entry, %s: %s\n'
475 475 % (_bytestr(e), item)
476 476 )
477 477 )
478 478 continue
479 479 try:
480 480 run_limit = int(_sysstr(parts[1]))
481 481 except ValueError as e:
482 482 ui.warn(
483 483 (
484 484 b'malformed run limit entry, %s: %s\n'
485 485 % (_bytestr(e), item)
486 486 )
487 487 )
488 488 continue
489 489 limits.append((time_limit, run_limit))
490 490 if not limits:
491 491 limits = DEFAULTLIMITS
492 492
493 493 profiler = None
494 494 if profiling is not None:
495 495 if ui.configbool(b"perf", b"profile-benchmark", False):
496 496 profiler = profiling.profile(ui)
497 497
498 498 prerun = getint(ui, b"perf", b"pre-run", 0)
499 499 t = functools.partial(
500 500 _timer,
501 501 fm,
502 502 displayall=displayall,
503 503 limits=limits,
504 504 prerun=prerun,
505 505 profiler=profiler,
506 506 )
507 507 return t, fm
508 508
509 509
510 510 def stub_timer(fm, func, setup=None, title=None):
511 511 if setup is not None:
512 512 setup()
513 513 func()
514 514
515 515
516 516 @contextlib.contextmanager
517 517 def timeone():
518 518 r = []
519 519 ostart = os.times()
520 520 cstart = util.timer()
521 521 yield r
522 522 cstop = util.timer()
523 523 ostop = os.times()
524 524 a, b = ostart, ostop
525 525 r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
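
# Example use of timeone() (illustrative; `do_something_expensive` is a
# placeholder, not a real helper):
#
#   with timeone() as res:
#       do_something_expensive()
#   wall, user, sys_cpu = res[0]
#
# where the tuple holds the wall-clock, user CPU, and system CPU time
# deltas measured around the block.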
526 526
527 527
528 528 # list of stop conditions (elapsed time, minimal run count)
529 529 DEFAULTLIMITS = (
530 530 (3.0, 100),
531 531 (10.0, 3),
532 532 )
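# With these defaults, a benchmark stops once 3 seconds have elapsed and
# at least 100 iterations were made, or once 10 seconds have elapsed and
# at least 3 iterations were made (see the stop-condition loop in _timer
# below).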
533 533
534 534
535 535 @contextlib.contextmanager
536 536 def noop_context():
537 537 yield
538 538
539 539
540 540 def _timer(
541 541 fm,
542 542 func,
543 543 setup=None,
544 544 context=noop_context,
545 545 title=None,
546 546 displayall=False,
547 547 limits=DEFAULTLIMITS,
548 548 prerun=0,
549 549 profiler=None,
550 550 ):
551 551 gc.collect()
552 552 results = []
553 553 begin = util.timer()
554 554 count = 0
555 555 if profiler is None:
556 556 profiler = NOOPCTX
557 557 for i in range(prerun):
558 558 if setup is not None:
559 559 setup()
560 560 with context():
561 561 func()
562 562 keepgoing = True
563 563 while keepgoing:
564 564 if setup is not None:
565 565 setup()
566 566 with context():
567 567 with profiler:
568 568 with timeone() as item:
569 569 r = func()
570 570 profiler = NOOPCTX
571 571 count += 1
572 572 results.append(item[0])
573 573 cstop = util.timer()
574 574 # Look for a stop condition.
575 575 elapsed = cstop - begin
576 576 for t, mincount in limits:
577 577 if elapsed >= t and count >= mincount:
578 578 keepgoing = False
579 579 break
580 580
581 581 formatone(fm, results, title=title, result=r, displayall=displayall)
582 582
583 583
584 584 def formatone(fm, timings, title=None, result=None, displayall=False):
585 585 count = len(timings)
586 586
587 587 fm.startitem()
588 588
589 589 if title:
590 590 fm.write(b'title', b'! %s\n', title)
591 591 if result:
592 592 fm.write(b'result', b'! result: %s\n', result)
593 593
594 594 def display(role, entry):
595 595 prefix = b''
596 596 if role != b'best':
597 597 prefix = b'%s.' % role
598 598 fm.plain(b'!')
599 599 fm.write(prefix + b'wall', b' wall %f', entry[0])
600 600 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
601 601 fm.write(prefix + b'user', b' user %f', entry[1])
602 602 fm.write(prefix + b'sys', b' sys %f', entry[2])
603 603 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
604 604 fm.plain(b'\n')
605 605
606 606 timings.sort()
607 607 min_val = timings[0]
608 608 display(b'best', min_val)
609 609 if displayall:
610 610 max_val = timings[-1]
611 611 display(b'max', max_val)
612 612 avg = tuple([sum(x) / count for x in zip(*timings)])
613 613 display(b'avg', avg)
614 614 median = timings[len(timings) // 2]
615 615 display(b'median', median)
616 616
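# Sample output produced through formatone() with perf.all-timing
# enabled (the numbers are illustrative):
#
#   ! wall 0.008012 comb 0.020000 user 0.010000 sys 0.010000 (best of 100)
#   ! wall 0.011023 comb 0.030000 user 0.020000 sys 0.010000 (max of 100)
#   ! wall 0.008912 comb 0.020000 user 0.010000 sys 0.010000 (avg of 100)
#   ! wall 0.008700 comb 0.020000 user 0.010000 sys 0.010000 (median of 100)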
617 617
618 618 # utilities for historical portability
619 619
620 620
621 621 def getint(ui, section, name, default):
622 622 # for "historical portability":
623 623 # ui.configint has been available since 1.9 (or fa2b596db182)
624 624 v = ui.config(section, name, None)
625 625 if v is None:
626 626 return default
627 627 try:
628 628 return int(v)
629 629 except ValueError:
630 630 raise error.ConfigError(
631 631 b"%s.%s is not an integer ('%s')" % (section, name, v)
632 632 )
633 633
634 634
635 635 def safeattrsetter(obj, name, ignoremissing=False):
636 636 """Ensure that 'obj' has 'name' attribute before subsequent setattr
637 637
638 638 This function aborts if 'obj' doesn't have the 'name' attribute at
639 639 runtime. This avoids overlooking a future removal of the attribute,
640 640 which would silently break the assumptions of the measurement.
641 641
642 642 This function returns the object to (1) assign a new value, and
643 643 (2) restore an original value to the attribute.
644 644
645 645 If 'ignoremissing' is true, a missing 'name' attribute doesn't cause
646 646 an abort, and this function returns None instead. This is useful to
647 647 examine an attribute which isn't guaranteed to exist in all Mercurial
648 648 versions.
649 649 """
650 650 if not util.safehasattr(obj, name):
651 651 if ignoremissing:
652 652 return None
653 653 raise error.Abort(
654 654 (
655 655 b"missing attribute %s of %s might break assumption"
656 656 b" of performance measurement"
657 657 )
658 658 % (name, obj)
659 659 )
660 660
661 661 origvalue = getattr(obj, _sysstr(name))
662 662
663 663 class attrutil:
664 664 def set(self, newvalue):
665 665 setattr(obj, _sysstr(name), newvalue)
666 666
667 667 def restore(self):
668 668 setattr(obj, _sysstr(name), origvalue)
669 669
670 670 return attrutil()
671 671
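# Typical use of safeattrsetter, mirroring the output redirect done in
# gettimer() above (a sketch, not a new API):
#
#   uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
#   if uifout:
#       uifout.set(ui.ferr)  # redirect output to stderr
#       ...
#       uifout.restore()     # put the original stream back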
672 672
673 673 # utilities to examine internal API changes
674 674
675 675
676 676 def getbranchmapsubsettable():
677 677 # for "historical portability":
678 678 # subsettable is defined in:
679 679 # - branchmap since 2.9 (or 175c6fd8cacc)
680 680 # - repoview since 2.5 (or 59a9f18d4587)
681 681 # - repoviewutil since 5.0
682 682 for mod in (branchmap, repoview, repoviewutil):
683 683 subsettable = getattr(mod, 'subsettable', None)
684 684 if subsettable:
685 685 return subsettable
686 686
687 687 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
688 688 # branchmap and repoview modules exist, but subsettable attribute
689 689 # doesn't)
690 690 raise error.Abort(
691 691 b"perfbranchmap not available with this Mercurial",
692 692 hint=b"use 2.5 or later",
693 693 )
694 694
695 695
696 696 def getsvfs(repo):
697 697 """Return appropriate object to access files under .hg/store"""
698 698 # for "historical portability":
699 699 # repo.svfs has been available since 2.3 (or 7034365089bf)
700 700 svfs = getattr(repo, 'svfs', None)
701 701 if svfs:
702 702 return svfs
703 703 else:
704 704 return getattr(repo, 'sopener')
705 705
706 706
707 707 def getvfs(repo):
708 708 """Return appropriate object to access files under .hg"""
709 709 # for "historical portability":
710 710 # repo.vfs has been available since 2.3 (or 7034365089bf)
711 711 vfs = getattr(repo, 'vfs', None)
712 712 if vfs:
713 713 return vfs
714 714 else:
715 715 return getattr(repo, 'opener')
716 716
717 717
718 718 def repocleartagscachefunc(repo):
719 719 """Return the function to clear tags cache according to repo internal API"""
720 720 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
721 721 # in this case, setattr(repo, '_tagscache', None) or the like isn't
722 722 # the correct way to clear the tags cache, because existing code paths
723 723 # expect _tagscache to be a structured object.
724 724 def clearcache():
725 725 # _tagscache has been filteredpropertycache since 2.5 (or
726 726 # 98c867ac1330), and delattr() can't work in such case
727 727 if '_tagscache' in vars(repo):
728 728 del repo.__dict__['_tagscache']
729 729
730 730 return clearcache
731 731
732 732 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
733 733 if repotags: # since 1.4 (or 5614a628d173)
734 734 return lambda: repotags.set(None)
735 735
736 736 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
737 737 if repotagscache: # since 0.6 (or d7df759d0e97)
738 738 return lambda: repotagscache.set(None)
739 739
740 740 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
741 741 # this point, but it isn't so problematic, because:
742 742 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
743 743 # in perftags() causes failure soon
744 744 # - perf.py itself has been available since 1.1 (or eb240755386d)
745 745 raise error.Abort(b"tags API of this hg command is unknown")
746 746
747 747
748 748 # utilities to clear cache
749 749
750 750
751 751 def clearfilecache(obj, attrname):
752 752 unfiltered = getattr(obj, 'unfiltered', None)
753 753 if unfiltered is not None:
754 754 obj = obj.unfiltered()
755 755 if attrname in vars(obj):
756 756 delattr(obj, attrname)
757 757 obj._filecache.pop(attrname, None)
758 758
759 759
760 760 def clearchangelog(repo):
761 761 if repo is not repo.unfiltered():
762 762 object.__setattr__(repo, '_clcachekey', None)
763 763 object.__setattr__(repo, '_clcache', None)
764 764 clearfilecache(repo.unfiltered(), 'changelog')
765 765
766 766
767 767 # perf commands
768 768
769 769
770 770 @command(b'perf::walk|perfwalk', formatteropts)
771 771 def perfwalk(ui, repo, *pats, **opts):
772 772 opts = _byteskwargs(opts)
773 773 timer, fm = gettimer(ui, opts)
774 774 m = scmutil.match(repo[None], pats, {})
775 775 timer(
776 776 lambda: len(
777 777 list(
778 778 repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
779 779 )
780 780 )
781 781 )
782 782 fm.end()
783 783
784 784
785 785 @command(b'perf::annotate|perfannotate', formatteropts)
786 786 def perfannotate(ui, repo, f, **opts):
787 787 opts = _byteskwargs(opts)
788 788 timer, fm = gettimer(ui, opts)
789 789 fc = repo[b'.'][f]
790 790 timer(lambda: len(fc.annotate(True)))
791 791 fm.end()
792 792
793 793
794 794 @command(
795 795 b'perf::status|perfstatus',
796 796 [
797 797 (b'u', b'unknown', False, b'ask status to look for unknown files'),
798 798 (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
799 799 ]
800 800 + formatteropts,
801 801 )
802 802 def perfstatus(ui, repo, **opts):
803 803 """benchmark the performance of a single status call
804 804
805 805 The repository data is preserved between calls.
806 806
807 807 By default, only the status of tracked files is requested. If
808 808 `--unknown` is passed, the status of "unknown" files is requested as well.
809 809 """
810 810 opts = _byteskwargs(opts)
811 811 # m = match.always(repo.root, repo.getcwd())
812 812 # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
813 813 # False))))
814 814 timer, fm = gettimer(ui, opts)
815 815 if opts[b'dirstate']:
816 816 dirstate = repo.dirstate
817 817 m = scmutil.matchall(repo)
818 818 unknown = opts[b'unknown']
819 819
820 820 def status_dirstate():
821 821 s = dirstate.status(
822 822 m, subrepos=[], ignored=False, clean=False, unknown=unknown
823 823 )
824 824 sum(map(bool, s))
825 825
826 826 if util.safehasattr(dirstate, 'running_status'):
827 827 with dirstate.running_status(repo):
828 828 timer(status_dirstate)
829 829 dirstate.invalidate()
830 830 else:
831 831 timer(status_dirstate)
832 832 else:
833 833 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
834 834 fm.end()
835 835
836 836
837 837 @command(b'perf::addremove|perfaddremove', formatteropts)
838 838 def perfaddremove(ui, repo, **opts):
839 839 opts = _byteskwargs(opts)
840 840 timer, fm = gettimer(ui, opts)
841 841 try:
842 842 oldquiet = repo.ui.quiet
843 843 repo.ui.quiet = True
844 844 matcher = scmutil.match(repo[None])
845 845 opts[b'dry_run'] = True
846 846 if 'uipathfn' in getargspec(scmutil.addremove).args:
847 847 uipathfn = scmutil.getuipathfn(repo)
848 848 timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
849 849 else:
850 850 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
851 851 finally:
852 852 repo.ui.quiet = oldquiet
853 853 fm.end()
854 854
855 855
856 856 def clearcaches(cl):
857 857 # behave somewhat consistently across internal API changes
858 858 if util.safehasattr(cl, b'clearcaches'):
859 859 cl.clearcaches()
860 860 elif util.safehasattr(cl, b'_nodecache'):
861 861 # <= hg-5.2
862 862 from mercurial.node import nullid, nullrev
863 863
864 864 cl._nodecache = {nullid: nullrev}
865 865 cl._nodepos = None
866 866
867 867
868 868 @command(b'perf::heads|perfheads', formatteropts)
869 869 def perfheads(ui, repo, **opts):
870 870 """benchmark the computation of a changelog heads"""
871 871 opts = _byteskwargs(opts)
872 872 timer, fm = gettimer(ui, opts)
873 873 cl = repo.changelog
874 874
875 875 def s():
876 876 clearcaches(cl)
877 877
878 878 def d():
879 879 len(cl.headrevs())
880 880
881 881 timer(d, setup=s)
882 882 fm.end()
883 883
884 884
885 885 def _default_clear_on_disk_tags_cache(repo):
886 886 from mercurial import tags
887 887
888 888 repo.cachevfs.tryunlink(tags._filename(repo))
889 889
890 890
891 891 def _default_clear_on_disk_tags_fnodes_cache(repo):
892 892 from mercurial import tags
893 893
894 894 repo.cachevfs.tryunlink(tags._fnodescachefile)
895 895
896 896
897 897 def _default_forget_fnodes(repo, revs):
898 898 """function used by the perf extension to prune some entries from the
899 899 fnodes cache"""
900 900 from mercurial import tags
901 901
902 902 missing_1 = b'\xff' * 4
903 903 missing_2 = b'\xff' * 20
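# a fnodes cache record is tags._fnodesrecsize (24) bytes: a 4 byte
# changeset node prefix followed by a 20 byte filenode; writing an
# all-0xff record marks the entry as unknown so it gets recomputed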
904 904 cache = tags.hgtagsfnodescache(repo.unfiltered())
905 905 for r in revs:
906 906 cache._writeentry(r * tags._fnodesrecsize, missing_1, missing_2)
907 907 cache.write()
908 908
909 909
910 910 @command(
911 911 b'perf::tags|perftags',
912 912 formatteropts
913 913 + [
914 914 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
915 915 (
916 916 b'',
917 917 b'clear-on-disk-cache',
918 918 False,
919 919 b'clear on disk tags cache (DESTRUCTIVE)',
920 920 ),
921 921 (
922 922 b'',
923 923 b'clear-fnode-cache-all',
924 924 False,
925 925 b'clear on disk file node cache (DESTRUCTIVE)',
926 926 ),
927 927 (
928 928 b'',
929 929 b'clear-fnode-cache-rev',
930 930 [],
931 931 b'clear on disk file node cache (DESTRUCTIVE)',
932 932 b'REVS',
933 933 ),
934 934 (
935 935 b'',
936 936 b'update-last',
937 937 b'',
938 938 b'simulate an update over the last N revisions (DESTRUCTIVE)',
939 939 b'N',
940 940 ),
941 941 ],
942 942 )
943 943 def perftags(ui, repo, **opts):
944 944 """Benchmark tags retrieval in various situation
945 945
946 946 The options marked as (DESTRUCTIVE) will alter the on-disk caches, possibly
947 947 affecting performance after the command has run. However, they do not
948 948 destroy any stored data.
949 949 """
950 950 from mercurial import tags
951 951
952 952 opts = _byteskwargs(opts)
953 953 timer, fm = gettimer(ui, opts)
954 954 repocleartagscache = repocleartagscachefunc(repo)
955 955 clearrevlogs = opts[b'clear_revlogs']
956 956 clear_disk = opts[b'clear_on_disk_cache']
957 957 clear_fnode = opts[b'clear_fnode_cache_all']
958 958
959 959 clear_fnode_revs = opts[b'clear_fnode_cache_rev']
960 960 update_last_str = opts[b'update_last']
961 961 update_last = None
962 962 if update_last_str:
963 963 try:
964 964 update_last = int(update_last_str)
965 965 except ValueError:
966 966 msg = b'could not parse value for update-last: "%s"'
967 967 msg %= update_last_str
968 968 hint = b'value should be an integer'
969 969 raise error.Abort(msg, hint=hint)
970 970
971 971 clear_disk_fn = getattr(
972 972 tags,
973 973 "clear_cache_on_disk",
974 974 _default_clear_on_disk_tags_cache,
975 975 )
976 976 if getattr(tags, 'clear_cache_fnodes_is_working', False):
977 977 clear_fnodes_fn = tags.clear_cache_fnodes
978 978 else:
979 979 clear_fnodes_fn = _default_clear_on_disk_tags_fnodes_cache
980 980 clear_fnodes_rev_fn = getattr(
981 981 tags,
982 982 "forget_fnodes",
983 983 _default_forget_fnodes,
984 984 )
985 985
986 986 clear_revs = []
987 987 if clear_fnode_revs:
988 988 clear_revs.extend(scmutil.revrange(repo, clear_fnode_revs))
989 989
990 990 if update_last:
991 991 revset = b'last(all(), %d)' % update_last
992 992 last_revs = repo.unfiltered().revs(revset)
993 993 clear_revs.extend(last_revs)
994 994
995 995 from mercurial import repoview
996 996
997 997 rev_filter = {(b'experimental', b'extra-filter-revs'): revset}
998 998 with repo.ui.configoverride(rev_filter, source=b"perf"):
999 999 filter_id = repoview.extrafilter(repo.ui)
1000 1000
1001 1001 filter_name = b'%s%%%s' % (repo.filtername, filter_id)
1002 1002 pre_repo = repo.filtered(filter_name)
1003 1003 pre_repo.tags() # warm the cache
1004 1004 old_tags_path = repo.cachevfs.join(tags._filename(pre_repo))
1005 1005 new_tags_path = repo.cachevfs.join(tags._filename(repo))
1006 1006
1007 1007 clear_revs = sorted(set(clear_revs))
1008 1008
1009 1009 def s():
1010 1010 if update_last:
1011 1011 util.copyfile(old_tags_path, new_tags_path)
1012 1012 if clearrevlogs:
1013 1013 clearchangelog(repo)
1014 1014 clearfilecache(repo.unfiltered(), 'manifest')
1015 1015 if clear_disk:
1016 1016 clear_disk_fn(repo)
1017 1017 if clear_fnode:
1018 1018 clear_fnodes_fn(repo)
1019 1019 elif clear_revs:
1020 1020 clear_fnodes_rev_fn(repo, clear_revs)
1021 1021 repocleartagscache()
1022 1022
1023 1023 def t():
1024 1024 len(repo.tags())
1025 1025
1026 1026 timer(t, setup=s)
1027 1027 fm.end()
1028 1028
1029 1029
1030 1030 @command(b'perf::ancestors|perfancestors', formatteropts)
1031 1031 def perfancestors(ui, repo, **opts):
1032 1032 opts = _byteskwargs(opts)
1033 1033 timer, fm = gettimer(ui, opts)
1034 1034 heads = repo.changelog.headrevs()
1035 1035
1036 1036 def d():
1037 1037 for a in repo.changelog.ancestors(heads):
1038 1038 pass
1039 1039
1040 1040 timer(d)
1041 1041 fm.end()
1042 1042
1043 1043
1044 1044 @command(b'perf::ancestorset|perfancestorset', formatteropts)
1045 1045 def perfancestorset(ui, repo, revset, **opts):
1046 1046 opts = _byteskwargs(opts)
1047 1047 timer, fm = gettimer(ui, opts)
1048 1048 revs = repo.revs(revset)
1049 1049 heads = repo.changelog.headrevs()
1050 1050
1051 1051 def d():
1052 1052 s = repo.changelog.ancestors(heads)
1053 1053 for rev in revs:
1054 1054 rev in s
1055 1055
1056 1056 timer(d)
1057 1057 fm.end()
1058 1058
1059 1059
1060 1060 @command(
1061 1061 b'perf::delta-find',
1062 1062 revlogopts + formatteropts,
1063 1063 b'-c|-m|FILE REV',
1064 1064 )
1065 1065 def perf_delta_find(ui, repo, arg_1, arg_2=None, **opts):
1066 1066 """benchmark the process of finding a valid delta for a revlog revision
1067 1067
1068 1068 When a revlog receives a new revision (e.g. from a commit, or from an
1069 1069 incoming bundle), it searches for a suitable delta-base to produce a delta.
1070 1070 This perf command measures how much time we spend in this process. It
1071 1071 operates on an already stored revision.
1072 1072
1073 1073 See `hg help debug-delta-find` for another related command.
1074 1074 """
1075 1075 from mercurial import revlogutils
1076 1076 import mercurial.revlogutils.deltas as deltautil
1077 1077
1078 1078 opts = _byteskwargs(opts)
1079 1079 if arg_2 is None:
1080 1080 file_ = None
1081 1081 rev = arg_1
1082 1082 else:
1083 1083 file_ = arg_1
1084 1084 rev = arg_2
1085 1085
1086 1086 repo = repo.unfiltered()
1087 1087
1088 1088 timer, fm = gettimer(ui, opts)
1089 1089
1090 1090 rev = int(rev)
1091 1091
1092 1092 revlog = cmdutil.openrevlog(repo, b'perf::delta-find', file_, opts)
1093 1093
1094 1094 deltacomputer = deltautil.deltacomputer(revlog)
1095 1095
1096 1096 node = revlog.node(rev)
1097 1097 p1r, p2r = revlog.parentrevs(rev)
1098 1098 p1 = revlog.node(p1r)
1099 1099 p2 = revlog.node(p2r)
1100 1100 full_text = revlog.revision(rev)
1101 1101 textlen = len(full_text)
1102 1102 cachedelta = None
1103 1103 flags = revlog.flags(rev)
1104 1104
1105 1105 revinfo = revlogutils.revisioninfo(
1106 1106 node,
1107 1107 p1,
1108 1108 p2,
1109 1109 [full_text], # btext
1110 1110 textlen,
1111 1111 cachedelta,
1112 1112 flags,
1113 1113 )
1114 1114
1115 1115 # Note: we should probably purge the potential caches (like the full
1116 1116 # manifest cache) between runs.
1117 1117 def find_one():
1118 1118 with revlog._datafp() as fh:
1119 1119 deltacomputer.finddeltainfo(revinfo, fh, target_rev=rev)
1120 1120
1121 1121 timer(find_one)
1122 1122 fm.end()
1123 1123
1124 1124
1125 1125 @command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
1126 1126 def perfdiscovery(ui, repo, path, **opts):
1127 1127 """benchmark discovery between local repo and the peer at given path"""
1128 1128 repos = [repo, None]
1129 1129 timer, fm = gettimer(ui, opts)
1130 1130
1131 1131 try:
1132 1132 from mercurial.utils.urlutil import get_unique_pull_path_obj
1133 1133
1134 1134 path = get_unique_pull_path_obj(b'perfdiscovery', ui, path)
1135 1135 except ImportError:
1136 1136 try:
1137 1137 from mercurial.utils.urlutil import get_unique_pull_path
1138 1138
1139 1139 path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
1140 1140 except ImportError:
1141 1141 path = ui.expandpath(path)
1142 1142
1143 1143 def s():
1144 1144 repos[1] = hg.peer(ui, opts, path)
1145 1145
1146 1146 def d():
1147 1147 setdiscovery.findcommonheads(ui, *repos)
1148 1148
1149 1149 timer(d, setup=s)
1150 1150 fm.end()
1151 1151
1152 1152
1153 1153 @command(
1154 1154 b'perf::bookmarks|perfbookmarks',
1155 1155 formatteropts
1156 1156 + [
1157 1157 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
1158 1158 ],
1159 1159 )
1160 1160 def perfbookmarks(ui, repo, **opts):
1161 1161 """benchmark parsing bookmarks from disk to memory"""
1162 1162 opts = _byteskwargs(opts)
1163 1163 timer, fm = gettimer(ui, opts)
1164 1164
1165 1165 clearrevlogs = opts[b'clear_revlogs']
1166 1166
1167 1167 def s():
1168 1168 if clearrevlogs:
1169 1169 clearchangelog(repo)
1170 1170 clearfilecache(repo, b'_bookmarks')
1171 1171
1172 1172 def d():
1173 1173 repo._bookmarks
1174 1174
1175 1175 timer(d, setup=s)
1176 1176 fm.end()
1177 1177
1178 1178
1179 1179 @command(
1180 1180 b'perf::bundle',
1181 1181 [
1182 1182 (
1183 1183 b'r',
1184 1184 b'rev',
1185 1185 [],
1186 1186 b'changesets to bundle',
1187 1187 b'REV',
1188 1188 ),
1189 1189 (
1190 1190 b't',
1191 1191 b'type',
1192 1192 b'none',
1193 1193 b'bundlespec to use (see `hg help bundlespec`)',
1194 1194 b'TYPE',
1195 1195 ),
1196 1196 ]
1197 1197 + formatteropts,
1198 1198 b'REVS',
1199 1199 )
1200 1200 def perfbundle(ui, repo, *revs, **opts):
1201 1201 """benchmark the creation of a bundle from a repository
1202 1202
1203 1203 For now, this only supports "none" compression.
1204 1204 """
1205 1205 try:
1206 1206 from mercurial import bundlecaches
1207 1207
1208 1208 parsebundlespec = bundlecaches.parsebundlespec
1209 1209 except ImportError:
1210 1210 from mercurial import exchange
1211 1211
1212 1212 parsebundlespec = exchange.parsebundlespec
1213 1213
1214 1214 from mercurial import discovery
1215 1215 from mercurial import bundle2
1216 1216
1217 1217 opts = _byteskwargs(opts)
1218 1218 timer, fm = gettimer(ui, opts)
1219 1219
1220 1220 cl = repo.changelog
1221 1221 revs = list(revs)
1222 1222 revs.extend(opts.get(b'rev', ()))
1223 1223 revs = scmutil.revrange(repo, revs)
1224 1224 if not revs:
1225 1225 raise error.Abort(b"no revision specified")
1226 1226 # make it a consistent set (ie: without topological gaps)
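# e.g. for revs == [2, 5] in a linear history, "%ld::%ld" pulls in
# revisions 3 and 4 as well, so the bundle covers a topologically
# closed range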
1227 1227 old_len = len(revs)
1228 1228 revs = list(repo.revs(b"%ld::%ld", revs, revs))
1229 1229 if old_len != len(revs):
1230 1230 new_count = len(revs) - old_len
1231 1231 msg = b"add %d new revisions to make it a consistent set\n"
1232 1232 ui.write_err(msg % new_count)
1233 1233
1234 1234 targets = [cl.node(r) for r in repo.revs(b"heads(::%ld)", revs)]
1235 1235 bases = [cl.node(r) for r in repo.revs(b"heads(::%ld - %ld)", revs, revs)]
1236 1236 outgoing = discovery.outgoing(repo, bases, targets)
1237 1237
1238 1238 bundle_spec = opts.get(b'type')
1239 1239
1240 1240 bundle_spec = parsebundlespec(repo, bundle_spec, strict=False)
1241 1241
1242 1242 cgversion = bundle_spec.params.get(b"cg.version")
1243 1243 if cgversion is None:
1244 1244 if bundle_spec.version == b'v1':
1245 1245 cgversion = b'01'
1246 1246 if bundle_spec.version == b'v2':
1247 1247 cgversion = b'02'
1248 1248 if cgversion not in changegroup.supportedoutgoingversions(repo):
1249 1249 err = b"repository does not support bundle version %s"
1250 1250 raise error.Abort(err % cgversion)
1251 1251
1252 1252 if cgversion == b'01': # bundle1
1253 1253 bversion = b'HG10' + bundle_spec.wirecompression
1254 1254 bcompression = None
1255 1255 elif cgversion in (b'02', b'03'):
1256 1256 bversion = b'HG20'
1257 1257 bcompression = bundle_spec.wirecompression
1258 1258 else:
1259 1259 err = b'perf::bundle: unexpected changegroup version %s'
1260 1260 raise error.ProgrammingError(err % cgversion)
1261 1261
1262 1262 if bcompression is None:
1263 1263 bcompression = b'UN'
1264 1264
1265 1265 if bcompression != b'UN':
1266 1266 err = b'perf::bundle: compression currently unsupported: %s'
1267 1267 raise error.ProgrammingError(err % bcompression)
1268 1268
1269 1269 def do_bundle():
1270 1270 bundle2.writenewbundle(
1271 1271 ui,
1272 1272 repo,
1273 1273 b'perf::bundle',
1274 1274 os.devnull,
1275 1275 bversion,
1276 1276 outgoing,
1277 1277 bundle_spec.params,
1278 1278 )
1279 1279
1280 1280 timer(do_bundle)
1281 1281 fm.end()
1282 1282
1283 1283
1284 1284 @command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
1285 1285 def perfbundleread(ui, repo, bundlepath, **opts):
1286 1286 """Benchmark reading of bundle files.
1287 1287
1288 1288 This command is meant to isolate the I/O part of bundle reading as
1289 1289 much as possible.
1290 1290 """
1291 1291 from mercurial import (
1292 1292 bundle2,
1293 1293 exchange,
1294 1294 streamclone,
1295 1295 )
1296 1296
1297 1297 opts = _byteskwargs(opts)
1298 1298
1299 1299 def makebench(fn):
1300 1300 def run():
1301 1301 with open(bundlepath, b'rb') as fh:
1302 1302 bundle = exchange.readbundle(ui, fh, bundlepath)
1303 1303 fn(bundle)
1304 1304
1305 1305 return run
1306 1306
1307 1307 def makereadnbytes(size):
1308 1308 def run():
1309 1309 with open(bundlepath, b'rb') as fh:
1310 1310 bundle = exchange.readbundle(ui, fh, bundlepath)
1311 1311 while bundle.read(size):
1312 1312 pass
1313 1313
1314 1314 return run
1315 1315
1316 1316 def makestdioread(size):
1317 1317 def run():
1318 1318 with open(bundlepath, b'rb') as fh:
1319 1319 while fh.read(size):
1320 1320 pass
1321 1321
1322 1322 return run
1323 1323
1324 1324 # bundle1
1325 1325
1326 1326 def deltaiter(bundle):
1327 1327 for delta in bundle.deltaiter():
1328 1328 pass
1329 1329
1330 1330 def iterchunks(bundle):
1331 1331 for chunk in bundle.getchunks():
1332 1332 pass
1333 1333
1334 1334 # bundle2
1335 1335
1336 1336 def forwardchunks(bundle):
1337 1337 for chunk in bundle._forwardchunks():
1338 1338 pass
1339 1339
1340 1340 def iterparts(bundle):
1341 1341 for part in bundle.iterparts():
1342 1342 pass
1343 1343
1344 1344 def iterpartsseekable(bundle):
1345 1345 for part in bundle.iterparts(seekable=True):
1346 1346 pass
1347 1347
1348 1348 def seek(bundle):
1349 1349 for part in bundle.iterparts(seekable=True):
1350 1350 part.seek(0, os.SEEK_END)
1351 1351
1352 1352 def makepartreadnbytes(size):
1353 1353 def run():
1354 1354 with open(bundlepath, b'rb') as fh:
1355 1355 bundle = exchange.readbundle(ui, fh, bundlepath)
1356 1356 for part in bundle.iterparts():
1357 1357 while part.read(size):
1358 1358 pass
1359 1359
1360 1360 return run
1361 1361
1362 1362 benches = [
1363 1363 (makestdioread(8192), b'read(8k)'),
1364 1364 (makestdioread(16384), b'read(16k)'),
1365 1365 (makestdioread(32768), b'read(32k)'),
1366 1366 (makestdioread(131072), b'read(128k)'),
1367 1367 ]
1368 1368
1369 1369 with open(bundlepath, b'rb') as fh:
1370 1370 bundle = exchange.readbundle(ui, fh, bundlepath)
1371 1371
1372 1372 if isinstance(bundle, changegroup.cg1unpacker):
1373 1373 benches.extend(
1374 1374 [
1375 1375 (makebench(deltaiter), b'cg1 deltaiter()'),
1376 1376 (makebench(iterchunks), b'cg1 getchunks()'),
1377 1377 (makereadnbytes(8192), b'cg1 read(8k)'),
1378 1378 (makereadnbytes(16384), b'cg1 read(16k)'),
1379 1379 (makereadnbytes(32768), b'cg1 read(32k)'),
1380 1380 (makereadnbytes(131072), b'cg1 read(128k)'),
1381 1381 ]
1382 1382 )
1383 1383 elif isinstance(bundle, bundle2.unbundle20):
1384 1384 benches.extend(
1385 1385 [
1386 1386 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
1387 1387 (makebench(iterparts), b'bundle2 iterparts()'),
1388 1388 (
1389 1389 makebench(iterpartsseekable),
1390 1390 b'bundle2 iterparts() seekable',
1391 1391 ),
1392 1392 (makebench(seek), b'bundle2 part seek()'),
1393 1393 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
1394 1394 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
1395 1395 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
1396 1396 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
1397 1397 ]
1398 1398 )
1399 1399 elif isinstance(bundle, streamclone.streamcloneapplier):
1400 1400 raise error.Abort(b'stream clone bundles not supported')
1401 1401 else:
1402 1402 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
1403 1403
1404 1404 for fn, title in benches:
1405 1405 timer, fm = gettimer(ui, opts)
1406 1406 timer(fn, title=title)
1407 1407 fm.end()
1408 1408
1409 1409
1410 1410 @command(
1411 1411 b'perf::changegroupchangelog|perfchangegroupchangelog',
1412 1412 formatteropts
1413 1413 + [
1414 1414 (b'', b'cgversion', b'02', b'changegroup version'),
1415 1415 (b'r', b'rev', b'', b'revisions to add to changegroup'),
1416 1416 ],
1417 1417 )
1418 1418 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
1419 1419 """Benchmark producing a changelog group for a changegroup.
1420 1420
1421 1421 This measures the time spent processing the changelog during a
1422 1422 bundle operation. This occurs during `hg bundle` and on a server
1423 1423 processing a `getbundle` wire protocol request (which handles clones
1424 1424 and pulls).
1425 1425
1426 1426 By default, all revisions are added to the changegroup.
1427 1427 """
1428 1428 opts = _byteskwargs(opts)
1429 1429 cl = repo.changelog
1430 1430 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
1431 1431 bundler = changegroup.getbundler(cgversion, repo)
1432 1432
1433 1433 def d():
1434 1434 state, chunks = bundler._generatechangelog(cl, nodes)
1435 1435 for chunk in chunks:
1436 1436 pass
1437 1437
1438 1438 timer, fm = gettimer(ui, opts)
1439 1439
1440 1440 # Terminal printing can interfere with timing. So disable it.
1441 1441 with ui.configoverride({(b'progress', b'disable'): True}):
1442 1442 timer(d)
1443 1443
1444 1444 fm.end()
1445 1445
1446 1446
1447 1447 @command(b'perf::dirs|perfdirs', formatteropts)
1448 1448 def perfdirs(ui, repo, **opts):
1449 1449 opts = _byteskwargs(opts)
1450 1450 timer, fm = gettimer(ui, opts)
1451 1451 dirstate = repo.dirstate
1452 1452 b'a' in dirstate
1453 1453
1454 1454 def d():
1455 1455 dirstate.hasdir(b'a')
1456 1456 try:
1457 1457 del dirstate._map._dirs
1458 1458 except AttributeError:
1459 1459 pass
1460 1460
1461 1461 timer(d)
1462 1462 fm.end()
1463 1463
1464 1464
1465 1465 @command(
1466 1466 b'perf::dirstate|perfdirstate',
1467 1467 [
1468 1468 (
1469 1469 b'',
1470 1470 b'iteration',
1471 1471 None,
1472 1472 b'benchmark a full iteration for the dirstate',
1473 1473 ),
1474 1474 (
1475 1475 b'',
1476 1476 b'contains',
1477 1477 None,
1478 1478 b'benchmark a large amount of `nf in dirstate` calls',
1479 1479 ),
1480 1480 ]
1481 1481 + formatteropts,
1482 1482 )
1483 1483 def perfdirstate(ui, repo, **opts):
1484 1484 """benchmap the time of various distate operations
1485 1485
1486 1486 By default benchmark the time necessary to load a dirstate from scratch.
1487 1487 The dirstate is loaded to the point were a "contains" request can be
1488 1488 answered.
1489 1489 """
1490 1490 opts = _byteskwargs(opts)
1491 1491 timer, fm = gettimer(ui, opts)
1492 1492 b"a" in repo.dirstate
1493 1493
1494 1494 if opts[b'iteration'] and opts[b'contains']:
1495 1495 msg = b'only specify one of --iteration or --contains'
1496 1496 raise error.Abort(msg)
1497 1497
1498 1498 if opts[b'iteration']:
1499 1499 setup = None
1500 1500 dirstate = repo.dirstate
1501 1501
1502 1502 def d():
1503 1503 for f in dirstate:
1504 1504 pass
1505 1505
1506 1506 elif opts[b'contains']:
1507 1507 setup = None
1508 1508 dirstate = repo.dirstate
1509 1509 allfiles = list(dirstate)
1510 1510 # also add file paths that will be "missing" from the dirstate
1511 1511 allfiles.extend([f[::-1] for f in allfiles])
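# (e.g. b'dir/file' becomes b'elif/rid', which is almost certainly not
# tracked, so the benchmark also exercises the miss path)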
1512 1512
1513 1513 def d():
1514 1514 for f in allfiles:
1515 1515 f in dirstate
1516 1516
1517 1517 else:
1518 1518
1519 1519 def setup():
1520 1520 repo.dirstate.invalidate()
1521 1521
1522 1522 def d():
1523 1523 b"a" in repo.dirstate
1524 1524
1525 1525 timer(d, setup=setup)
1526 1526 fm.end()
1527 1527
1528 1528
1529 1529 @command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
1530 1530 def perfdirstatedirs(ui, repo, **opts):
1531 1531 """benchmap a 'dirstate.hasdir' call from an empty `dirs` cache"""
1532 1532 opts = _byteskwargs(opts)
1533 1533 timer, fm = gettimer(ui, opts)
1534 1534 repo.dirstate.hasdir(b"a")
1535 1535
1536 1536 def setup():
1537 1537 try:
1538 1538 del repo.dirstate._map._dirs
1539 1539 except AttributeError:
1540 1540 pass
1541 1541
1542 1542 def d():
1543 1543 repo.dirstate.hasdir(b"a")
1544 1544
1545 1545 timer(d, setup=setup)
1546 1546 fm.end()
1547 1547
1548 1548
1549 1549 @command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
1550 1550 def perfdirstatefoldmap(ui, repo, **opts):
1551 1551 """benchmap a `dirstate._map.filefoldmap.get()` request
1552 1552
1553 1553 The dirstate filefoldmap cache is dropped between every request.
1554 1554 """
1555 1555 opts = _byteskwargs(opts)
1556 1556 timer, fm = gettimer(ui, opts)
1557 1557 dirstate = repo.dirstate
1558 1558 dirstate._map.filefoldmap.get(b'a')
1559 1559
1560 1560 def setup():
1561 1561 del dirstate._map.filefoldmap
1562 1562
1563 1563 def d():
1564 1564 dirstate._map.filefoldmap.get(b'a')
1565 1565
1566 1566 timer(d, setup=setup)
1567 1567 fm.end()
1568 1568
1569 1569
1570 1570 @command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
1571 1571 def perfdirfoldmap(ui, repo, **opts):
1572 1572 """benchmap a `dirstate._map.dirfoldmap.get()` request
1573 1573
1574 1574 The dirstate dirfoldmap cache is dropped between every request.
1575 1575 """
1576 1576 opts = _byteskwargs(opts)
1577 1577 timer, fm = gettimer(ui, opts)
1578 1578 dirstate = repo.dirstate
1579 1579 dirstate._map.dirfoldmap.get(b'a')
1580 1580
1581 1581 def setup():
1582 1582 del dirstate._map.dirfoldmap
1583 1583 try:
1584 1584 del dirstate._map._dirs
1585 1585 except AttributeError:
1586 1586 pass
1587 1587
1588 1588 def d():
1589 1589 dirstate._map.dirfoldmap.get(b'a')
1590 1590
1591 1591 timer(d, setup=setup)
1592 1592 fm.end()
1593 1593
1594 1594
1595 1595 @command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
1596 1596 def perfdirstatewrite(ui, repo, **opts):
1597 1597 """benchmap the time it take to write a dirstate on disk"""
1598 1598 opts = _byteskwargs(opts)
1599 1599 timer, fm = gettimer(ui, opts)
1600 1600 ds = repo.dirstate
1601 1601 b"a" in ds
1602 1602
1603 1603 def setup():
1604 1604 ds._dirty = True
1605 1605
1606 1606 def d():
1607 1607 ds.write(repo.currenttransaction())
1608 1608
1609 1609 with repo.wlock():
1610 1610 timer(d, setup=setup)
1611 1611 fm.end()
1612 1612
1613 1613
1614 1614 def _getmergerevs(repo, opts):
1615 1615 """parse command argument to return rev involved in merge
1616 1616
1617 1617 input: options dictionary with `rev`, `from` and `base`
1618 1618 output: (localctx, otherctx, basectx)
1619 1619 """
1620 1620 if opts[b'from']:
1621 1621 fromrev = scmutil.revsingle(repo, opts[b'from'])
1622 1622 wctx = repo[fromrev]
1623 1623 else:
1624 1624 wctx = repo[None]
1625 1625 # we don't want working dir files to be stat'd in the benchmark, so
1626 1626 # prime that cache
1627 1627 wctx.dirty()
1628 1628 rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
1629 1629 if opts[b'base']:
1630 1630 fromrev = scmutil.revsingle(repo, opts[b'base'])
1631 1631 ancestor = repo[fromrev]
1632 1632 else:
1633 1633 ancestor = wctx.ancestor(rctx)
1634 1634 return (wctx, rctx, ancestor)
1635 1635
1636 1636
1637 1637 @command(
1638 1638 b'perf::mergecalculate|perfmergecalculate',
1639 1639 [
1640 1640 (b'r', b'rev', b'.', b'rev to merge against'),
1641 1641 (b'', b'from', b'', b'rev to merge from'),
1642 1642 (b'', b'base', b'', b'the revision to use as base'),
1643 1643 ]
1644 1644 + formatteropts,
1645 1645 )
1646 1646 def perfmergecalculate(ui, repo, **opts):
1647 1647 opts = _byteskwargs(opts)
1648 1648 timer, fm = gettimer(ui, opts)
1649 1649
1650 1650 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1651 1651
1652 1652 def d():
1653 1653 # acceptremote is True because we don't want prompts in the middle of
1654 1654 # our benchmark
1655 1655 merge.calculateupdates(
1656 1656 repo,
1657 1657 wctx,
1658 1658 rctx,
1659 1659 [ancestor],
1660 1660 branchmerge=False,
1661 1661 force=False,
1662 1662 acceptremote=True,
1663 1663 followcopies=True,
1664 1664 )
1665 1665
1666 1666 timer(d)
1667 1667 fm.end()
1668 1668
1669 1669
1670 1670 @command(
1671 1671 b'perf::mergecopies|perfmergecopies',
1672 1672 [
1673 1673 (b'r', b'rev', b'.', b'rev to merge against'),
1674 1674 (b'', b'from', b'', b'rev to merge from'),
1675 1675 (b'', b'base', b'', b'the revision to use as base'),
1676 1676 ]
1677 1677 + formatteropts,
1678 1678 )
1679 1679 def perfmergecopies(ui, repo, **opts):
1680 1680 """measure runtime of `copies.mergecopies`"""
1681 1681 opts = _byteskwargs(opts)
1682 1682 timer, fm = gettimer(ui, opts)
1683 1683 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1684 1684
1685 1685 def d():
1686 1686 # acceptremote is True because we don't want prompts in the middle of
1687 1687 # our benchmark
1688 1688 copies.mergecopies(repo, wctx, rctx, ancestor)
1689 1689
1690 1690 timer(d)
1691 1691 fm.end()
1692 1692
1693 1693
1694 1694 @command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
1695 1695 def perfpathcopies(ui, repo, rev1, rev2, **opts):
1696 1696 """benchmark the copy tracing logic"""
1697 1697 opts = _byteskwargs(opts)
1698 1698 timer, fm = gettimer(ui, opts)
1699 1699 ctx1 = scmutil.revsingle(repo, rev1, rev1)
1700 1700 ctx2 = scmutil.revsingle(repo, rev2, rev2)
1701 1701
1702 1702 def d():
1703 1703 copies.pathcopies(ctx1, ctx2)
1704 1704
1705 1705 timer(d)
1706 1706 fm.end()
1707 1707
1708 1708
1709 1709 @command(
1710 1710 b'perf::phases|perfphases',
1711 1711 [
1712 1712 (b'', b'full', False, b'include file reading time too'),
1713 1713 ]
1714 1714 + formatteropts,
1715 1715 b"",
1716 1716 )
1717 1717 def perfphases(ui, repo, **opts):
1718 1718 """benchmark phasesets computation"""
1719 1719 opts = _byteskwargs(opts)
1720 1720 timer, fm = gettimer(ui, opts)
1721 1721 _phases = repo._phasecache
1722 1722 full = opts.get(b'full')
1723 1723 tip_rev = repo.changelog.tiprev()
1724 1724
1725 1725 def d():
1726 1726 phases = _phases
1727 1727 if full:
1728 1728 clearfilecache(repo, b'_phasecache')
1729 1729 phases = repo._phasecache
1730 1730 phases.invalidate()
1731 1731 phases.phase(repo, tip_rev)
1732 1732
1733 1733 timer(d)
1734 1734 fm.end()
1735 1735
1736 1736
1737 1737 @command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
1738 1738 def perfphasesremote(ui, repo, dest=None, **opts):
1739 1739 """benchmark time needed to analyse phases of the remote server"""
1740 1740 from mercurial.node import bin
1741 1741 from mercurial import (
1742 1742 exchange,
1743 1743 hg,
1744 1744 phases,
1745 1745 )
1746 1746
1747 1747 opts = _byteskwargs(opts)
1748 1748 timer, fm = gettimer(ui, opts)
1749 1749
1750 1750 path = ui.getpath(dest, default=(b'default-push', b'default'))
1751 1751 if not path:
1752 1752 raise error.Abort(
1753 1753 b'default repository not configured!',
1754 1754 hint=b"see 'hg help config.paths'",
1755 1755 )
1756 1756 if util.safehasattr(path, 'main_path'):
1757 1757 path = path.get_push_variant()
1758 1758 dest = path.loc
1759 1759 else:
1760 1760 dest = path.pushloc or path.loc
1761 1761 ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
1762 1762 other = hg.peer(repo, opts, dest)
1763 1763
1764 1764 # easier to perform discovery through the operation
1765 1765 op = exchange.pushoperation(repo, other)
1766 1766 exchange._pushdiscoverychangeset(op)
1767 1767
1768 1768 remotesubset = op.fallbackheads
1769 1769
1770 1770 with other.commandexecutor() as e:
1771 1771 remotephases = e.callcommand(
1772 1772 b'listkeys', {b'namespace': b'phases'}
1773 1773 ).result()
1774 1774 del other
1775 1775 publishing = remotephases.get(b'publishing', False)
1776 1776 if publishing:
1777 1777 ui.statusnoi18n(b'publishing: yes\n')
1778 1778 else:
1779 1779 ui.statusnoi18n(b'publishing: no\n')
1780 1780
1781 1781 has_node = getattr(repo.changelog.index, 'has_node', None)
1782 1782 if has_node is None:
1783 1783 has_node = repo.changelog.nodemap.__contains__
1784 1784 nonpublishroots = 0
1785 1785 for nhex, phase in remotephases.iteritems():
1786 1786 if nhex == b'publishing': # ignore data related to publish option
1787 1787 continue
1788 1788 node = bin(nhex)
1789 1789 if has_node(node) and int(phase):
1790 1790 nonpublishroots += 1
1791 1791 ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
1792 1792 ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)
1793 1793
1794 1794 def d():
1795 1795 phases.remotephasessummary(repo, remotesubset, remotephases)
1796 1796
1797 1797 timer(d)
1798 1798 fm.end()
1799 1799
1800 1800
1801 1801 @command(
1802 1802 b'perf::manifest|perfmanifest',
1803 1803 [
1804 1804 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
1805 1805 (b'', b'clear-disk', False, b'clear on-disk caches too'),
1806 1806 ]
1807 1807 + formatteropts,
1808 1808 b'REV|NODE',
1809 1809 )
1810 1810 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
1811 1811 """benchmark the time to read a manifest from disk and return a usable
1812 1812 dict-like object
1813 1813
1814 1814 Manifest caches are cleared before retrieval."""
1815 1815 opts = _byteskwargs(opts)
1816 1816 timer, fm = gettimer(ui, opts)
1817 1817 if not manifest_rev:
1818 1818 ctx = scmutil.revsingle(repo, rev, rev)
1819 1819 t = ctx.manifestnode()
1820 1820 else:
1821 1821 from mercurial.node import bin
1822 1822
1823 1823 if len(rev) == 40:
1824 1824 t = bin(rev)
1825 1825 else:
1826 1826 try:
1827 1827 rev = int(rev)
1828 1828
1829 1829 if util.safehasattr(repo.manifestlog, b'getstorage'):
1830 1830 t = repo.manifestlog.getstorage(b'').node(rev)
1831 1831 else:
1832 1832 t = repo.manifestlog._revlog.lookup(rev)
1833 1833 except ValueError:
1834 1834 raise error.Abort(
1835 1835 b'manifest revision must be integer or full node'
1836 1836 )
1837 1837
1838 1838 def d():
1839 1839 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
1840 1840 repo.manifestlog[t].read()
1841 1841
1842 1842 timer(d)
1843 1843 fm.end()
1844 1844
1845 1845
1846 1846 @command(b'perf::changeset|perfchangeset', formatteropts)
1847 1847 def perfchangeset(ui, repo, rev, **opts):
1848 1848 opts = _byteskwargs(opts)
1849 1849 timer, fm = gettimer(ui, opts)
1850 1850 n = scmutil.revsingle(repo, rev).node()
1851 1851
1852 1852 def d():
1853 1853 repo.changelog.read(n)
1854 1854 # repo.changelog._cache = None
1855 1855
1856 1856 timer(d)
1857 1857 fm.end()
1858 1858
1859 1859
1860 1860 @command(b'perf::ignore|perfignore', formatteropts)
1861 1861 def perfignore(ui, repo, **opts):
1862 1862 """benchmark operation related to computing ignore"""
1863 1863 opts = _byteskwargs(opts)
1864 1864 timer, fm = gettimer(ui, opts)
1865 1865 dirstate = repo.dirstate
1866 1866
1867 1867 def setupone():
1868 1868 dirstate.invalidate()
1869 1869 clearfilecache(dirstate, b'_ignore')
1870 1870
1871 1871 def runone():
1872 1872 dirstate._ignore
1873 1873
1874 1874 timer(runone, setup=setupone, title=b"load")
1875 1875 fm.end()
1876 1876
1877 1877
1878 1878 @command(
1879 1879 b'perf::index|perfindex',
1880 1880 [
1881 1881 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1882 1882 (b'', b'no-lookup', None, b'do not perform revision lookup post creation'),
1883 1883 ]
1884 1884 + formatteropts,
1885 1885 )
1886 1886 def perfindex(ui, repo, **opts):
1887 1887 """benchmark index creation time followed by a lookup
1888 1888
1889 1889 The default is to look `tip` up. Depending on the index implementation,
1890 1890 the revision looked up can matters. For example, an implementation
1891 1891 scanning the index will have a faster lookup time for `--rev tip` than for
1892 1892 `--rev 0`. The number of looked up revisions and their order can also
1893 1893 matters.
1894 1894
1895 1895 Examples of useful sets to test:
1896 1896
1897 1897 * tip
1898 1898 * 0
1899 1899 * -10:
1900 1900 * :10
1901 1901 * -10: + :10
1902 1902 * :10: + -10:
1903 1903 * -10000:
1904 1904 * -10000: + 0
1905 1905
1906 1906 It is not currently possible to check for lookup of a missing node. For
1907 1907 deeper lookup benchmarking, check out the `perfnodemap` command."""
1908 1908 import mercurial.revlog
1909 1909
1910 1910 opts = _byteskwargs(opts)
1911 1911 timer, fm = gettimer(ui, opts)
1912 1912 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1913 1913 if opts[b'no_lookup']:
1914 1914 if opts['rev']:
1915 1915 raise error.Abort('--no-lookup and --rev are mutually exclusive')
1916 1916 nodes = []
1917 1917 elif not opts[b'rev']:
1918 1918 nodes = [repo[b"tip"].node()]
1919 1919 else:
1920 1920 revs = scmutil.revrange(repo, opts[b'rev'])
1921 1921 cl = repo.changelog
1922 1922 nodes = [cl.node(r) for r in revs]
1923 1923
1924 1924 unfi = repo.unfiltered()
1925 1925 # find the filecache func directly
1926 1926 # This avoids polluting the benchmark with the filecache logic
1927 1927 makecl = unfi.__class__.changelog.func
1928 1928
1929 1929 def setup():
1930 1930 # probably not necessary, but for good measure
1931 1931 clearchangelog(unfi)
1932 1932
1933 1933 def d():
1934 1934 cl = makecl(unfi)
1935 1935 for n in nodes:
1936 1936 cl.rev(n)
1937 1937
1938 1938 timer(d, setup=setup)
1939 1939 fm.end()
1940 1940
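# Example invocations for perf::index (illustrative only), matching the
# revision sets suggested in the docstring above:
#
#   hg perf::index --rev tip
#   hg perf::index --rev '-10000:' --rev 0
#   hg perf::index --no-lookup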
1941 1941
1942 1942 @command(
1943 1943 b'perf::nodemap|perfnodemap',
1944 1944 [
1945 1945 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1946 1946 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1947 1947 ]
1948 1948 + formatteropts,
1949 1949 )
1950 1950 def perfnodemap(ui, repo, **opts):
1951 1951 """benchmark the time necessary to look up revision from a cold nodemap
1952 1952
1953 1953 Depending on the implementation, the amount and order of revisions we look
1954 1954 up can vary. Examples of useful sets to test:
1955 1955 * tip
1956 1956 * 0
1957 1957 * -10:
1958 1958 * :10
1959 1959 * -10: + :10
1960 1960 * :10: + -10:
1961 1961 * -10000:
1962 1962 * -10000: + 0
1963 1963
1964 1964 The command currently focuses on valid binary lookup. Benchmarking for
1965 1965 hexlookup, prefix lookup and missing lookup would also be valuable.
1966 1966 """
1967 1967 import mercurial.revlog
1968 1968
1969 1969 opts = _byteskwargs(opts)
1970 1970 timer, fm = gettimer(ui, opts)
1971 1971 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1972 1972
1973 1973 unfi = repo.unfiltered()
1974 1974 clearcaches = opts[b'clear_caches']
1975 1975 # find the filecache func directly
1976 1976 # This avoids polluting the benchmark with the filecache logic
1977 1977 makecl = unfi.__class__.changelog.func
1978 1978 if not opts[b'rev']:
1979 1979 raise error.Abort(b'use --rev to specify revisions to look up')
1980 1980 revs = scmutil.revrange(repo, opts[b'rev'])
1981 1981 cl = repo.changelog
1982 1982 nodes = [cl.node(r) for r in revs]
1983 1983
1984 1984 # use a list to pass reference to a nodemap from one closure to the next
1985 1985 nodeget = [None]
1986 1986
1987 1987 def setnodeget():
1988 1988 # probably not necessary, but for good measure
1989 1989 clearchangelog(unfi)
1990 1990 cl = makecl(unfi)
1991 1991 if util.safehasattr(cl.index, 'get_rev'):
1992 1992 nodeget[0] = cl.index.get_rev
1993 1993 else:
1994 1994 nodeget[0] = cl.nodemap.get
1995 1995
1996 1996 def d():
1997 1997 get = nodeget[0]
1998 1998 for n in nodes:
1999 1999 get(n)
2000 2000
2001 2001 setup = None
2002 2002 if clearcaches:
2003 2003
2004 2004 def setup():
2005 2005 setnodeget()
2006 2006
2007 2007 else:
2008 2008 setnodeget()
2009 2009 d() # prewarm the data structure
2010 2010 timer(d, setup=setup)
2011 2011 fm.end()
2012 2012
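def _example_mutable_cell_sketch():
    """Illustrative sketch only (hypothetical helper, not a perf command).

    perfnodemap above hands a fresh lookup function from its setup closure to
    its timed closure through a one-element list used as a mutable cell,
    avoiding `nonlocal` so the pattern stays valid on very old Pythons.
    """
    cell = [None]

    def setup():
        cell[0] = {b'node': 42}.get

    def run():
        return cell[0](b'node')

    setup()
    assert run() == 42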
2013 2013
2014 2014 @command(b'perf::startup|perfstartup', formatteropts)
2015 2015 def perfstartup(ui, repo, **opts):
2016 2016 opts = _byteskwargs(opts)
2017 2017 timer, fm = gettimer(ui, opts)
2018 2018
2019 2019 def d():
2020 2020 if os.name != 'nt':
2021 2021 os.system(
2022 2022 b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
2023 2023 )
2024 2024 else:
2025 2025 os.environ['HGRCPATH'] = r' '
2026 2026 os.system("%s version -q > NUL" % sys.argv[0])
2027 2027
2028 2028 timer(d)
2029 2029 fm.end()
2030 2030
2031 2031
2032 2032 def _find_stream_generator(version):
2033 2033 """find the proper generator function for this stream version"""
2034 2034 import mercurial.streamclone
2035 2035
2036 2036 available = {}
2037 2037
2038 2038 # try to fetch a v1 generator
2039 2039 generatev1 = getattr(mercurial.streamclone, "generatev1", None)
2040 2040 if generatev1 is not None:
2041 2041
2042 2042 def generate(repo):
2043 2043 entries, bytes, data = generatev1(repo, None, None, True)
2044 2044 return data
2045 2045
2046 2046 available[b'v1'] = generate
2047 2047 # try to fetch a v2 generator
2048 2048 generatev2 = getattr(mercurial.streamclone, "generatev2", None)
2049 2049 if generatev2 is not None:
2050 2050
2051 2051 def generate(repo):
2052 2052 entries, bytes, data = generatev2(repo, None, None, True)
2053 2053 return data
2054 2054
2055 2055 available[b'v2'] = generate
2056 2056 # try to fetch a v3 generator
2057 2057 generatev3 = getattr(mercurial.streamclone, "generatev3", None)
2058 2058 if generatev3 is not None:
2059 2059
2060 2060 def generate(repo):
2061 2061 return generatev3(repo, None, None, True)
2062 2062
2063 2063 available[b'v3-exp'] = generate
2064 2064
2065 2065 # resolve the request
2066 2066 if version == b"latest":
2067 2067 # latest is the highest non experimental version
2068 2068 latest_key = max(v for v in available if b'-exp' not in v)
2069 2069 return available[latest_key]
2070 2070 elif version in available:
2071 2071 return available[version]
2072 2072 else:
2073 2073 msg = b"unkown or unavailable version: %s"
2074 2074 msg %= version
2075 2075 hint = b"available versions: %s"
2076 2076 hint %= b', '.join(sorted(available))
2077 2077 raise error.Abort(msg, hint=hint)
2078 2078
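def _example_latest_version_sketch():
    """Illustrative sketch only (hypothetical helper).

    Demonstrates the `latest` resolution rule of _find_stream_generator
    above: the highest version key without an experimental `-exp` suffix
    wins.
    """
    available = {b'v1': None, b'v2': None, b'v3-exp': None}
    latest_key = max(v for v in available if b'-exp' not in v)
    assert latest_key == b'v2'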
2079 2079
2080 2080 @command(
2081 2081 b'perf::stream-locked-section',
2082 2082 [
2083 2083 (
2084 2084 b'',
2085 2085 b'stream-version',
2086 2086 b'latest',
2087 2087 b'stream version to use ("v1", "v2", "v3-exp" '
2088 2088 b'or "latest", (the default))',
2089 2089 ),
2090 2090 ]
2091 2091 + formatteropts,
2092 2092 )
2093 2093 def perf_stream_clone_scan(ui, repo, stream_version, **opts):
2094 2094 """benchmark the initial, repo-locked, section of a stream-clone"""
2095 2095
2096 2096 opts = _byteskwargs(opts)
2097 2097 timer, fm = gettimer(ui, opts)
2098 2098
2099 2099 # deletion of the generator may trigger some cleanup that we do not want to
2100 2100 # measure
2101 2101 result_holder = [None]
2102 2102
2103 2103 def setupone():
2104 2104 result_holder[0] = None
2105 2105
2106 2106 generate = _find_stream_generator(stream_version)
2107 2107
2108 2108 def runone():
2109 2109 # the lock is held for the duration of the initialisation
2110 2110 result_holder[0] = generate(repo)
2111 2111
2112 2112 timer(runone, setup=setupone, title=b"load")
2113 2113 fm.end()
2114 2114
2115 2115
2116 2116 @command(
2117 2117 b'perf::stream-generate',
2118 2118 [
2119 2119 (
2120 2120 b'',
2121 2121 b'stream-version',
2122 2122 b'latest',
2123 2123 b'stream version to use ("v1", "v2", "v3-exp" '
2124 2124 b'or "latest" (the default))',
2125 2125 ),
2126 2126 ]
2127 2127 + formatteropts,
2128 2128 )
2129 2129 def perf_stream_clone_generate(ui, repo, stream_version, **opts):
2130 2130 """benchmark the full generation of a stream clone"""
2131 2131
2132 2132 opts = _byteskwargs(opts)
2133 2133 timer, fm = gettimer(ui, opts)
2134 2134
2135 2135 # deletion of the generator may trigger some cleanup that we do not want to
2136 2136 # measure
2137 2137
2138 2138 generate = _find_stream_generator(stream_version)
2139 2139
2140 2140 def runone():
2141 2141 # the lock is held for the duration of the initialisation
2142 2142 for chunk in generate(repo):
2143 2143 pass
2144 2144
2145 2145 timer(runone, title=b"generate")
2146 2146 fm.end()
2147 2147
2148 2148
2149 2149 @command(
2150 2150 b'perf::stream-consume',
2151 2151 formatteropts,
2152 2152 )
2153 2153 def perf_stream_clone_consume(ui, repo, filename, **opts):
2154 2154 """benchmark the full application of a stream clone
2155 2155
2156 2156 This includes the creation of the repository
2157 2157 """
2158 2158 # try except to appease check code
2159 2159 msg = b"mercurial too old, missing necessary module: %s"
2160 2160 try:
2161 2161 from mercurial import bundle2
2162 2162 except ImportError as exc:
2163 2163 msg %= _bytestr(exc)
2164 2164 raise error.Abort(msg)
2165 2165 try:
2166 2166 from mercurial import exchange
2167 2167 except ImportError as exc:
2168 2168 msg %= _bytestr(exc)
2169 2169 raise error.Abort(msg)
2170 2170 try:
2171 2171 from mercurial import hg
2172 2172 except ImportError as exc:
2173 2173 msg %= _bytestr(exc)
2174 2174 raise error.Abort(msg)
2175 2175 try:
2176 2176 from mercurial import localrepo
2177 2177 except ImportError as exc:
2178 2178 msg %= _bytestr(exc)
2179 2179 raise error.Abort(msg)
2180 2180
2181 2181 opts = _byteskwargs(opts)
2182 2182 timer, fm = gettimer(ui, opts)
2183 2183
2184 2184 # make sure the bundle file exists and is readable before we start
2185 2185 # benchmarking
2186 2186 if not (os.path.isfile(filename) and os.access(filename, os.R_OK)):
2187 2187 raise error.Abort("not a readable file: %s" % filename)
2188 2188
2189 2189 run_variables = [None, None]
2190 2190
2191 2191 @contextlib.contextmanager
2192 2192 def context():
2193 2193 with open(filename, mode='rb') as bundle:
2194 2194 with tempfile.TemporaryDirectory() as tmp_dir:
2195 2195 tmp_dir = fsencode(tmp_dir)
2196 2196 run_variables[0] = bundle
2197 2197 run_variables[1] = tmp_dir
2198 2198 yield
2199 2199 run_variables[0] = None
2200 2200 run_variables[1] = None
2201 2201
2202 2202 def runone():
2203 2203 bundle = run_variables[0]
2204 2204 tmp_dir = run_variables[1]
2205
2206 # we actually want to copy all config to ensure the repo config is
2207 # taken into account during the benchmark
2208 new_ui = repo.ui.__class__(repo.ui)
2205 2209 # only pass ui when no srcrepo
2206 2210 localrepo.createrepository(
2207 repo.ui, tmp_dir, requirements=repo.requirements
2211 new_ui, tmp_dir, requirements=repo.requirements
2208 2212 )
2209 target = hg.repository(repo.ui, tmp_dir)
2213 target = hg.repository(new_ui, tmp_dir)
2210 2214 gen = exchange.readbundle(target.ui, bundle, bundle.name)
2211 2215 # stream v1
2212 2216 if util.safehasattr(gen, 'apply'):
2213 2217 gen.apply(target)
2214 2218 else:
2215 2219 with target.transaction(b"perf::stream-consume") as tr:
2216 2220 bundle2.applybundle(
2217 2221 target,
2218 2222 gen,
2219 2223 tr,
2220 2224 source=b'unbundle',
2221 2225 url=filename,
2222 2226 )
2223 2227
2224 2228 timer(runone, context=context, title=b"consume")
2225 2229 fm.end()
2226 2230
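def _example_ui_copy_sketch(src_ui):
    """Illustrative sketch only (hypothetical helper).

    Duplicating a ui by calling its class on an existing instance preserves
    the configuration; runone() above relies on this so the benchmarked
    stream application runs under the source repository's config instead of
    a pristine default. `src_ui` is assumed to be any mercurial ui instance.
    """
    return src_ui.__class__(src_ui)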
2227 2231
2228 2232 @command(b'perf::parents|perfparents', formatteropts)
2229 2233 def perfparents(ui, repo, **opts):
2230 2234 """benchmark the time necessary to fetch one changeset's parents.
2231 2235
2232 2236 The fetch is done using the `node identifier`, traversing all object layers
2233 2237 from the repository object. The first N revisions will be used for this
2234 2238 benchmark. N is controlled by the ``perf.parentscount`` config option
2235 2239 (default: 1000).
2236 2240 """
2237 2241 opts = _byteskwargs(opts)
2238 2242 timer, fm = gettimer(ui, opts)
2239 2243 # control the number of commits perfparents iterates over
2240 2244 # experimental config: perf.parentscount
2241 2245 count = getint(ui, b"perf", b"parentscount", 1000)
2242 2246 if len(repo.changelog) < count:
2243 2247 raise error.Abort(b"repo needs %d commits for this test" % count)
2244 2248 repo = repo.unfiltered()
2245 2249 nl = [repo.changelog.node(i) for i in _xrange(count)]
2246 2250
2247 2251 def d():
2248 2252 for n in nl:
2249 2253 repo.changelog.parents(n)
2250 2254
2251 2255 timer(d)
2252 2256 fm.end()
2253 2257
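# Example invocation for perf::parents (illustrative only); the revision
# count can be lowered for small repositories:
#
#   hg perf::parents --config perf.parentscount=250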
2254 2258
2255 2259 @command(b'perf::ctxfiles|perfctxfiles', formatteropts)
2256 2260 def perfctxfiles(ui, repo, x, **opts):
2257 2261 opts = _byteskwargs(opts)
2258 2262 x = int(x)
2259 2263 timer, fm = gettimer(ui, opts)
2260 2264
2261 2265 def d():
2262 2266 len(repo[x].files())
2263 2267
2264 2268 timer(d)
2265 2269 fm.end()
2266 2270
2267 2271
2268 2272 @command(b'perf::rawfiles|perfrawfiles', formatteropts)
2269 2273 def perfrawfiles(ui, repo, x, **opts):
2270 2274 opts = _byteskwargs(opts)
2271 2275 x = int(x)
2272 2276 timer, fm = gettimer(ui, opts)
2273 2277 cl = repo.changelog
2274 2278
2275 2279 def d():
2276 2280 len(cl.read(x)[3])
2277 2281
2278 2282 timer(d)
2279 2283 fm.end()
2280 2284
2281 2285
2282 2286 @command(b'perf::lookup|perflookup', formatteropts)
2283 2287 def perflookup(ui, repo, rev, **opts):
2284 2288 opts = _byteskwargs(opts)
2285 2289 timer, fm = gettimer(ui, opts)
2286 2290 timer(lambda: len(repo.lookup(rev)))
2287 2291 fm.end()
2288 2292
2289 2293
2290 2294 @command(
2291 2295 b'perf::linelogedits|perflinelogedits',
2292 2296 [
2293 2297 (b'n', b'edits', 10000, b'number of edits'),
2294 2298 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
2295 2299 ],
2296 2300 norepo=True,
2297 2301 )
2298 2302 def perflinelogedits(ui, **opts):
2299 2303 from mercurial import linelog
2300 2304
2301 2305 opts = _byteskwargs(opts)
2302 2306
2303 2307 edits = opts[b'edits']
2304 2308 maxhunklines = opts[b'max_hunk_lines']
2305 2309
2306 2310 maxb1 = 100000
2307 2311 random.seed(0)
2308 2312 randint = random.randint
2309 2313 currentlines = 0
2310 2314 arglist = []
2311 2315 for rev in _xrange(edits):
2312 2316 a1 = randint(0, currentlines)
2313 2317 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
2314 2318 b1 = randint(0, maxb1)
2315 2319 b2 = randint(b1, b1 + maxhunklines)
2316 2320 currentlines += (b2 - b1) - (a2 - a1)
2317 2321 arglist.append((rev, a1, a2, b1, b2))
2318 2322
2319 2323 def d():
2320 2324 ll = linelog.linelog()
2321 2325 for args in arglist:
2322 2326 ll.replacelines(*args)
2323 2327
2324 2328 timer, fm = gettimer(ui, opts)
2325 2329 timer(d)
2326 2330 fm.end()
2327 2331
2328 2332
2329 2333 @command(b'perf::revrange|perfrevrange', formatteropts)
2330 2334 def perfrevrange(ui, repo, *specs, **opts):
2331 2335 opts = _byteskwargs(opts)
2332 2336 timer, fm = gettimer(ui, opts)
2333 2337 revrange = scmutil.revrange
2334 2338 timer(lambda: len(revrange(repo, specs)))
2335 2339 fm.end()
2336 2340
2337 2341
2338 2342 @command(b'perf::nodelookup|perfnodelookup', formatteropts)
2339 2343 def perfnodelookup(ui, repo, rev, **opts):
2340 2344 opts = _byteskwargs(opts)
2341 2345 timer, fm = gettimer(ui, opts)
2342 2346 import mercurial.revlog
2343 2347
2344 2348 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
2345 2349 n = scmutil.revsingle(repo, rev).node()
2346 2350
2347 2351 try:
2348 2352 cl = revlog(getsvfs(repo), radix=b"00changelog")
2349 2353 except TypeError:
2350 2354 cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")
2351 2355
2352 2356 def d():
2353 2357 cl.rev(n)
2354 2358 clearcaches(cl)
2355 2359
2356 2360 timer(d)
2357 2361 fm.end()
2358 2362
2359 2363
2360 2364 @command(
2361 2365 b'perf::log|perflog',
2362 2366 [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
2363 2367 )
2364 2368 def perflog(ui, repo, rev=None, **opts):
2365 2369 opts = _byteskwargs(opts)
2366 2370 if rev is None:
2367 2371 rev = []
2368 2372 timer, fm = gettimer(ui, opts)
2369 2373 ui.pushbuffer()
2370 2374 timer(
2371 2375 lambda: commands.log(
2372 2376 ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
2373 2377 )
2374 2378 )
2375 2379 ui.popbuffer()
2376 2380 fm.end()
2377 2381
2378 2382
2379 2383 @command(b'perf::moonwalk|perfmoonwalk', formatteropts)
2380 2384 def perfmoonwalk(ui, repo, **opts):
2381 2385 """benchmark walking the changelog backwards
2382 2386
2383 2387 This also loads the changelog data for each revision in the changelog.
2384 2388 """
2385 2389 opts = _byteskwargs(opts)
2386 2390 timer, fm = gettimer(ui, opts)
2387 2391
2388 2392 def moonwalk():
2389 2393 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
2390 2394 ctx = repo[i]
2391 2395 ctx.branch() # read changelog data (in addition to the index)
2392 2396
2393 2397 timer(moonwalk)
2394 2398 fm.end()
2395 2399
2396 2400
2397 2401 @command(
2398 2402 b'perf::templating|perftemplating',
2399 2403 [
2400 2404 (b'r', b'rev', [], b'revisions to run the template on'),
2401 2405 ]
2402 2406 + formatteropts,
2403 2407 )
2404 2408 def perftemplating(ui, repo, testedtemplate=None, **opts):
2405 2409 """test the rendering time of a given template"""
2406 2410 if makelogtemplater is None:
2407 2411 raise error.Abort(
2408 2412 b"perftemplating not available with this Mercurial",
2409 2413 hint=b"use 4.3 or later",
2410 2414 )
2411 2415
2412 2416 opts = _byteskwargs(opts)
2413 2417
2414 2418 nullui = ui.copy()
2415 2419 nullui.fout = open(os.devnull, 'wb')
2416 2420 nullui.disablepager()
2417 2421 revs = opts.get(b'rev')
2418 2422 if not revs:
2419 2423 revs = [b'all()']
2420 2424 revs = list(scmutil.revrange(repo, revs))
2421 2425
2422 2426 defaulttemplate = (
2423 2427 b'{date|shortdate} [{rev}:{node|short}]'
2424 2428 b' {author|person}: {desc|firstline}\n'
2425 2429 )
2426 2430 if testedtemplate is None:
2427 2431 testedtemplate = defaulttemplate
2428 2432 displayer = makelogtemplater(nullui, repo, testedtemplate)
2429 2433
2430 2434 def format():
2431 2435 for r in revs:
2432 2436 ctx = repo[r]
2433 2437 displayer.show(ctx)
2434 2438 displayer.flush(ctx)
2435 2439
2436 2440 timer, fm = gettimer(ui, opts)
2437 2441 timer(format)
2438 2442 fm.end()
2439 2443
2440 2444
2441 2445 def _displaystats(ui, opts, entries, data):
2442 2446 # use a second formatter because the data are quite different, not sure
2443 2447 # how it flies with the templater.
2444 2448 fm = ui.formatter(b'perf-stats', opts)
2445 2449 for key, title in entries:
2446 2450 values = data[key]
2447 2451 nbvalues = len(values)
2448 2452 values.sort()
2449 2453 stats = {
2450 2454 'key': key,
2451 2455 'title': title,
2452 2456 'nbitems': len(values),
2453 2457 'min': values[0][0],
2454 2458 '10%': values[(nbvalues * 10) // 100][0],
2455 2459 '25%': values[(nbvalues * 25) // 100][0],
2456 2460 '50%': values[(nbvalues * 50) // 100][0],
2457 2461 '75%': values[(nbvalues * 75) // 100][0],
2458 2462 '80%': values[(nbvalues * 80) // 100][0],
2459 2463 '85%': values[(nbvalues * 85) // 100][0],
2460 2464 '90%': values[(nbvalues * 90) // 100][0],
2461 2465 '95%': values[(nbvalues * 95) // 100][0],
2462 2466 '99%': values[(nbvalues * 99) // 100][0],
2463 2467 'max': values[-1][0],
2464 2468 }
2465 2469 fm.startitem()
2466 2470 fm.data(**stats)
2467 2471 # make node pretty for the human output
2468 2472 fm.plain('### %s (%d items)\n' % (title, len(values)))
2469 2473 lines = [
2470 2474 'min',
2471 2475 '10%',
2472 2476 '25%',
2473 2477 '50%',
2474 2478 '75%',
2475 2479 '80%',
2476 2480 '85%',
2477 2481 '90%',
2478 2482 '95%',
2479 2483 '99%',
2480 2484 'max',
2481 2485 ]
2482 2486 for l in lines:
2483 2487 fm.plain('%s: %s\n' % (l, stats[l]))
2484 2488 fm.end()
2485 2489
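def _example_percentile_sketch():
    """Illustrative sketch only (hypothetical helper).

    _displaystats above uses nearest-rank percentiles over a sorted list:
    with 200 entries, the "90%" row reads values[(200 * 90) // 100], i.e.
    values[180], and "max" always reads values[-1]. No interpolation is
    performed.
    """
    values = sorted((v, b'item') for v in range(200))
    nbvalues = len(values)
    assert values[(nbvalues * 90) // 100][0] == 180
    assert values[-1][0] == 199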
2486 2490
2487 2491 @command(
2488 2492 b'perf::helper-mergecopies|perfhelper-mergecopies',
2489 2493 formatteropts
2490 2494 + [
2491 2495 (b'r', b'revs', [], b'restrict search to these revisions'),
2492 2496 (b'', b'timing', False, b'provides extra data (costly)'),
2493 2497 (b'', b'stats', False, b'provides statistic about the measured data'),
2494 2498 ],
2495 2499 )
2496 2500 def perfhelpermergecopies(ui, repo, revs=[], **opts):
2497 2501 """find statistics about potential parameters for `perfmergecopies`
2498 2502
2499 2503 This command finds (base, p1, p2) triplets relevant for copytracing
2500 2504 benchmarking in the context of a merge. It reports values for some of the
2501 2505 parameters that impact merge copy tracing time during merge.
2502 2506
2503 2507 If `--timing` is set, rename detection is run and the associated timing
2504 2508 will be reported. The extra details come at the cost of slower command
2505 2509 execution.
2506 2510
2507 2511 Since rename detection is only run once, other factors might easily
2508 2512 affect the precision of the timing. However, it should give a good
2509 2513 approximation of which revision triplets are very costly.
2510 2514 """
2511 2515 opts = _byteskwargs(opts)
2512 2516 fm = ui.formatter(b'perf', opts)
2513 2517 dotiming = opts[b'timing']
2514 2518 dostats = opts[b'stats']
2515 2519
2516 2520 output_template = [
2517 2521 ("base", "%(base)12s"),
2518 2522 ("p1", "%(p1.node)12s"),
2519 2523 ("p2", "%(p2.node)12s"),
2520 2524 ("p1.nb-revs", "%(p1.nbrevs)12d"),
2521 2525 ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
2522 2526 ("p1.renames", "%(p1.renamedfiles)12d"),
2523 2527 ("p1.time", "%(p1.time)12.3f"),
2524 2528 ("p2.nb-revs", "%(p2.nbrevs)12d"),
2525 2529 ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
2526 2530 ("p2.renames", "%(p2.renamedfiles)12d"),
2527 2531 ("p2.time", "%(p2.time)12.3f"),
2528 2532 ("renames", "%(nbrenamedfiles)12d"),
2529 2533 ("total.time", "%(time)12.3f"),
2530 2534 ]
2531 2535 if not dotiming:
2532 2536 output_template = [
2533 2537 i
2534 2538 for i in output_template
2535 2539 if not ('time' in i[0] or 'renames' in i[0])
2536 2540 ]
2537 2541 header_names = [h for (h, v) in output_template]
2538 2542 output = ' '.join([v for (h, v) in output_template]) + '\n'
2539 2543 header = ' '.join(['%12s'] * len(header_names)) + '\n'
2540 2544 fm.plain(header % tuple(header_names))
2541 2545
2542 2546 if not revs:
2543 2547 revs = ['all()']
2544 2548 revs = scmutil.revrange(repo, revs)
2545 2549
2546 2550 if dostats:
2547 2551 alldata = {
2548 2552 'nbrevs': [],
2549 2553 'nbmissingfiles': [],
2550 2554 }
2551 2555 if dotiming:
2552 2556 alldata['parentnbrenames'] = []
2553 2557 alldata['totalnbrenames'] = []
2554 2558 alldata['parenttime'] = []
2555 2559 alldata['totaltime'] = []
2556 2560
2557 2561 roi = repo.revs('merge() and %ld', revs)
2558 2562 for r in roi:
2559 2563 ctx = repo[r]
2560 2564 p1 = ctx.p1()
2561 2565 p2 = ctx.p2()
2562 2566 bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
2563 2567 for b in bases:
2564 2568 b = repo[b]
2565 2569 p1missing = copies._computeforwardmissing(b, p1)
2566 2570 p2missing = copies._computeforwardmissing(b, p2)
2567 2571 data = {
2568 2572 b'base': b.hex(),
2569 2573 b'p1.node': p1.hex(),
2570 2574 b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
2571 2575 b'p1.nbmissingfiles': len(p1missing),
2572 2576 b'p2.node': p2.hex(),
2573 2577 b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
2574 2578 b'p2.nbmissingfiles': len(p2missing),
2575 2579 }
2576 2580 if dostats:
2577 2581 if p1missing:
2578 2582 alldata['nbrevs'].append(
2579 2583 (data['p1.nbrevs'], b.hex(), p1.hex())
2580 2584 )
2581 2585 alldata['nbmissingfiles'].append(
2582 2586 (data['p1.nbmissingfiles'], b.hex(), p1.hex())
2583 2587 )
2584 2588 if p2missing:
2585 2589 alldata['nbrevs'].append(
2586 2590 (data['p2.nbrevs'], b.hex(), p2.hex())
2587 2591 )
2588 2592 alldata['nbmissingfiles'].append(
2589 2593 (data['p2.nbmissingfiles'], b.hex(), p2.hex())
2590 2594 )
2591 2595 if dotiming:
2592 2596 begin = util.timer()
2593 2597 mergedata = copies.mergecopies(repo, p1, p2, b)
2594 2598 end = util.timer()
2595 2599 # not very stable timing since we did only one run
2596 2600 data['time'] = end - begin
2597 2601 # mergedata contains five dicts: "copy", "movewithdir",
2598 2602 # "diverge", "renamedelete" and "dirmove".
2599 2603 # The first 4 are about renamed files so let's count those.
2600 2604 renames = len(mergedata[0])
2601 2605 renames += len(mergedata[1])
2602 2606 renames += len(mergedata[2])
2603 2607 renames += len(mergedata[3])
2604 2608 data['nbrenamedfiles'] = renames
2605 2609 begin = util.timer()
2606 2610 p1renames = copies.pathcopies(b, p1)
2607 2611 end = util.timer()
2608 2612 data['p1.time'] = end - begin
2609 2613 begin = util.timer()
2610 2614 p2renames = copies.pathcopies(b, p2)
2611 2615 end = util.timer()
2612 2616 data['p2.time'] = end - begin
2613 2617 data['p1.renamedfiles'] = len(p1renames)
2614 2618 data['p2.renamedfiles'] = len(p2renames)
2615 2619
2616 2620 if dostats:
2617 2621 if p1missing:
2618 2622 alldata['parentnbrenames'].append(
2619 2623 (data['p1.renamedfiles'], b.hex(), p1.hex())
2620 2624 )
2621 2625 alldata['parenttime'].append(
2622 2626 (data['p1.time'], b.hex(), p1.hex())
2623 2627 )
2624 2628 if p2missing:
2625 2629 alldata['parentnbrenames'].append(
2626 2630 (data['p2.renamedfiles'], b.hex(), p2.hex())
2627 2631 )
2628 2632 alldata['parenttime'].append(
2629 2633 (data['p2.time'], b.hex(), p2.hex())
2630 2634 )
2631 2635 if p1missing or p2missing:
2632 2636 alldata['totalnbrenames'].append(
2633 2637 (
2634 2638 data['nbrenamedfiles'],
2635 2639 b.hex(),
2636 2640 p1.hex(),
2637 2641 p2.hex(),
2638 2642 )
2639 2643 )
2640 2644 alldata['totaltime'].append(
2641 2645 (data['time'], b.hex(), p1.hex(), p2.hex())
2642 2646 )
2643 2647 fm.startitem()
2644 2648 fm.data(**data)
2645 2649 # make node pretty for the human output
2646 2650 out = data.copy()
2647 2651 out['base'] = fm.hexfunc(b.node())
2648 2652 out['p1.node'] = fm.hexfunc(p1.node())
2649 2653 out['p2.node'] = fm.hexfunc(p2.node())
2650 2654 fm.plain(output % out)
2651 2655
2652 2656 fm.end()
2653 2657 if dostats:
2654 2658 # use a second formatter because the data are quite different, not sure
2655 2659 # how it flies with the templater.
2656 2660 entries = [
2657 2661 ('nbrevs', 'number of revisions covered'),
2658 2662 ('nbmissingfiles', 'number of missing files at head'),
2659 2663 ]
2660 2664 if dotiming:
2661 2665 entries.append(
2662 2666 ('parentnbrenames', 'rename from one parent to base')
2663 2667 )
2664 2668 entries.append(('totalnbrenames', 'total number of renames'))
2665 2669 entries.append(('parenttime', 'time for one parent'))
2666 2670 entries.append(('totaltime', 'time for both parents'))
2667 2671 _displaystats(ui, opts, entries, alldata)
2668 2672
2669 2673
2670 2674 @command(
2671 2675 b'perf::helper-pathcopies|perfhelper-pathcopies',
2672 2676 formatteropts
2673 2677 + [
2674 2678 (b'r', b'revs', [], b'restrict search to these revisions'),
2675 2679 (b'', b'timing', False, b'provides extra data (costly)'),
2676 2680 (b'', b'stats', False, b'provides statistic about the measured data'),
2677 2681 ],
2678 2682 )
2679 2683 def perfhelperpathcopies(ui, repo, revs=[], **opts):
2680 2684 """find statistic about potential parameters for the `perftracecopies`
2681 2685
2682 2686 This command finds source-destination pairs relevant for copytracing testing.
2683 2687 It reports values for some of the parameters that impact copy tracing time.
2684 2688
2685 2689 If `--timing` is set, rename detection is run and the associated timing
2686 2690 will be reported. The extra details come at the cost of a slower command
2687 2691 execution.
2688 2692
2689 2693 Since the rename detection is only run once, other factors might easily
2690 2694 affect the precision of the timing. However, it should give a good
2691 2695 approximation of which revision pairs are very costly.
2692 2696 """
2693 2697 opts = _byteskwargs(opts)
2694 2698 fm = ui.formatter(b'perf', opts)
2695 2699 dotiming = opts[b'timing']
2696 2700 dostats = opts[b'stats']
2697 2701
2698 2702 if dotiming:
2699 2703 header = '%12s %12s %12s %12s %12s %12s\n'
2700 2704 output = (
2701 2705 "%(source)12s %(destination)12s "
2702 2706 "%(nbrevs)12d %(nbmissingfiles)12d "
2703 2707 "%(nbrenamedfiles)12d %(time)18.5f\n"
2704 2708 )
2705 2709 header_names = (
2706 2710 "source",
2707 2711 "destination",
2708 2712 "nb-revs",
2709 2713 "nb-files",
2710 2714 "nb-renames",
2711 2715 "time",
2712 2716 )
2713 2717 fm.plain(header % header_names)
2714 2718 else:
2715 2719 header = '%12s %12s %12s %12s\n'
2716 2720 output = (
2717 2721 "%(source)12s %(destination)12s "
2718 2722 "%(nbrevs)12d %(nbmissingfiles)12d\n"
2719 2723 )
2720 2724 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
2721 2725
2722 2726 if not revs:
2723 2727 revs = ['all()']
2724 2728 revs = scmutil.revrange(repo, revs)
2725 2729
2726 2730 if dostats:
2727 2731 alldata = {
2728 2732 'nbrevs': [],
2729 2733 'nbmissingfiles': [],
2730 2734 }
2731 2735 if dotiming:
2732 2736 alldata['nbrenames'] = []
2733 2737 alldata['time'] = []
2734 2738
2735 2739 roi = repo.revs('merge() and %ld', revs)
2736 2740 for r in roi:
2737 2741 ctx = repo[r]
2738 2742 p1 = ctx.p1().rev()
2739 2743 p2 = ctx.p2().rev()
2740 2744 bases = repo.changelog._commonancestorsheads(p1, p2)
2741 2745 for p in (p1, p2):
2742 2746 for b in bases:
2743 2747 base = repo[b]
2744 2748 parent = repo[p]
2745 2749 missing = copies._computeforwardmissing(base, parent)
2746 2750 if not missing:
2747 2751 continue
2748 2752 data = {
2749 2753 b'source': base.hex(),
2750 2754 b'destination': parent.hex(),
2751 2755 b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
2752 2756 b'nbmissingfiles': len(missing),
2753 2757 }
2754 2758 if dostats:
2755 2759 alldata['nbrevs'].append(
2756 2760 (
2757 2761 data['nbrevs'],
2758 2762 base.hex(),
2759 2763 parent.hex(),
2760 2764 )
2761 2765 )
2762 2766 alldata['nbmissingfiles'].append(
2763 2767 (
2764 2768 data['nbmissingfiles'],
2765 2769 base.hex(),
2766 2770 parent.hex(),
2767 2771 )
2768 2772 )
2769 2773 if dotiming:
2770 2774 begin = util.timer()
2771 2775 renames = copies.pathcopies(base, parent)
2772 2776 end = util.timer()
2773 2777 # not very stable timing since we did only one run
2774 2778 data['time'] = end - begin
2775 2779 data['nbrenamedfiles'] = len(renames)
2776 2780 if dostats:
2777 2781 alldata['time'].append(
2778 2782 (
2779 2783 data['time'],
2780 2784 base.hex(),
2781 2785 parent.hex(),
2782 2786 )
2783 2787 )
2784 2788 alldata['nbrenames'].append(
2785 2789 (
2786 2790 data['nbrenamedfiles'],
2787 2791 base.hex(),
2788 2792 parent.hex(),
2789 2793 )
2790 2794 )
2791 2795 fm.startitem()
2792 2796 fm.data(**data)
2793 2797 out = data.copy()
2794 2798 out['source'] = fm.hexfunc(base.node())
2795 2799 out['destination'] = fm.hexfunc(parent.node())
2796 2800 fm.plain(output % out)
2797 2801
2798 2802 fm.end()
2799 2803 if dostats:
2800 2804 entries = [
2801 2805 ('nbrevs', 'number of revisions covered'),
2802 2806 ('nbmissingfiles', 'number of missing files at head'),
2803 2807 ]
2804 2808 if dotiming:
2805 2809 entries.append(('nbrenames', 'renamed files'))
2806 2810 entries.append(('time', 'time'))
2807 2811 _displaystats(ui, opts, entries, alldata)
2808 2812
2809 2813
2810 2814 @command(b'perf::cca|perfcca', formatteropts)
2811 2815 def perfcca(ui, repo, **opts):
2812 2816 opts = _byteskwargs(opts)
2813 2817 timer, fm = gettimer(ui, opts)
2814 2818 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
2815 2819 fm.end()
2816 2820
2817 2821
2818 2822 @command(b'perf::fncacheload|perffncacheload', formatteropts)
2819 2823 def perffncacheload(ui, repo, **opts):
2820 2824 opts = _byteskwargs(opts)
2821 2825 timer, fm = gettimer(ui, opts)
2822 2826 s = repo.store
2823 2827
2824 2828 def d():
2825 2829 s.fncache._load()
2826 2830
2827 2831 timer(d)
2828 2832 fm.end()
2829 2833
2830 2834
2831 2835 @command(b'perf::fncachewrite|perffncachewrite', formatteropts)
2832 2836 def perffncachewrite(ui, repo, **opts):
2833 2837 opts = _byteskwargs(opts)
2834 2838 timer, fm = gettimer(ui, opts)
2835 2839 s = repo.store
2836 2840 lock = repo.lock()
2837 2841 s.fncache._load()
2838 2842 tr = repo.transaction(b'perffncachewrite')
2839 2843 tr.addbackup(b'fncache')
2840 2844
2841 2845 def d():
2842 2846 s.fncache._dirty = True
2843 2847 s.fncache.write(tr)
2844 2848
2845 2849 timer(d)
2846 2850 tr.close()
2847 2851 lock.release()
2848 2852 fm.end()
2849 2853
2850 2854
2851 2855 @command(b'perf::fncacheencode|perffncacheencode', formatteropts)
2852 2856 def perffncacheencode(ui, repo, **opts):
2853 2857 opts = _byteskwargs(opts)
2854 2858 timer, fm = gettimer(ui, opts)
2855 2859 s = repo.store
2856 2860 s.fncache._load()
2857 2861
2858 2862 def d():
2859 2863 for p in s.fncache.entries:
2860 2864 s.encode(p)
2861 2865
2862 2866 timer(d)
2863 2867 fm.end()
2864 2868
2865 2869
2866 2870 def _bdiffworker(q, blocks, xdiff, ready, done):
2867 2871 while not done.is_set():
2868 2872 pair = q.get()
2869 2873 while pair is not None:
2870 2874 if xdiff:
2871 2875 mdiff.bdiff.xdiffblocks(*pair)
2872 2876 elif blocks:
2873 2877 mdiff.bdiff.blocks(*pair)
2874 2878 else:
2875 2879 mdiff.textdiff(*pair)
2876 2880 q.task_done()
2877 2881 pair = q.get()
2878 2882 q.task_done() # for the None one
2879 2883 with ready:
2880 2884 ready.wait()
2881 2885
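def _example_worker_queue_sketch():
    """Illustrative sketch only (hypothetical helper, single-threaded).

    _bdiffworker above drains pairs from a queue until it sees a None
    sentinel, calling task_done() for every item including the sentinel, so
    that q.join() in the timed closure blocks until a round is complete.
    """
    import queue as _queue

    q = _queue.Queue()
    for item in [(b'a', b'b'), (b'c', b'd'), None]:
        q.put(item)
    seen = []
    pair = q.get()
    while pair is not None:
        seen.append(pair)
        q.task_done()
        pair = q.get()
    q.task_done()  # account for the sentinel as well
    assert seen == [(b'a', b'b'), (b'c', b'd')]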
2882 2886
2883 2887 def _manifestrevision(repo, mnode):
2884 2888 ml = repo.manifestlog
2885 2889
2886 2890 if util.safehasattr(ml, b'getstorage'):
2887 2891 store = ml.getstorage(b'')
2888 2892 else:
2889 2893 store = ml._revlog
2890 2894
2891 2895 return store.revision(mnode)
2892 2896
2893 2897
2894 2898 @command(
2895 2899 b'perf::bdiff|perfbdiff',
2896 2900 revlogopts
2897 2901 + formatteropts
2898 2902 + [
2899 2903 (
2900 2904 b'',
2901 2905 b'count',
2902 2906 1,
2903 2907 b'number of revisions to test (when using --startrev)',
2904 2908 ),
2905 2909 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
2906 2910 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
2907 2911 (b'', b'blocks', False, b'test computing diffs into blocks'),
2908 2912 (b'', b'xdiff', False, b'use xdiff algorithm'),
2909 2913 ],
2910 2914 b'-c|-m|FILE REV',
2911 2915 )
2912 2916 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
2913 2917 """benchmark a bdiff between revisions
2914 2918
2915 2919 By default, benchmark a bdiff between its delta parent and itself.
2916 2920
2917 2921 With ``--count``, benchmark bdiffs between delta parents and self for N
2918 2922 revisions starting at the specified revision.
2919 2923
2920 2924 With ``--alldata``, assume the requested revision is a changeset and
2921 2925 measure bdiffs for all changes related to that changeset (manifest
2922 2926 and filelogs).
2923 2927 """
2924 2928 opts = _byteskwargs(opts)
2925 2929
2926 2930 if opts[b'xdiff'] and not opts[b'blocks']:
2927 2931 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
2928 2932
2929 2933 if opts[b'alldata']:
2930 2934 opts[b'changelog'] = True
2931 2935
2932 2936 if opts.get(b'changelog') or opts.get(b'manifest'):
2933 2937 file_, rev = None, file_
2934 2938 elif rev is None:
2935 2939 raise error.CommandError(b'perfbdiff', b'invalid arguments')
2936 2940
2937 2941 blocks = opts[b'blocks']
2938 2942 xdiff = opts[b'xdiff']
2939 2943 textpairs = []
2940 2944
2941 2945 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
2942 2946
2943 2947 startrev = r.rev(r.lookup(rev))
2944 2948 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2945 2949 if opts[b'alldata']:
2946 2950 # Load revisions associated with changeset.
2947 2951 ctx = repo[rev]
2948 2952 mtext = _manifestrevision(repo, ctx.manifestnode())
2949 2953 for pctx in ctx.parents():
2950 2954 pman = _manifestrevision(repo, pctx.manifestnode())
2951 2955 textpairs.append((pman, mtext))
2952 2956
2953 2957 # Load filelog revisions by iterating manifest delta.
2954 2958 man = ctx.manifest()
2955 2959 pman = ctx.p1().manifest()
2956 2960 for filename, change in pman.diff(man).items():
2957 2961 fctx = repo.file(filename)
2958 2962 f1 = fctx.revision(change[0][0] or -1)
2959 2963 f2 = fctx.revision(change[1][0] or -1)
2960 2964 textpairs.append((f1, f2))
2961 2965 else:
2962 2966 dp = r.deltaparent(rev)
2963 2967 textpairs.append((r.revision(dp), r.revision(rev)))
2964 2968
2965 2969 withthreads = threads > 0
2966 2970 if not withthreads:
2967 2971
2968 2972 def d():
2969 2973 for pair in textpairs:
2970 2974 if xdiff:
2971 2975 mdiff.bdiff.xdiffblocks(*pair)
2972 2976 elif blocks:
2973 2977 mdiff.bdiff.blocks(*pair)
2974 2978 else:
2975 2979 mdiff.textdiff(*pair)
2976 2980
2977 2981 else:
2978 2982 q = queue()
2979 2983 for i in _xrange(threads):
2980 2984 q.put(None)
2981 2985 ready = threading.Condition()
2982 2986 done = threading.Event()
2983 2987 for i in _xrange(threads):
2984 2988 threading.Thread(
2985 2989 target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
2986 2990 ).start()
2987 2991 q.join()
2988 2992
2989 2993 def d():
2990 2994 for pair in textpairs:
2991 2995 q.put(pair)
2992 2996 for i in _xrange(threads):
2993 2997 q.put(None)
2994 2998 with ready:
2995 2999 ready.notify_all()
2996 3000 q.join()
2997 3001
2998 3002 timer, fm = gettimer(ui, opts)
2999 3003 timer(d)
3000 3004 fm.end()
3001 3005
3002 3006 if withthreads:
3003 3007 done.set()
3004 3008 for i in _xrange(threads):
3005 3009 q.put(None)
3006 3010 with ready:
3007 3011 ready.notify_all()
3008 3012
3009 3013
3010 3014 @command(
3011 3015 b'perf::unbundle',
3012 3016 [
3013 3017 (b'', b'as-push', None, b'pretend the bundle comes from a push'),
3014 3018 ]
3015 3019 + formatteropts,
3016 3020 b'BUNDLE_FILE',
3017 3021 )
3018 3022 def perf_unbundle(ui, repo, fname, **opts):
3019 3023 """benchmark application of a bundle in a repository.
3020 3024
3021 3025 This does not include the final transaction processing.
3022 3026
3023 3027 The --as-push option makes the unbundle operation appear as if it comes from
3024 3028 a client push. It changes some aspects of the processing and the associated
3025 3029 performance profile.
3026 3030 """
3027 3031
3028 3032 from mercurial import exchange
3029 3033 from mercurial import bundle2
3030 3034 from mercurial import transaction
3031 3035
3032 3036 opts = _byteskwargs(opts)
3033 3037
3034 3038 ### some compatibility hotfix
3035 3039 #
3036 3040 # the data attribute is dropped in 63edc384d3b7, a changeset introducing a
3037 3041 # critical regression that breaks transaction rollback for files that are
3038 3042 # de-inlined.
3039 3043 method = transaction.transaction._addentry
3040 3044 pre_63edc384d3b7 = "data" in getargspec(method).args
3041 3045 # the `detailed_exit_code` attribute is introduced in 33c0c25d0b0f
3042 3046 # a changeset that is a close descendant of 18415fc918a1, the changeset
3043 3047 # that concluded the fix run for the bug introduced in 63edc384d3b7.
3044 3048 args = getargspec(error.Abort.__init__).args
3045 3049 post_18415fc918a1 = "detailed_exit_code" in args
3046 3050
3047 3051 unbundle_source = b'perf::unbundle'
3048 3052 if opts[b'as_push']:
3049 3053 unbundle_source = b'push'
3050 3054
3051 3055 old_max_inline = None
3052 3056 try:
3053 3057 if not (pre_63edc384d3b7 or post_18415fc918a1):
3054 3058 # disable inlining
3055 3059 old_max_inline = mercurial.revlog._maxinline
3056 3060 # large enough to never happen
3057 3061 mercurial.revlog._maxinline = 2 ** 50
3058 3062
3059 3063 with repo.lock():
3060 3064 bundle = [None, None]
3061 3065 orig_quiet = repo.ui.quiet
3062 3066 try:
3063 3067 repo.ui.quiet = True
3064 3068 with open(fname, mode="rb") as f:
3065 3069
3066 3070 def noop_report(*args, **kwargs):
3067 3071 pass
3068 3072
3069 3073 def setup():
3070 3074 gen, tr = bundle
3071 3075 if tr is not None:
3072 3076 tr.abort()
3073 3077 bundle[:] = [None, None]
3074 3078 f.seek(0)
3075 3079 bundle[0] = exchange.readbundle(ui, f, fname)
3076 3080 bundle[1] = repo.transaction(b'perf::unbundle')
3077 3081 # silence the transaction
3078 3082 bundle[1]._report = noop_report
3079 3083
3080 3084 def apply():
3081 3085 gen, tr = bundle
3082 3086 bundle2.applybundle(
3083 3087 repo,
3084 3088 gen,
3085 3089 tr,
3086 3090 source=unbundle_source,
3087 3091 url=fname,
3088 3092 )
3089 3093
3090 3094 timer, fm = gettimer(ui, opts)
3091 3095 timer(apply, setup=setup)
3092 3096 fm.end()
3093 3097 finally:
3094 3098 repo.ui.quiet = orig_quiet
3095 3099 gen, tr = bundle
3096 3100 if tr is not None:
3097 3101 tr.abort()
3098 3102 finally:
3099 3103 if old_max_inline is not None:
3100 3104 mercurial.revlog._maxinline = old_max_inline
3101 3105
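def _example_signature_probe_sketch():
    """Illustrative sketch only (hypothetical helper).

    perf::unbundle above detects which Mercurial era it runs against by
    probing function signatures instead of comparing version numbers, using
    the getargspec compatibility helper this file already relies on. The
    same technique in miniature:
    """

    def newapi(x, detailed_exit_code=None):
        return x

    assert "detailed_exit_code" in getargspec(newapi).args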
3102 3106
3103 3107 @command(
3104 3108 b'perf::unidiff|perfunidiff',
3105 3109 revlogopts
3106 3110 + formatteropts
3107 3111 + [
3108 3112 (
3109 3113 b'',
3110 3114 b'count',
3111 3115 1,
3112 3116 b'number of revisions to test (when using --startrev)',
3113 3117 ),
3114 3118 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
3115 3119 ],
3116 3120 b'-c|-m|FILE REV',
3117 3121 )
3118 3122 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
3119 3123 """benchmark a unified diff between revisions
3120 3124
3121 3125 This doesn't include any copy tracing - it's just a unified diff
3122 3126 of the texts.
3123 3127
3124 3128 By default, benchmark a diff between its delta parent and itself.
3125 3129
3126 3130 With ``--count``, benchmark diffs between delta parents and self for N
3127 3131 revisions starting at the specified revision.
3128 3132
3129 3133 With ``--alldata``, assume the requested revision is a changeset and
3130 3134 measure diffs for all changes related to that changeset (manifest
3131 3135 and filelogs).
3132 3136 """
3133 3137 opts = _byteskwargs(opts)
3134 3138 if opts[b'alldata']:
3135 3139 opts[b'changelog'] = True
3136 3140
3137 3141 if opts.get(b'changelog') or opts.get(b'manifest'):
3138 3142 file_, rev = None, file_
3139 3143 elif rev is None:
3140 3144 raise error.CommandError(b'perfunidiff', b'invalid arguments')
3141 3145
3142 3146 textpairs = []
3143 3147
3144 3148 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
3145 3149
3146 3150 startrev = r.rev(r.lookup(rev))
3147 3151 for rev in range(startrev, min(startrev + count, len(r) - 1)):
3148 3152 if opts[b'alldata']:
3149 3153 # Load revisions associated with changeset.
3150 3154 ctx = repo[rev]
3151 3155 mtext = _manifestrevision(repo, ctx.manifestnode())
3152 3156 for pctx in ctx.parents():
3153 3157 pman = _manifestrevision(repo, pctx.manifestnode())
3154 3158 textpairs.append((pman, mtext))
3155 3159
3156 3160 # Load filelog revisions by iterating manifest delta.
3157 3161 man = ctx.manifest()
3158 3162 pman = ctx.p1().manifest()
3159 3163 for filename, change in pman.diff(man).items():
3160 3164 fctx = repo.file(filename)
3161 3165 f1 = fctx.revision(change[0][0] or -1)
3162 3166 f2 = fctx.revision(change[1][0] or -1)
3163 3167 textpairs.append((f1, f2))
3164 3168 else:
3165 3169 dp = r.deltaparent(rev)
3166 3170 textpairs.append((r.revision(dp), r.revision(rev)))
3167 3171
3168 3172 def d():
3169 3173 for left, right in textpairs:
3170 3174 # The date strings don't matter, so we pass empty strings.
3171 3175 headerlines, hunks = mdiff.unidiff(
3172 3176 left, b'', right, b'', b'left', b'right', binary=False
3173 3177 )
3174 3178 # consume iterators in roughly the way patch.py does
3175 3179 b'\n'.join(headerlines)
3176 3180 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
3177 3181
3178 3182 timer, fm = gettimer(ui, opts)
3179 3183 timer(d)
3180 3184 fm.end()
3181 3185
3182 3186
3183 3187 @command(b'perf::diffwd|perfdiffwd', formatteropts)
3184 3188 def perfdiffwd(ui, repo, **opts):
3185 3189 """Profile diff of working directory changes"""
3186 3190 opts = _byteskwargs(opts)
3187 3191 timer, fm = gettimer(ui, opts)
3188 3192 options = {
3189 3193 'w': 'ignore_all_space',
3190 3194 'b': 'ignore_space_change',
3191 3195 'B': 'ignore_blank_lines',
3192 3196 }
3193 3197
3194 3198 for diffopt in ('', 'w', 'b', 'B', 'wB'):
3195 3199 opts = {options[c]: b'1' for c in diffopt}
3196 3200
3197 3201 def d():
3198 3202 ui.pushbuffer()
3199 3203 commands.diff(ui, repo, **opts)
3200 3204 ui.popbuffer()
3201 3205
3202 3206 diffopt = diffopt.encode('ascii')
3203 3207 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
3204 3208 timer(d, title=title)
3205 3209 fm.end()
3206 3210
3207 3211
3208 3212 @command(
3209 3213 b'perf::revlogindex|perfrevlogindex',
3210 3214 revlogopts + formatteropts,
3211 3215 b'-c|-m|FILE',
3212 3216 )
3213 3217 def perfrevlogindex(ui, repo, file_=None, **opts):
3214 3218 """Benchmark operations against a revlog index.
3215 3219
3216 3220 This tests constructing a revlog instance, reading index data,
3217 3221 parsing index data, and performing various operations related to
3218 3222 index data.
3219 3223 """
3220 3224
3221 3225 opts = _byteskwargs(opts)
3222 3226
3223 3227 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
3224 3228
3225 3229 opener = getattr(rl, 'opener') # trick linter
3226 3230 # compat with hg <= 5.8
3227 3231 radix = getattr(rl, 'radix', None)
3228 3232 indexfile = getattr(rl, '_indexfile', None)
3229 3233 if indexfile is None:
3230 3234 # compatibility with <= hg-5.8
3231 3235 indexfile = getattr(rl, 'indexfile')
3232 3236 data = opener.read(indexfile)
3233 3237
3234 3238 header = struct.unpack(b'>I', data[0:4])[0]
3235 3239 version = header & 0xFFFF
3236 3240 if version == 1:
3237 3241 inline = header & (1 << 16)
3238 3242 else:
3239 3243 raise error.Abort(b'unsupported revlog version: %d' % version)
3240 3244
3241 3245 parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
3242 3246 if parse_index_v1 is None:
3243 3247 parse_index_v1 = mercurial.revlog.revlogio().parseindex
3244 3248
3245 3249 rllen = len(rl)
3246 3250
3247 3251 node0 = rl.node(0)
3248 3252 node25 = rl.node(rllen // 4)
3249 3253 node50 = rl.node(rllen // 2)
3250 3254 node75 = rl.node(rllen // 4 * 3)
3251 3255 node100 = rl.node(rllen - 1)
3252 3256
3253 3257 allrevs = range(rllen)
3254 3258 allrevsrev = list(reversed(allrevs))
3255 3259 allnodes = [rl.node(rev) for rev in range(rllen)]
3256 3260 allnodesrev = list(reversed(allnodes))
3257 3261
3258 3262 def constructor():
3259 3263 if radix is not None:
3260 3264 revlog(opener, radix=radix)
3261 3265 else:
3262 3266 # hg <= 5.8
3263 3267 revlog(opener, indexfile=indexfile)
3264 3268
3265 3269 def read():
3266 3270 with opener(indexfile) as fh:
3267 3271 fh.read()
3268 3272
3269 3273 def parseindex():
3270 3274 parse_index_v1(data, inline)
3271 3275
3272 3276 def getentry(revornode):
3273 3277 index = parse_index_v1(data, inline)[0]
3274 3278 index[revornode]
3275 3279
3276 3280 def getentries(revs, count=1):
3277 3281 index = parse_index_v1(data, inline)[0]
3278 3282
3279 3283 for i in range(count):
3280 3284 for rev in revs:
3281 3285 index[rev]
3282 3286
3283 3287 def resolvenode(node):
3284 3288 index = parse_index_v1(data, inline)[0]
3285 3289 rev = getattr(index, 'rev', None)
3286 3290 if rev is None:
3287 3291 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
3288 3292 # This only works for the C code.
3289 3293 if nodemap is None:
3290 3294 return
3291 3295 rev = nodemap.__getitem__
3292 3296
3293 3297 try:
3294 3298 rev(node)
3295 3299 except error.RevlogError:
3296 3300 pass
3297 3301
3298 3302 def resolvenodes(nodes, count=1):
3299 3303 index = parse_index_v1(data, inline)[0]
3300 3304 rev = getattr(index, 'rev', None)
3301 3305 if rev is None:
3302 3306 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
3303 3307 # This only works for the C code.
3304 3308 if nodemap is None:
3305 3309 return
3306 3310 rev = nodemap.__getitem__
3307 3311
3308 3312 for i in range(count):
3309 3313 for node in nodes:
3310 3314 try:
3311 3315 rev(node)
3312 3316 except error.RevlogError:
3313 3317 pass
3314 3318
3315 3319 benches = [
3316 3320 (constructor, b'revlog constructor'),
3317 3321 (read, b'read'),
3318 3322 (parseindex, b'create index object'),
3319 3323 (lambda: getentry(0), b'retrieve index entry for rev 0'),
3320 3324 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
3321 3325 (lambda: resolvenode(node0), b'look up node at rev 0'),
3322 3326 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
3323 3327 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
3324 3328 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
3325 3329 (lambda: resolvenode(node100), b'look up node at tip'),
3326 3330 # 2x variation is to measure caching impact.
3327 3331 (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
3328 3332 (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
3329 3333 (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
3330 3334 (
3331 3335 lambda: resolvenodes(allnodesrev, 2),
3332 3336 b'look up all nodes 2x (reverse)',
3333 3337 ),
3334 3338 (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
3335 3339 (
3336 3340 lambda: getentries(allrevs, 2),
3337 3341 b'retrieve all index entries 2x (forward)',
3338 3342 ),
3339 3343 (
3340 3344 lambda: getentries(allrevsrev),
3341 3345 b'retrieve all index entries (reverse)',
3342 3346 ),
3343 3347 (
3344 3348 lambda: getentries(allrevsrev, 2),
3345 3349 b'retrieve all index entries 2x (reverse)',
3346 3350 ),
3347 3351 ]
3348 3352
3349 3353 for fn, title in benches:
3350 3354 timer, fm = gettimer(ui, opts)
3351 3355 timer(fn, title=title)
3352 3356 fm.end()
3353 3357
3354 3358
3355 3359 @command(
3356 3360 b'perf::revlogrevisions|perfrevlogrevisions',
3357 3361 revlogopts
3358 3362 + formatteropts
3359 3363 + [
3360 3364 (b'd', b'dist', 100, b'distance between the revisions'),
3361 3365 (b's', b'startrev', 0, b'revision to start reading at'),
3362 3366 (b'', b'reverse', False, b'read in reverse'),
3363 3367 ],
3364 3368 b'-c|-m|FILE',
3365 3369 )
3366 3370 def perfrevlogrevisions(
3367 3371 ui, repo, file_=None, startrev=0, reverse=False, **opts
3368 3372 ):
3369 3373 """Benchmark reading a series of revisions from a revlog.
3370 3374
3371 3375 By default, we read every ``-d/--dist`` revision from 0 to tip of
3372 3376 the specified revlog.
3373 3377
3374 3378 The start revision can be defined via ``-s/--startrev``.
3375 3379 """
3376 3380 opts = _byteskwargs(opts)
3377 3381
3378 3382 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
3379 3383 rllen = getlen(ui)(rl)
3380 3384
3381 3385 if startrev < 0:
3382 3386 startrev = rllen + startrev
3383 3387
3384 3388 def d():
3385 3389 rl.clearcaches()
3386 3390
3387 3391 beginrev = startrev
3388 3392 endrev = rllen
3389 3393 dist = opts[b'dist']
3390 3394
3391 3395 if reverse:
3392 3396 beginrev, endrev = endrev - 1, beginrev - 1
3393 3397 dist = -1 * dist
3394 3398
3395 3399 for x in _xrange(beginrev, endrev, dist):
3396 3400 # Old revisions don't support passing int.
3397 3401 n = rl.node(x)
3398 3402 rl.revision(n)
3399 3403
3400 3404 timer, fm = gettimer(ui, opts)
3401 3405 timer(d)
3402 3406 fm.end()
3403 3407
3404 3408
3405 3409 @command(
3406 3410 b'perf::revlogwrite|perfrevlogwrite',
3407 3411 revlogopts
3408 3412 + formatteropts
3409 3413 + [
3410 3414 (b's', b'startrev', 1000, b'revision to start writing at'),
3411 3415 (b'', b'stoprev', -1, b'last revision to write'),
3412 3416 (b'', b'count', 3, b'number of passes to perform'),
3413 3417 (b'', b'details', False, b'print timing for every revisions tested'),
3414 3418 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
3415 3419 (b'', b'lazydeltabase', True, b'try the provided delta first'),
3416 3420 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
3417 3421 ],
3418 3422 b'-c|-m|FILE',
3419 3423 )
3420 3424 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
3421 3425 """Benchmark writing a series of revisions to a revlog.
3422 3426
3423 3427 Possible source values are:
3424 3428 * `full`: add from a full text (default).
3425 3429 * `parent-1`: add from a delta to the first parent
3426 3430 * `parent-2`: add from a delta to the second parent if it exists
3427 3431 (use a delta from the first parent otherwise)
3428 3432 * `parent-smallest`: add from the smallest delta (either p1 or p2)
3429 3433 * `storage`: add from the existing precomputed deltas
3430 3434
3431 3435 Note: This performance command measures performance in a custom way. As a
3432 3436 result some of the global configuration of the 'perf' command does not
3433 3437 apply to it:
3434 3438
3435 3439 * ``pre-run``: disabled
3436 3440
3437 3441 * ``profile-benchmark``: disabled
3438 3442
3439 3443 * ``run-limits``: disabled, use --count instead
3440 3444 """
3441 3445 opts = _byteskwargs(opts)
3442 3446
3443 3447 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
3444 3448 rllen = getlen(ui)(rl)
3445 3449 if startrev < 0:
3446 3450 startrev = rllen + startrev
3447 3451 if stoprev < 0:
3448 3452 stoprev = rllen + stoprev
3449 3453
3450 3454 lazydeltabase = opts['lazydeltabase']
3451 3455 source = opts['source']
3452 3456 clearcaches = opts['clear_caches']
3453 3457 validsource = (
3454 3458 b'full',
3455 3459 b'parent-1',
3456 3460 b'parent-2',
3457 3461 b'parent-smallest',
3458 3462 b'storage',
3459 3463 )
3460 3464 if source not in validsource:
3461 3465 raise error.Abort('invalid source type: %s' % source)
3462 3466
3463 3467 ### actually gather results
3464 3468 count = opts['count']
3465 3469 if count <= 0:
3466 3470 raise error.Abort('invalid run count: %d' % count)
3467 3471 allresults = []
3468 3472 for c in range(count):
3469 3473 timing = _timeonewrite(
3470 3474 ui,
3471 3475 rl,
3472 3476 source,
3473 3477 startrev,
3474 3478 stoprev,
3475 3479 c + 1,
3476 3480 lazydeltabase=lazydeltabase,
3477 3481 clearcaches=clearcaches,
3478 3482 )
3479 3483 allresults.append(timing)
3480 3484
3481 3485 ### consolidate the results in a single list
3482 3486 results = []
3483 3487 for idx, (rev, t) in enumerate(allresults[0]):
3484 3488 ts = [t]
3485 3489 for other in allresults[1:]:
3486 3490 orev, ot = other[idx]
3487 3491 assert orev == rev
3488 3492 ts.append(ot)
3489 3493 results.append((rev, ts))
3490 3494 resultcount = len(results)
3491 3495
3492 3496 ### Compute and display relevant statistics
3493 3497
3494 3498 # get a formatter
3495 3499 fm = ui.formatter(b'perf', opts)
3496 3500 displayall = ui.configbool(b"perf", b"all-timing", True)
3497 3501
3498 3502 # print individual details if requested
3499 3503 if opts['details']:
3500 3504 for idx, item in enumerate(results, 1):
3501 3505 rev, data = item
3502 3506 title = 'revision #%d of %d, rev %d' % (idx, resultcount, rev)
3503 3507 formatone(fm, data, title=title, displayall=displayall)
3504 3508
3505 3509 # sorts results by median time
3506 3510 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
3507 3511 # list of (name, index) to display
3508 3512 relevants = [
3509 3513 ("min", 0),
3510 3514 ("10%", resultcount * 10 // 100),
3511 3515 ("25%", resultcount * 25 // 100),
3512 3516 ("50%", resultcount * 70 // 100),
3513 3517 ("75%", resultcount * 75 // 100),
3514 3518 ("90%", resultcount * 90 // 100),
3515 3519 ("95%", resultcount * 95 // 100),
3516 3520 ("99%", resultcount * 99 // 100),
3517 3521 ("99.9%", resultcount * 999 // 1000),
3518 3522 ("99.99%", resultcount * 9999 // 10000),
3519 3523 ("99.999%", resultcount * 99999 // 100000),
3520 3524 ("max", -1),
3521 3525 ]
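# Each entry above maps a label to an index into the median-sorted
# `results` list; e.g. ("90%", resultcount * 90 // 100) selects the
# revision that was slower to write than 90% of the others.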
3522 3526 if not ui.quiet:
3523 3527 for name, idx in relevants:
3524 3528 data = results[idx]
3525 3529 title = '%s of %d, rev %d' % (name, resultcount, data[0])
3526 3530 formatone(fm, data[1], title=title, displayall=displayall)
3527 3531
3528 3532 # XXX summing that many floats will not be very precise; we ignore this
3529 3533 # fact for now
3530 3534 totaltime = []
3531 3535 for item in allresults:
3532 3536 totaltime.append(
3533 3537 (
3534 3538 sum(x[1][0] for x in item),
3535 3539 sum(x[1][1] for x in item),
3536 3540 sum(x[1][2] for x in item),
3537 3541 )
3538 3542 )
3539 3543 formatone(
3540 3544 fm,
3541 3545 totaltime,
3542 3546 title="total time (%d revs)" % resultcount,
3543 3547 displayall=displayall,
3544 3548 )
3545 3549 fm.end()
3546 3550
3547 3551
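# minimal stand-in for a transaction: the revlog write path exercised below
# only calls tr.add(), which this safely no-ops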
3548 3552 class _faketr:
3549 3553 def add(s, x, y, z=None):
3550 3554 return None
3551 3555
3552 3556
3553 3557 def _timeonewrite(
3554 3558 ui,
3555 3559 orig,
3556 3560 source,
3557 3561 startrev,
3558 3562 stoprev,
3559 3563 runidx=None,
3560 3564 lazydeltabase=True,
3561 3565 clearcaches=True,
3562 3566 ):
3563 3567 timings = []
3564 3568 tr = _faketr()
3565 3569 with _temprevlog(ui, orig, startrev) as dest:
3566 3570 if hasattr(dest, "delta_config"):
3567 3571 dest.delta_config.lazy_delta_base = lazydeltabase
3568 3572 else:
3569 3573 dest._lazydeltabase = lazydeltabase
3570 3574 revs = list(orig.revs(startrev, stoprev))
3571 3575 total = len(revs)
3572 3576 topic = 'adding'
3573 3577 if runidx is not None:
3574 3578 topic += ' (run #%d)' % runidx
3575 3579 # Support both old and new progress API
3576 3580 if util.safehasattr(ui, 'makeprogress'):
3577 3581 progress = ui.makeprogress(topic, unit='revs', total=total)
3578 3582
3579 3583 def updateprogress(pos):
3580 3584 progress.update(pos)
3581 3585
3582 3586 def completeprogress():
3583 3587 progress.complete()
3584 3588
3585 3589 else:
3586 3590
3587 3591 def updateprogress(pos):
3588 3592 ui.progress(topic, pos, unit='revs', total=total)
3589 3593
3590 3594 def completeprogress():
3591 3595 ui.progress(topic, None, unit='revs', total=total)
3592 3596
3593 3597 for idx, rev in enumerate(revs):
3594 3598 updateprogress(idx)
3595 3599 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
3596 3600 if clearcaches:
3597 3601 dest.index.clearcaches()
3598 3602 dest.clearcaches()
3599 3603 with timeone() as r:
3600 3604 dest.addrawrevision(*addargs, **addkwargs)
3601 3605 timings.append((rev, r[0]))
3602 3606 updateprogress(total)
3603 3607 completeprogress()
3604 3608 return timings
3605 3609
3606 3610
3607 3611 def _getrevisionseed(orig, rev, tr, source):
3608 3612 from mercurial.node import nullid
3609 3613
3610 3614 linkrev = orig.linkrev(rev)
3611 3615 node = orig.node(rev)
3612 3616 p1, p2 = orig.parents(node)
3613 3617 flags = orig.flags(rev)
3614 3618 cachedelta = None
3615 3619 text = None
3616 3620
3617 3621 if source == b'full':
3618 3622 text = orig.revision(rev)
3619 3623 elif source == b'parent-1':
3620 3624 baserev = orig.rev(p1)
3621 3625 cachedelta = (baserev, orig.revdiff(p1, rev))
3622 3626 elif source == b'parent-2':
3623 3627 parent = p2
3624 3628 if p2 == nullid:
3625 3629 parent = p1
3626 3630 baserev = orig.rev(parent)
3627 3631 cachedelta = (baserev, orig.revdiff(parent, rev))
3628 3632 elif source == b'parent-smallest':
3629 3633 p1diff = orig.revdiff(p1, rev)
3630 3634 parent = p1
3631 3635 diff = p1diff
3632 3636 if p2 != nullid:
3633 3637 p2diff = orig.revdiff(p2, rev)
3634 3638 if len(p1diff) > len(p2diff):
3635 3639 parent = p2
3636 3640 diff = p2diff
3637 3641 baserev = orig.rev(parent)
3638 3642 cachedelta = (baserev, diff)
3639 3643 elif source == b'storage':
3640 3644 baserev = orig.deltaparent(rev)
3641 3645 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
3642 3646
3643 3647 return (
3644 3648 (text, tr, linkrev, p1, p2),
3645 3649 {'node': node, 'flags': flags, 'cachedelta': cachedelta},
3646 3650 )
3647 3651
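# For the `full` source, the seed built above amounts to roughly (a sketch
# reusing the names from _getrevisionseed):
#
#   args = (orig.revision(rev), tr, orig.linkrev(rev), p1, p2)
#   kwargs = {'node': orig.node(rev), 'flags': orig.flags(rev),
#             'cachedelta': None}
#
# which _timeonewrite() unpacks into dest.addrawrevision(*args, **kwargs).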
3648 3652
3649 3653 @contextlib.contextmanager
3650 3654 def _temprevlog(ui, orig, truncaterev):
3651 3655 from mercurial import vfs as vfsmod
3652 3656
3653 3657 if orig._inline:
3654 3658 raise error.Abort('inline revlogs are not supported (yet)')
3655 3659 revlogkwargs = {}
3656 3660 k = 'upperboundcomp'
3657 3661 if util.safehasattr(orig, k):
3658 3662 revlogkwargs[k] = getattr(orig, k)
3659 3663
3660 3664 indexfile = getattr(orig, '_indexfile', None)
3661 3665 if indexfile is None:
3662 3666 # compatibility with <= hg-5.8
3663 3667 indexfile = getattr(orig, 'indexfile')
3664 3668 origindexpath = orig.opener.join(indexfile)
3665 3669
3666 3670 datafile = getattr(orig, '_datafile', getattr(orig, 'datafile'))
3667 3671 origdatapath = orig.opener.join(datafile)
3668 3672 radix = b'revlog'
3669 3673 indexname = b'revlog.i'
3670 3674 dataname = b'revlog.d'
3671 3675
3672 3676 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
3673 3677 try:
3674 3678 # copy the index and data files into a temporary directory
3675 3679 ui.debug('copying data in %s\n' % tmpdir)
3676 3680 destindexpath = os.path.join(tmpdir, 'revlog.i')
3677 3681 destdatapath = os.path.join(tmpdir, 'revlog.d')
3678 3682 shutil.copyfile(origindexpath, destindexpath)
3679 3683 shutil.copyfile(origdatapath, destdatapath)
3680 3684
3681 3685 # remove the data we want to add again
3682 3686 ui.debug('truncating data to be rewritten\n')
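# the index is a flat array of fixed-size entries, so truncating the
# file at truncaterev * entry-size drops revision `truncaterev` and
# everything after it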
3683 3687 with open(destindexpath, 'ab') as index:
3684 3688 index.seek(0)
3685 3689 index.truncate(truncaterev * orig._io.size)
3686 3690 with open(destdatapath, 'ab') as data:
3687 3691 data.seek(0)
3688 3692 data.truncate(orig.start(truncaterev))
3689 3693
3690 3694 # instantiate a new revlog from the temporary copy
3691 3695 ui.debug('instantiating revlog from the truncated copy\n')
3692 3696 vfs = vfsmod.vfs(tmpdir)
3693 3697 vfs.options = getattr(orig.opener, 'options', None)
3694 3698
3695 3699 try:
3696 3700 dest = revlog(vfs, radix=radix, **revlogkwargs)
3697 3701 except TypeError:
3698 3702 dest = revlog(
3699 3703 vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
3700 3704 )
3701 3705 if dest._inline:
3702 3706 raise error.Abort('inline revlogs are not supported (yet)')
3703 3707 # make sure internals are initialized
3704 3708 dest.revision(len(dest) - 1)
3705 3709 yield dest
3706 3710 del dest, vfs
3707 3711 finally:
3708 3712 shutil.rmtree(tmpdir, True)
3709 3713
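# Typical use of the helper above (a sketch mirroring _timeonewrite):
#
#   with _temprevlog(ui, orig, startrev) as dest:
#       for rev in orig.revs(startrev, stoprev):
#           dest.addrawrevision(...)  # re-add the truncated revisions
#
# the temporary directory is removed once the context exits, even on error.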
3710 3714
3711 3715 @command(
3712 3716 b'perf::revlogchunks|perfrevlogchunks',
3713 3717 revlogopts
3714 3718 + formatteropts
3715 3719 + [
3716 3720 (b'e', b'engines', b'', b'compression engines to use'),
3717 3721 (b's', b'startrev', 0, b'revision to start at'),
3718 3722 ],
3719 3723 b'-c|-m|FILE',
3720 3724 )
3721 3725 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
3722 3726 """Benchmark operations on revlog chunks.
3723 3727
3724 3728 Logically, each revlog is a collection of fulltext revisions. However,
3725 3729 stored within each revlog are "chunks" of possibly compressed data. This
3726 3730 data needs to be read and decompressed or compressed and written.
3727 3731
3728 3732 This command measures the time it takes to read+decompress and recompress
3729 3733 chunks in a revlog. It effectively isolates I/O and compression performance.
3730 3734 For measurements of higher-level operations like resolving revisions,
3731 3735 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
3732 3736 """
3733 3737 opts = _byteskwargs(opts)
3734 3738
3735 3739 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
3736 3740
3737 3741 # - _chunkraw was renamed to _getsegmentforrevs
3738 3742 # - _getsegmentforrevs was moved onto the inner object
3739 3743 try:
3740 3744 segmentforrevs = rl._inner.get_segment_for_revs
3741 3745 except AttributeError:
3742 3746 try:
3743 3747 segmentforrevs = rl._getsegmentforrevs
3744 3748 except AttributeError:
3745 3749 segmentforrevs = rl._chunkraw
3746 3750
3747 3751 # Verify engines argument.
3748 3752 if engines:
3749 3753 engines = {e.strip() for e in engines.split(b',')}
3750 3754 for engine in engines:
3751 3755 try:
3752 3756 util.compressionengines[engine]
3753 3757 except KeyError:
3754 3758 raise error.Abort(b'unknown compression engine: %s' % engine)
3755 3759 else:
3756 3760 engines = []
3757 3761 for e in util.compengines:
3758 3762 engine = util.compengines[e]
3759 3763 try:
3760 3764 if engine.available():
3761 3765 engine.revlogcompressor().compress(b'dummy')
3762 3766 engines.append(e)
3763 3767 except NotImplementedError:
3764 3768 pass
3765 3769
3766 3770 revs = list(rl.revs(startrev, len(rl) - 1))
3767 3771
3768 3772 @contextlib.contextmanager
3769 3773 def reading(rl):
3770 3774 if getattr(rl, 'reading', None) is not None:
3771 3775 with rl.reading():
3772 3776 yield None
3773 3777 elif rl._inline:
3774 3778 indexfile = getattr(rl, '_indexfile', None)
3775 3779 if indexfile is None:
3776 3780 # compatibility with <= hg-5.8
3777 3781 indexfile = getattr(rl, 'indexfile')
3778 3782 yield getsvfs(repo)(indexfile)
3779 3783 else:
3780 3784 datafile = getattr(rl, '_datafile', getattr(rl, 'datafile'))
3781 3785 yield getsvfs(repo)(datafile)
3782 3786
3783 3787 if getattr(rl, 'reading', None) is not None:
3784 3788
3785 3789 @contextlib.contextmanager
3786 3790 def lazy_reading(rl):
3787 3791 with rl.reading():
3788 3792 yield
3789 3793
3790 3794 else:
3791 3795
3792 3796 @contextlib.contextmanager
3793 3797 def lazy_reading(rl):
3794 3798 yield
3795 3799
3796 3800 def doread():
3797 3801 rl.clearcaches()
3798 3802 for rev in revs:
3799 3803 with lazy_reading(rl):
3800 3804 segmentforrevs(rev, rev)
3801 3805
3802 3806 def doreadcachedfh():
3803 3807 rl.clearcaches()
3804 3808 with reading(rl) as fh:
3805 3809 if fh is not None:
3806 3810 for rev in revs:
3807 3811 segmentforrevs(rev, rev, df=fh)
3808 3812 else:
3809 3813 for rev in revs:
3810 3814 segmentforrevs(rev, rev)
3811 3815
3812 3816 def doreadbatch():
3813 3817 rl.clearcaches()
3814 3818 with lazy_reading(rl):
3815 3819 segmentforrevs(revs[0], revs[-1])
3816 3820
3817 3821 def doreadbatchcachedfh():
3818 3822 rl.clearcaches()
3819 3823 with reading(rl) as fh:
3820 3824 if fh is not None:
3821 3825 segmentforrevs(revs[0], revs[-1], df=fh)
3822 3826 else:
3823 3827 segmentforrevs(revs[0], revs[-1])
3824 3828
3825 3829 def dochunk():
3826 3830 rl.clearcaches()
3827 3831 # chunk used to be available directly on the revlog
3828 3832 _chunk = getattr(rl, '_inner', rl)._chunk
3829 3833 with reading(rl) as fh:
3830 3834 if fh is not None:
3831 3835 for rev in revs:
3832 3836 _chunk(rev, df=fh)
3833 3837 else:
3834 3838 for rev in revs:
3835 3839 _chunk(rev)
3836 3840
3837 3841 chunks = [None]
3838 3842
3839 3843 def dochunkbatch():
3840 3844 rl.clearcaches()
3841 3845 _chunks = getattr(rl, '_inner', rl)._chunks
3842 3846 with reading(rl) as fh:
3843 3847 if fh is not None:
3844 3848 # Save chunks as a side-effect.
3845 3849 chunks[0] = _chunks(revs, df=fh)
3846 3850 else:
3847 3851 # Save chunks as a side-effect.
3848 3852 chunks[0] = _chunks(revs)
3849 3853
3850 3854 def docompress(compressor):
3851 3855 rl.clearcaches()
3852 3856
3853 3857 compressor_holder = getattr(rl, '_inner', rl)
3854 3858
3855 3859 try:
3856 3860 # Swap in the requested compression engine.
3857 3861 oldcompressor = compressor_holder._compressor
3858 3862 compressor_holder._compressor = compressor
3859 3863 for chunk in chunks[0]:
3860 3864 rl.compress(chunk)
3861 3865 finally:
3862 3866 compressor_holder._compressor = oldcompressor
3863 3867
3864 3868 benches = [
3865 3869 (lambda: doread(), b'read'),
3866 3870 (lambda: doreadcachedfh(), b'read w/ reused fd'),
3867 3871 (lambda: doreadbatch(), b'read batch'),
3868 3872 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
3869 3873 (lambda: dochunk(), b'chunk'),
3870 3874 (lambda: dochunkbatch(), b'chunk batch'),
3871 3875 ]
3872 3876
3873 3877 for engine in sorted(engines):
3874 3878 compressor = util.compengines[engine].revlogcompressor()
3875 3879 benches.append(
3876 3880 (
3877 3881 functools.partial(docompress, compressor),
3878 3882 b'compress w/ %s' % engine,
3879 3883 )
3880 3884 )
3881 3885
3882 3886 for fn, title in benches:
3883 3887 timer, fm = gettimer(ui, opts)
3884 3888 timer(fn, title=title)
3885 3889 fm.end()
3886 3890
3887 3891
3888 3892 @command(
3889 3893 b'perf::revlogrevision|perfrevlogrevision',
3890 3894 revlogopts
3891 3895 + formatteropts
3892 3896 + [(b'', b'cache', False, b'use caches instead of clearing')],
3893 3897 b'-c|-m|FILE REV',
3894 3898 )
3895 3899 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
3896 3900 """Benchmark obtaining a revlog revision.
3897 3901
3898 3902 Obtaining a revlog revision consists of roughly the following steps:
3899 3903
3900 3904 1. Compute the delta chain
3901 3905 2. Slice the delta chain if applicable
3902 3906 3. Obtain the raw chunks for that delta chain
3903 3907 4. Decompress each raw chunk
3904 3908 5. Apply binary patches to obtain fulltext
3905 3909 6. Verify hash of fulltext
3906 3910
3907 3911 This command measures the time spent in each of these phases.
3908 3912 """
3909 3913 opts = _byteskwargs(opts)
3910 3914
3911 3915 if opts.get(b'changelog') or opts.get(b'manifest'):
3912 3916 file_, rev = None, file_
3913 3917 elif rev is None:
3914 3918 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
3915 3919
3916 3920 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
3917 3921
3918 3922 # _chunkraw was renamed to _getsegmentforrevs.
3919 3923 try:
3920 3924 segmentforrevs = r._inner.get_segment_for_revs
3921 3925 except AttributeError:
3922 3926 try:
3923 3927 segmentforrevs = r._getsegmentforrevs
3924 3928 except AttributeError:
3925 3929 segmentforrevs = r._chunkraw
3926 3930
3927 3931 node = r.lookup(rev)
3928 3932 rev = r.rev(node)
3929 3933
3930 3934 if getattr(r, 'reading', None) is not None:
3931 3935
3932 3936 @contextlib.contextmanager
3933 3937 def lazy_reading(r):
3934 3938 with r.reading():
3935 3939 yield
3936 3940
3937 3941 else:
3938 3942
3939 3943 @contextlib.contextmanager
3940 3944 def lazy_reading(r):
3941 3945 yield
3942 3946
3943 3947 def getrawchunks(data, chain):
3944 3948 start = r.start
3945 3949 length = r.length
3946 3950 inline = r._inline
3947 3951 try:
3948 3952 iosize = r.index.entry_size
3949 3953 except AttributeError:
3950 3954 iosize = r._io.size
3951 3955 buffer = util.buffer
3952 3956
3953 3957 chunks = []
3954 3958 ladd = chunks.append
3955 3959 for idx, item in enumerate(chain):
3956 3960 offset = start(item[0])
3957 3961 bits = data[idx]
3958 3962 for rev in item:
3959 3963 chunkstart = start(rev)
3960 3964 if inline:
3961 3965 chunkstart += (rev + 1) * iosize
3962 3966 chunklength = length(rev)
3963 3967 ladd(buffer(bits, chunkstart - offset, chunklength))
3964 3968
3965 3969 return chunks
3966 3970
3967 3971 def dodeltachain(rev):
3968 3972 if not cache:
3969 3973 r.clearcaches()
3970 3974 r._deltachain(rev)
3971 3975
3972 3976 def doread(chain):
3973 3977 if not cache:
3974 3978 r.clearcaches()
3975 3979 for item in slicedchain:
3976 3980 with lazy_reading(r):
3977 3981 segmentforrevs(item[0], item[-1])
3978 3982
3979 3983 def doslice(r, chain, size):
3980 3984 for s in slicechunk(r, chain, targetsize=size):
3981 3985 pass
3982 3986
3983 3987 def dorawchunks(data, chain):
3984 3988 if not cache:
3985 3989 r.clearcaches()
3986 3990 getrawchunks(data, chain)
3987 3991
3988 3992 def dodecompress(chunks):
3989 3993 decomp = r.decompress
3990 3994 for chunk in chunks:
3991 3995 decomp(chunk)
3992 3996
3993 3997 def dopatch(text, bins):
3994 3998 if not cache:
3995 3999 r.clearcaches()
3996 4000 mdiff.patches(text, bins)
3997 4001
3998 4002 def dohash(text):
3999 4003 if not cache:
4000 4004 r.clearcaches()
4001 4005 r.checkhash(text, node, rev=rev)
4002 4006
4003 4007 def dorevision():
4004 4008 if not cache:
4005 4009 r.clearcaches()
4006 4010 r.revision(node)
4007 4011
4008 4012 try:
4009 4013 from mercurial.revlogutils.deltas import slicechunk
4010 4014 except ImportError:
4011 4015 slicechunk = getattr(revlog, '_slicechunk', None)
4012 4016
4013 4017 size = r.length(rev)
4014 4018 chain = r._deltachain(rev)[0]
4015 4019
4016 4020 with_sparse_read = False
4017 4021 if hasattr(r, 'data_config'):
4018 4022 with_sparse_read = r.data_config.with_sparse_read
4019 4023 elif hasattr(r, '_withsparseread'):
4020 4024 with_sparse_read = r._withsparseread
4021 4025 if with_sparse_read:
4022 4026 slicedchain = (chain,)
4023 4027 else:
4024 4028 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
4025 4029 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
4026 4030 rawchunks = getrawchunks(data, slicedchain)
4027 4031 bins = r._inner._chunks(chain)
4028 4032 text = bytes(bins[0])
4029 4033 bins = bins[1:]
4030 4034 text = mdiff.patches(text, bins)
4031 4035
4032 4036 benches = [
4033 4037 (lambda: dorevision(), b'full'),
4034 4038 (lambda: dodeltachain(rev), b'deltachain'),
4035 4039 (lambda: doread(chain), b'read'),
4036 4040 ]
4037 4041
4038 4042 if with_sparse_read:
4039 4043 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
4040 4044 benches.append(slicing)
4041 4045
4042 4046 benches.extend(
4043 4047 [
4044 4048 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
4045 4049 (lambda: dodecompress(rawchunks), b'decompress'),
4046 4050 (lambda: dopatch(text, bins), b'patch'),
4047 4051 (lambda: dohash(text), b'hash'),
4048 4052 ]
4049 4053 )
4050 4054
4051 4055 timer, fm = gettimer(ui, opts)
4052 4056 for fn, title in benches:
4053 4057 timer(fn, title=title)
4054 4058 fm.end()
4055 4059
4056 4060
4057 4061 @command(
4058 4062 b'perf::revset|perfrevset',
4059 4063 [
4060 4064 (b'C', b'clear', False, b'clear volatile cache between each call.'),
4061 4065 (b'', b'contexts', False, b'obtain changectx for each revision'),
4062 4066 ]
4063 4067 + formatteropts,
4064 4068 b"REVSET",
4065 4069 )
4066 4070 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
4067 4071 """benchmark the execution time of a revset
4068 4072
4069 4073 Use the --clear option if you need to evaluate the impact of building the
4070 4074 volatile revision set caches on revset execution. The volatile caches hold
4071 4075 filtering and obsolescence related data."""
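# Hypothetical invocations; any revset accepted by `hg log -r` works here:
#
#   $ hg perf::revset 'draft()'
#   $ hg perf::revset --contexts --clear 'heads(all())'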
4072 4076 opts = _byteskwargs(opts)
4073 4077
4074 4078 timer, fm = gettimer(ui, opts)
4075 4079
4076 4080 def d():
4077 4081 if clear:
4078 4082 repo.invalidatevolatilesets()
4079 4083 if contexts:
4080 4084 for ctx in repo.set(expr):
4081 4085 pass
4082 4086 else:
4083 4087 for r in repo.revs(expr):
4084 4088 pass
4085 4089
4086 4090 timer(d)
4087 4091 fm.end()
4088 4092
4089 4093
4090 4094 @command(
4091 4095 b'perf::volatilesets|perfvolatilesets',
4092 4096 [
4093 4097 (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
4094 4098 ]
4095 4099 + formatteropts,
4096 4100 )
4097 4101 def perfvolatilesets(ui, repo, *names, **opts):
4098 4102 """benchmark the computation of various volatile set
4099 4103
4100 4104 Volatile set computes element related to filtering and obsolescence."""
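# Hypothetical invocation restricted to the `obsolete` set (available set
# names vary across Mercurial versions):
#
#   $ hg perf::volatilesets obsolete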
4101 4105 opts = _byteskwargs(opts)
4102 4106 timer, fm = gettimer(ui, opts)
4103 4107 repo = repo.unfiltered()
4104 4108
4105 4109 def getobs(name):
4106 4110 def d():
4107 4111 repo.invalidatevolatilesets()
4108 4112 if opts[b'clear_obsstore']:
4109 4113 clearfilecache(repo, b'obsstore')
4110 4114 obsolete.getrevs(repo, name)
4111 4115
4112 4116 return d
4113 4117
4114 4118 allobs = sorted(obsolete.cachefuncs)
4115 4119 if names:
4116 4120 allobs = [n for n in allobs if n in names]
4117 4121
4118 4122 for name in allobs:
4119 4123 timer(getobs(name), title=name)
4120 4124
4121 4125 def getfiltered(name):
4122 4126 def d():
4123 4127 repo.invalidatevolatilesets()
4124 4128 if opts[b'clear_obsstore']:
4125 4129 clearfilecache(repo, b'obsstore')
4126 4130 repoview.filterrevs(repo, name)
4127 4131
4128 4132 return d
4129 4133
4130 4134 allfilter = sorted(repoview.filtertable)
4131 4135 if names:
4132 4136 allfilter = [n for n in allfilter if n in names]
4133 4137
4134 4138 for name in allfilter:
4135 4139 timer(getfiltered(name), title=name)
4136 4140 fm.end()
4137 4141
4138 4142
4139 4143 @command(
4140 4144 b'perf::branchmap|perfbranchmap',
4141 4145 [
4142 4146 (b'f', b'full', False, b'include the build time of subsets'),
4143 4147 (
4144 4148 b'',
4145 4149 b'clear-revbranch',
4146 4150 False,
4147 4151 b'purge the revbranch cache between computation',
4148 4152 ),
4149 4153 ]
4150 4154 + formatteropts,
4151 4155 )
4152 4156 def perfbranchmap(ui, repo, *filternames, **opts):
4153 4157 """benchmark the update of a branchmap
4154 4158
4155 4159 This benchmarks the full repo.branchmap() call with cache reads and writes disabled.
4156 4160 """
4157 4161 opts = _byteskwargs(opts)
4158 4162 full = opts.get(b"full", False)
4159 4163 clear_revbranch = opts.get(b"clear_revbranch", False)
4160 4164 timer, fm = gettimer(ui, opts)
4161 4165
4162 4166 def getbranchmap(filtername):
4163 4167 """generate a benchmark function for the filtername"""
4164 4168 if filtername is None:
4165 4169 view = repo
4166 4170 else:
4167 4171 view = repo.filtered(filtername)
4168 4172 if util.safehasattr(view._branchcaches, '_per_filter'):
4169 4173 filtered = view._branchcaches._per_filter
4170 4174 else:
4171 4175 # older versions
4172 4176 filtered = view._branchcaches
4173 4177
4174 4178 def d():
4175 4179 if clear_revbranch:
4176 4180 repo.revbranchcache()._clear()
4177 4181 if full:
4178 4182 view._branchcaches.clear()
4179 4183 else:
4180 4184 filtered.pop(filtername, None)
4181 4185 view.branchmap()
4182 4186
4183 4187 return d
4184 4188
4185 4189 # order filters from smaller subsets to bigger ones
4186 4190 possiblefilters = set(repoview.filtertable)
4187 4191 if filternames:
4188 4192 possiblefilters &= set(filternames)
4189 4193 subsettable = getbranchmapsubsettable()
4190 4194 allfilters = []
4191 4195 while possiblefilters:
4192 4196 for name in possiblefilters:
4193 4197 subset = subsettable.get(name)
4194 4198 if subset not in possiblefilters:
4195 4199 break
4196 4200 else:
4197 4201 assert False, b'subset cycle %s!' % possiblefilters
4198 4202 allfilters.append(name)
4199 4203 possiblefilters.remove(name)
4200 4204
4201 4205 # warm the cache
4202 4206 if not full:
4203 4207 for name in allfilters:
4204 4208 repo.filtered(name).branchmap()
4205 4209 if not filternames or b'unfiltered' in filternames:
4206 4210 # add unfiltered
4207 4211 allfilters.append(None)
4208 4212
4209 4213 old_branch_cache_from_file = None
4210 4214 branchcacheread = None
4211 4215 if util.safehasattr(branchmap, 'branch_cache_from_file'):
4212 4216 old_branch_cache_from_file = branchmap.branch_cache_from_file
4213 4217 branchmap.branch_cache_from_file = lambda *args: None
4214 4218 elif util.safehasattr(branchmap.branchcache, 'fromfile'):
4215 4219 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
4216 4220 branchcacheread.set(classmethod(lambda *args: None))
4217 4221 else:
4218 4222 # older versions
4219 4223 branchcacheread = safeattrsetter(branchmap, b'read')
4220 4224 branchcacheread.set(lambda *args: None)
4221 4225 if util.safehasattr(branchmap, '_LocalBranchCache'):
4222 4226 branchcachewrite = safeattrsetter(branchmap._LocalBranchCache, b'write')
4223 4227 branchcachewrite.set(lambda *args: None)
4224 4228 else:
4225 4229 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
4226 4230 branchcachewrite.set(lambda *args: None)
4227 4231 try:
4228 4232 for name in allfilters:
4229 4233 printname = name
4230 4234 if name is None:
4231 4235 printname = b'unfiltered'
4232 4236 timer(getbranchmap(name), title=printname)
4233 4237 finally:
4234 4238 if old_branch_cache_from_file is not None:
4235 4239 branchmap.branch_cache_from_file = old_branch_cache_from_file
4236 4240 if branchcacheread is not None:
4237 4241 branchcacheread.restore()
4238 4242 branchcachewrite.restore()
4239 4243 fm.end()
4240 4244
4241 4245
4242 4246 @command(
4243 4247 b'perf::branchmapupdate|perfbranchmapupdate',
4244 4248 [
4245 4249 (b'', b'base', [], b'subset of revision to start from'),
4246 4250 (b'', b'target', [], b'subset of revision to end with'),
4247 4251 (b'', b'clear-caches', False, b'clear caches between each run'),
4248 4252 ]
4249 4253 + formatteropts,
4250 4254 )
4251 4255 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
4252 4256 """benchmark branchmap update from for <base> revs to <target> revs
4253 4257
4254 4258 If `--clear-caches` is passed, the following items will be reset before
4255 4259 each update:
4256 4260 * the changelog instance and associated indexes
4257 4261 * the rev-branch-cache instance
4258 4262
4259 4263 Examples:
4260 4264
4261 4265 # update for the one last revision
4262 4266 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
4263 4267
4264 4268 # update for changes coming with a new branch
4265 4269 $ hg perfbranchmapupdate --base 'stable' --target 'default'
4266 4270 """
4267 4271 from mercurial import branchmap
4268 4272 from mercurial import repoview
4269 4273
4270 4274 opts = _byteskwargs(opts)
4271 4275 timer, fm = gettimer(ui, opts)
4272 4276 clearcaches = opts[b'clear_caches']
4273 4277 unfi = repo.unfiltered()
4274 4278 x = [None] # used to pass data between closures
4275 4279
4276 4280 # we use a `list` here to avoid possible side effect from smartset
4277 4281 baserevs = list(scmutil.revrange(repo, base))
4278 4282 targetrevs = list(scmutil.revrange(repo, target))
4279 4283 if not baserevs:
4280 4284 raise error.Abort(b'no revisions selected for --base')
4281 4285 if not targetrevs:
4282 4286 raise error.Abort(b'no revisions selected for --target')
4283 4287
4284 4288 # make sure the target branchmap also contains the one in the base
4285 4289 targetrevs = list(set(baserevs) | set(targetrevs))
4286 4290 targetrevs.sort()
4287 4291
4288 4292 cl = repo.changelog
4289 4293 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
4290 4294 allbaserevs.sort()
4291 4295 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
4292 4296
4293 4297 newrevs = list(alltargetrevs.difference(allbaserevs))
4294 4298 newrevs.sort()
4295 4299
4296 4300 allrevs = frozenset(unfi.changelog.revs())
4297 4301 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
4298 4302 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
4299 4303
4300 4304 def basefilter(repo, visibilityexceptions=None):
4301 4305 return basefilterrevs
4302 4306
4303 4307 def targetfilter(repo, visibilityexceptions=None):
4304 4308 return targetfilterrevs
4305 4309
4306 4310 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
4307 4311 ui.status(msg % (len(allbaserevs), len(newrevs)))
4308 4312 if targetfilterrevs:
4309 4313 msg = b'(%d revisions still filtered)\n'
4310 4314 ui.status(msg % len(targetfilterrevs))
4311 4315
4312 4316 try:
4313 4317 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
4314 4318 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
4315 4319
4316 4320 baserepo = repo.filtered(b'__perf_branchmap_update_base')
4317 4321 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
4318 4322
4319 4323 bcache = repo.branchmap()
4320 4324 copy_method = 'copy'
4321 4325
4322 4326 copy_base_kwargs = copy_target_kwargs = {}
4323 4327 if hasattr(bcache, 'copy'):
4324 4328 if 'repo' in getargspec(bcache.copy).args:
4325 4329 copy_base_kwargs = {"repo": baserepo}
4326 4330 copy_target_kwargs = {"repo": targetrepo}
4327 4331 else:
4328 4332 copy_method = 'inherit_for'
4329 4333 copy_base_kwargs = {"repo": baserepo}
4330 4334 copy_target_kwargs = {"repo": targetrepo}
4331 4335
4332 4336 # try to find an existing branchmap to reuse
4333 4337 subsettable = getbranchmapsubsettable()
4334 4338 candidatefilter = subsettable.get(None)
4335 4339 while candidatefilter is not None:
4336 4340 candidatebm = repo.filtered(candidatefilter).branchmap()
4337 4341 if candidatebm.validfor(baserepo):
4338 4342 filtered = repoview.filterrevs(repo, candidatefilter)
4339 4343 missing = [r for r in allbaserevs if r in filtered]
4340 4344 base = getattr(candidatebm, copy_method)(**copy_base_kwargs)
4341 4345 base.update(baserepo, missing)
4342 4346 break
4343 4347 candidatefilter = subsettable.get(candidatefilter)
4344 4348 else:
4345 4349 # no suitable subset was found
4346 4350 base = branchmap.branchcache()
4347 4351 base.update(baserepo, allbaserevs)
4348 4352
4349 4353 def setup():
4350 4354 x[0] = getattr(base, copy_method)(**copy_target_kwargs)
4351 4355 if clearcaches:
4352 4356 unfi._revbranchcache = None
4353 4357 clearchangelog(repo)
4354 4358
4355 4359 def bench():
4356 4360 x[0].update(targetrepo, newrevs)
4357 4361
4358 4362 timer(bench, setup=setup)
4359 4363 fm.end()
4360 4364 finally:
4361 4365 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
4362 4366 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
4363 4367
4364 4368
4365 4369 @command(
4366 4370 b'perf::branchmapload|perfbranchmapload',
4367 4371 [
4368 4372 (b'f', b'filter', b'', b'Specify repoview filter'),
4369 4373 (b'', b'list', False, b'List branchmap filter caches'),
4370 4374 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
4371 4375 ]
4372 4376 + formatteropts,
4373 4377 )
4374 4378 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
4375 4379 """benchmark reading the branchmap"""
4376 4380 opts = _byteskwargs(opts)
4377 4381 clearrevlogs = opts[b'clear_revlogs']
4378 4382
4379 4383 if list:
4380 4384 for name, kind, st in repo.cachevfs.readdir(stat=True):
4381 4385 if name.startswith(b'branch2'):
4382 4386 filtername = name.partition(b'-')[2] or b'unfiltered'
4383 4387 ui.status(
4384 4388 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
4385 4389 )
4386 4390 return
4387 4391 if not filter:
4388 4392 filter = None
4389 4393 subsettable = getbranchmapsubsettable()
4390 4394 if filter is None:
4391 4395 repo = repo.unfiltered()
4392 4396 else:
4393 4397 repo = repoview.repoview(repo, filter)
4394 4398
4395 4399 repo.branchmap() # make sure we have a relevant, up-to-date branchmap
4396 4400
4397 4401 fromfile = getattr(branchmap, 'branch_cache_from_file', None)
4398 4402 if fromfile is None:
4399 4403 fromfile = getattr(branchmap.branchcache, 'fromfile', None)
4400 4404 if fromfile is None:
4401 4405 fromfile = branchmap.read
4402 4406
4403 4407 currentfilter = filter
4404 4408 # try once without timer, the filter may not be cached
4405 4409 while fromfile(repo) is None:
4406 4410 currentfilter = subsettable.get(currentfilter)
4407 4411 if currentfilter is None:
4408 4412 raise error.Abort(
4409 4413 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
4410 4414 )
4411 4415 repo = repo.filtered(currentfilter)
4412 4416 timer, fm = gettimer(ui, opts)
4413 4417
4414 4418 def setup():
4415 4419 if clearrevlogs:
4416 4420 clearchangelog(repo)
4417 4421
4418 4422 def bench():
4419 4423 fromfile(repo)
4420 4424
4421 4425 timer(bench, setup=setup)
4422 4426 fm.end()
4423 4427
4424 4428
4425 4429 @command(b'perf::loadmarkers|perfloadmarkers')
4426 4430 def perfloadmarkers(ui, repo):
4427 4431 """benchmark the time to parse the on-disk markers for a repo
4428 4432
4429 4433 Result is the number of markers in the repo."""
4430 4434 timer, fm = gettimer(ui)
4431 4435 svfs = getsvfs(repo)
4432 4436 timer(lambda: len(obsolete.obsstore(repo, svfs)))
4433 4437 fm.end()
4434 4438
4435 4439
4436 4440 @command(
4437 4441 b'perf::lrucachedict|perflrucachedict',
4438 4442 formatteropts
4439 4443 + [
4440 4444 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
4441 4445 (b'', b'mincost', 0, b'smallest cost of items in cache'),
4442 4446 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
4443 4447 (b'', b'size', 4, b'size of cache'),
4444 4448 (b'', b'gets', 10000, b'number of key lookups'),
4445 4449 (b'', b'sets', 10000, b'number of key sets'),
4446 4450 (b'', b'mixed', 10000, b'number of mixed mode operations'),
4447 4451 (
4448 4452 b'',
4449 4453 b'mixedgetfreq',
4450 4454 50,
4451 4455 b'frequency of get vs set ops in mixed mode',
4452 4456 ),
4453 4457 ],
4454 4458 norepo=True,
4455 4459 )
4456 4460 def perflrucache(
4457 4461 ui,
4458 4462 mincost=0,
4459 4463 maxcost=100,
4460 4464 costlimit=0,
4461 4465 size=4,
4462 4466 gets=10000,
4463 4467 sets=10000,
4464 4468 mixed=10000,
4465 4469 mixedgetfreq=50,
4466 4470 **opts
4467 4471 ):
4468 4472 opts = _byteskwargs(opts)
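# The benchmarks below exercise util.lrucachedict; for reference, a sketch
# of the API as used here:
#
#   d = util.lrucachedict(4)        # mapping bounded to four entries
#   d[key] = value                  # plain insertion, may evict the LRU entry
#   d.insert(key, value, cost=10)   # insertion with a cost hint
#   d[key]                          # lookup refreshes the entry's recency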
4469 4473
4470 4474 def doinit():
4471 4475 for i in _xrange(10000):
4472 4476 util.lrucachedict(size)
4473 4477
4474 4478 costrange = list(range(mincost, maxcost + 1))
4475 4479
4476 4480 values = []
4477 4481 for i in _xrange(size):
4478 4482 values.append(random.randint(0, _maxint))
4479 4483
4480 4484 # Get mode fills the cache and tests raw lookup performance with no
4481 4485 # eviction.
4482 4486 getseq = []
4483 4487 for i in _xrange(gets):
4484 4488 getseq.append(random.choice(values))
4485 4489
4486 4490 def dogets():
4487 4491 d = util.lrucachedict(size)
4488 4492 for v in values:
4489 4493 d[v] = v
4490 4494 for key in getseq:
4491 4495 value = d[key]
4492 4496 value # silence pyflakes warning
4493 4497
4494 4498 def dogetscost():
4495 4499 d = util.lrucachedict(size, maxcost=costlimit)
4496 4500 for i, v in enumerate(values):
4497 4501 d.insert(v, v, cost=costs[i])
4498 4502 for key in getseq:
4499 4503 try:
4500 4504 value = d[key]
4501 4505 value # silence pyflakes warning
4502 4506 except KeyError:
4503 4507 pass
4504 4508
4505 4509 # Set mode tests insertion speed with cache eviction.
4506 4510 setseq = []
4507 4511 costs = []
4508 4512 for i in _xrange(sets):
4509 4513 setseq.append(random.randint(0, _maxint))
4510 4514 costs.append(random.choice(costrange))
4511 4515
4512 4516 def doinserts():
4513 4517 d = util.lrucachedict(size)
4514 4518 for v in setseq:
4515 4519 d.insert(v, v)
4516 4520
4517 4521 def doinsertscost():
4518 4522 d = util.lrucachedict(size, maxcost=costlimit)
4519 4523 for i, v in enumerate(setseq):
4520 4524 d.insert(v, v, cost=costs[i])
4521 4525
4522 4526 def dosets():
4523 4527 d = util.lrucachedict(size)
4524 4528 for v in setseq:
4525 4529 d[v] = v
4526 4530
4527 4531 # Mixed mode randomly performs gets and sets with eviction.
4528 4532 mixedops = []
4529 4533 for i in _xrange(mixed):
4530 4534 r = random.randint(0, 100)
4531 4535 if r < mixedgetfreq:
4532 4536 op = 0
4533 4537 else:
4534 4538 op = 1
4535 4539
4536 4540 mixedops.append(
4537 4541 (op, random.randint(0, size * 2), random.choice(costrange))
4538 4542 )
4539 4543
4540 4544 def domixed():
4541 4545 d = util.lrucachedict(size)
4542 4546
4543 4547 for op, v, cost in mixedops:
4544 4548 if op == 0:
4545 4549 try:
4546 4550 d[v]
4547 4551 except KeyError:
4548 4552 pass
4549 4553 else:
4550 4554 d[v] = v
4551 4555
4552 4556 def domixedcost():
4553 4557 d = util.lrucachedict(size, maxcost=costlimit)
4554 4558
4555 4559 for op, v, cost in mixedops:
4556 4560 if op == 0:
4557 4561 try:
4558 4562 d[v]
4559 4563 except KeyError:
4560 4564 pass
4561 4565 else:
4562 4566 d.insert(v, v, cost=cost)
4563 4567
4564 4568 benches = [
4565 4569 (doinit, b'init'),
4566 4570 ]
4567 4571
4568 4572 if costlimit:
4569 4573 benches.extend(
4570 4574 [
4571 4575 (dogetscost, b'gets w/ cost limit'),
4572 4576 (doinsertscost, b'inserts w/ cost limit'),
4573 4577 (domixedcost, b'mixed w/ cost limit'),
4574 4578 ]
4575 4579 )
4576 4580 else:
4577 4581 benches.extend(
4578 4582 [
4579 4583 (dogets, b'gets'),
4580 4584 (doinserts, b'inserts'),
4581 4585 (dosets, b'sets'),
4582 4586 (domixed, b'mixed'),
4583 4587 ]
4584 4588 )
4585 4589
4586 4590 for fn, title in benches:
4587 4591 timer, fm = gettimer(ui, opts)
4588 4592 timer(fn, title=title)
4589 4593 fm.end()
4590 4594
4591 4595
4592 4596 @command(
4593 4597 b'perf::write|perfwrite',
4594 4598 formatteropts
4595 4599 + [
4596 4600 (b'', b'write-method', b'write', b'ui write method'),
4597 4601 (b'', b'nlines', 100, b'number of lines'),
4598 4602 (b'', b'nitems', 100, b'number of items (per line)'),
4599 4603 (b'', b'item', b'x', b'item that is written'),
4600 4604 (b'', b'batch-line', None, b'pass whole line to write method at once'),
4601 4605 (b'', b'flush-line', None, b'flush after each line'),
4602 4606 ],
4603 4607 )
4604 4608 def perfwrite(ui, repo, **opts):
4605 4609 """microbenchmark ui.write (and others)"""
4606 4610 opts = _byteskwargs(opts)
4607 4611
4608 4612 write = getattr(ui, _sysstr(opts[b'write_method']))
4609 4613 nlines = int(opts[b'nlines'])
4610 4614 nitems = int(opts[b'nitems'])
4611 4615 item = opts[b'item']
4612 4616 batch_line = opts.get(b'batch_line')
4613 4617 flush_line = opts.get(b'flush_line')
4614 4618
4615 4619 if batch_line:
4616 4620 line = item * nitems + b'\n'
4617 4621
4618 4622 def benchmark():
4619 4623 for i in pycompat.xrange(nlines):
4620 4624 if batch_line:
4621 4625 write(line)
4622 4626 else:
4623 4627 for i in pycompat.xrange(nitems):
4624 4628 write(item)
4625 4629 write(b'\n')
4626 4630 if flush_line:
4627 4631 ui.flush()
4628 4632 ui.flush()
4629 4633
4630 4634 timer, fm = gettimer(ui, opts)
4631 4635 timer(benchmark)
4632 4636 fm.end()
4633 4637
4634 4638
4635 4639 def uisetup(ui):
4636 4640 if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
4637 4641 commands, b'debugrevlogopts'
4638 4642 ):
4639 4643 # for "historical portability":
4640 4644 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
4641 4645 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
4642 4646 # openrevlog() should cause failure, because it has been
4643 4647 # available since 3.5 (or 49c583ca48c4).
4644 4648 def openrevlog(orig, repo, cmd, file_, opts):
4645 4649 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
4646 4650 raise error.Abort(
4647 4651 b"This version doesn't support --dir option",
4648 4652 hint=b"use 3.5 or later",
4649 4653 )
4650 4654 return orig(repo, cmd, file_, opts)
4651 4655
4652 4656 name = _sysstr(b'openrevlog')
4653 4657 extensions.wrapfunction(cmdutil, name, openrevlog)
4654 4658
4655 4659
4656 4660 @command(
4657 4661 b'perf::progress|perfprogress',
4658 4662 formatteropts
4659 4663 + [
4660 4664 (b'', b'topic', b'topic', b'topic for progress messages'),
4661 4665 (b'c', b'total', 1000000, b'total value we are progressing to'),
4662 4666 ],
4663 4667 norepo=True,
4664 4668 )
4665 4669 def perfprogress(ui, topic=None, total=None, **opts):
4666 4670 """printing of progress bars"""
4667 4671 opts = _byteskwargs(opts)
4668 4672
4669 4673 timer, fm = gettimer(ui, opts)
4670 4674
4671 4675 def doprogress():
4672 4676 with ui.makeprogress(topic, total=total) as progress:
4673 4677 for i in _xrange(total):
4674 4678 progress.increment()
4675 4679
4676 4680 timer(doprogress)
4677 4681 fm.end()