##// END OF EJS Templates
perf-bundle: accept --rev arguments...
marmoute -
r50307:3635aae8 default
parent child Browse files
Show More
@@ -1,4029 +1,4044 b''
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance
3 3
4 4 Configurations
5 5 ==============
6 6
7 7 ``perf``
8 8 --------
9 9
10 10 ``all-timing``
11 11 When set, additional statistics will be reported for each benchmark: best,
  worst, median, average. If not set only the best timing is reported
13 13 (default: off).
14 14
15 15 ``presleep``
  number of seconds to wait before any group of runs (default: 1)
17 17
18 18 ``pre-run``
  number of runs to perform before starting measurement.
20 20
21 21 ``profile-benchmark``
22 22 Enable profiling for the benchmarked section.
23 23 (The first iteration is benchmarked)
24 24
25 25 ``run-limits``
26 26 Control the number of runs each benchmark will perform. The option value
27 27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 28 conditions are considered in order with the following logic:
29 29
30 30 If benchmark has been running for <time> seconds, and we have performed
31 31 <numberofrun> iterations, stop the benchmark,
32 32
33 33 The default value is: `3.0-100, 10.0-3`
34 34
35 35 ``stub``
36 36 When set, benchmarks will only be run once, useful for testing
37 37 (default: off)
38 38 '''
39 39
40 40 # "historical portability" policy of perf.py:
41 41 #
42 42 # We have to do:
43 43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 46 # - make historical perf command work correctly with as wide Mercurial
47 47 # version as possible
48 48 #
49 49 # We have to do, if possible with reasonable cost:
50 50 # - make recent perf command for historical feature work correctly
51 51 # with early Mercurial
52 52 #
53 53 # We don't have to do:
54 54 # - make perf command for recent feature work correctly with early
55 55 # Mercurial
56 56
57 57 import contextlib
58 58 import functools
59 59 import gc
60 60 import os
61 61 import random
62 62 import shutil
63 63 import struct
64 64 import sys
65 65 import tempfile
66 66 import threading
67 67 import time
68 68
69 69 import mercurial.revlog
70 70 from mercurial import (
71 71 changegroup,
72 72 cmdutil,
73 73 commands,
74 74 copies,
75 75 error,
76 76 extensions,
77 77 hg,
78 78 mdiff,
79 79 merge,
80 80 util,
81 81 )
82 82
83 83 # for "historical portability":
84 84 # try to import modules separately (in dict order), and ignore
85 85 # failure, because these aren't available with early Mercurial
86 86 try:
87 87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 88 except ImportError:
89 89 pass
90 90 try:
91 91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 92 except ImportError:
93 93 pass
94 94 try:
95 95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96 96
97 97 dir(registrar) # forcibly load it
98 98 except ImportError:
99 99 registrar = None
100 100 try:
101 101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 102 except ImportError:
103 103 pass
104 104 try:
105 105 from mercurial.utils import repoviewutil # since 5.0
106 106 except ImportError:
107 107 repoviewutil = None
108 108 try:
109 109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 110 except ImportError:
111 111 pass
112 112 try:
113 113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 114 except ImportError:
115 115 pass
116 116
117 117 try:
118 118 from mercurial import profiling
119 119 except ImportError:
120 120 profiling = None
121 121
# for "historical portability":
# revlog.revlog() grew a mandatory "revlog kind" argument in 6.0; wrap the
# constructor so callers in this file never need to care about it.
try:
    from mercurial.revlogutils import constants as revlog_constants

    perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')

    def revlog(opener, *args, **kwargs):
        """Instantiate a revlog with the modern (kind-aware) signature."""
        return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)


except (ImportError, AttributeError):
    perf_rl_kind = None

    def revlog(opener, *args, **kwargs):
        """Instantiate a revlog with the pre-6.0 signature."""
        return mercurial.revlog.revlog(opener, *args, **kwargs)
137 137
def identity(a):
    """Return *a* unchanged; no-op stand-in for missing pycompat helpers."""
    return a
140 140
141 141
142 142 try:
143 143 from mercurial import pycompat
144 144
145 145 getargspec = pycompat.getargspec # added to module after 4.5
146 146 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
147 147 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
148 148 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
149 149 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
150 150 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
151 151 if pycompat.ispy3:
152 152 _maxint = sys.maxsize # per py3 docs for replacing maxint
153 153 else:
154 154 _maxint = sys.maxint
155 155 except (NameError, ImportError, AttributeError):
156 156 import inspect
157 157
158 158 getargspec = inspect.getargspec
159 159 _byteskwargs = identity
160 160 _bytestr = str
161 161 fsencode = identity # no py3 support
162 162 _maxint = sys.maxint # no py3 support
163 163 _sysstr = lambda x: x # no py3 support
164 164 _xrange = xrange
165 165
166 166 try:
167 167 # 4.7+
168 168 queue = pycompat.queue.Queue
169 169 except (NameError, AttributeError, ImportError):
170 170 # <4.7.
171 171 try:
172 172 queue = pycompat.queue
173 173 except (NameError, AttributeError, ImportError):
174 174 import Queue as queue
175 175
176 176 try:
177 177 from mercurial import logcmdutil
178 178
179 179 makelogtemplater = logcmdutil.maketemplater
180 180 except (AttributeError, ImportError):
181 181 try:
182 182 makelogtemplater = cmdutil.makelogtemplater
183 183 except (AttributeError, ImportError):
184 184 makelogtemplater = None
185 185
# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()  # sentinel distinguishing "missing" from attr set to None


def safehasattr(thing, attr):
    """Return True if *thing* has attribute *attr* (attr may be bytes)."""
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined


# monkeypatch util so older code paths in this file can rely on it
setattr(util, 'safehasattr', safehasattr)

# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == b'nt':
    # NOTE(review): time.clock was removed in Python 3.8 — this branch only
    # matters for very old interpreters on Windows.
    util.timer = time.clock
else:
    util.timer = time.time
207 207
# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(
    cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
)

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(
    cmdutil,
    "debugrevlogopts",
    getattr(
        commands,
        "debugrevlogopts",
        [
            # local fallback mirroring the historical option list
            (b'c', b'changelog', False, b'open changelog'),
            (b'm', b'manifest', False, b'open manifest'),
            (b'', b'dir', False, b'open directory manifest'),
        ],
    ),
)
235 235
# table collecting every command registered by this extension
cmdtable = {}


# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a declaration like b"perf::tags|perftags" into its alias list."""
    return cmd.split(b'|')
243 243
244 244
# pick the richest @command decorator this Mercurial version offers,
# falling back to progressively older APIs
if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if 'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command

        def command(name, options=(), synopsis=None, norepo=False):
            """cmdutil.command wrapper emulating the 'norepo' keyword."""
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)


else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        """Minimal local reimplementation of the @command decorator."""

        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func

        return decorator
276 276
277 277
# declare the config items this extension reads; wrapped in try/except so the
# file still loads on Mercurial versions without the registrar/configitems API
try:
    import mercurial.registrar
    import mercurial.configitems

    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'pre-run',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
except (ImportError, AttributeError):
    pass
except TypeError:
    # compatibility fix for a11fd395e83f
    # hg version: 5.2
    # (that version's configitem() rejects the 'experimental' keyword,
    # so re-register everything without it)
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'pre-run',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
    )
364 364
365 365
def getlen(ui):
    """Return the length function used by benchmarks.

    Under the perf.stub config every collection is reported as size 1 so
    the test suite can exercise commands quickly.
    """
    stubbed = ui.configbool(b"perf", b"stub", False)
    return (lambda x: 1) if stubbed else len
370 370
371 371
class noop:
    """Context manager that does nothing; stands in for a disabled profiler."""

    def __enter__(self):
        return None

    def __exit__(self, *args):
        return None


# shared do-nothing context instance used when profiling is off
NOOPCTX = noop()
383 383
384 384
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter:
            """Minimized composition of baseformatter and plainformatter"""

            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short

            def __nonzero__(self):
                # a falsy formatter signals "plain output" to callers
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    # each entry is "<seconds>-<mincount>"; malformed entries are skipped
    # with a warning rather than aborting the benchmark
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
            continue
        try:
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
    )
    return t, fm
507 507
508 508
def stub_timer(fm, func, setup=None, title=None):
    """Run *func* exactly once (after optional *setup*); used by perf.stub."""
    if setup is not None:
        setup()
    func()
513 513
514 514
@contextlib.contextmanager
def timeone():
    """Time one run; the yielded list receives (wall, user, sys) on exit."""
    r = []
    ostart = os.times()
    cstart = util.timer()
    yield r
    cstop = util.timer()
    ostop = os.times()
    a, b = ostart, ostop
    # os.times() fields: [0] = user CPU time, [1] = system CPU time
    r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
525 525
526 526
# list of stop condition (elapsed time, minimal run count)
# a benchmark stops once ANY pair is satisfied: 100 runs within 3s,
# or 3 runs once 10s have elapsed
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)
532 532
533 533
def _timer(
    fm,
    func,
    setup=None,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
):
    """Repeatedly run *func*, then report timings through formatter *fm*.

    *setup* runs before every iteration (untimed); *prerun* warm-up
    iterations are executed before measurement; *limits* are
    (elapsed-seconds, min-run-count) stop conditions.
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    for i in range(prerun):
        if setup is not None:
            setup()
        func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with profiler:
            with timeone() as item:
                r = func()
        # only the first measured iteration is profiled
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    formatone(fm, results, title=title, result=r, displayall=displayall)
573 573
574 574
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's timing entries through formatter *fm*.

    Always reports the best run; with *displayall* also reports the max,
    average and median runs. Each entry is a (wall, user, sys) tuple.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def display(role, entry):
        # non-"best" roles get a "role." field prefix so output keys differ
        prefix = b'' if role == b'best' else b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    display(b'best', timings[0])
    if displayall:
        display(b'max', timings[-1])
        display(b'avg', tuple([sum(col) / count for col in zip(*timings)]))
        display(b'median', timings[len(timings) // 2])
608 608
609 609
610 610 # utilities for historical portability
611 611
612 612
def getint(ui, section, name, default):
    """Read config ``section.name`` as an int, or *default* when unset.

    Raises error.ConfigError when a value exists but is not an integer.
    """
    # for "historical portability":
    # ui.configint has been available since 1.9 (or fa2b596db182)
    value = ui.config(section, name, None)
    if value is None:
        return default
    try:
        return int(value)
    except ValueError:
        raise error.ConfigError(
            b"%s.%s is not an integer ('%s')" % (section, name, value)
        )
625 625
626 626
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort(
            (
                b"missing attribute %s of %s might break assumption"
                b" of performance measurement"
            )
            % (name, obj)
        )

    # capture the current value so restore() can put it back later
    origvalue = getattr(obj, _sysstr(name))

    class attrutil:
        # small handle exposing set()/restore() over the captured attribute
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
663 663
664 664
665 665 # utilities to examine each internal API changes
666 666
667 667
def getbranchmapsubsettable():
    """Return the branchmap ``subsettable`` wherever this hg version keeps it.

    for "historical portability": subsettable is defined in
    - branchmap since 2.9 (or 175c6fd8cacc)
    - repoview since 2.5 (or 59a9f18d4587)
    - repoviewutil since 5.0
    """
    for candidate in (branchmap, repoview, repoviewutil):
        table = getattr(candidate, 'subsettable', None)
        if table:
            return table

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )
686 686
687 687
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store"""
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf);
    # older versions expose the same object as repo.sopener.
    svfs = getattr(repo, 'svfs', None)
    return svfs if svfs else getattr(repo, 'sopener')
697 697
698 698
def getvfs(repo):
    """Return appropriate object to access files under .hg"""
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf);
    # older versions expose the same object as repo.opener.
    vfs = getattr(repo, 'vfs', None)
    return vfs if vfs else getattr(repo, 'opener')
708 708
709 709
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API"""
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if '_tagscache' in vars(repo):
                del repo.__dict__['_tagscache']

        return clearcache

    # fall back through progressively older cache attributes
    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")
738 738
739 739
740 740 # utilities to clear cache
741 741
742 742
def clearfilecache(obj, attrname):
    """Drop *attrname* from *obj*'s @filecache so it is recomputed on access."""
    unfiltered = getattr(obj, 'unfiltered', None)
    if unfiltered is not None:
        # filecached properties live on the unfiltered repo
        obj = obj.unfiltered()
    # remove the cached value itself (if it was ever computed)...
    if attrname in vars(obj):
        delattr(obj, attrname)
    # ...and the filecache bookkeeping entry
    obj._filecache.pop(attrname, None)
750 750
751 751
def clearchangelog(repo):
    """Invalidate every changelog cache on *repo*, filtered and unfiltered."""
    unfi = repo.unfiltered()
    if repo is not unfi:
        # filtered repoviews keep their own cached changelog + key; bypass
        # any attribute-protection with object.__setattr__
        object.__setattr__(repo, '_clcachekey', None)
        object.__setattr__(repo, '_clcache', None)
    clearfilecache(unfi, 'changelog')
757 757
758 758
759 759 # perf commands
760 760
761 761
@command(b'perf::walk|perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    """benchmark walking the working directory (dirstate.walk)"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    # len(list(...)) forces full consumption of the walk generator
    timer(
        lambda: len(
            list(
                repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
            )
        )
    )
    fm.end()
775 775
776 776
@command(b'perf::annotate|perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    """benchmark annotating file *f* at the working directory parent"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    # annotate(True) follows copies/renames as well
    timer(lambda: len(fc.annotate(True)))
    fm.end()
784 784
785 785
@command(
    b'perf::status|perfstatus',
    [
        (b'u', b'unknown', False, b'ask status to look for unknown files'),
        (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
    ]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call

    The repository data are preserved between each call.

    By default, only the status of the tracked file are requested. If
    `--unknown` is passed, the "unknown" files are also tracked.
    """
    opts = _byteskwargs(opts)
    # m = match.always(repo.root, repo.getcwd())
    # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    # False))))
    timer, fm = gettimer(ui, opts)
    if opts[b'dirstate']:
        dirstate = repo.dirstate
        m = scmutil.matchall(repo)
        unknown = opts[b'unknown']

        def status_dirstate():
            s = dirstate.status(
                m, subrepos=[], ignored=False, clean=False, unknown=unknown
            )
            # force evaluation of every status category
            sum(map(bool, s))

        timer(status_dirstate)
    else:
        timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
822 822
823 823
@command(b'perf::addremove|perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    """benchmark a dry-run addremove over the working copy"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    try:
        oldquiet = repo.ui.quiet
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True  # never actually mutate the repository
        if 'uipathfn' in getargspec(scmutil.addremove).args:
            # 5.0+ signature takes a uipathfn argument
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
841 841
842 842
def clearcaches(cl):
    """Drop lookup caches on a revlog/changelog, across hg versions."""
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # <= hg-5.2
        from mercurial.node import nullid, nullrev

        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
853 853
854 854
@command(b'perf::heads|perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    # setup drops caches so every run recomputes from scratch

    def s():
        clearcaches(cl)

    def d():
        len(cl.headrevs())

    timer(d, setup=s)
    fm.end()
870 870
871 871
@command(
    b'perf::tags|perftags',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perftags(ui, repo, **opts):
    """benchmark the computation of the repository tags"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']

    def s():
        # setup: optionally drop revlog caches; always drop the tags cache
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()

    def t():
        return len(repo.tags())

    timer(t, setup=s)
    fm.end()
896 896
897 897
@command(b'perf::ancestors|perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    """benchmark iterating over all ancestors of the changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()

    def d():
        # consume the whole ancestor generator
        for a in repo.changelog.ancestors(heads):
            pass

    timer(d)
    fm.end()
910 910
911 911
@command(b'perf::ancestorset|perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    """benchmark membership tests against the changelog ancestor lazy-set"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()

    def d():
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in s  # the membership test is the benchmarked operation

    timer(d)
    fm.end()
926 926
927 927
@command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path"""
    repos = [repo, None]
    # NOTE(review): unlike the sibling commands, opts is not run through
    # _byteskwargs here — confirm gettimer/hg.peer expect str-keyed opts.
    timer, fm = gettimer(ui, opts)

    try:
        from mercurial.utils.urlutil import get_unique_pull_path

        path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
    except ImportError:
        # older hg without the urlutil helper
        path = ui.expandpath(path)

    def s():
        # setup: open a fresh peer for every run
        repos[1] = hg.peer(ui, opts, path)

    def d():
        setdiscovery.findcommonheads(ui, *repos)

    timer(d, setup=s)
    fm.end()
949 949
950 950
@command(
    b'perf::bookmarks|perfbookmarks',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']

    def s():
        # setup: drop the cached bookmark store (and optionally revlogs)
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')

    def d():
        # attribute access triggers the (re)parse being measured
        repo._bookmarks

    timer(d, setup=s)
    fm.end()
975 975
976 976
@command(
    b'perf::bundle',
    [
        (
            b'r',
            b'rev',
            [],
            b'changesets to bundle',
            b'REV',
        ),
    ]
    + formatteropts,
    b'REVS',
)
def perfbundle(ui, repo, *revs, **opts):
    """benchmark the creation of a bundle from a repository

    For now, this creates a `none-v1` bundle.
    """
    from mercurial import bundlecaches
    from mercurial import discovery
    from mercurial import bundle2

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    cl = repo.changelog
    # accept revisions both as positional arguments and via --rev
    revs = list(revs)
    revs.extend(opts.get(b'rev', ()))
    revs = scmutil.revrange(repo, revs)
    if not revs:
        raise error.Abort(b"no revision specified")
    # make it a consistent set (ie: without topological gaps)
    old_len = len(revs)
    revs = list(repo.revs(b"%ld::%ld", revs, revs))
    if old_len != len(revs):
        new_count = len(revs) - old_len
        msg = b"add %d new revisions to make it a consistent set\n"
        ui.write_err(msg % new_count)

    # heads of the selected set, and the heads just outside of it, define
    # the outgoing object the bundler consumes
    targets = [cl.node(r) for r in repo.revs(b"heads(::%ld)", revs)]
    bases = [cl.node(r) for r in repo.revs(b"heads(::%ld - %ld)", revs, revs)]
    outgoing = discovery.outgoing(repo, bases, targets)

    bundlespec = bundlecaches.parsebundlespec(repo, b"none", strict=False)

    bversion = b'HG10' + bundlespec.wirecompression

    def do_bundle():
        # write to os.devnull: only the bundle generation cost is measured
        bundle2.writenewbundle(
            ui,
            repo,
            b'perf::bundle',
            os.devnull,
            bversion,
            outgoing,
            {},
        )

    timer(do_bundle)
    fm.end()
1025 1040
1026 1041
@command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    # time running `fn` against a freshly opened/parsed bundle object
    def makebench(fn):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    # time draining the bundle object in `size`-byte chunks
    def makereadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    # time raw file reads, bypassing bundle parsing entirely (baseline)
    def makestdioread(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        # seeking to the end of each part forces it to be fully consumed
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    # time reading every part of a bundle2 in `size`-byte chunks
    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    # raw-read baselines are benchmarked for every bundle type
    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # open the bundle once to detect its type and pick matching benchmarks
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend(
                [
                    (makebench(deltaiter), b'cg1 deltaiter()'),
                    (makebench(iterchunks), b'cg1 getchunks()'),
                    (makereadnbytes(8192), b'cg1 read(8k)'),
                    (makereadnbytes(16384), b'cg1 read(16k)'),
                    (makereadnbytes(32768), b'cg1 read(32k)'),
                    (makereadnbytes(131072), b'cg1 read(128k)'),
                ]
            )
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend(
                [
                    (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                    (makebench(iterparts), b'bundle2 iterparts()'),
                    (
                        makebench(iterpartsseekable),
                        b'bundle2 iterparts() seekable',
                    ),
                    (makebench(seek), b'bundle2 part seek()'),
                    (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                    (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                    (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                    (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
                ]
            )
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1151 1166
1152 1167
@command(
    b'perf::changegroupchangelog|perfchangegroupchangelog',
    formatteropts
    + [
        (b'', b'cgversion', b'02', b'changegroup version'),
        (b'r', b'rev', b'', b'revisions to add to changegroup'),
    ],
)
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        # exhaust the chunk generator so the full group is actually produced
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()
1188 1203
1189 1204
@command(b'perf::dirs|perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark `dirstate.hasdir`, dropping the `_dirs` cache on each run"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # make sure the dirstate is loaded before the timed section starts
    b'a' in ds

    def run():
        ds.hasdir(b'a')
        # drop the directory cache so the next call rebuilds it; some
        # dirstate map implementations do not expose `_dirs`
        try:
            del ds._map._dirs
        except AttributeError:
            pass

    timer(run)
    fm.end()
1206 1221
1207 1222
@command(
    b'perf::dirstate|perfdirstate',
    [
        (
            b'',
            b'iteration',
            None,
            b'benchmark a full iteration for the dirstate',
        ),
        (
            b'',
            b'contains',
            None,
            b'benchmark a large amount of `nf in dirstate` calls',
        ),
    ]
    + formatteropts,
)
def perfdirstate(ui, repo, **opts):
    """benchmark the time of various dirstate operations

    By default benchmark the time necessary to load a dirstate from scratch.
    The dirstate is loaded to the point where a "contains" request can be
    answered.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # make sure the dirstate is loaded before selecting a benchmark mode
    b"a" in repo.dirstate

    if opts[b'iteration'] and opts[b'contains']:
        msg = b'only specify one of --iteration or --contains'
        raise error.Abort(msg)

    if opts[b'iteration']:
        # time a full iteration over every tracked file
        setup = None
        dirstate = repo.dirstate

        def d():
            for f in dirstate:
                pass

    elif opts[b'contains']:
        # time membership tests over existing and non-existing paths
        setup = None
        dirstate = repo.dirstate
        allfiles = list(dirstate)
        # also add file path that will be "missing" from the dirstate
        allfiles.extend([f[::-1] for f in allfiles])

        def d():
            for f in allfiles:
                f in dirstate

    else:
        # default mode: time a cold load up to the first membership check

        def setup():
            repo.dirstate.invalidate()

        def d():
            b"a" in repo.dirstate

    timer(d, setup=setup)
    fm.end()
1270 1285
1271 1286
@command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark a 'dirstate.hasdir' call from an empty `dirs` cache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # warm the dirstate once outside of the timed section
    repo.dirstate.hasdir(b"a")

    def drop_dirs_cache():
        # some dirstate map implementations have no `_dirs` attribute
        try:
            del repo.dirstate._map._dirs
        except AttributeError:
            pass

    def run():
        repo.dirstate.hasdir(b"a")

    timer(run, setup=drop_dirs_cache)
    fm.end()
1290 1305
1291 1306
@command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.filefoldmap.get()` request

    The dirstate filefoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dmap = repo.dirstate._map
    # prime the filefoldmap property once before timing
    dmap.filefoldmap.get(b'a')

    def drop_cache():
        del dmap.filefoldmap

    def run():
        dmap.filefoldmap.get(b'a')

    timer(run, setup=drop_cache)
    fm.end()
1311 1326
1312 1327
@command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.dirfoldmap.get()` request

    The dirstate dirfoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dmap = repo.dirstate._map
    # prime the dirfoldmap property once before timing
    dmap.dirfoldmap.get(b'a')

    def drop_caches():
        del dmap.dirfoldmap
        # also drop the directory cache when the implementation has one
        try:
            del dmap._dirs
        except AttributeError:
            pass

    def run():
        dmap.dirfoldmap.get(b'a')

    timer(run, setup=drop_caches)
    fm.end()
1336 1351
1337 1352
@command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark the time it takes to write a dirstate on disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # load the dirstate before the timed section
    b"a" in ds

    def mark_dirty():
        # force a real write on every run
        ds._dirty = True

    def run():
        ds.write(repo.currenttransaction())

    timer(run, setup=mark_dirty)
    fm.end()
1354 1369
1355 1370
def _getmergerevs(repo, opts):
    """parse command arguments to return the revisions involved in a merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        fromrev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[fromrev]
    else:
        # no explicit base: use the common ancestor of both sides
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
1377 1392
1378 1393
@command(
    b'perf::mergecalculate|perfmergecalculate',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecalculate(ui, repo, **opts):
    """benchmark `merge.calculateupdates` for a given merge setup"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(
            repo,
            wctx,
            rctx,
            [ancestor],
            branchmerge=False,
            force=False,
            acceptremote=True,
            followcopies=True,
        )

    timer(d)
    fm.end()
1410 1425
1411 1426
@command(
    b'perf::mergecopies|perfmergecopies',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # resolve the three merge contexts before the timed section
    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def run():
        copies.mergecopies(repo, wctx, rctx, ancestor)

    timer(run)
    fm.end()
1434 1449
1435 1450
@command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # resolve both endpoints outside of the timed section
    src = scmutil.revsingle(repo, rev1, rev1)
    dst = scmutil.revsingle(repo, rev2, rev2)

    def run():
        copies.pathcopies(src, dst)

    timer(run)
    fm.end()
1449 1464
1450 1465
@command(
    b'perf::phases|perfphases',
    [
        (b'', b'full', False, b'include file reading time too'),
    ],
    b"",
)
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')

    def d():
        phases = _phases
        if full:
            # with --full, also drop the filecache entry so the phase data
            # is re-read from disk on every run
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)

    timer(d)
    fm.end()
1475 1490
1476 1491
@command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import bin
    from mercurial import (
        exchange,
        hg,
        phases,
    )

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            b'default repository not configured!',
            hint=b"see 'hg help config.paths'",
        )
    dest = path.pushloc or path.loc
    ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    # fetch the remote phase roots over the wire
    with other.commandexecutor() as e:
        remotephases = e.callcommand(
            b'listkeys', {b'namespace': b'phases'}
        ).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.statusnoi18n(b'publishing: yes\n')
    else:
        ui.statusnoi18n(b'publishing: no\n')

    # older index implementations do not expose has_node; fall back to
    # nodemap containment
    has_node = getattr(repo.changelog.index, 'has_node', None)
    if has_node is None:
        has_node = repo.changelog.nodemap.__contains__
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if has_node(node) and int(phase):
            nonpublishroots += 1
    ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
    ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)

    def d():
        phases.remotephasessummary(repo, remotesubset, remotephases)

    timer(d)
    fm.end()
1535 1550
1536 1551
@command(
    b'perf::manifest|perfmanifest',
    [
        (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
        (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ]
    + formatteropts,
    b'REV|NODE',
)
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # interpret the argument as a changeset and use its manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # full hexadecimal manifest node
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                # older Mercurial exposed the manifest revlog directly
                # instead of through getstorage()
                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(
                    b'manifest revision must be integer or full node'
                )

    def d():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()

    timer(d)
    fm.end()
1580 1595
1581 1596
@command(b'perf::changeset|perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading one changeset's data from the changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    node = scmutil.revsingle(repo, rev).node()

    def run():
        repo.changelog.read(node)

    timer(run)
    fm.end()
1594 1609
1595 1610
@command(b'perf::ignore|perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def reset():
        # drop the dirstate content and the cached ignore matcher so the
        # timed section rebuilds both from scratch
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def load():
        dirstate._ignore

    timer(load, setup=reset, title=b"load")
    fm.end()
1612 1627
1613 1628
@command(
    b'perf::index|perfindex',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'no-lookup', None, b'do not revision lookup post creation'),
    ]
    + formatteropts,
)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matter. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matter.

    Example of useful set to test:

    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        # `opts` keys are bytes after _byteskwargs(); the former str key
        # ('rev') raised KeyError instead of performing the intended check
        if opts[b'rev']:
            raise error.Abort(b'--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func

    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)

    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)

    timer(d, setup=setup)
    fm.end()
1676 1691
1677 1692
@command(
    b'perf::nodemap|perfnodemap',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ]
    + formatteropts,
)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]

    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        cl = makecl(unfi)
        # modern indexes expose `get_rev`; fall back to the nodemap
        # protocol on older Mercurial versions
        if util.safehasattr(cl.index, 'get_rev'):
            nodeget[0] = cl.index.get_rev
        else:
            nodeget[0] = cl.nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:

        def setup():
            setnodeget()

    else:
        setnodeget()
        d()  # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
1748 1763
1749 1764
@command(b'perf::startup|perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark running the current hg executable for a trivial command"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def d():
        if os.name != 'nt':
            # run with an empty HGRCPATH and discard the output
            os.system(
                b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
            )
        else:
            # Windows: the `VAR= cmd` shell form is unavailable, so set the
            # variable in the process environment instead
            os.environ['HGRCPATH'] = r' '
            os.system("%s version -q > NUL" % sys.argv[0])

    timer(d)
    fm.end()
1766 1781
1767 1782
@command(b'perf::parents|perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nodes = [repo.changelog.node(i) for i in _xrange(count)]

    def run():
        for node in nodes:
            repo.changelog.parents(node)

    timer(run)
    fm.end()
1793 1808
1794 1809
@command(b'perf::ctxfiles|perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark computing the file list of a single changectx"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)

    def run():
        len(repo[rev].files())

    timer(run)
    fm.end()
1806 1821
1807 1822
@command(b'perf::rawfiles|perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the raw file list of a changelog revision"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def run():
        # index 3 of the parsed changelog entry is the file list
        len(cl.read(rev)[3])

    timer(run)
    fm.end()
1820 1835
1821 1836
@command(b'perf::lookup|perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision specifier to a node"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def run():
        return len(repo.lookup(rev))

    timer(run)
    fm.end()
1828 1843
1829 1844
@command(
    b'perf::linelogedits|perflinelogedits',
    [
        (b'n', b'edits', 10000, b'number of edits'),
        (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
    ],
    norepo=True,
)
def perflinelogedits(ui, **opts):
    """benchmark applying a pre-generated stream of edits to a linelog"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed: every invocation replays the exact same edit sequence
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        # random a-range inside the current file size...
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        # ...replaced by a random b-range
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1867 1882
1868 1883
@command(b'perf::revrange|perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revset specifications"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def run():
        return len(scmutil.revrange(repo, specs))

    timer(run)
    fm.end()
1876 1891
1877 1892
@command(b'perf::nodelookup|perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark a node lookup on a changelog with cleared caches"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog

    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()

    # the revlog constructor changed over time; try the modern `radix`
    # keyword first, then fall back to the older `indexfile` one
    try:
        cl = revlog(getsvfs(repo), radix=b"00changelog")
    except TypeError:
        cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")

    def d():
        cl.rev(n)
        # drop caches so the next run resolves the node from scratch
        clearcaches(cl)

    timer(d)
    fm.end()
1898 1913
1899 1914
@command(
    b'perf::log|perflog',
    [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
)
def perflog(ui, repo, rev=None, **opts):
    """benchmark running `hg log` over the repository"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    follow_copies = opts.get(b'rename')
    # buffer the command output so printing does not pollute the timing
    ui.pushbuffer()

    def run():
        commands.log(
            ui, repo, rev=rev, date=b'', user=b'', copies=follow_copies
        )

    timer(run)
    ui.popbuffer()
    fm.end()
1917 1932
1918 1933
@command(b'perf::moonwalk|perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def walk_backwards():
        last = len(repo) - 1
        for rev in repo.changelog.revs(start=last, stop=-1):
            # read changelog data (in addition to the index)
            repo[rev].branch()

    timer(walk_backwards)
    fm.end()
1935 1950
1936 1951
@command(
    b'perf::templating|perftemplating',
    [
        (b'r', b'rev', [], b'revisions to run the template on'),
    ]
    + formatteropts,
)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort(
            b"perftemplating not available with this Mercurial",
            hint=b"use 4.3 or later",
        )

    opts = _byteskwargs(opts)

    # render into a ui whose output is discarded so terminal I/O does not
    # pollute the timing
    nullui = ui.copy()
    nullui.fout = open(os.devnull, 'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (
        b'{date|shortdate} [{rev}:{node|short}]'
        b' {author|person}: {desc|firstline}\n'
    )
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)

    def format():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()
1979 1994
1980 1995
1981 1996 def _displaystats(ui, opts, entries, data):
1982 1997 # use a second formatter because the data are quite different, not sure
1983 1998 # how it flies with the templater.
1984 1999 fm = ui.formatter(b'perf-stats', opts)
1985 2000 for key, title in entries:
1986 2001 values = data[key]
1987 2002 nbvalues = len(data)
1988 2003 values.sort()
1989 2004 stats = {
1990 2005 'key': key,
1991 2006 'title': title,
1992 2007 'nbitems': len(values),
1993 2008 'min': values[0][0],
1994 2009 '10%': values[(nbvalues * 10) // 100][0],
1995 2010 '25%': values[(nbvalues * 25) // 100][0],
1996 2011 '50%': values[(nbvalues * 50) // 100][0],
1997 2012 '75%': values[(nbvalues * 75) // 100][0],
1998 2013 '80%': values[(nbvalues * 80) // 100][0],
1999 2014 '85%': values[(nbvalues * 85) // 100][0],
2000 2015 '90%': values[(nbvalues * 90) // 100][0],
2001 2016 '95%': values[(nbvalues * 95) // 100][0],
2002 2017 '99%': values[(nbvalues * 99) // 100][0],
2003 2018 'max': values[-1][0],
2004 2019 }
2005 2020 fm.startitem()
2006 2021 fm.data(**stats)
2007 2022 # make node pretty for the human output
2008 2023 fm.plain('### %s (%d items)\n' % (title, len(values)))
2009 2024 lines = [
2010 2025 'min',
2011 2026 '10%',
2012 2027 '25%',
2013 2028 '50%',
2014 2029 '75%',
2015 2030 '80%',
2016 2031 '85%',
2017 2032 '90%',
2018 2033 '95%',
2019 2034 '99%',
2020 2035 'max',
2021 2036 ]
2022 2037 for l in lines:
2023 2038 fm.plain('%s: %s\n' % (l, stats[l]))
2024 2039 fm.end()
2025 2040
2026 2041
@command(
    b'perf::helper-mergecopies|perfhelper-mergecopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelpermergecopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perfmergecopies`

    This command find (base, p1, p2) triplet relevant for copytracing
    benchmarking in the context of a merge. It reports values for some of the
    parameters that impact merge copy tracing time during merge.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision triplets are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # (column title, %-format used against the per-triplet data dict)
    output_template = [
        ("base", "%(base)12s"),
        ("p1", "%(p1.node)12s"),
        ("p2", "%(p2.node)12s"),
        ("p1.nb-revs", "%(p1.nbrevs)12d"),
        ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
        ("p1.renames", "%(p1.renamedfiles)12d"),
        ("p1.time", "%(p1.time)12.3f"),
        ("p2.nb-revs", "%(p2.nbrevs)12d"),
        ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
        ("p2.renames", "%(p2.renamedfiles)12d"),
        ("p2.time", "%(p2.time)12.3f"),
        ("renames", "%(nbrenamedfiles)12d"),
        ("total.time", "%(time)12.3f"),
    ]
    if not dotiming:
        # rename and timing columns are only filled in with --timing
        output_template = [
            i
            for i in output_template
            if not ('time' in i[0] or 'renames' in i[0])
        ]
    header_names = [h for (h, v) in output_template]
    output = ' '.join([v for (h, v) in output_template]) + '\n'
    header = ' '.join(['%12s'] * len(header_names)) + '\n'
    fm.plain(header % tuple(header_names))

    # default to scanning the whole repository
    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        # accumulators for _displaystats, keyed by statistic name
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['parentnbrenames'] = []
            alldata['totalnbrenames'] = []
            alldata['parenttime'] = []
            alldata['totaltime'] = []

    # only merge commits produce (base, p1, p2) triplets of interest
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1()
        p2 = ctx.p2()
        # one triplet per common-ancestor head of the two merge parents
        bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
        for b in bases:
            b = repo[b]
            p1missing = copies._computeforwardmissing(b, p1)
            p2missing = copies._computeforwardmissing(b, p2)
            data = {
                b'base': b.hex(),
                b'p1.node': p1.hex(),
                b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
                b'p1.nbmissingfiles': len(p1missing),
                b'p2.node': p2.hex(),
                b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
                b'p2.nbmissingfiles': len(p2missing),
            }
            if dostats:
                if p1missing:
                    alldata['nbrevs'].append(
                        (data['p1.nbrevs'], b.hex(), p1.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p1.nbmissingfiles'], b.hex(), p1.hex())
                    )
                if p2missing:
                    alldata['nbrevs'].append(
                        (data['p2.nbrevs'], b.hex(), p2.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p2.nbmissingfiles'], b.hex(), p2.hex())
                    )
            if dotiming:
                begin = util.timer()
                mergedata = copies.mergecopies(repo, p1, p2, b)
                end = util.timer()
                # not very stable timing since we did only one run
                data['time'] = end - begin
                # mergedata contains five dicts: "copy", "movewithdir",
                # "diverge", "renamedelete" and "dirmove".
                # The first 4 are about renamed file so lets count that.
                renames = len(mergedata[0])
                renames += len(mergedata[1])
                renames += len(mergedata[2])
                renames += len(mergedata[3])
                data['nbrenamedfiles'] = renames
                begin = util.timer()
                p1renames = copies.pathcopies(b, p1)
                end = util.timer()
                data['p1.time'] = end - begin
                begin = util.timer()
                p2renames = copies.pathcopies(b, p2)
                end = util.timer()
                data['p2.time'] = end - begin
                data['p1.renamedfiles'] = len(p1renames)
                data['p2.renamedfiles'] = len(p2renames)

                if dostats:
                    if p1missing:
                        alldata['parentnbrenames'].append(
                            (data['p1.renamedfiles'], b.hex(), p1.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p1.time'], b.hex(), p1.hex())
                        )
                    if p2missing:
                        alldata['parentnbrenames'].append(
                            (data['p2.renamedfiles'], b.hex(), p2.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p2.time'], b.hex(), p2.hex())
                        )
                    if p1missing or p2missing:
                        alldata['totalnbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                b.hex(),
                                p1.hex(),
                                p2.hex(),
                            )
                        )
                        alldata['totaltime'].append(
                            (data['time'], b.hex(), p1.hex(), p2.hex())
                        )
            fm.startitem()
            fm.data(**data)
            # make node pretty for the human output
            out = data.copy()
            out['base'] = fm.hexfunc(b.node())
            out['p1.node'] = fm.hexfunc(p1.node())
            out['p2.node'] = fm.hexfunc(p2.node())
            fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(
                ('parentnbrenames', 'rename from one parent to base')
            )
            entries.append(('totalnbrenames', 'total number of renames'))
            entries.append(('parenttime', 'time for one parent'))
            entries.append(('totaltime', 'time for both parents'))
        _displaystats(ui, opts, entries, alldata)
2209 2224
@command(
    b'perf::helper-pathcopies|perfhelper-pathcopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # with --timing, two extra columns (rename count and duration) are shown
    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d "
            "%(nbrenamedfiles)12d %(time)18.5f\n"
        )
        header_names = (
            "source",
            "destination",
            "nb-revs",
            "nb-files",
            "nb-renames",
            "time",
        )
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d\n"
        )
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    # default to scanning the whole repository
    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        # accumulators for _displaystats, keyed by statistic name
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['nbrenames'] = []
            alldata['time'] = []

    # only merge commits produce interesting (base, parent) pairs
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        # measure each parent against every common-ancestor head
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
                    b'nbmissingfiles': len(missing),
                }
                if dostats:
                    alldata['nbrevs'].append(
                        (
                            data['nbrevs'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                    alldata['nbmissingfiles'].append(
                        (
                            data['nbmissingfiles'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                    if dostats:
                        alldata['time'].append(
                            (
                                data['time'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                        alldata['nbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                fm.startitem()
                fm.data(**data)
                # make node pretty for the human output
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
    if dostats:
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('nbrenames', 'renamed files'))
            entries.append(('time', 'time'))
        _displaystats(ui, opts, entries, alldata)
2348 2363
2349 2364
@command(b'perf::cca|perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark construction of a case collision auditor"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def build_auditor():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(build_auditor)
    fm.end()
2356 2371
2357 2372
@command(b'perf::fncacheload|perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache file from the store"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store

    def load_fncache():
        store.fncache._load()

    timer(load_fncache)
    fm.end()
2369 2384
2370 2385
@command(b'perf::fncachewrite|perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark rewriting the fncache file"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    # the write happens inside a real transaction, with the previous
    # content backed up, so the repository is left untouched
    lock = repo.lock()
    store.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    tr.addbackup(b'fncache')

    def write_fncache():
        # force a rewrite even though nothing actually changed
        store.fncache._dirty = True
        store.fncache.write(tr)

    timer(write_fncache)
    tr.close()
    lock.release()
    fm.end()
2389 2404
2390 2405
@command(b'perf::fncacheencode|perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark encoding every path currently in the fncache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    store.fncache._load()

    def encode_entries():
        for path in store.fncache.entries:
            store.encode(path)

    timer(encode_entries)
    fm.end()
2404 2419
2405 2420
def _bdiffworker(q, blocks, xdiff, ready, done):
    """Worker thread body for threaded perfbdiff runs.

    Consumes text pairs from ``q`` and diffs each one until a ``None``
    sentinel is seen, then parks on ``ready`` until the next round.  The
    dispatch between xdiff/blocks/textdiff is kept inside the loop so the
    per-pair work measured matches the single-threaded code path.
    """
    while not done.is_set():
        job = q.get()
        while job is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*job)
            else:
                if blocks:
                    mdiff.bdiff.blocks(*job)
                else:
                    mdiff.textdiff(*job)
            q.task_done()
            job = q.get()
        q.task_done()  # account for the None sentinel
        with ready:
            ready.wait()
2421 2436
2422 2437
def _manifestrevision(repo, mnode):
    """Return the raw manifest text for manifest node ``mnode``."""
    ml = repo.manifestlog
    # newer Mercurial exposes getstorage(); older versions keep ._revlog
    if util.safehasattr(ml, b'getstorage'):
        return ml.getstorage(b'').revision(mnode)
    return ml._revlog.revision(mnode)
2432 2447
2433 2448
@command(
    b'perf::bdiff|perfbdiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
        (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
        (b'', b'blocks', False, b'test computing diffs into blocks'),
        (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],
    b'-c|-m|FILE REV',
)
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    # --alldata implies operating on the changelog
    if opts[b'alldata']:
        opts[b'changelog'] = True

    # with -c/-m the sole positional argument is the revision, not a file
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    # gather all (old, new) text pairs up front so only diffing is timed
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:

        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)

    else:
        # start the worker pool once; each timed run feeds the queue and
        # ends with one None sentinel per worker
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(
                target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
            ).start()
        q.join()

        def d():
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    # shut the worker threads down cleanly
    if withthreads:
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
2549 2564
@command(
    b'perf::unidiff|perfunidiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ],
    b'-c|-m|FILE REV',
)
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    # --alldata implies operating on the changelog
    if opts[b'alldata']:
        opts[b'changelog'] = True

    # with -c/-m the sole positional argument is the revision, not a file
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    # gather all (old, new) text pairs up front so only diffing is timed
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False
            )
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
2628 2643
2629 2644
@command(b'perf::diffwd|perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # map each single-letter diff flag to the diff option it enables
    flagmap = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    # time `hg diff` under several whitespace-handling combinations
    for flags in ('', 'w', 'b', 'B', 'wB'):
        diffargs = {flagmap[c]: b'1' for c in flags}

        def run_diff():
            ui.pushbuffer()
            commands.diff(ui, repo, **diffargs)
            ui.popbuffer()

        encoded = flags.encode('ascii')
        if encoded:
            title = b'diffopts: %s' % (b'-' + encoded)
        else:
            title = b'diffopts: %s' % b'none'
        timer(run_diff, title=title)
    fm.end()
2653 2668
2654 2669
@command(
    b'perf::revlogindex|perfrevlogindex',
    revlogopts + formatteropts,
    b'-c|-m|FILE',
)
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    # compat with hg <= 5.8
    radix = getattr(rl, 'radix', None)
    indexfile = getattr(rl, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(rl, 'indexfile')
    data = opener.read(indexfile)

    # low 16 bits of the first word hold the revlog format version,
    # bit 16 the inline flag
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        inline = header & (1 << 16)
    else:
        raise error.Abort(b'unsupported revlog version: %d' % version)

    parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
    if parse_index_v1 is None:
        # hg <= 4.9 parsed the index through a revlogio instance
        parse_index_v1 = mercurial.revlog.revlogio().parseindex

    rllen = len(rl)

    # sample nodes at several positions to probe lookup cost
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        if radix is not None:
            revlog(opener, radix=radix)
        else:
            # hg <= 5.8
            revlog(opener, indexfile=indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        parse_index_v1(data, inline)

    def getentry(revornode):
        index = parse_index_v1(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = parse_index_v1(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        index = parse_index_v1(data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        try:
            rev(node)
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        index = parse_index_v1(data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        for i in range(count):
            for node in nodes:
                try:
                    rev(node)
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
        (
            lambda: resolvenodes(allnodesrev, 2),
            b'look up all nodes 2x (reverse)',
        ),
        (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
        (
            lambda: getentries(allrevs, 2),
            b'retrieve all index entries 2x (forward)',
        ),
        (
            lambda: getentries(allrevsrev),
            b'retrieve all index entries (reverse)',
        ),
        (
            lambda: getentries(allrevsrev, 2),
            b'retrieve all index entries 2x (reverse)',
        ),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2800 2815
2801 2816
@command(
    b'perf::revlogrevisions|perfrevlogrevisions',
    revlogopts
    + formatteropts
    + [
        (b'd', b'dist', 100, b'distance between the revisions'),
        (b's', b'startrev', 0, b'revision to start reading at'),
        (b'', b'reverse', False, b'read in reverse'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogrevisions(
    ui, repo, file_=None, startrev=0, reverse=False, **opts
):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # a negative start revision counts from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev

    def read_series():
        rl.clearcaches()

        first = startrev
        last = rllen
        step = opts[b'dist']

        if reverse:
            # walk from tip back toward the start revision
            first, last = last - 1, first - 1
            step = -1 * step

        for x in _xrange(first, last, step):
            # Old revisions don't support passing int.
            node = rl.node(x)
            rl.revision(node)

    timer, fm = gettimer(ui, opts)
    timer(read_series)
    fm.end()
2850 2865
2851 2866
@command(
    b'perf::revlogwrite|perfrevlogwrite',
    revlogopts
    + formatteropts
    + [
        (b's', b'startrev', 1000, b'revision to start writing at'),
        (b'', b'stoprev', -1, b'last revision to write'),
        (b'', b'count', 3, b'number of passes to perform'),
        (b'', b'details', False, b'print timing for every revisions tested'),
        (b'', b'source', b'full', b'the kind of data feed in the revlog'),
        (b'', b'lazydeltabase', True, b'try the provided delta first'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
      (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    Note: This performance command measures performance in a custom way. As a
    result some of the global configuration of the 'perf' command does not
    apply to it:

    * ``pre-run``: disabled

    * ``profile-benchmark``: disabled

    * ``run-limits``: disabled use --count instead
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative revisions count from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (
        b'full',
        b'parent-1',
        b'parent-2',
        b'parent-smallest',
        b'storage',
    )
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        # (typo "invalide" fixed in the message below)
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(
            ui,
            rl,
            source,
            startrev,
            stoprev,
            c + 1,
            lazydeltabase=lazydeltabase,
            clearcaches=clearcaches,
        )
        allresults.append(timing)

    ### consolidate the results in a single list
    # each entry becomes (rev, [timing from run 1, timing from run 2, ...])
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # the index for the median was previously computed with ``* 70``,
        # which reported the 70th percentile under the "50%" label
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append(
            (
                sum(x[1][0] for x in item),
                sum(x[1][1] for x in item),
                sum(x[1][2] for x in item),
            )
        )
    formatone(
        fm,
        totaltime,
        title="total time (%d revs)" % resultcount,
        displayall=displayall,
    )
    fm.end()
2993 3008
2994 3009
2995 3010 class _faketr:
2996 3011 def add(s, x, y, z=None):
2997 3012 return None
2998 3013
2999 3014
def _timeonewrite(
    ui,
    orig,
    source,
    startrev,
    stoprev,
    runidx=None,
    lazydeltabase=True,
    clearcaches=True,
):
    """Replay revisions [startrev, stoprev] of ``orig`` into a truncated
    temporary copy and time each ``addrawrevision`` call.

    Returns a list of ``(rev, timing)`` pairs, one per re-added revision,
    where ``timing`` is the value recorded by the ``timeone()`` context.
    ``source`` selects how each revision is fed back in (see
    ``_getrevisionseed``); ``runidx`` only labels the progress topic.
    """
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)

            def updateprogress(pos):
                progress.update(pos)

            def completeprogress():
                progress.complete()

        else:
            # pre-makeprogress API: drive ui.progress() directly

            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)

            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                # drop caches so each addition is measured cold
                dest.index.clearcaches()
                dest.clearcaches()
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            # only the addrawrevision() call is inside the timed section
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
3049 3064
3050 3065
def _getrevisionseed(orig, rev, tr, source):
    """Build the ``(args, kwargs)`` pair for re-adding ``rev`` of ``orig``
    through ``addrawrevision``.

    ``source`` picks how the revision content is supplied: as a fulltext
    (``full``) or as a cached delta against a chosen base revision.
    """
    from mercurial.node import nullid

    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    text = None
    cachedelta = None

    if source == b'full':
        # plain fulltext, no delta hint
        text = orig.revision(rev)
    elif source == b'parent-1':
        cachedelta = (orig.rev(p1), orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to the first parent when there is no second one
        base = p1 if p2 == nullid else p2
        cachedelta = (orig.rev(base), orig.revdiff(base, rev))
    elif source == b'parent-smallest':
        # pick whichever parent yields the shorter delta (p1 wins ties)
        candidates = [(orig.revdiff(p1, rev), p1)]
        if p2 != nullid:
            candidates.append((orig.revdiff(p2, rev), p2))
        diff, base = min(candidates, key=lambda c: len(c[0]))
        cachedelta = (orig.rev(base), diff)
    elif source == b'storage':
        # reuse the delta base currently stored in the revlog
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return (
        (text, tr, orig.linkrev(rev), p1, p2),
        {'node': node, 'flags': orig.flags(rev), 'cachedelta': cachedelta},
    )
3091 3106
3092 3107
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Yield a writable copy of revlog ``orig`` truncated so that revisions
    >= ``truncaterev`` are missing again and can be re-added.

    The copy lives in a temporary directory that is removed on exit.
    Inline revlogs (data interleaved in the index) are not supported.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    k = 'upperboundcomp'
    if util.safehasattr(orig, k):
        # newer revlogs take an extra constructor argument; forward it
        revlogkwargs[k] = getattr(orig, k)

    indexfile = getattr(orig, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(orig, 'indexfile')
    origindexpath = orig.opener.join(indexfile)

    datafile = getattr(orig, '_datafile', getattr(orig, 'datafile'))
    origdatapath = orig.opener.join(datafile)
    radix = b'revlog'
    indexname = b'revlog.i'
    dataname = b'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            # each index entry is fixed-size, so this drops entries
            # for revisions >= truncaterev
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        try:
            # modern constructor uses radix-based file naming
            dest = revlog(vfs, radix=radix, **revlogkwargs)
        except TypeError:
            # older constructor takes explicit index/data file names
            dest = revlog(
                vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
            )
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
3153 3168
3154 3169
@command(
    b'perf::revlogchunks|perfrevlogchunks',
    revlogopts
    + formatteropts
    + [
        (b'e', b'engines', b'', b'compression engines to use'),
        (b's', b'startrev', 0, b'revision to start at'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = {e.strip() for e in engines.split(b',')}
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # default to every available engine that can actually compress
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        """Open a raw file handle on the revlog's storage file."""
        if rl._inline:
            indexfile = getattr(rl, '_indexfile', None)
            if indexfile is None:
                # compatibility with <= hg-5.8
                indexfile = getattr(rl, 'indexfile')
            return getsvfs(repo)(indexfile)
        else:
            # modern revlogs (hg >= 5.9) use `_datafile`; fall back to the
            # older `datafile` attribute.  (The previous code looked up
            # `datafile` twice, breaking on modern revlogs.)
            datafile = getattr(rl, '_datafile', getattr(rl, 'datafile'))
            return getsvfs(repo)(datafile)

    def doread():
        # one segment read per revision, fresh file handle each time
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        # a single segment read covering the whole revision span
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append(
            (
                functools.partial(docompress, compressor),
                b'compress w/ %s' % engine,
            )
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3287 3302
3288 3303
@command(
    b'perf::revlogrevision|perfrevlogrevision',
    revlogopts
    + formatteropts
    + [(b'', b'cache', False, b'use caches instead of clearing')],
    b'-c|-m|FILE REV',
)
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # -c/-m take no FILE argument; the positional is the revision
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        """Slice the segment buffers in ``data`` into per-revision chunks."""
        start = r.start
        length = r.length
        inline = r._inline
        try:
            iosize = r.index.entry_size
        except AttributeError:
            # compatibility with older revlog internals
            iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        # older layout kept the slicing helper on the revlog module
        slicechunk = getattr(revlog, '_slicechunk', None)

    # precompute every intermediate product once, so each benchmark can
    # exercise a single phase in isolation
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend(
        [
            (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
            (lambda: dodecompress(rawchunks), b'decompress'),
            (lambda: dopatch(text, bins), b'patch'),
            (lambda: dohash(text), b'hash'),
        ]
    )

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
3433 3448
3434 3449
@command(
    b'perf::revset|perfrevset',
    [
        (b'C', b'clear', False, b'clear volatile cache between each call.'),
        (b'', b'contexts', False, b'obtain changectx for each revision'),
    ]
    + formatteropts,
    b"REVSET",
)
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of building
    the volatile revisions set cache on the revset execution. Volatile
    cache holds filtered and obsolete related cache."""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def d():
        if clear:
            repo.invalidatevolatilesets()
        if contexts:
            # also pay the cost of building a changectx per revision
            for ctx in repo.set(expr):
                pass
        else:
            for r in repo.revs(expr):
                pass

    timer(d)
    fm.end()
3466 3481
3467 3482
@command(
    b'perf::volatilesets|perfvolatilesets',
    [
        (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
    ]
    + formatteropts,
)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def makebench(compute, name):
        """build a benchmark closure invalidating caches then calling
        ``compute(repo, name)``"""

        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            compute(repo, name)

        return d

    # obsolescence-related sets first, then repoview filters, both in
    # sorted order and restricted to any names given on the command line
    for name in sorted(obsolete.cachefuncs):
        if names and name not in names:
            continue
        timer(makebench(obsolete.getrevs, name), title=name)

    for name in sorted(repoview.filtertable):
        if names and name not in names:
            continue
        timer(makebench(repoview.filterrevs, name), title=name)

    fm.end()
3515 3530
3516 3531
@command(
    b'perf::branchmap|perfbranchmap',
    [
        (b'f', b'full', False, b'Includes build time of subset'),
        (
            b'',
            b'clear-revbranch',
            False,
            b'purge the revbranch cache between computation',
        ),
    ]
    + formatteropts,
)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)

    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches

        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                view._branchcaches.clear()
            else:
                # only drop this filter's entry, keeping subsets warm
                filtered.pop(filtername, None)
            view.branchmap()

        return d

    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        # pick a filter whose subset is not still pending, so that
        # allfilters ends up ordered from smaller to bigger subsets
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable on-disk branchmap reads/writes so only the in-memory update
    # is measured; restored in the finally block below
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=printname)
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
3606 3621
3607 3622
@command(
    b'perf::branchmapupdate|perfbranchmapupdate',
    [
        (b'', b'base', [], b'subset of revision to start from'),
        (b'', b'target', [], b'subset of revision to end with'),
        (b'', b'clear-caches', False, b'clear cache between each runs'),
    ]
    + formatteropts,
)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from for <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    # update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None]  # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    # the revisions the benchmarked update will actually have to process
    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        # register two throw-away repoview filters exposing exactly the
        # base and target revision sets; unregistered in the finally below
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset where found
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            # start from a fresh copy so each run updates the same state
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3716 3731
3717 3732
@command(
    b'perf::branchmapload|perfbranchmapload',
    [
        (b'f', b'filter', b'', b'Specify repoview filter'),
        # fix typo in help text: "brachmap" -> "branchmap"
        (b'', b'list', False, b'List branchmap filter caches'),
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ]
    + formatteropts,
)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap

    With --list, only report which branchmap caches exist on disk (and
    their size) instead of benchmarking.
    """
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(
                    b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
                )
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap()  # make sure we have a relevant, up to date branchmap

    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached
    while fromfile(repo) is None:
        # walk up the subset chain until we find a cached branchmap
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(
                b'No branchmap cached for %s repo' % (filter or b'unfiltered')
            )
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)

    def setup():
        if clearrevlogs:
            clearchangelog(repo)

    def bench():
        fromfile(repo)

    timer(bench, setup=setup)
    fm.end()
3776 3791
3777 3792
@command(b'perf::loadmarkers|perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)

    def count_markers():
        # instantiating the obsstore parses every on-disk marker
        return len(obsolete.obsstore(repo, svfs))

    timer(count_markers)
    fm.end()
3787 3802
3788 3803
@command(
    b'perf::lrucachedict|perflrucachedict',
    formatteropts
    + [
        (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
        (b'', b'mincost', 0, b'smallest cost of items in cache'),
        (b'', b'maxcost', 100, b'maximum cost of items in cache'),
        (b'', b'size', 4, b'size of cache'),
        (b'', b'gets', 10000, b'number of key lookups'),
        (b'', b'sets', 10000, b'number of key sets'),
        (b'', b'mixed', 10000, b'number of mixed mode operations'),
        (
            b'',
            b'mixedgetfreq',
            50,
            b'frequency of get vs set ops in mixed mode',
        ),
    ],
    norepo=True,
)
def perflrucache(
    ui,
    mincost=0,
    maxcost=100,
    costlimit=0,
    size=4,
    gets=10000,
    sets=10000,
    mixed=10000,
    mixedgetfreq=50,
    **opts
):
    """benchmark util.lrucachedict operations (init, get, insert, set,
    mixed), with or without a total-cost limit depending on --costlimit"""
    opts = _byteskwargs(opts)

    def doinit():
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value  # silence pyflakes warning

    def dogetscost():
        # NOTE: ``costs`` is defined further down; the closure only reads
        # it at call time, and assumes sets >= size so the index is valid
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value  # silence pyflakes warning
            except KeyError:
                # entries may have been evicted by the cost limit
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0
        else:
            op = 1

        mixedops.append(
            (op, random.randint(0, size * 2), random.choice(costrange))
        )

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    # the cost-limited and unlimited variants are mutually exclusive
    if costlimit:
        benches.extend(
            [
                (dogetscost, b'gets w/ cost limit'),
                (doinsertscost, b'inserts w/ cost limit'),
                (domixedcost, b'mixed w/ cost limit'),
            ]
        )
    else:
        benches.extend(
            [
                (dogets, b'gets'),
                (doinserts, b'inserts'),
                (dosets, b'sets'),
                (domixed, b'mixed'),
            ]
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3943 3958
3944 3959
@command(
    b'perf::write|perfwrite',
    formatteropts
    + [
        (b'', b'write-method', b'write', b'ui write method'),
        (b'', b'nlines', 100, b'number of lines'),
        (b'', b'nitems', 100, b'number of items (per line)'),
        (b'', b'item', b'x', b'item that is written'),
        (b'', b'batch-line', None, b'pass whole line to write method at once'),
        (b'', b'flush-line', None, b'flush after each line'),
    ],
)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write (and others)"""
    opts = _byteskwargs(opts)

    # Resolve the ui method under test (write, write_err, ...) once,
    # outside the timed body.
    write = getattr(ui, _sysstr(opts[b'write_method']))
    item = opts[b'item']
    nlines = int(opts[b'nlines'])
    nitems = int(opts[b'nitems'])
    batch_line = opts.get(b'batch_line')
    flush_line = opts.get(b'flush_line')

    if batch_line:
        # Pre-assemble the complete line so each iteration issues a
        # single write call.
        line = item * nitems + b'\n'

    def benchmark():
        for _lineno in pycompat.xrange(nlines):
            if batch_line:
                write(line)
            else:
                for _col in pycompat.xrange(nitems):
                    write(item)
                write(b'\n')
            if flush_line:
                ui.flush()
        ui.flush()

    timer, fm = gettimer(ui, opts)
    timer(benchmark)
    fm.end()
3986 4001
3987 4002
def uisetup(ui):
    """Extension setup hook.

    For "historical portability": Mercurial 1.9 (or a79fea6b3e77)
    through 3.7 (or 5606f7d0d063) expose cmdutil.openrevlog() but
    predate commands.debugrevlogopts. On those versions the '--dir'
    option must fail explicitly, because openrevlog() has only
    accepted it since 3.5 (or 49c583ca48c4).
    """
    has_openrevlog = util.safehasattr(cmdutil, b'openrevlog')
    has_debugrevlogopts = util.safehasattr(commands, b'debugrevlogopts')
    if not has_openrevlog or has_debugrevlogopts:
        return

    def openrevlog(orig, repo, cmd, file_, opts):
        # Reject --dir when the repository object lacks directory
        # log support.
        if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
            raise error.Abort(
                b"This version doesn't support --dir option",
                hint=b"use 3.5 or later",
            )
        return orig(repo, cmd, file_, opts)

    extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
4006 4021
4007 4022
@command(
    b'perf::progress|perfprogress',
    formatteropts
    + [
        (b'', b'topic', b'topic', b'topic for progress messages'),
        (b'c', b'total', 1000000, b'total value we are progressing to'),
    ],
    norepo=True,
)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def doprogress():
        # Drive one progress bar from 0 to `total`, one increment per
        # step; the context manager takes care of setup and teardown.
        with ui.makeprogress(topic, total=total) as progress:
            for _step in _xrange(total):
                progress.increment()

    timer(doprogress)
    fm.end()
General Comments 0
You need to be logged in to leave comments. Login now