# perf: don't turn byte to string when formatting perfbranchmap
# changeset r46873:cdbde70e (default, draft) by marmoute
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance
3 3
4 4 Configurations
5 5 ==============
6 6
7 7 ``perf``
8 8 --------
9 9
10 10 ``all-timing``
11 11 When set, additional statistics will be reported for each benchmark: best,
12 12 worst, median average. If not set only the best timing is reported
13 13 (default: off).
14 14
15 15 ``presleep``
16 16 number of second to wait before any group of runs (default: 1)
17 17
18 18 ``pre-run``
19 19 number of run to perform before starting measurement.
20 20
21 21 ``profile-benchmark``
22 22 Enable profiling for the benchmarked section.
23 23 (The first iteration is benchmarked)
24 24
25 25 ``run-limits``
26 26 Control the number of runs each benchmark will perform. The option value
27 27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 28 conditions are considered in order with the following logic:
29 29
30 30 If benchmark has been running for <time> seconds, and we have performed
31 31 <numberofrun> iterations, stop the benchmark,
32 32
33 33 The default value is: `3.0-100, 10.0-3`
34 34
35 35 ``stub``
36 36 When set, benchmarks will only be run once, useful for testing
37 37 (default: off)
38 38 '''
39 39
40 40 # "historical portability" policy of perf.py:
41 41 #
42 42 # We have to do:
43 43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 46 # - make historical perf command work correctly with as wide Mercurial
47 47 # version as possible
48 48 #
49 49 # We have to do, if possible with reasonable cost:
50 50 # - make recent perf command for historical feature work correctly
51 51 # with early Mercurial
52 52 #
53 53 # We don't have to do:
54 54 # - make perf command for recent feature work correctly with early
55 55 # Mercurial
56 56
57 57 from __future__ import absolute_import
58 58 import contextlib
59 59 import functools
60 60 import gc
61 61 import os
62 62 import random
63 63 import shutil
64 64 import struct
65 65 import sys
66 66 import tempfile
67 67 import threading
68 68 import time
69 69 from mercurial import (
70 70 changegroup,
71 71 cmdutil,
72 72 commands,
73 73 copies,
74 74 error,
75 75 extensions,
76 76 hg,
77 77 mdiff,
78 78 merge,
79 79 revlog,
80 80 util,
81 81 )
82 82
83 83 # for "historical portability":
84 84 # try to import modules separately (in dict order), and ignore
85 85 # failure, because these aren't available with early Mercurial
86 86 try:
87 87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 88 except ImportError:
89 89 pass
90 90 try:
91 91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 92 except ImportError:
93 93 pass
94 94 try:
95 95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96 96
97 97 dir(registrar) # forcibly load it
98 98 except ImportError:
99 99 registrar = None
100 100 try:
101 101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 102 except ImportError:
103 103 pass
104 104 try:
105 105 from mercurial.utils import repoviewutil # since 5.0
106 106 except ImportError:
107 107 repoviewutil = None
108 108 try:
109 109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 110 except ImportError:
111 111 pass
112 112 try:
113 113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 114 except ImportError:
115 115 pass
116 116
117 117 try:
118 118 from mercurial import profiling
119 119 except ImportError:
120 120 profiling = None
121 121
122 122
def identity(a):
    """Return *a* unchanged (fallback for missing pycompat helpers)."""
    return a
125 125
126 126
try:
    from mercurial import pycompat

    # helpers that moved into pycompat over time; the trailing comments
    # record the first release (and changeset) providing each one
    getargspec = pycompat.getargspec  # added to module after 4.5
    _byteskwargs = pycompat.byteskwargs  # since 4.1 (or fbc3f73dc802)
    _sysstr = pycompat.sysstr  # since 4.0 (or 2219f4f82ede)
    _bytestr = pycompat.bytestr  # since 4.2 (or b70407bd84d5)
    _xrange = pycompat.xrange  # since 4.8 (or 7eba8f83129b)
    fsencode = pycompat.fsencode  # since 3.9 (or f4a5e0e86a7e)
    if pycompat.ispy3:
        _maxint = sys.maxsize  # per py3 docs for replacing maxint
    else:
        _maxint = sys.maxint
except (NameError, ImportError, AttributeError):
    # very old Mercurial: python 2 only, no pycompat module
    import inspect

    getargspec = inspect.getargspec
    _byteskwargs = identity
    _bytestr = str
    fsencode = identity  # no py3 support
    _maxint = sys.maxint  # no py3 support
    _sysstr = lambda x: x  # no py3 support
    _xrange = xrange

try:
    # 4.7+
    queue = pycompat.queue.Queue
except (NameError, AttributeError, ImportError):
    # <4.7.
    try:
        queue = pycompat.queue
    except (NameError, AttributeError, ImportError):
        import Queue as queue

try:
    from mercurial import logcmdutil

    makelogtemplater = logcmdutil.maketemplater
except (AttributeError, ImportError):
    try:
        makelogtemplater = cmdutil.makelogtemplater
    except (AttributeError, ImportError):
        makelogtemplater = None
170 170
# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()  # sentinel distinguishable from any real attribute


def safehasattr(thing, attr):
    """Return True if *thing* has attribute *attr* (getattr-based probe)."""
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined


setattr(util, 'safehasattr', safehasattr)
182 182
# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == b'nt':
    util.timer = time.clock
else:
    util.timer = time.time

# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(
    cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
)

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(
    cmdutil,
    "debugrevlogopts",
    getattr(
        commands,
        "debugrevlogopts",
        [
            (b'c', b'changelog', False, b'open changelog'),
            (b'm', b'manifest', False, b'open manifest'),
            (b'', b'dir', False, b'open directory manifest'),
        ],
    ),
)
220 220
cmdtable = {}

# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a b'name|alias1|alias2' command spec into its name list."""
    return cmd.split(b"|")
228 228
229 229
# pick the richest @command decorator this Mercurial offers, shimming the
# 'norepo' flag for versions that predate it
if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if 'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command

        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)


else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func

        return decorator
261 261
262 262
try:
    import mercurial.registrar
    import mercurial.configitems

    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    # (name, experimental-flag) for every perf.* knob this extension declares
    _perfconfigitems = [
        (b'presleep', True),
        (b'stub', True),
        (b'parentscount', True),
        (b'all-timing', True),
        (b'pre-run', False),
        (b'profile-benchmark', False),
        (b'run-limits', True),
    ]
    for _name, _experimental in _perfconfigitems:
        _kwargs = {'default': mercurial.configitems.dynamicdefault}
        if _experimental:
            _kwargs['experimental'] = True
        configitem(b'perf', _name, **_kwargs)
except (ImportError, AttributeError):
    pass
except TypeError:
    # compatibility fix for a11fd395e83f
    # hg version: 5.2
    # configitem() no longer accepts the 'experimental' keyword, so
    # register every knob again without it
    for _name, _experimental in _perfconfigitems:
        configitem(
            b'perf',
            _name,
            default=mercurial.configitems.dynamicdefault,
        )
349 349
350 350
def getlen(ui):
    """Return the function used to size benchmark results.

    In stub mode (``perf.stub``) every container counts as one item so
    benchmarks finish immediately during testing.
    """
    stub = ui.configbool(b"perf", b"stub", False)
    return (lambda x: 1) if stub else len
355 355
356 356
class noop(object):
    """Context manager that does nothing on enter or exit."""

    def __enter__(self):
        return None

    def __exit__(self, *exc_info):
        return None


# shared do-nothing context-manager instance
NOOPCTX = noop()
368 368
369 369
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        fout_setter = safeattrsetter(ui, b'fout', ignoremissing=True)
        if fout_setter:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            fout_setter.set(ui.ferr)

    # get a formatter
    formatter_factory = getattr(ui, 'formatter', None)
    if formatter_factory:
        fm = formatter_factory(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter"""

            def __init__(self, ui, topic, opts):
                self._ui = ui
                self.hexfunc = node.hex if ui.debugflag else node.short

            def __nonzero__(self):
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    # each entry looks like b'<seconds>-<minimum-run-count>'
    limits = []
    for entry in ui.configlist(b"perf", b"run-limits", []):
        parts = entry.split(b'-', 1)
        if len(parts) < 2:
            ui.warn(
                (b'malformatted run limit entry, missing "-": %s\n' % entry)
            )
            continue
        try:
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), entry)
                )
            )
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), entry)
                )
            )
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    # experimental config: perf.profile-benchmark
    # only usable when the profiling module imported successfully
    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
    )
    return t, fm
492 492
493 493
def stub_timer(fm, func, setup=None, title=None):
    """Run *func* exactly once (stub mode); nothing is timed or reported."""
    if setup is not None:
        setup()
    func()
498 498
499 499
@contextlib.contextmanager
def timeone():
    """Yield a list that receives one (wall, user, sys) sample on exit."""
    sample = []
    os_before = os.times()
    clock_before = util.timer()
    yield sample
    clock_after = util.timer()
    os_after = os.times()
    sample.append(
        (
            clock_after - clock_before,
            os_after[0] - os_before[0],
            os_after[1] - os_before[1],
        )
    )
510 510
511 511
# stop conditions for _timer: once the benchmark has been running for
# <seconds> AND at least <count> iterations completed, stop
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)
517 517
518 518
def _timer(
    fm,
    func,
    setup=None,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
):
    """Repeatedly run *func* and report the timings through formatter *fm*.

    *setup* runs (untimed) before every iteration; *prerun* warm-up calls
    happen first and are never measured.  Iteration stops at the first
    (seconds, count) entry of *limits* whose both parts are satisfied.
    Only the first measured iteration runs under *profiler*.
    """
    gc.collect()
    samples = []
    start = util.timer()
    iterations = 0
    if profiler is None:
        profiler = NOOPCTX
    # warm-up runs, not measured
    for _ in range(prerun):
        if setup is not None:
            setup()
        func()
    while True:
        if setup is not None:
            setup()
        with profiler:
            with timeone() as sample:
                last_result = func()
        # profile only the first measured iteration
        profiler = NOOPCTX
        iterations += 1
        samples.append(sample[0])
        elapsed = util.timer() - start
        if any(
            elapsed >= seconds and iterations >= mincount
            for seconds, mincount in limits
        ):
            break

    formatone(
        fm, samples, title=title, result=last_result, displayall=displayall
    )
558 558
559 559
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Render a list of (wall, user, sys) samples through formatter *fm*.

    The best (fastest-wall) sample is always shown; with *displayall* the
    max, average and median are reported as well.  *title* and *result*
    are echoed first when provided.  An empty *timings* list writes
    nothing instead of raising IndexError.

    Note: *timings* is sorted in place.
    """
    if not timings:
        # nothing was measured; avoid IndexError on timings[0] below
        return

    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def display(role, entry):
        # non-best roles get a "role." prefix so formatter field names differ
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    display(b'best', timings[0])
    if displayall:
        display(b'max', timings[-1])
        avg = tuple([sum(x) / count for x in zip(*timings)])
        display(b'avg', avg)
        display(b'median', timings[len(timings) // 2])
593 593
594 594
595 595 # utilities for historical portability
596 596
597 597
def getint(ui, section, name, default):
    """Read config *section.name* as an int, returning *default* if unset.

    Raises error.ConfigError when the value is present but not an integer.
    """
    # for "historical portability":
    # ui.configint has been available since 1.9 (or fa2b596db182)
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError(
            b"%s.%s is not an integer ('%s')" % (section, name, raw)
        )
610 610
611 611
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function aborts if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort(
            (
                b"missing attribute %s of %s might break assumption"
                b" of performance measurement"
            )
            % (name, obj)
        )

    original = getattr(obj, _sysstr(name))

    class _accessor(object):
        # small handle exposing set()/restore() over the captured attribute
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), original)

    return _accessor()
648 648
649 649
650 650 # utilities to examine each internal API changes
651 651
652 652
def getbranchmapsubsettable():
    # for "historical portability":
    # subsettable has lived in several modules over time:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    # - repoviewutil since 5.0
    # probe them in that order and return the first hit
    for mod in (branchmap, repoview, repoviewutil):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )
671 671
672 672
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store"""
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf); older
    # versions exposed the same thing as 'sopener'
    svfs = getattr(repo, 'svfs', None)
    return svfs if svfs else getattr(repo, 'sopener')
682 682
683 683
def getvfs(repo):
    """Return appropriate object to access files under .hg"""
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf); older
    # versions exposed the same thing as 'opener'
    vfs = getattr(repo, 'vfs', None)
    return vfs if vfs else getattr(repo, 'opener')
693 693
694 694
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API"""
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if '_tagscache' in vars(repo):
                del repo.__dict__['_tagscache']

        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")
723 723
724 724
725 725 # utilities to clear cache
726 726
727 727
def clearfilecache(obj, attrname):
    """Drop *attrname*'s cached value and filecache entry from *obj*.

    Operates on the unfiltered view when *obj* exposes one.
    """
    unfiltered = getattr(obj, 'unfiltered', None)
    if unfiltered is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
735 735
736 736
def clearchangelog(repo):
    # clear cached changelog state on both the filtered view (repoview
    # cache key/value) and the unfiltered repo (filecache entry)
    if repo is not repo.unfiltered():
        object.__setattr__(repo, '_clcachekey', None)
        object.__setattr__(repo, '_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')
742 742
743 743
744 744 # perf commands
745 745
746 746
@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    # time a full dirstate walk (including unknown files) for the patterns
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})

    def d():
        walk = repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
        return len(list(walk))

    timer(d)
    fm.end()
760 760
761 761
@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    # time annotating one file at the working directory parent
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fctx = repo[b'.'][f]

    def d():
        return len(fctx.annotate(True))

    timer(d)
    fm.end()
769 769
770 770
@command(
    b'perfstatus',
    [
        (b'u', b'unknown', False, b'ask status to look for unknown files'),
        (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
    ]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call

    The repository data are preserved between each call.

    By default, only the status of the tracked file are requested. If
    `--unknown` is passed, the "unknown" files are also tracked.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    unknown = opts[b'unknown']
    if opts[b'dirstate']:
        # go through the low-level dirstate API directly
        dirstate = repo.dirstate
        m = scmutil.matchall(repo)

        def status_dirstate():
            s = dirstate.status(
                m, subrepos=[], ignored=False, clean=False, unknown=unknown
            )
            sum(map(bool, s))

        timer(status_dirstate)
    else:
        # regular repo.status() entry point
        def status_full():
            return sum(map(len, repo.status(unknown=unknown)))

        timer(status_full)
    fm.end()
807 807
808 808
@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    # time a dry-run addremove over the whole working copy, quieted
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    oldquiet = repo.ui.quiet
    try:
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        if 'uipathfn' in getargspec(scmutil.addremove).args:
            # modern signature takes an explicit path-formatting callback
            uipathfn = scmutil.getuipathfn(repo)

            def d():
                return scmutil.addremove(repo, matcher, b"", uipathfn, opts)

        else:
            def d():
                return scmutil.addremove(repo, matcher, b"", opts)

        timer(d)
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
826 826
827 827
def clearcaches(cl):
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # <= hg-5.2: reset the node lookup cache by hand
        from mercurial.node import nullid, nullrev

        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
838 838
839 839
@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def setup():
        # start every run from a cold changelog cache
        clearcaches(cl)

    def d():
        len(cl.headrevs())

    timer(d, setup=setup)
    fm.end()
855 855
856 856
@command(
    b'perftags',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perftags(ui, repo, **opts):
    # time computing the repository's tags, with optionally cold revlogs
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']

    def setup():
        if clearrevlogs:
            # include changelog/manifest parsing in the measurement
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()

    def d():
        return len(repo.tags())

    timer(d, setup=setup)
    fm.end()
881 881
882 882
@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    # time a full walk over the ancestors of all changelog heads
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()

    def d():
        for _rev in repo.changelog.ancestors(heads):
            pass

    timer(d)
    fm.end()
895 895
896 896
@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    # time membership tests of REVSET revisions against a lazy ancestor set
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()

    def d():
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in s

    timer(d)
    fm.end()
911 911
912 912
@command(b'perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path"""
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)
    path = ui.expandpath(path)

    def setup():
        # open a fresh peer for every run so no connection state is reused
        repos[1] = hg.peer(ui, opts, path)

    def d():
        setdiscovery.findcommonheads(ui, *repos)

    timer(d, setup=setup)
    fm.end()
928 928
929 929
@command(
    b'perfbookmarks',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']

    def setup():
        if clearrevlogs:
            clearchangelog(repo)
        # drop the cached bookmark store so each run re-parses it
        clearfilecache(repo, b'_bookmarks')

    def d():
        repo._bookmarks

    timer(d, setup=setup)
    fm.end()
954 954
955 955
@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    # --- benchmark factories -------------------------------------------

    def makebench(fn):
        # open + parse the bundle, then hand the unbundler to fn
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        # read the parsed bundle stream in fixed-size chunks
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # raw file reads, bypassing bundle parsing entirely
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    # --- assemble the benchmark list for this bundle's format ----------

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend(
                [
                    (makebench(deltaiter), b'cg1 deltaiter()'),
                    (makebench(iterchunks), b'cg1 getchunks()'),
                    (makereadnbytes(8192), b'cg1 read(8k)'),
                    (makereadnbytes(16384), b'cg1 read(16k)'),
                    (makereadnbytes(32768), b'cg1 read(32k)'),
                    (makereadnbytes(131072), b'cg1 read(128k)'),
                ]
            )
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend(
                [
                    (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                    (makebench(iterparts), b'bundle2 iterparts()'),
                    (
                        makebench(iterpartsseekable),
                        b'bundle2 iterparts() seekable',
                    ),
                    (makebench(seek), b'bundle2 part seek()'),
                    (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                    (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                    (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                    (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
                ]
            )
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1080 1080
1081 1081
@command(
    b'perfchangegroupchangelog',
    formatteropts
    + [
        (b'', b'cgversion', b'02', b'changegroup version'),
        (b'r', b'rev', b'', b'revisions to add to changegroup'),
    ],
)
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        # exhaust the chunk generator so the changelog group is fully built
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()
1117 1117
1118 1118
@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark a `dirstate.hasdir` call, dropping the `_dirs` cache each run"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # force the dirstate to be loaded before timing starts
    b'a' in ds

    def run_once():
        ds.hasdir(b'a')
        # drop the cached directory map so the next call rebuilds it
        del ds._map._dirs

    timer(run_once)
    fm.end()
1132 1132
1133 1133
@command(
    b'perfdirstate',
    [
        (
            b'',
            b'iteration',
            None,
            b'benchmark a full iteration for the dirstate',
        ),
        (
            b'',
            b'contains',
            None,
            b'benchmark a large amount of `nf in dirstate` calls',
        ),
    ]
    + formatteropts,
)
def perfdirstate(ui, repo, **opts):
    """benchmark the time of various dirstate operations

    By default benchmark the time necessary to load a dirstate from scratch.
    The dirstate is loaded to the point where a "contains" request can be
    answered.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # make sure the dirstate is loaded before selecting a benchmark mode
    b"a" in repo.dirstate

    if opts[b'iteration'] and opts[b'contains']:
        msg = b'only specify one of --iteration or --contains'
        raise error.Abort(msg)

    if opts[b'iteration']:
        setup = None
        dirstate = repo.dirstate

        def d():
            for f in dirstate:
                pass

    elif opts[b'contains']:
        setup = None
        dirstate = repo.dirstate
        allfiles = list(dirstate)
        # also add file path that will be "missing" from the dirstate
        allfiles.extend([f[::-1] for f in allfiles])

        def d():
            for f in allfiles:
                f in dirstate

    else:

        def setup():
            repo.dirstate.invalidate()

        def d():
            b"a" in repo.dirstate

    timer(d, setup=setup)
    fm.end()
1196 1196
1197 1197
@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark a 'dirstate.hasdir' call from an empty `dirs` cache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # prime the dirstate so only the `_dirs` rebuild is measured
    repo.dirstate.hasdir(b"a")

    def setup():
        # drop the cached directory map before every run
        del repo.dirstate._map._dirs

    def d():
        repo.dirstate.hasdir(b"a")

    timer(d, setup=setup)
    fm.end()
1213 1213
1214 1214
@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.filefoldmap.get()` request

    The dirstate filefoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate so only the foldmap rebuild is measured
    dirstate._map.filefoldmap.get(b'a')

    def setup():
        del dirstate._map.filefoldmap

    def d():
        dirstate._map.filefoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()
1234 1234
1235 1235
@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.dirfoldmap.get()` request

    The dirstate dirfoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    dirstate._map.dirfoldmap.get(b'a')

    def setup():
        # drop both the fold map and the underlying directory map
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs

    def d():
        dirstate._map.dirfoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()
1256 1256
1257 1257
@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark the time it takes to write a dirstate on disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # make sure the dirstate is loaded before timing starts
    b"a" in ds

    def setup():
        # flag the dirstate as modified so write() actually writes
        ds._dirty = True

    def d():
        ds.write(repo.currenttransaction())

    timer(d, setup=setup)
    fm.end()
1274 1274
1275 1275
def _getmergerevs(repo, opts):
    """parse command arguments to return the revs involved in a merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        fromrev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[fromrev]
    else:
        # no explicit base: use the common ancestor of the two sides
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
1297 1297
1298 1298
@command(
    b'perfmergecalculate',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecalculate(ui, repo, **opts):
    """benchmark `merge.calculateupdates` for a merge of two revisions"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(
            repo,
            wctx,
            rctx,
            [ancestor],
            branchmerge=False,
            force=False,
            acceptremote=True,
            followcopies=True,
        )

    timer(d)
    fm.end()
1330 1330
1331 1331
@command(
    b'perfmergecopies',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # resolve (local, other, base) once so only mergecopies itself is timed
    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        copies.mergecopies(repo, wctx, rctx, ancestor)

    timer(d)
    fm.end()
1354 1354
1355 1355
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # resolve both contexts up front; only pathcopies itself is timed
    source_ctx = scmutil.revsingle(repo, rev1, rev1)
    dest_ctx = scmutil.revsingle(repo, rev2, rev2)

    def trace_copies():
        copies.pathcopies(source_ctx, dest_ctx)

    timer(trace_copies)
    fm.end()
1369 1369
1370 1370
@command(
    b'perfphases',
    [
        (b'', b'full', False, b'include file reading time too'),
    ],
    b"",
)
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')

    def d():
        phases = _phases
        if full:
            # with --full, drop the cached object so re-reading the phase
            # data from disk is part of the measurement
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)

    timer(d)
    fm.end()
1395 1395
1396 1396
@command(b'perfphasesremote', [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import bin
    from mercurial import (
        exchange,
        hg,
        phases,
    )

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            b'default repository not configured!',
            hint=b"see 'hg help config.paths'",
        )
    dest = path.pushloc or path.loc
    ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(
            b'listkeys', {b'namespace': b'phases'}
        ).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.statusnoi18n(b'publishing: yes\n')
    else:
        ui.statusnoi18n(b'publishing: no\n')

    has_node = getattr(repo.changelog.index, 'has_node', None)
    if has_node is None:
        # older Mercurial: fall back to the nodemap container API
        has_node = repo.changelog.nodemap.__contains__
    nonpublishroots = 0
    # NOTE(review): `iteritems()` is a Python 2 idiom; presumably the
    # listkeys result is a dict-like that still provides it -- confirm on py3
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if has_node(node) and int(phase):
            nonpublishroots += 1
    ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
    ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)

    def d():
        phases.remotephasessummary(repo, remotesubset, remotephases)

    timer(d)
    fm.end()
1455 1455
1456 1456
@command(
    b'perfmanifest',
    [
        (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
        (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ]
    + formatteropts,
    b'REV|NODE',
)
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # REV names a changeset; fetch its manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # full hex node given directly
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    # older Mercurial: go through the manifest revlog
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(
                    b'manifest revision must be integer or full node'
                )

    def d():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()

    timer(d)
    fm.end()
1500 1500
1501 1501
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading one changeset entry from the changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    node = scmutil.revsingle(repo, rev).node()

    def read_changeset():
        repo.changelog.read(node)

    timer(read_changeset)
    fm.end()
1514 1514
1515 1515
@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def setupone():
        # drop both the dirstate content and the cached ignore matcher
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def runone():
        # attribute access rebuilds the ignore matcher
        dirstate._ignore

    timer(runone, setup=setupone, title=b"load")
    fm.end()
1532 1532
1533 1533
@command(
    b'perfindex',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'no-lookup', None, b'do not revision lookup post creation'),
    ]
    + formatteropts,
)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:

    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        # _byteskwargs() converted all option keys to bytes above, so the
        # option must be read as b'rev' (the former str key raised KeyError
        # on Python 3) and the Abort message must be bytes as well.
        if opts[b'rev']:
            raise error.Abort(b'--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func

    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)

    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)

    timer(d, setup=setup)
    fm.end()
1596 1596
1597 1597
@command(
    b'perfnodemap',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ]
    + formatteropts,
)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]

    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        cl = makecl(unfi)
        if util.safehasattr(cl.index, 'get_rev'):
            # newer API: the index exposes the lookup directly
            nodeget[0] = cl.index.get_rev
        else:
            nodeget[0] = cl.nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:

        def setup():
            setnodeget()

    else:
        setnodeget()
        d()  # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
1668 1668
1669 1669
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark spawning the current executable to run `version -q`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def d():
        if os.name != 'nt':
            # empty HGRCPATH keeps user/system config out of the measurement
            os.system(
                b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
            )
        else:
            os.environ['HGRCPATH'] = r' '
            os.system("%s version -q > NUL" % sys.argv[0])

    timer(d)
    fm.end()
1686 1686
1687 1687
@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    # node ids of the first `count` revisions, resolved before timing
    nl = [repo.changelog.node(i) for i in _xrange(count)]

    def d():
        for n in nl:
            repo.changelog.parents(n)

    timer(d)
    fm.end()
1713 1713
1714 1714
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark computing the file list of a changectx"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)

    def count_files():
        len(repo[rev].files())

    timer(count_files)
    fm.end()
1726 1726
1727 1727
@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading a changeset's file list straight from the changelog"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    changelog = repo.changelog

    def read_files():
        # changelog.read() returns a tuple; index 3 is the file list
        len(changelog.read(rev)[3])

    timer(read_files)
    fm.end()
1740 1740
1741 1741
@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision symbol to a node"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def resolve():
        return len(repo.lookup(rev))

    timer(resolve)
    fm.end()
1748 1748
1749 1749
@command(
    b'perflinelogedits',
    [
        (b'n', b'edits', 10000, b'number of edits'),
        (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
    ],
    norepo=True,
)
def perflinelogedits(ui, **opts):
    """benchmark replaying a reproducible pseudo-random series of linelog edits"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed keeps the generated edit sequence identical across runs
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    # pre-generate one (rev, a1, a2, b1, b2) argument tuple per edit so
    # only replacelines() itself is timed
    for rev in _xrange(edits):
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1787 1787
1788 1788
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revset specifications"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # bind the function once so the attribute lookup stays out of the timing
    resolve = scmutil.revrange

    def d():
        return len(resolve(repo, specs))

    timer(d)
    fm.end()
1796 1796
1797 1797
@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark looking up a node in a freshly opened changelog revlog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog

    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()
    # open the changelog revlog directly, bypassing the repo-level caches
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")

    def d():
        cl.rev(n)
        # drop the revlog caches so every run starts cold
        clearcaches(cl)

    timer(d)
    fm.end()
1814 1814
1815 1815
@command(
    b'perflog',
    [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
)
def perflog(ui, repo, rev=None, **opts):
    """benchmark running `hg log`, optionally following renames"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    # buffer command output so terminal printing does not interfere with
    # the timing
    ui.pushbuffer()
    timer(
        lambda: commands.log(
            ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
        )
    )
    ui.popbuffer()
    fm.end()
1833 1833
1834 1834
@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def walk_backwards():
        for rev in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            # reading the branch forces the changelog entry to be loaded
            repo[rev].branch()

    timer(walk_backwards)
    fm.end()
1851 1851
1852 1852
@command(
    b'perftemplating',
    [
        (b'r', b'rev', [], b'revisions to run the template on'),
    ]
    + formatteropts,
)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort(
            b"perftemplating not available with this Mercurial",
            hint=b"use 4.3 or later",
        )

    opts = _byteskwargs(opts)

    # render to a throwaway ui so output does not pollute the measurement
    # NOTE(review): the devnull handle is never closed; it lives for the
    # remainder of the process
    nullui = ui.copy()
    nullui.fout = open(os.devnull, 'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (
        b'{date|shortdate} [{rev}:{node|short}]'
        b' {author|person}: {desc|firstline}\n'
    )
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)

    def format():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()
1895 1895
1896 1896
1897 1897 def _displaystats(ui, opts, entries, data):
1898 1898 # use a second formatter because the data are quite different, not sure
1899 1899 # how it flies with the templater.
1900 1900 fm = ui.formatter(b'perf-stats', opts)
1901 1901 for key, title in entries:
1902 1902 values = data[key]
1903 1903 nbvalues = len(data)
1904 1904 values.sort()
1905 1905 stats = {
1906 1906 'key': key,
1907 1907 'title': title,
1908 1908 'nbitems': len(values),
1909 1909 'min': values[0][0],
1910 1910 '10%': values[(nbvalues * 10) // 100][0],
1911 1911 '25%': values[(nbvalues * 25) // 100][0],
1912 1912 '50%': values[(nbvalues * 50) // 100][0],
1913 1913 '75%': values[(nbvalues * 75) // 100][0],
1914 1914 '80%': values[(nbvalues * 80) // 100][0],
1915 1915 '85%': values[(nbvalues * 85) // 100][0],
1916 1916 '90%': values[(nbvalues * 90) // 100][0],
1917 1917 '95%': values[(nbvalues * 95) // 100][0],
1918 1918 '99%': values[(nbvalues * 99) // 100][0],
1919 1919 'max': values[-1][0],
1920 1920 }
1921 1921 fm.startitem()
1922 1922 fm.data(**stats)
1923 1923 # make node pretty for the human output
1924 1924 fm.plain('### %s (%d items)\n' % (title, len(values)))
1925 1925 lines = [
1926 1926 'min',
1927 1927 '10%',
1928 1928 '25%',
1929 1929 '50%',
1930 1930 '75%',
1931 1931 '80%',
1932 1932 '85%',
1933 1933 '90%',
1934 1934 '95%',
1935 1935 '99%',
1936 1936 'max',
1937 1937 ]
1938 1938 for l in lines:
1939 1939 fm.plain('%s: %s\n' % (l, stats[l]))
1940 1940 fm.end()
1941 1941
1942 1942
1943 1943 @command(
1944 1944 b'perfhelper-mergecopies',
1945 1945 formatteropts
1946 1946 + [
1947 1947 (b'r', b'revs', [], b'restrict search to these revisions'),
1948 1948 (b'', b'timing', False, b'provides extra data (costly)'),
1949 1949 (b'', b'stats', False, b'provides statistic about the measured data'),
1950 1950 ],
1951 1951 )
1952 1952 def perfhelpermergecopies(ui, repo, revs=[], **opts):
1953 1953 """find statistics about potential parameters for `perfmergecopies`
1954 1954
1955 1955 This command find (base, p1, p2) triplet relevant for copytracing
1956 1956 benchmarking in the context of a merge. It reports values for some of the
1957 1957 parameters that impact merge copy tracing time during merge.
1958 1958
1959 1959 If `--timing` is set, rename detection is run and the associated timing
1960 1960 will be reported. The extra details come at the cost of slower command
1961 1961 execution.
1962 1962
1963 1963 Since rename detection is only run once, other factors might easily
1964 1964 affect the precision of the timing. However it should give a good
1965 1965 approximation of which revision triplets are very costly.
1966 1966 """
1967 1967 opts = _byteskwargs(opts)
1968 1968 fm = ui.formatter(b'perf', opts)
1969 1969 dotiming = opts[b'timing']
1970 1970 dostats = opts[b'stats']
1971 1971
1972 1972 output_template = [
1973 1973 ("base", "%(base)12s"),
1974 1974 ("p1", "%(p1.node)12s"),
1975 1975 ("p2", "%(p2.node)12s"),
1976 1976 ("p1.nb-revs", "%(p1.nbrevs)12d"),
1977 1977 ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
1978 1978 ("p1.renames", "%(p1.renamedfiles)12d"),
1979 1979 ("p1.time", "%(p1.time)12.3f"),
1980 1980 ("p2.nb-revs", "%(p2.nbrevs)12d"),
1981 1981 ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
1982 1982 ("p2.renames", "%(p2.renamedfiles)12d"),
1983 1983 ("p2.time", "%(p2.time)12.3f"),
1984 1984 ("renames", "%(nbrenamedfiles)12d"),
1985 1985 ("total.time", "%(time)12.3f"),
1986 1986 ]
1987 1987 if not dotiming:
1988 1988 output_template = [
1989 1989 i
1990 1990 for i in output_template
1991 1991 if not ('time' in i[0] or 'renames' in i[0])
1992 1992 ]
1993 1993 header_names = [h for (h, v) in output_template]
1994 1994 output = ' '.join([v for (h, v) in output_template]) + '\n'
1995 1995 header = ' '.join(['%12s'] * len(header_names)) + '\n'
1996 1996 fm.plain(header % tuple(header_names))
1997 1997
1998 1998 if not revs:
1999 1999 revs = ['all()']
2000 2000 revs = scmutil.revrange(repo, revs)
2001 2001
2002 2002 if dostats:
2003 2003 alldata = {
2004 2004 'nbrevs': [],
2005 2005 'nbmissingfiles': [],
2006 2006 }
2007 2007 if dotiming:
2008 2008 alldata['parentnbrenames'] = []
2009 2009 alldata['totalnbrenames'] = []
2010 2010 alldata['parenttime'] = []
2011 2011 alldata['totaltime'] = []
2012 2012
2013 2013 roi = repo.revs('merge() and %ld', revs)
2014 2014 for r in roi:
2015 2015 ctx = repo[r]
2016 2016 p1 = ctx.p1()
2017 2017 p2 = ctx.p2()
2018 2018 bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
2019 2019 for b in bases:
2020 2020 b = repo[b]
2021 2021 p1missing = copies._computeforwardmissing(b, p1)
2022 2022 p2missing = copies._computeforwardmissing(b, p2)
2023 2023 data = {
2024 2024 b'base': b.hex(),
2025 2025 b'p1.node': p1.hex(),
2026 2026 b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
2027 2027 b'p1.nbmissingfiles': len(p1missing),
2028 2028 b'p2.node': p2.hex(),
2029 2029 b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
2030 2030 b'p2.nbmissingfiles': len(p2missing),
2031 2031 }
2032 2032 if dostats:
2033 2033 if p1missing:
2034 2034 alldata['nbrevs'].append(
2035 2035 (data['p1.nbrevs'], b.hex(), p1.hex())
2036 2036 )
2037 2037 alldata['nbmissingfiles'].append(
2038 2038 (data['p1.nbmissingfiles'], b.hex(), p1.hex())
2039 2039 )
2040 2040 if p2missing:
2041 2041 alldata['nbrevs'].append(
2042 2042 (data['p2.nbrevs'], b.hex(), p2.hex())
2043 2043 )
2044 2044 alldata['nbmissingfiles'].append(
2045 2045 (data['p2.nbmissingfiles'], b.hex(), p2.hex())
2046 2046 )
2047 2047 if dotiming:
2048 2048 begin = util.timer()
2049 2049 mergedata = copies.mergecopies(repo, p1, p2, b)
2050 2050 end = util.timer()
2051 2051 # not very stable timing since we did only one run
2052 2052 data['time'] = end - begin
2053 2053 # mergedata contains five dicts: "copy", "movewithdir",
2054 2054 # "diverge", "renamedelete" and "dirmove".
2055 2055 # The first 4 are about renamed file so lets count that.
2056 2056 renames = len(mergedata[0])
2057 2057 renames += len(mergedata[1])
2058 2058 renames += len(mergedata[2])
2059 2059 renames += len(mergedata[3])
2060 2060 data['nbrenamedfiles'] = renames
2061 2061 begin = util.timer()
2062 2062 p1renames = copies.pathcopies(b, p1)
2063 2063 end = util.timer()
2064 2064 data['p1.time'] = end - begin
2065 2065 begin = util.timer()
2066 2066 p2renames = copies.pathcopies(b, p2)
2067 2067 end = util.timer()
2068 2068 data['p2.time'] = end - begin
2069 2069 data['p1.renamedfiles'] = len(p1renames)
2070 2070 data['p2.renamedfiles'] = len(p2renames)
2071 2071
2072 2072 if dostats:
2073 2073 if p1missing:
2074 2074 alldata['parentnbrenames'].append(
2075 2075 (data['p1.renamedfiles'], b.hex(), p1.hex())
2076 2076 )
2077 2077 alldata['parenttime'].append(
2078 2078 (data['p1.time'], b.hex(), p1.hex())
2079 2079 )
2080 2080 if p2missing:
2081 2081 alldata['parentnbrenames'].append(
2082 2082 (data['p2.renamedfiles'], b.hex(), p2.hex())
2083 2083 )
2084 2084 alldata['parenttime'].append(
2085 2085 (data['p2.time'], b.hex(), p2.hex())
2086 2086 )
2087 2087 if p1missing or p2missing:
2088 2088 alldata['totalnbrenames'].append(
2089 2089 (
2090 2090 data['nbrenamedfiles'],
2091 2091 b.hex(),
2092 2092 p1.hex(),
2093 2093 p2.hex(),
2094 2094 )
2095 2095 )
2096 2096 alldata['totaltime'].append(
2097 2097 (data['time'], b.hex(), p1.hex(), p2.hex())
2098 2098 )
2099 2099 fm.startitem()
2100 2100 fm.data(**data)
2101 2101 # make node pretty for the human output
2102 2102 out = data.copy()
2103 2103 out['base'] = fm.hexfunc(b.node())
2104 2104 out['p1.node'] = fm.hexfunc(p1.node())
2105 2105 out['p2.node'] = fm.hexfunc(p2.node())
2106 2106 fm.plain(output % out)
2107 2107
2108 2108 fm.end()
2109 2109 if dostats:
2110 2110 # use a second formatter because the data are quite different, not sure
2111 2111 # how it flies with the templater.
2112 2112 entries = [
2113 2113 ('nbrevs', 'number of revision covered'),
2114 2114 ('nbmissingfiles', 'number of missing files at head'),
2115 2115 ]
2116 2116 if dotiming:
2117 2117 entries.append(
2118 2118 ('parentnbrenames', 'rename from one parent to base')
2119 2119 )
2120 2120 entries.append(('totalnbrenames', 'total number of renames'))
2121 2121 entries.append(('parenttime', 'time for one parent'))
2122 2122 entries.append(('totaltime', 'time for both parents'))
2123 2123 _displaystats(ui, opts, entries, alldata)
2124 2124
2125 2125
@command(
    b'perfhelper-pathcopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelperpathcopies(ui, repo, revs=None, **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # pick the output layout up front: timing adds two extra columns
    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d "
            "%(nbrenamedfiles)12d %(time)18.5f\n"
        )
        header_names = (
            "source",
            "destination",
            "nb-revs",
            "nb-files",
            "nb-renames",
            "time",
        )
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d\n"
        )
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    # `revs=None` (instead of a mutable `[]` default) avoids sharing a list
    # across calls; an empty/None value means "all revisions".
    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['nbrenames'] = []
            alldata['time'] = []

    # only merge revisions are interesting: they have two parents whose
    # common ancestors give us source/destination pairs to measure
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    # no file missing from base: copy tracing is trivial here
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
                    b'nbmissingfiles': len(missing),
                }
                if dostats:
                    alldata['nbrevs'].append(
                        (
                            data['nbrevs'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                    alldata['nbmissingfiles'].append(
                        (
                            data['nbmissingfiles'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                    if dostats:
                        alldata['time'].append(
                            (
                                data['time'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                        alldata['nbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                fm.startitem()
                # NOTE(review): `data` uses bytes keys; `**data` requires
                # str keys on Python 3 — verify this path on py3.
                fm.data(**data)
                # make node pretty for the human output
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
    if dostats:
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('nbrenames', 'renamed files'))
            entries.append(('time', 'time'))
        _displaystats(ui, opts, entries, alldata)
2264 2264
2265 2265
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark construction of a case collision auditor for the dirstate"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def buildauditor():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(buildauditor)
    fm.end()
2272 2272
2273 2273
@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache file from the store"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store

    def load():
        store.fncache._load()

    timer(load)
    fm.end()
2285 2285
2286 2286
@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark rewriting the fncache file inside a transaction"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    # take the repository lock and open a transaction so writes are legal;
    # the fncache content itself is backed up before we touch it
    lock = repo.lock()
    store.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    tr.addbackup(b'fncache')

    def writefncache():
        # re-flag the cache as dirty before each timed write
        store.fncache._dirty = True
        store.fncache.write(tr)

    timer(writefncache)
    tr.close()
    lock.release()
    fm.end()
2305 2305
2306 2306
@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark encoding every path currently stored in the fncache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    store.fncache._load()

    def encodeall():
        for path in store.fncache.entries:
            store.encode(path)

    timer(encodeall)
    fm.end()
2320 2320
2321 2321
def _bdiffworker(q, blocks, xdiff, ready, done):
    """Worker-thread loop: pull text pairs off ``q`` and diff each one.

    A ``None`` item marks the end of a batch; ``done`` being set ends the
    worker, which then parks on the ``ready`` condition.
    """
    # the diff flavor is fixed for the worker's lifetime, pick it once
    if xdiff:
        diff = mdiff.bdiff.xdiffblocks
    elif blocks:
        diff = mdiff.bdiff.blocks
    else:
        diff = mdiff.textdiff
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            diff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done()  # acknowledge the terminating None
    with ready:
        ready.wait()
2337 2337
2338 2338
def _manifestrevision(repo, mnode):
    """Return the stored revision text for manifest node ``mnode``."""
    manifestlog = repo.manifestlog

    # newer Mercurial exposes getstorage(); fall back to the private
    # revlog attribute on older versions
    if util.safehasattr(manifestlog, b'getstorage'):
        storage = manifestlog.getstorage(b'')
    else:
        storage = manifestlog._revlog

    return storage.revision(mnode)
2348 2348
2349 2349
@command(
    b'perfbdiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
        (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
        (b'', b'blocks', False, b'test computing diffs into blocks'),
        (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],
    b'-c|-m|FILE REV',
)
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # in changelog/manifest mode the positional FILE argument is
        # actually the revision
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []  # list of (old text, new text) pairs to diff

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            # default: diff each revision against its delta parent
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:

        def d():
            # single-threaded: diff every pair inline
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)

    else:
        # prime the queue with one None per thread and start the workers;
        # q.join() waits until the workers have consumed the sentinels
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(
                target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
            ).start()
        q.join()

        def d():
            # feed all pairs to the workers, append one terminating None
            # per thread, wake everyone and wait for the queue to drain
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # shut the workers down: set the done flag, unblock their q.get()
        # with sentinels and wake any thread waiting on the condition
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
2464 2464
2465 2465
@command(
    b'perfunidiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ],
    b'-c|-m|FILE REV',
)
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # in changelog/manifest mode the positional FILE argument is
        # actually the revision
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []  # list of (left text, right text) pairs to diff

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            # default: diff each revision against its delta parent
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False
            )
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
2544 2544
2545 2545
@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # map each single-letter diff flag to the diff option it enables
    flagmap = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    # time the diff once per flag combination, including no flags at all
    for flags in ('', 'w', 'b', 'B', 'wB'):
        diffkwargs = {flagmap[letter]: b'1' for letter in flags}

        def d():
            ui.pushbuffer()
            commands.diff(ui, repo, **diffkwargs)
            ui.popbuffer()

        bflags = flags.encode('ascii')
        if bflags:
            title = b'diffopts: %s' % (b'-' + bflags)
        else:
            title = b'diffopts: none'
        timer(d, title=title)
    fm.end()
2569 2569
2570 2570
@command(b'perfrevlogindex', revlogopts + formatteropts, b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener') # trick linter
    indexfile = rl.indexfile
    data = opener.read(indexfile)

    # the first 4 bytes hold flags (high 16 bits) and version (low 16 bits)
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        inline = header & (1 << 16)
    else:
        raise error.Abort(b'unsupported revlog version: %d' % version)

    rllen = len(rl)

    # sample nodes at fixed fractions of the revlog length
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        # time instantiating a revlog object from the on-disk index
        revlog.revlog(opener, indexfile)

    def read():
        # time raw index file reading, without any parsing
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        # time parsing the already-read index data
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = revlogio.parseindex(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        # look up a single node, using index.rev when available and
        # falling back to the nodemap on older parsers
        index = revlogio.parseindex(data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(
                revlogio.parseindex(data, inline)[0], 'nodemap', None
            )
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        try:
            rev(node)
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        # same lookup as resolvenode, repeated over many nodes `count` times
        index = revlogio.parseindex(data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(
                revlogio.parseindex(data, inline)[0], 'nodemap', None
            )
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        for i in range(count):
            for node in nodes:
                try:
                    rev(node)
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
        (
            lambda: resolvenodes(allnodesrev, 2),
            b'look up all nodes 2x (reverse)',
        ),
        (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
        (
            lambda: getentries(allrevs, 2),
            b'retrieve all index entries 2x (forward)',
        ),
        (
            lambda: getentries(allrevsrev),
            b'retrieve all index entries (reverse)',
        ),
        (
            lambda: getentries(allrevsrev, 2),
            b'retrieve all index entries 2x (reverse)',
        ),
    ]

    # each micro-benchmark gets its own timer and formatter section
    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2704 2704
2705 2705
@command(
    b'perfrevlogrevisions',
    revlogopts
    + formatteropts
    + [
        (b'd', b'dist', 100, b'distance between the revisions'),
        (b's', b'startrev', 0, b'revision to start reading at'),
        (b'', b'reverse', False, b'read in reverse'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogrevisions(
    ui, repo, file_=None, startrev=0, reverse=False, **opts
):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # a negative start counts back from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev

    def d():
        # drop cached data so each run pays the full read cost
        rl.clearcaches()

        first = startrev
        last = rllen
        step = opts[b'dist']

        if reverse:
            first, last = last - 1, first - 1
            step = -step

        for x in _xrange(first, last, step):
            # Old revisions don't support passing int.
            rl.revision(rl.node(x))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
2754 2754
2755 2755
@command(
    b'perfrevlogwrite',
    revlogopts
    + formatteropts
    + [
        (b's', b'startrev', 1000, b'revision to start writing at'),
        (b'', b'stoprev', -1, b'last revision to write'),
        (b'', b'count', 3, b'number of passes to perform'),
        (b'', b'details', False, b'print timing for every revisions tested'),
        (b'', b'source', b'full', b'the kind of data feed in the revlog'),
        (b'', b'lazydeltabase', True, b'try the provided delta first'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
      (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    Note: This performance command measures performance in a custom way. As a
    result some of the global configuration of the 'perf' command does not
    apply to it:

    * ``pre-run``: disabled

    * ``profile-benchmark``: disabled

    * ``run-limits``: disabled use --count instead
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative revisions count back from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (
        b'full',
        b'parent-1',
        b'parent-2',
        b'parent-smallest',
        b'storage',
    )
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        # (message previously misspelled "invalide")
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(
            ui,
            rl,
            source,
            startrev,
            stoprev,
            c + 1,
            lazydeltabase=lazydeltabase,
            clearcaches=clearcaches,
        )
        allresults.append(timing)

    ### consolidate the results in a single list
    # results[i] becomes (rev, [timing from each pass]); the passes are
    # expected to cover the same revisions in the same order
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display; each index is the position of that
    # percentile in the median-sorted results
    # (the 50% entry previously used a 70% index by mistake)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append(
            (
                sum(x[1][0] for x in item),
                sum(x[1][1] for x in item),
                sum(x[1][2] for x in item),
            )
        )
    formatone(
        fm,
        totaltime,
        title="total time (%d revs)" % resultcount,
        displayall=displayall,
    )
    fm.end()
2897 2897
2898 2898
2899 2899 class _faketr(object):
2900 2900 def add(s, x, y, z=None):
2901 2901 return None
2902 2902
2903 2903
def _timeonewrite(
    ui,
    orig,
    source,
    startrev,
    stoprev,
    runidx=None,
    lazydeltabase=True,
    clearcaches=True,
):
    """Re-add revisions [startrev, stoprev] of ``orig`` to a temporary
    revlog, timing each ``addrawrevision`` call.

    ``source`` selects how the revision data is fed (see perfrevlogwrite).
    ``runidx`` is only used to label the progress bar for repeated passes.
    Returns a list of ``(rev, timing)`` pairs.
    """
    timings = []
    # a fake transaction keeps journaling out of the measured time
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)

            def updateprogress(pos):
                progress.update(pos)

            def completeprogress():
                progress.complete()

        else:

            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)

            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            # prepare the addrawrevision() arguments outside the timed block
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                # optionally clear revlog caches before each timed addition
                dest.index.clearcaches()
                dest.clearcaches()
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
2953 2953
2954 2954
def _getrevisionseed(orig, rev, tr, source):
    """Build the ``(args, kwargs)`` pair for re-adding revision ``rev``.

    Depending on ``source``, either the full text or a cached delta against
    some base revision is prepared; the result feeds ``addrawrevision``.
    """
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        # feed the fully resolved text, no precomputed delta
        text = orig.revision(rev)
    elif source == b'parent-1':
        baserev = orig.rev(p1)
        cachedelta = (baserev, orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to the first parent when there is no second one
        parent = p1 if p2 == nullid else p2
        baserev = orig.rev(parent)
        cachedelta = (baserev, orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        # compare both parent deltas and keep the shorter one
        p1diff = orig.revdiff(p1, rev)
        parent, diff = p1, p1diff
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(p1diff) > len(p2diff):
                parent, diff = p2, p2diff
        baserev = orig.rev(parent)
        cachedelta = (baserev, diff)
    elif source == b'storage':
        # reuse whatever delta the original revlog stored
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return (
        (text, tr, linkrev, p1, p2),
        {'node': node, 'flags': flags, 'cachedelta': cachedelta},
    )
2995 2995
2996 2996
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Context manager yielding a temporary copy of revlog ``orig``
    truncated just before ``truncaterev``.

    The copy lives in a throw-away directory so benchmarks can re-add the
    truncated revisions without touching the real repository; the
    directory is removed on exit. Inline revlogs are not supported.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    k = 'upperboundcomp'
    if util.safehasattr(orig, k):
        # only forward the option when this Mercurial version knows it
        revlogkwargs[k] = getattr(orig, k)

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            # index entries have a fixed size; drop everything from
            # truncaterev onwards
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(
            vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
        )
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
3047 3047
3048 3048
@command(
    b'perfrevlogchunks',
    revlogopts
    + formatteropts
    + [
        (b'e', b'engines', b'', b'compression engines to use'),
        (b's', b'startrev', 0, b'revision to start at'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = {e.strip() for e in engines.split(b',')}
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # no explicit selection: use every available engine that provides
        # a working revlog compressor
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # return a raw file handle for the file backing the revlog
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        # read each revision segment, opening a new file handle per call
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        # same as doread() but reusing a single file handle
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        # read the whole revision range as a single segment
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        # batch read with a reused file handle
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        # read and decompress each revision chunk individually
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        # recompress the chunks gathered by dochunkbatch() with the
        # requested engine
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append(
            (
                functools.partial(docompress, compressor),
                b'compress w/ %s' % engine,
            )
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3176 3176
3177 3177
@command(
    b'perfrevlogrevision',
    revlogopts
    + formatteropts
    + [(b'', b'cache', False, b'use caches instead of clearing')],
    b'-c|-m|FILE REV',
)
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # with -c/-m the positional argument is the revision, not a file
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # slice the raw segment data back into per-revision chunks
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        # older versions kept the helper on the revlog module
        slicechunk = getattr(revlog, '_slicechunk', None)

    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    # pre-compute the intermediate result each phase needs as input
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend(
        [
            (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
            (lambda: dodecompress(rawchunks), b'decompress'),
            (lambda: dopatch(text, bins), b'patch'),
            (lambda: dohash(text), b'hash'),
        ]
    )

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
3319 3319
3320 3320
@command(
    b'perfrevset',
    [
        (b'C', b'clear', False, b'clear volatile cache between each call.'),
        (b'', b'contexts', False, b'obtain changectx for each revision'),
    ]
    + formatteropts,
    b"REVSET",
)
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if need to evaluate the impact of build volatile
    revisions set cache on the revset execution. Volatile cache hold filtered
    and obsolete related cache."""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def d():
        if clear:
            repo.invalidatevolatilesets()
        if contexts:
            # also pay the cost of building a changectx per revision
            for ctx in repo.set(expr):
                pass
        else:
            for r in repo.revs(expr):
                pass

    timer(d)
    fm.end()
3352 3352
3353 3353
@command(
    b'perfvolatilesets',
    [
        (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
    ]
    + formatteropts,
)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def makebench(compute, name):
        # wrap ``compute(name)`` so each run starts from cold volatile sets
        def run():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            compute(name)

        return run

    def computeobs(name):
        obsolete.getrevs(repo, name)

    def computefiltered(name):
        repoview.filterrevs(repo, name)

    # obsolescence-related sets first, restricted to requested names
    obsnames = sorted(obsolete.cachefuncs)
    if names:
        obsnames = [n for n in obsnames if n in names]
    for name in obsnames:
        timer(makebench(computeobs, name), title=name)

    # then the repoview filter sets
    filternames = sorted(repoview.filtertable)
    if names:
        filternames = [n for n in filternames if n in names]
    for name in filternames:
        timer(makebench(computefiltered, name), title=name)
    fm.end()
3401 3401
3402 3402
@command(
    b'perfbranchmap',
    [
        (b'f', b'full', False, b'Includes build time of subset'),
        (
            b'',
            b'clear-revbranch',
            False,
            b'purge the revbranch cache between computation',
        ),
    ]
    + formatteropts,
)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)

    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches

        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                view._branchcaches.clear()
            else:
                # only drop the cache entry for this filter level
                filtered.pop(filtername, None)
            view.branchmap()

        return d

    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable on-disk cache reads and writes so that only the in-memory
    # computation is measured; restored in the finally block below
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=printname)
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
3492 3492
3493 3493
@command(
    b'perfbranchmapupdate',
    [
        (b'', b'base', [], b'subset of revision to start from'),
        (b'', b'target', [], b'subset of revision to end with'),
        (b'', b'clear-caches', False, b'clear cache between each runs'),
    ]
    + formatteropts,
)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from for <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    # update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None]  # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    # the revisions the benchmarked update has to process
    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        # register two ad-hoc repoview filters representing the base and
        # target states; unregistered again in the finally block
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset where found
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            # start each run from a fresh copy of the base branchmap
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3602 3602
3603 3603
@command(
    b'perfbranchmapload',
    [
        (b'f', b'filter', b'', b'Specify repoview filter'),
        # fix typo in user-visible help text: "brachmap" -> "branchmap"
        (b'', b'list', False, b'List branchmap filter caches'),
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ]
    + formatteropts,
)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # just enumerate the on-disk branchmap cache files and their sizes
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(
                    b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
                )
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap()  # make sure we have a relevant, up to date branchmap

    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached
    while fromfile(repo) is None:
        # walk up the subset chain until we find a cached branchmap
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(
                b'No branchmap cached for %s repo' % (filter or b'unfiltered')
            )
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)

    def setup():
        if clearrevlogs:
            clearchangelog(repo)

    def bench():
        fromfile(repo)

    timer(bench, setup=setup)
    fm.end()
3662 3662
3663 3663
@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)

    def parsemarkers():
        # instantiating the obsstore parses every marker from disk
        return len(obsolete.obsstore(svfs))

    timer(parsemarkers)
    fm.end()
3673 3673
3674 3674
@command(
    b'perflrucachedict',
    formatteropts
    + [
        (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
        (b'', b'mincost', 0, b'smallest cost of items in cache'),
        (b'', b'maxcost', 100, b'maximum cost of items in cache'),
        (b'', b'size', 4, b'size of cache'),
        (b'', b'gets', 10000, b'number of key lookups'),
        (b'', b'sets', 10000, b'number of key sets'),
        (b'', b'mixed', 10000, b'number of mixed mode operations'),
        (
            b'',
            b'mixedgetfreq',
            50,
            b'frequency of get vs set ops in mixed mode',
        ),
    ],
    norepo=True,
)
def perflrucache(
    ui,
    mincost=0,
    maxcost=100,
    costlimit=0,
    size=4,
    gets=10000,
    sets=10000,
    mixed=10000,
    mixedgetfreq=50,
    **opts
):
    """Benchmark ``util.lrucachedict`` operations.

    Exercises cache construction, gets, inserts, sets and a randomized
    mix of gets/sets, with and without per-item cost accounting
    (depending on ``--costlimit``).
    """
    opts = _byteskwargs(opts)

    def doinit():
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    # random key values used to pre-fill the cache
    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value  # silence pyflakes warning

    def dogetscost():
        # same as dogets() but with cost accounting enabled; lookups may
        # miss because cost-based eviction can drop entries
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value  # silence pyflakes warning
            except KeyError:
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0  # get
        else:
            op = 1  # set

        mixedops.append(
            (op, random.randint(0, size * 2), random.choice(costrange))
        )

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    # cost-tracking and plain variants are mutually exclusive benchmarks
    if costlimit:
        benches.extend(
            [
                (dogetscost, b'gets w/ cost limit'),
                (doinsertscost, b'inserts w/ cost limit'),
                (domixedcost, b'mixed w/ cost limit'),
            ]
        )
    else:
        benches.extend(
            [
                (dogets, b'gets'),
                (doinserts, b'inserts'),
                (dosets, b'sets'),
                (domixed, b'mixed'),
            ]
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3829 3829
3830 3830
@command(
    b'perfwrite',
    formatteropts
    + [
        (b'', b'write-method', b'write', b'ui write method'),
        (b'', b'nlines', 100, b'number of lines'),
        (b'', b'nitems', 100, b'number of items (per line)'),
        (b'', b'item', b'x', b'item that is written'),
        (b'', b'batch-line', None, b'pass whole line to write method at once'),
        (b'', b'flush-line', None, b'flush after each line'),
    ],
)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write (and others)"""
    opts = _byteskwargs(opts)

    write = getattr(ui, _sysstr(opts[b'write_method']))
    nlines = int(opts[b'nlines'])
    nitems = int(opts[b'nitems'])
    item = opts[b'item']
    batch_line = opts.get(b'batch_line')
    flush_line = opts.get(b'flush_line')

    # pre-build the full line once when writing a whole line per call
    fullline = item * nitems + b'\n' if batch_line else None

    def benchmark():
        for _ in pycompat.xrange(nlines):
            if batch_line:
                write(fullline)
            else:
                # one write call per item, then the newline
                for _ in pycompat.xrange(nitems):
                    write(item)
                write(b'\n')
            if flush_line:
                ui.flush()
        ui.flush()

    timer, fm = gettimer(ui, opts)
    timer(benchmark)
    fm.end()
3872 3872
3873 3873
def uisetup(ui):
    """Extension setup hook.

    On old Mercurial versions (pre-debugrevlogopts era) wrap
    ``cmdutil.openrevlog`` so that the unsupported ``--dir`` option
    aborts with a clear message instead of failing obscurely.
    """
    if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
        commands, b'debugrevlogopts'
    ):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(
                    b"This version doesn't support --dir option",
                    hint=b"use 3.5 or later",
                )
            return orig(repo, cmd, file_, opts)

        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
3892 3892
3893 3893
@command(
    b'perfprogress',
    formatteropts
    + [
        (b'', b'topic', b'topic', b'topic for progress messages'),
        (b'c', b'total', 1000000, b'total value we are progressing to'),
    ],
    norepo=True,
)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def doprogress():
        # advance a progress bar one step at a time up to `total`
        with ui.makeprogress(topic, total=total) as progress:
            for _ in _xrange(total):
                progress.increment()

    timer(doprogress)
    fm.end()
General Comments 0
You need to be logged in to leave comments. Login now