##// END OF EJS Templates
perf: document `perfstatus`
marmoute -
r43390:97f80dd2 default
parent child Browse files
Show More
@@ -1,3744 +1,3751 b''
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance
3 3
4 4 Configurations
5 5 ==============
6 6
7 7 ``perf``
8 8 --------
9 9
10 10 ``all-timing``
11 11 When set, additional statistics will be reported for each benchmark: best,
12 12 worst, median average. If not set only the best timing is reported
13 13 (default: off).
14 14
15 15 ``presleep``
  number of seconds to wait before any group of runs (default: 1)
17 17
18 18 ``pre-run``
  number of runs to perform before starting measurement.
20 20
21 21 ``profile-benchmark``
22 22 Enable profiling for the benchmarked section.
23 23 (The first iteration is benchmarked)
24 24
25 25 ``run-limits``
26 26 Control the number of runs each benchmark will perform. The option value
27 27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 28 conditions are considered in order with the following logic:
29 29
30 30 If benchmark has been running for <time> seconds, and we have performed
31 31 <numberofrun> iterations, stop the benchmark,
32 32
33 33 The default value is: `3.0-100, 10.0-3`
34 34
35 35 ``stub``
36 36 When set, benchmarks will only be run once, useful for testing
37 37 (default: off)
38 38 '''
39 39
40 40 # "historical portability" policy of perf.py:
41 41 #
42 42 # We have to do:
43 43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 46 # - make historical perf command work correctly with as wide Mercurial
47 47 # version as possible
48 48 #
49 49 # We have to do, if possible with reasonable cost:
50 50 # - make recent perf command for historical feature work correctly
51 51 # with early Mercurial
52 52 #
53 53 # We don't have to do:
54 54 # - make perf command for recent feature work correctly with early
55 55 # Mercurial
56 56
57 57 from __future__ import absolute_import
58 58 import contextlib
59 59 import functools
60 60 import gc
61 61 import os
62 62 import random
63 63 import shutil
64 64 import struct
65 65 import sys
66 66 import tempfile
67 67 import threading
68 68 import time
69 69 from mercurial import (
70 70 changegroup,
71 71 cmdutil,
72 72 commands,
73 73 copies,
74 74 error,
75 75 extensions,
76 76 hg,
77 77 mdiff,
78 78 merge,
79 79 revlog,
80 80 util,
81 81 )
82 82
83 83 # for "historical portability":
84 84 # try to import modules separately (in dict order), and ignore
85 85 # failure, because these aren't available with early Mercurial
86 86 try:
87 87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 88 except ImportError:
89 89 pass
90 90 try:
91 91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 92 except ImportError:
93 93 pass
94 94 try:
95 95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96 96
97 97 dir(registrar) # forcibly load it
98 98 except ImportError:
99 99 registrar = None
100 100 try:
101 101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 102 except ImportError:
103 103 pass
104 104 try:
105 105 from mercurial.utils import repoviewutil # since 5.0
106 106 except ImportError:
107 107 repoviewutil = None
108 108 try:
109 109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 110 except ImportError:
111 111 pass
112 112 try:
113 113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 114 except ImportError:
115 115 pass
116 116
117 117 try:
118 118 from mercurial import profiling
119 119 except ImportError:
120 120 profiling = None
121 121
122 122
def identity(a):
    """Return *a* unchanged (no-op fallback used by the compat shims below)."""
    return a
125 125
126 126
127 127 try:
128 128 from mercurial import pycompat
129 129
130 130 getargspec = pycompat.getargspec # added to module after 4.5
131 131 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
132 132 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
133 133 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
134 134 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
135 135 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
136 136 if pycompat.ispy3:
137 137 _maxint = sys.maxsize # per py3 docs for replacing maxint
138 138 else:
139 139 _maxint = sys.maxint
140 140 except (NameError, ImportError, AttributeError):
141 141 import inspect
142 142
143 143 getargspec = inspect.getargspec
144 144 _byteskwargs = identity
145 145 _bytestr = str
146 146 fsencode = identity # no py3 support
147 147 _maxint = sys.maxint # no py3 support
148 148 _sysstr = lambda x: x # no py3 support
149 149 _xrange = xrange
150 150
151 151 try:
152 152 # 4.7+
153 153 queue = pycompat.queue.Queue
154 154 except (NameError, AttributeError, ImportError):
155 155 # <4.7.
156 156 try:
157 157 queue = pycompat.queue
158 158 except (NameError, AttributeError, ImportError):
159 159 import Queue as queue
160 160
161 161 try:
162 162 from mercurial import logcmdutil
163 163
164 164 makelogtemplater = logcmdutil.maketemplater
165 165 except (AttributeError, ImportError):
166 166 try:
167 167 makelogtemplater = cmdutil.makelogtemplater
168 168 except (AttributeError, ImportError):
169 169 makelogtemplater = None
170 170
171 171 # for "historical portability":
172 172 # define util.safehasattr forcibly, because util.safehasattr has been
173 173 # available since 1.9.3 (or 94b200a11cf7)
174 174 _undefined = object()
175 175
176 176
177 177 def safehasattr(thing, attr):
178 178 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
179 179
180 180
181 181 setattr(util, 'safehasattr', safehasattr)
182 182
183 183 # for "historical portability":
184 184 # define util.timer forcibly, because util.timer has been available
185 185 # since ae5d60bb70c9
186 186 if safehasattr(time, 'perf_counter'):
187 187 util.timer = time.perf_counter
188 188 elif os.name == b'nt':
189 189 util.timer = time.clock
190 190 else:
191 191 util.timer = time.time
192 192
193 193 # for "historical portability":
194 194 # use locally defined empty option list, if formatteropts isn't
195 195 # available, because commands.formatteropts has been available since
196 196 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
197 197 # available since 2.2 (or ae5f92e154d3)
198 198 formatteropts = getattr(
199 199 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
200 200 )
201 201
202 202 # for "historical portability":
203 203 # use locally defined option list, if debugrevlogopts isn't available,
204 204 # because commands.debugrevlogopts has been available since 3.7 (or
205 205 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
206 206 # since 1.9 (or a79fea6b3e77).
207 207 revlogopts = getattr(
208 208 cmdutil,
209 209 "debugrevlogopts",
210 210 getattr(
211 211 commands,
212 212 "debugrevlogopts",
213 213 [
214 214 (b'c', b'changelog', False, b'open changelog'),
215 215 (b'm', b'manifest', False, b'open manifest'),
216 216 (b'', b'dir', False, b'open directory manifest'),
217 217 ],
218 218 ),
219 219 )
220 220
221 221 cmdtable = {}
222 222
223 223 # for "historical portability":
224 224 # define parsealiases locally, because cmdutil.parsealiases has been
225 225 # available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a command spec like b"name|alias1|alias2" into its name list.

    Local stand-in for cmdutil.parsealiases, which has only been
    available since 1.5 (or 6252852b4332).
    """
    return cmd.split(b"|")
228 228
229 229
# Pick the most capable command-registration API available, newest first.
if safehasattr(registrar, 'command'):
    # modern API (hg >= 3.7): registrar.command supports norepo natively
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    # older API (hg >= 1.9): cmdutil.command
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command

        def command(name, options=(), synopsis=None, norepo=False):
            # emulate norepo by appending the command's aliases to the
            # global commands.norepo string, as pre-3.1 hg expects
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)


else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            # cmdtable entries are (func, options) with synopsis appended
            # only when one was given
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func

        return decorator
261 261
262 262
263 263 try:
264 264 import mercurial.registrar
265 265 import mercurial.configitems
266 266
267 267 configtable = {}
268 268 configitem = mercurial.registrar.configitem(configtable)
269 269 configitem(
270 270 b'perf',
271 271 b'presleep',
272 272 default=mercurial.configitems.dynamicdefault,
273 273 experimental=True,
274 274 )
275 275 configitem(
276 276 b'perf',
277 277 b'stub',
278 278 default=mercurial.configitems.dynamicdefault,
279 279 experimental=True,
280 280 )
281 281 configitem(
282 282 b'perf',
283 283 b'parentscount',
284 284 default=mercurial.configitems.dynamicdefault,
285 285 experimental=True,
286 286 )
287 287 configitem(
288 288 b'perf',
289 289 b'all-timing',
290 290 default=mercurial.configitems.dynamicdefault,
291 291 experimental=True,
292 292 )
293 293 configitem(
294 294 b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
295 295 )
296 296 configitem(
297 297 b'perf',
298 298 b'profile-benchmark',
299 299 default=mercurial.configitems.dynamicdefault,
300 300 )
301 301 configitem(
302 302 b'perf',
303 303 b'run-limits',
304 304 default=mercurial.configitems.dynamicdefault,
305 305 experimental=True,
306 306 )
307 307 except (ImportError, AttributeError):
308 308 pass
309 309 except TypeError:
310 310 # compatibility fix for a11fd395e83f
311 311 # hg version: 5.2
312 312 configitem(
313 313 b'perf', b'presleep', default=mercurial.configitems.dynamicdefault,
314 314 )
315 315 configitem(
316 316 b'perf', b'stub', default=mercurial.configitems.dynamicdefault,
317 317 )
318 318 configitem(
319 319 b'perf', b'parentscount', default=mercurial.configitems.dynamicdefault,
320 320 )
321 321 configitem(
322 322 b'perf', b'all-timing', default=mercurial.configitems.dynamicdefault,
323 323 )
324 324 configitem(
325 325 b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
326 326 )
327 327 configitem(
328 328 b'perf',
329 329 b'profile-benchmark',
330 330 default=mercurial.configitems.dynamicdefault,
331 331 )
332 332 configitem(
333 333 b'perf', b'run-limits', default=mercurial.configitems.dynamicdefault,
334 334 )
335 335
336 336
def getlen(ui):
    """Return a length function honoring the perf.stub test config.

    When perf.stub is set, every container is reported as having length
    1 so benchmarks terminate quickly under the test suite.
    """
    stubbed = ui.configbool(b"perf", b"stub", False)
    if stubbed:
        return lambda unused: 1
    return len
341 341
342 342
class noop(object):
    """Context manager that does nothing on enter or exit."""

    def __enter__(self):
        return None

    def __exit__(self, *args):
        return None


# shared do-nothing context, used when profiling is disabled
NOOPCTX = noop()
354 354
355 355
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """

            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short

            def __nonzero__(self):
                # formatter is falsy, matching plainformatter's behavior
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    # parse `<seconds>-<runcount>` pairs; malformed entries are warned
    # about and skipped rather than aborting the benchmark
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
            continue
        try:
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    # optional profiling of the first benchmarked iteration
    # (see perf.profile-benchmark in the module docstring)
    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
    )
    return t, fm
479 479
480 480
def stub_timer(fm, func, setup=None, title=None):
    """Run *func* exactly once, after optional *setup* (perf.stub mode).

    *fm* and *title* are accepted only for signature compatibility with
    _timer(); they are unused.
    """
    if setup is not None:
        setup()
    func()
485 485
486 486
@contextlib.contextmanager
def timeone():
    """Time the enclosed block; yields a list that receives one
    (wall, user, sys) seconds tuple when the block exits."""
    measurement = []
    os_before = os.times()
    wall_before = util.timer()
    yield measurement
    wall_after = util.timer()
    os_after = os.times()
    measurement.append(
        (
            wall_after - wall_before,
            os_after[0] - os_before[0],
            os_after[1] - os_before[1],
        )
    )
497 497
498 498
499 499 # list of stop condition (elapsed time, minimal run count)
500 500 DEFAULTLIMITS = (
501 501 (3.0, 100),
502 502 (10.0, 3),
503 503 )
504 504
505 505
def _timer(
    fm,
    func,
    setup=None,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
):
    # Repeatedly benchmark *func* (running optional *setup* before each
    # call) until one of the (elapsed-seconds, min-run-count) pairs in
    # *limits* is satisfied, then report through formatter *fm*.
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        # no profiling requested: substitute the do-nothing context manager
        profiler = NOOPCTX
    for i in range(prerun):
        # unmeasured warm-up iterations (perf.pre-run)
        if setup is not None:
            setup()
        func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with profiler:
            with timeone() as item:
                r = func()
        # only the first measured iteration is profiled
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    # `r` is the return value of the final run; shown via `! result: ...`
    formatone(fm, results, title=title, result=r, displayall=displayall)
545 545
546 546
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Report benchmark *timings* (list of (wall, user, sys) tuples)
    through formatter *fm*.

    Always shows the best run; with *displayall*, also shows the max,
    average and median runs.  Note that *timings* is sorted in place.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def display(role, entry):
        # non-best roles get a "role." prefix on every field name
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    display(b'best', timings[0])
    if displayall:
        display(b'max', timings[-1])
        display(b'avg', tuple(sum(col) / count for col in zip(*timings)))
        display(b'median', timings[len(timings) // 2])
580 580
581 581
582 582 # utilities for historical portability
583 583
584 584
def getint(ui, section, name, default):
    """Read config option *section*.*name* as an int, or *default* if unset.

    for "historical portability": ui.configint has only been available
    since 1.9 (or fa2b596db182), so parse the raw value manually.

    Raises error.ConfigError when the value is set but not an integer.
    """
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError(
            b"%s.%s is not an integer ('%s')" % (section, name, raw)
        )
597 597
598 598
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        msg = (
            b"missing attribute %s of %s might break assumption"
            b" of performance measurement"
        ) % (name, obj)
        raise error.Abort(msg)

    # name arrives as bytes; convert once for the getattr/setattr calls
    attrname = _sysstr(name)
    origvalue = getattr(obj, attrname)

    class attrutil(object):
        """Handle for setting or restoring the guarded attribute."""

        def set(self, newvalue):
            setattr(obj, attrname, newvalue)

        def restore(self):
            setattr(obj, attrname, origvalue)

    return attrutil()
635 635
636 636
637 637 # utilities to examine each internal API changes
638 638
639 639
def getbranchmapsubsettable():
    """Locate the branch-cache 'subsettable' mapping across hg versions.

    for "historical portability", subsettable is looked up in:
    - branchmap since 2.9 (or 175c6fd8cacc)
    - repoview since 2.5 (or 59a9f18d4587)
    - repoviewutil since 5.0
    """
    for candidate in (branchmap, repoview, repoviewutil):
        table = getattr(candidate, 'subsettable', None)
        if table:
            return table

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )
658 658
659 659
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store
    """
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf)
    svfs = getattr(repo, 'svfs', None)
    if not svfs:
        # fall back to the pre-2.3 name; raises AttributeError when even
        # that one is missing
        return getattr(repo, 'sopener')
    return svfs
670 670
671 671
def getvfs(repo):
    """Return appropriate object to access files under .hg
    """
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf)
    vfs = getattr(repo, 'vfs', None)
    if not vfs:
        # fall back to the pre-2.3 name; raises AttributeError when even
        # that one is missing
        return getattr(repo, 'opener')
    return vfs
682 682
683 683
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if b'_tagscache' in vars(repo):
                del repo.__dict__[b'_tagscache']

        return clearcache

    # older API: tags cached on the plain '_tags' attribute
    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    # still older API: tags cached on the 'tagscache' attribute
    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")
713 713
714 714
715 715 # utilities to clear cache
716 716
717 717
def clearfilecache(obj, attrname):
    """Drop *attrname* from *obj*'s @filecache so it gets recomputed."""
    # filecaches live on the unfiltered repo when obj is a repoview
    unfiltered = getattr(obj, 'unfiltered', None)
    if unfiltered is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
725 725
726 726
def clearchangelog(repo):
    """Invalidate the cached changelog on *repo* (filtered and unfiltered)."""
    unfi = repo.unfiltered()
    if repo is not unfi:
        # bypass attribute guards on the filtered repo wrapper
        object.__setattr__(repo, r'_clcachekey', None)
        object.__setattr__(repo, r'_clcache', None)
    clearfilecache(unfi, 'changelog')
732 732
733 733
734 734 # perf commands
735 735
736 736
@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    """benchmark walking the working directory for files matching PATTERNS"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})

    def d():
        # materialize the walk so the full traversal cost is measured
        walked = repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
        return len(list(walked))

    timer(d)
    fm.end()
750 750
751 751
@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    """benchmark annotating file F at the working directory parent"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    # len() forces full evaluation of the annotation result
    timer(lambda: len(fc.annotate(True)))
    fm.end()
759 759
760 760
@command(
    b'perfstatus',
    [(b'u', b'unknown', False, b'ask status to look for unknown files')]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call

    The repository data are preserved between each call.

    By default, only the status of the tracked files is requested. If
    `--unknown` is passed, the "unknown" files are also requested.
    """
    opts = _byteskwargs(opts)
    # m = match.always(repo.root, repo.getcwd())
    # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    # False))))
    timer, fm = gettimer(ui, opts)
    # summing the lengths forces iteration over every status category
    timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
774 781
775 782
@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    """benchmark a dry-run addremove over the working directory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # read the original value before entering the try block, so the
    # finally clause can never hit a NameError if an early statement raises
    oldquiet = repo.ui.quiet
    try:
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        if b'uipathfn' in getargspec(scmutil.addremove).args:
            # the uipathfn argument was added to scmutil.addremove in 5.0
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
793 800
794 801
def clearcaches(cl):
    """Reset a changelog's lookup caches across internal API changes."""
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
        return
    if util.safehasattr(cl, b'_nodecache'):
        # pre-clearcaches revlogs: reset the node cache by hand
        from mercurial.node import nullid, nullrev

        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
804 811
805 812
@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def reset():
        # drop lookup caches so each run starts cold
        clearcaches(cl)

    def run():
        len(cl.headrevs())

    timer(run, setup=reset)
    fm.end()
821 828
822 829
@command(
    b'perftags',
    formatteropts
    + [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),],
)
def perftags(ui, repo, **opts):
    """benchmark the computation of the repository tags

    The tags cache is cleared before each run.  With `--clear-revlogs`,
    the changelog and manifest caches are also dropped, measuring
    cold-cache behavior.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']

    def s():
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()

    def t():
        return len(repo.tags())

    timer(t, setup=s)
    fm.end()
845 852
846 853
@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    """benchmark iterating the ancestors of all changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()

    def d():
        # exhaust the lazy ancestor iterator
        for a in repo.changelog.ancestors(heads):
            pass

    timer(d)
    fm.end()
859 866
860 867
@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    """benchmark ancestor membership tests for the revisions in REVSET"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()

    def d():
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            # the membership test itself is the operation being measured
            rev in s

    timer(d)
    fm.end()
875 882
876 883
@command(b'perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path
    """
    repos = [repo, None]
    # normalize **opts keys to bytes like every other perf command does;
    # gettimer() and hg.peer() perform bytes-keyed option lookups
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    path = ui.expandpath(path)

    def s():
        # open a fresh peer for each run so no discovery state is reused
        repos[1] = hg.peer(ui, opts, path)

    def d():
        setdiscovery.findcommonheads(ui, *repos)

    timer(d, setup=s)
    fm.end()
893 900
894 901
@command(
    b'perfbookmarks',
    formatteropts
    + [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']

    def reset():
        if clearrevlogs:
            clearchangelog(repo)
        # drop the cached bookmark store so each run parses from disk
        clearfilecache(repo, b'_bookmarks')

    def run():
        repo._bookmarks

    timer(run, setup=reset)
    fm.end()
917 924
918 925
@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    def makebench(fn):
        # benchmark opening the bundle plus running fn() over it
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        # benchmark reading the decoded bundle stream in `size`-byte chunks
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # baseline: raw file reads without any bundle decoding
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        # benchmark reading every part's payload in `size`-byte chunks
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    # raw-read baselines are always applicable
    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # sniff the bundle type once so the matching benchmarks can be added
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend(
                [
                    (makebench(deltaiter), b'cg1 deltaiter()'),
                    (makebench(iterchunks), b'cg1 getchunks()'),
                    (makereadnbytes(8192), b'cg1 read(8k)'),
                    (makereadnbytes(16384), b'cg1 read(16k)'),
                    (makereadnbytes(32768), b'cg1 read(32k)'),
                    (makereadnbytes(131072), b'cg1 read(128k)'),
                ]
            )
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend(
                [
                    (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                    (makebench(iterparts), b'bundle2 iterparts()'),
                    (
                        makebench(iterpartsseekable),
                        b'bundle2 iterparts() seekable',
                    ),
                    (makebench(seek), b'bundle2 part seek()'),
                    (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                    (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                    (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                    (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
                ]
            )
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1043 1050
1044 1051
@command(
    b'perfchangegroupchangelog',
    formatteropts
    + [
        (b'', b'cgversion', b'02', b'changegroup version'),
        (b'r', b'rev', b'', b'revisions to add to changegroup'),
    ],
)
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        # consume the generator so the changelog work actually happens
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()
1080 1087
1081 1088
@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark computing the dirstate's directory map (`hasdir`)"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate so only the `hasdir` computation is measured
    b'a' in dirstate

    def d():
        dirstate.hasdir(b'a')
        # drop the cached directory map so each run recomputes it
        del dirstate._map._dirs

    timer(d)
    fm.end()
1095 1102
1096 1103
@command(b'perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    """benchmark the time to load (parse) the dirstate"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # warm lower-level caches so the benchmark focuses on dirstate parsing
    b"a" in repo.dirstate

    def d():
        # invalidate drops the parsed dirstate; the lookup reloads it
        repo.dirstate.invalidate()
        b"a" in repo.dirstate

    timer(d)
    fm.end()
1109 1116
1110 1117
@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark a `dirstate.hasdir` call from an empty `_dirs` cache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # prime the dirstate so only the directory-map build is measured
    b"a" in repo.dirstate

    def d():
        repo.dirstate.hasdir(b"a")
        # drop the cached directory map so each run rebuilds it
        del repo.dirstate._map._dirs

    timer(d)
    fm.end()
1123 1130
1124 1131
@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.filefoldmap.get()` request

    The dirstate filefoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate so only the foldmap computation is measured
    b'a' in dirstate

    def d():
        dirstate._map.filefoldmap.get(b'a')
        # drop the cached foldmap so each run rebuilds it
        del dirstate._map.filefoldmap

    timer(d)
    fm.end()
1138 1145
1139 1146
@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.dirfoldmap.get()` request

    The dirfoldmap cache, and the `_dirs` map it is derived from, are
    dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate so only the foldmap computation is measured
    b'a' in dirstate

    def d():
        dirstate._map.dirfoldmap.get(b'a')
        # drop both caches so each run rebuilds them from scratch
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs

    timer(d)
    fm.end()
1154 1161
1155 1162
@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark the time it takes to write a dirstate to disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # load the dirstate so there is something to write out
    b"a" in ds

    def d():
        # mark the dirstate dirty so `write` actually serialises it
        ds._dirty = True
        ds.write(repo.currenttransaction())

    timer(d)
    fm.end()
1169 1176
1170 1177
def _getmergerevs(repo, opts):
    """parse command arguments to return the revisions involved in a merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        fromrev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[fromrev]
    else:
        # no explicit base: use the common ancestor of the two sides
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
1192 1199
1193 1200
@command(
    b'perfmergecalculate',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecalculate(ui, repo, **opts):
    """benchmark `merge.calculateupdates` between two revisions"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(
            repo,
            wctx,
            rctx,
            [ancestor],
            branchmerge=False,
            force=False,
            acceptremote=True,
            followcopies=True,
        )

    timer(d)
    fm.end()
1225 1232
1226 1233
@command(
    b'perfmergecopies',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        # the merge-oriented copy tracing is the operation being measured
        copies.mergecopies(repo, wctx, rctx, ancestor)

    timer(d)
    fm.end()
1249 1256
1250 1257
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)

    def d():
        # time rename/copy detection between the two revisions
        copies.pathcopies(ctx1, ctx2)

    timer(d)
    fm.end()
1264 1271
1265 1272
@command(
    b'perfphases',
    [(b'', b'full', False, b'include file reading time too'),],
    b"",
)
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')

    def d():
        phases = _phases
        if full:
            # with --full, also drop the filecache entry so the run
            # includes reading the phaseroots file from disk
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)

    timer(d)
    fm.end()
1288 1295
1289 1296
@command(b'perfphasesremote', [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import bin
    from mercurial import (
        exchange,
        hg,
        phases,
    )

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            b'default repository not configured!',
            hint=b"see 'hg help config.paths'",
        )
    dest = path.pushloc or path.loc
    ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(
            b'listkeys', {b'namespace': b'phases'}
        ).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.statusnoi18n(b'publishing: yes\n')
    else:
        ui.statusnoi18n(b'publishing: no\n')

    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    # items() works on both Python 2 and Python 3; the previous
    # iteritems() call is Python-2-only and crashed on Python 3
    for nhex, phase in remotephases.items():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
    ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)

    def d():
        phases.remotephasessummary(repo, remotesubset, remotephases)

    timer(d)
    fm.end()
1346 1353
1347 1354
@command(
    b'perfmanifest',
    [
        (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
        (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ]
    + formatteropts,
    b'REV|NODE',
)
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # a full hexadecimal manifest node id was provided
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                # `getstorage` only exists on recent Mercurial; fall back
                # to the older `_revlog` attribute otherwise
                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(
                    b'manifest revision must be integer or full node'
                )

    def d():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()

    timer(d)
    fm.end()
1391 1398
1392 1399
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading one changeset's full data from the changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    n = scmutil.revsingle(repo, rev).node()

    def d():
        repo.changelog.read(n)
        # repo.changelog._cache = None

    timer(d)
    fm.end()
1405 1412
1406 1413
@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def setupone():
        # drop the cached ignore matcher so each run reloads it
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def runone():
        # accessing the property triggers parsing of the hgignore files
        dirstate._ignore

    timer(runone, setup=setupone, title=b"load")
    fm.end()
1423 1430
1424 1431
@command(
    b'perfindex',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'no-lookup', None, b'do not revision lookup post creation'),
    ]
    + formatteropts,
)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        # _byteskwargs turned every option key into bytes; the previous
        # str key ('rev') raised KeyError on Python 3
        if opts[b'rev']:
            raise error.Abort(b'--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func

    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)

    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)

    timer(d, setup=setup)
    fm.end()
1486 1493
1487 1494
@command(
    b'perfnodemap',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ]
    + formatteropts,
)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    # _byteskwargs turned every option key into bytes; the previous str key
    # ('clear_caches') raised KeyError on Python 3
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]

    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        nodeget[0] = makecl(unfi).nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:

        def setup():
            setnodeget()

    else:
        setnodeget()
        d()  # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
1554 1561
1555 1562
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark the time to start and exit a Mercurial process"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def d():
        if os.name != r'nt':
            # clear HGRCPATH so user configuration does not skew the run
            os.system(
                b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
            )
        else:
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])

    timer(d)
    fm.end()
1572 1579
1573 1580
@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    # resolve the nodes up front so only `parents` is timed
    nl = [repo.changelog.node(i) for i in _xrange(count)]

    def d():
        for n in nl:
            repo.changelog.parents(n)

    timer(d)
    fm.end()
1599 1606
1600 1607
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark reading a changeset's file list through the context layer"""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)

    def d():
        len(repo[x].files())

    timer(d)
    fm.end()
1612 1619
1613 1620
@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading a changeset's file list from raw changelog data"""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def d():
        # index 3 of a parsed changelog entry is the list of touched files
        len(cl.read(x)[3])

    timer(d)
    fm.end()
1626 1633
1627 1634
@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision specifier to a node"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: len(repo.lookup(rev)))
    fm.end()
1634 1641
1635 1642
@command(
    b'perflinelogedits',
    [
        (b'n', b'edits', 10000, b'number of edits'),
        (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
    ],
    norepo=True,
)
def perflinelogedits(ui, **opts):
    """benchmark applying a series of pseudo-random edits to a linelog"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed: every run replays the same pseudo-random edit sequence
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1673 1680
1674 1681
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revset specifications"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revrange = scmutil.revrange
    timer(lambda: len(revrange(repo, specs)))
    fm.end()
1682 1689
1683 1690
@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark looking a node up in a cold revlog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog

    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")

    def d():
        cl.rev(n)
        # drop revlog caches so the next run starts cold
        clearcaches(cl)

    timer(d)
    fm.end()
1700 1707
1701 1708
@command(
    b'perflog',
    [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
)
def perflog(ui, repo, rev=None, **opts):
    """benchmark the time to run `hg log`"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    # buffer output so terminal printing does not pollute the timing
    ui.pushbuffer()
    timer(
        lambda: commands.log(
            ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
        )
    )
    ui.popbuffer()
    fm.end()
1719 1726
1720 1727
@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def moonwalk():
        for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            ctx = repo[i]
            ctx.branch()  # read changelog data (in addition to the index)

    timer(moonwalk)
    fm.end()
1737 1744
1738 1745
@command(
    b'perftemplating',
    [(b'r', b'rev', [], b'revisions to run the template on'),] + formatteropts,
)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort(
            b"perftemplating not available with this Mercurial",
            hint=b"use 4.3 or later",
        )

    opts = _byteskwargs(opts)

    # render to /dev/null so terminal I/O does not pollute the timing
    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (
        b'{date|shortdate} [{rev}:{node|short}]'
        b' {author|person}: {desc|firstline}\n'
    )
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)

    def format():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()
1778 1785
1779 1786
def _displaystats(ui, opts, entries, data):
    """display statistics (min, percentiles, max) about gathered data

    `entries` is a list of `(key, title)` pairs; `data[key]` is a list of
    tuples whose first item is the measured value.
    """
    # use a second formatter because the data are quite different, not sure
    # how it flies with the templater.
    fm = ui.formatter(b'perf-stats', opts)
    for key, title in entries:
        values = data[key]
        # percentiles must be taken over the number of measured values;
        # the previous `len(data)` counted the metric keys instead and
        # produced bogus percentile rows
        nbvalues = len(values)
        values.sort()
        stats = {
            'key': key,
            'title': title,
            'nbitems': len(values),
            'min': values[0][0],
            '10%': values[(nbvalues * 10) // 100][0],
            '25%': values[(nbvalues * 25) // 100][0],
            '50%': values[(nbvalues * 50) // 100][0],
            '75%': values[(nbvalues * 75) // 100][0],
            '80%': values[(nbvalues * 80) // 100][0],
            '85%': values[(nbvalues * 85) // 100][0],
            '90%': values[(nbvalues * 90) // 100][0],
            '95%': values[(nbvalues * 95) // 100][0],
            '99%': values[(nbvalues * 99) // 100][0],
            'max': values[-1][0],
        }
        fm.startitem()
        fm.data(**stats)
        # make node pretty for the human output
        fm.plain('### %s (%d items)\n' % (title, len(values)))
        lines = [
            'min',
            '10%',
            '25%',
            '50%',
            '75%',
            '80%',
            '85%',
            '90%',
            '95%',
            '99%',
            'max',
        ]
        for l in lines:
            fm.plain('%s: %s\n' % (l, stats[l]))
    fm.end()
1825 1832
1826 1833
@command(
    b'perfhelper-mergecopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelpermergecopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perfmergecopies`

    This command find (base, p1, p2) triplet relevant for copytracing
    benchmarking in the context of a merge. It reports values for some of the
    parameters that impact merge copy tracing time during merge.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision triplets are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    output_template = [
        ("base", "%(base)12s"),
        ("p1", "%(p1.node)12s"),
        ("p2", "%(p2.node)12s"),
        ("p1.nb-revs", "%(p1.nbrevs)12d"),
        ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
        ("p1.renames", "%(p1.renamedfiles)12d"),
        ("p1.time", "%(p1.time)12.3f"),
        ("p2.nb-revs", "%(p2.nbrevs)12d"),
        ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
        ("p2.renames", "%(p2.renamedfiles)12d"),
        ("p2.time", "%(p2.time)12.3f"),
        ("renames", "%(nbrenamedfiles)12d"),
        ("total.time", "%(time)12.3f"),
    ]
    if not dotiming:
        output_template = [
            i
            for i in output_template
            if not ('time' in i[0] or 'renames' in i[0])
        ]
    header_names = [h for (h, v) in output_template]
    output = ' '.join([v for (h, v) in output_template]) + '\n'
    header = ' '.join(['%12s'] * len(header_names)) + '\n'
    fm.plain(header % tuple(header_names))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['parentnbrenames'] = []
            alldata['totalnbrenames'] = []
            alldata['parenttime'] = []
            alldata['totaltime'] = []

    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1()
        p2 = ctx.p2()
        bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
        for b in bases:
            b = repo[b]
            p1missing = copies._computeforwardmissing(b, p1)
            p2missing = copies._computeforwardmissing(b, p2)
            # native str keys throughout: the dict is read back with str
            # keys ('p1.nbrevs', the %(base)12s template, fm.data(**data))
            # so the former bytes keys broke on Python 3
            data = {
                'base': b.hex(),
                'p1.node': p1.hex(),
                'p1.nbrevs': len(repo.revs('%d::%d', b.rev(), p1.rev())),
                'p1.nbmissingfiles': len(p1missing),
                'p2.node': p2.hex(),
                'p2.nbrevs': len(repo.revs('%d::%d', b.rev(), p2.rev())),
                'p2.nbmissingfiles': len(p2missing),
            }
            if dostats:
                if p1missing:
                    alldata['nbrevs'].append(
                        (data['p1.nbrevs'], b.hex(), p1.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p1.nbmissingfiles'], b.hex(), p1.hex())
                    )
                if p2missing:
                    alldata['nbrevs'].append(
                        (data['p2.nbrevs'], b.hex(), p2.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p2.nbmissingfiles'], b.hex(), p2.hex())
                    )
            if dotiming:
                begin = util.timer()
                mergedata = copies.mergecopies(repo, p1, p2, b)
                end = util.timer()
                # not very stable timing since we did only one run
                data['time'] = end - begin
                # mergedata contains five dicts: "copy", "movewithdir",
                # "diverge", "renamedelete" and "dirmove".
                # The first 4 are about renamed file so lets count that.
                renames = len(mergedata[0])
                renames += len(mergedata[1])
                renames += len(mergedata[2])
                renames += len(mergedata[3])
                data['nbrenamedfiles'] = renames
                begin = util.timer()
                p1renames = copies.pathcopies(b, p1)
                end = util.timer()
                data['p1.time'] = end - begin
                begin = util.timer()
                p2renames = copies.pathcopies(b, p2)
                # take the timestamp *before* computing the delta; the
                # previous code stored `end - begin` with the stale `end`
                # from the p1 measurement, making p2.time meaningless
                end = util.timer()
                data['p2.time'] = end - begin
                data['p1.renamedfiles'] = len(p1renames)
                data['p2.renamedfiles'] = len(p2renames)

                if dostats:
                    if p1missing:
                        alldata['parentnbrenames'].append(
                            (data['p1.renamedfiles'], b.hex(), p1.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p1.time'], b.hex(), p1.hex())
                        )
                    if p2missing:
                        alldata['parentnbrenames'].append(
                            (data['p2.renamedfiles'], b.hex(), p2.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p2.time'], b.hex(), p2.hex())
                        )
                    if p1missing or p2missing:
                        alldata['totalnbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                b.hex(),
                                p1.hex(),
                                p2.hex(),
                            )
                        )
                        alldata['totaltime'].append(
                            (data['time'], b.hex(), p1.hex(), p2.hex())
                        )
            fm.startitem()
            fm.data(**data)
            # make node pretty for the human output
            out = data.copy()
            out['base'] = fm.hexfunc(b.node())
            out['p1.node'] = fm.hexfunc(p1.node())
            out['p2.node'] = fm.hexfunc(p2.node())
            fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(
                ('parentnbrenames', 'rename from one parent to base')
            )
            entries.append(('totalnbrenames', 'total number of renames'))
            entries.append(('parenttime', 'time for one parent'))
            entries.append(('totaltime', 'time for both parents'))
        _displaystats(ui, opts, entries, alldata)
2008 2015
2009 2016
@command(
    b'perfhelper-pathcopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perftracecopies`

    This command finds source-destination pairs relevant for copytracing
    testing. It reports values for some of the parameters that impact copy
    tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # The timing variant adds two extra columns (renames, time), so the
    # header/row formats differ between the two modes.
    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d "
            "%(nbrenamedfiles)12d %(time)18.5f\n"
        )
        header_names = (
            "source",
            "destination",
            "nb-revs",
            "nb-files",
            "nb-renames",
            "time",
        )
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d\n"
        )
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        # accumulators for the statistics summary printed at the end
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['nbrenames'] = []
            alldata['time'] = []

    # copy tracing is exercised on merges: restrict to merge revisions
    # within the requested set
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        # measure each (ancestor base -> parent) pair independently
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    # no files to trace: nothing interesting to report
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('%d::%d', b, p)),
                    b'nbmissingfiles': len(missing),
                }
                if dostats:
                    alldata['nbrevs'].append(
                        (data['nbrevs'], base.hex(), parent.hex(),)
                    )
                    alldata['nbmissingfiles'].append(
                        (data['nbmissingfiles'], base.hex(), parent.hex(),)
                    )
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                    if dostats:
                        alldata['time'].append(
                            (data['time'], base.hex(), parent.hex(),)
                        )
                        alldata['nbrenames'].append(
                            (data['nbrenamedfiles'], base.hex(), parent.hex(),)
                        )
                fm.startitem()
                fm.data(**data)
                # make the nodes pretty for the human-readable output
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        fm = ui.formatter(b'perf', opts)
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('nbrenames', 'renamed files'))
            entries.append(('time', 'time'))
        _displaystats(ui, opts, entries, alldata)
2135 2142
2136 2143
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark constructing a case-collision auditor over the dirstate"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def makeauditor():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(makeauditor)
    fm.end()
2143 2150
2144 2151
@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache from the store"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store

    def loadfncache():
        store.fncache._load()

    timer(loadfncache)
    fm.end()
2156 2163
2157 2164
@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark rewriting the fncache file

    The store lock and the backup transaction are now released/cleaned up in
    a ``finally`` block: previously an exception inside the timed section
    left the repository lock held and the transaction open.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    try:
        s.fncache._load()
        tr = repo.transaction(b'perffncachewrite')
        tr.addbackup(b'fncache')

        def d():
            # force a rewrite on every run, otherwise write() would be a no-op
            s.fncache._dirty = True
            s.fncache.write(tr)

        timer(d)
        tr.close()
    finally:
        lock.release()
    fm.end()
2176 2183
2177 2184
@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark applying the store filename encoding to every fncache entry"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    store.fncache._load()

    def encodeall():
        for entry in store.fncache.entries:
            store.encode(entry)

    timer(encodeall)
    fm.end()
2191 2198
2192 2199
def _bdiffworker(q, blocks, xdiff, ready, done):
    """Worker loop for threaded perfbdiff runs.

    Consumes text pairs from ``q`` until a ``None`` sentinel, diffs each one,
    then parks on ``ready`` until the main thread wakes the workers for the
    next batch (or sets ``done``).
    """
    while not done.is_set():
        # iter(q.get, None) keeps calling q.get() until the None sentinel
        for pair in iter(q.get, None):
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
        q.task_done()  # for the None one
        with ready:
            ready.wait()
2208 2215
2209 2216
def _manifestrevision(repo, mnode):
    """Return the raw stored revision text for manifest node ``mnode``."""
    ml = repo.manifestlog
    # modern manifestlogs expose storage via getstorage(); fall back to the
    # private _revlog attribute for older Mercurial versions
    if util.safehasattr(ml, b'getstorage'):
        return ml.getstorage(b'').revision(mnode)
    return ml._revlog.revision(mnode)
2219 2226
2220 2227
@command(
    b'perfbdiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
        (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
        (b'', b'blocks', False, b'test computing diffs into blocks'),
        (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],
    b'-c|-m|FILE REV',
)
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        # --alldata implies walking the changelog
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # -c/-m take no FILE argument: the first positional is the revision
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    # (old text, new text) pairs collected up front so the timed section
    # measures only the diffing itself
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:

        def d():
            # single-threaded: diff every pair inline
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)

    else:
        q = queue()
        # prime the queue with one None sentinel per worker and join() so
        # all workers are parked on ready.wait() before timing starts
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(
                target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
            ).start()
        q.join()

        def d():
            # feed all pairs, then one sentinel per worker, wake everyone
            # and wait for the batch to drain
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # shut the workers down: signal done, unblock any pending q.get()
        # with sentinels, and wake threads parked on the condition
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
2335 2342
2336 2343
@command(
    b'perfunidiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ],
    b'-c|-m|FILE REV',
)
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        # --alldata implies walking the changelog
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # -c/-m take no FILE argument: the first positional is the revision
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    # (left, right) text pairs gathered before timing starts
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False
            )
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
2415 2422
2416 2423
@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # single-letter diff flags mapped to the diff option each one enables
    flagmap = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    # time the diff with no flags, each flag alone, and the w+B combination
    for combo in ('', 'w', 'b', 'B', 'wB'):
        diffargs = {flagmap[letter]: b'1' for letter in combo}

        def d(diffargs=diffargs):
            ui.pushbuffer()
            commands.diff(ui, repo, **diffargs)
            ui.popbuffer()

        combo = combo.encode('ascii')
        title = b'diffopts: %s' % (combo and (b'-' + combo) or b'none')
        timer(d, title=title)
    fm.end()
2440 2447
2441 2448
@command(b'perfrevlogindex', revlogopts + formatteropts, b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    indexfile = rl.indexfile
    data = opener.read(indexfile)

    # the first 4 bytes of a revlog index hold flags (high 16 bits) and the
    # format version (low 16 bits); only v1 is handled here
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        inline = header & (1 << 16)
    else:
        raise error.Abort(b'unsupported revlog version: %d' % version)

    rllen = len(rl)

    # sample nodes at 0%, 25%, 50%, 75% and tip to exercise lookups at
    # different positions in the index
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        # full revlog instantiation from the opener
        revlog.revlog(opener, indexfile)

    def read():
        # raw index file read, no parsing
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = revlogio.parseindex(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        nodemap = revlogio.parseindex(data, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        nodemap = revlogio.parseindex(data, inline)[1]
        if nodemap is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
        (
            lambda: resolvenodes(allnodesrev, 2),
            b'look up all nodes 2x (reverse)',
        ),
        (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
        (
            lambda: getentries(allrevs, 2),
            b'retrieve all index entries 2x (forward)',
        ),
        (
            lambda: getentries(allrevsrev),
            b'retrieve all index entries (reverse)',
        ),
        (
            lambda: getentries(allrevsrev, 2),
            b'retrieve all index entries 2x (reverse)',
        ),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2562 2569
2563 2570
@command(
    b'perfrevlogrevisions',
    revlogopts
    + formatteropts
    + [
        (b'd', b'dist', 100, b'distance between the revisions'),
        (b's', b'startrev', 0, b'revision to start reading at'),
        (b'', b'reverse', False, b'read in reverse'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogrevisions(
    ui, repo, file_=None, startrev=0, reverse=False, **opts
):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # a negative start revision counts from the end, list-slice style
    if startrev < 0:
        startrev = rllen + startrev

    def d():
        # drop caches so every run pays the full read cost
        rl.clearcaches()

        beginrev = startrev
        endrev = rllen
        dist = opts[b'dist']

        if reverse:
            # walk from tip down to startrev with a negative stride
            beginrev, endrev = endrev - 1, beginrev - 1
            dist = -1 * dist

        for x in _xrange(beginrev, endrev, dist):
            # Old revisions don't support passing int.
            n = rl.node(x)
            rl.revision(n)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
2612 2619
2613 2620
@command(
    b'perfrevlogwrite',
    revlogopts
    + formatteropts
    + [
        (b's', b'startrev', 1000, b'revision to start writing at'),
        (b'', b'stoprev', -1, b'last revision to write'),
        (b'', b'count', 3, b'number of passes to perform'),
        (b'', b'details', False, b'print timing for every revisions tested'),
        (b'', b'source', b'full', b'the kind of data feed in the revlog'),
        (b'', b'lazydeltabase', True, b'try the provided delta first'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
      (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    Note: This performance command measures performance in a custom way. As a
    result some of the global configuration of the 'perf' command does not
    apply to it:

    * ``pre-run``: disabled

    * ``profile-benchmark``: disabled

    * ``run-limits``: disabled use --count instead
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative revisions count from the end, list-slice style
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (
        b'full',
        b'parent-1',
        b'parent-2',
        b'parent-smallest',
        b'storage',
    )
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(
            ui,
            rl,
            source,
            startrev,
            stoprev,
            c + 1,
            lazydeltabase=lazydeltabase,
            clearcaches=clearcaches,
        )
        allresults.append(timing)

    ### consolidate the results in a single list
    # turn N per-pass [(rev, t), ...] lists into one [(rev, [t1..tN]), ...]
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # fixed: this row used `resultcount * 70 // 100`, displaying the
        # 70th percentile under the "50%" label
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append(
            (
                sum(x[1][0] for x in item),
                sum(x[1][1] for x in item),
                sum(x[1][2] for x in item),
            )
        )
    formatone(
        fm,
        totaltime,
        title="total time (%d revs)" % resultcount,
        displayall=displayall,
    )
    fm.end()
2755 2762
2756 2763
2757 2764 class _faketr(object):
2758 2765 def add(s, x, y, z=None):
2759 2766 return None
2760 2767
2761 2768
def _timeonewrite(
    ui,
    orig,
    source,
    startrev,
    stoprev,
    runidx=None,
    lazydeltabase=True,
    clearcaches=True,
):
    """Re-add revisions [startrev, stoprev] of ``orig`` into a temporary
    revlog copy, timing each ``addrawrevision`` call individually.

    ``source`` selects how the revision data is fed (see perfrevlogwrite).
    Returns a list of ``(rev, timing)`` pairs in revision order.
    """
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)

            def updateprogress(pos):
                progress.update(pos)

            def completeprogress():
                progress.complete()

        else:

            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)

            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            # build the addrawrevision arguments outside the timed section
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                dest.index.clearcaches()
                dest.clearcaches()
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
2811 2818
2812 2819
def _getrevisionseed(orig, rev, tr, source):
    """Build the ``(args, kwargs)`` pair to feed ``addrawrevision`` for
    revision ``rev`` of ``orig``, according to the requested ``source``
    strategy (full text, delta against a parent, or the stored delta).
    """
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    text = None
    cachedelta = None

    if source == b'full':
        # feed the full revision text, no precomputed delta
        text = orig.revision(rev)
    elif source == b'parent-1':
        cachedelta = (orig.rev(p1), orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to p1 when there is no second parent
        parent = p1 if p2 == nullid else p2
        cachedelta = (orig.rev(parent), orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        parent, diff = p1, orig.revdiff(p1, rev)
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            # prefer p2 only when its delta is strictly smaller (ties -> p1)
            if len(p2diff) < len(diff):
                parent, diff = p2, p2diff
        cachedelta = (orig.rev(parent), diff)
    elif source == b'storage':
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return (
        (text, tr, linkrev, p1, p2),
        {'node': node, 'flags': flags, 'cachedelta': cachedelta},
    )
2853 2860
2854 2861
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Yield a writable copy of revlog ``orig`` truncated before
    ``truncaterev``, stored in a temporary directory that is removed when
    the context exits.

    This lets benchmarks re-add revisions without touching the real
    repository. Inline revlogs are not supported.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    k = 'upperboundcomp'
    # forward upperboundcomp when the source revlog has it (newer versions)
    if util.safehasattr(orig, k):
        revlogkwargs[k] = getattr(orig, k)

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        # index entries are fixed-size, so the cut point is rev * entry size;
        # the data file is cut at the start offset of truncaterev
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(
            vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
        )
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        # best-effort cleanup (ignore_errors=True)
        shutil.rmtree(tmpdir, True)
2905 2912
2906 2913
@command(
    b'perfrevlogchunks',
    revlogopts
    + formatteropts
    + [
        (b'e', b'engines', b'', b'compression engines to use'),
        (b's', b'startrev', 0, b'revision to start at'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = set(e.strip() for e in engines.split(b','))
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # no explicit request: benchmark every usable engine; a trial
        # compression filters out engines that are present but unusable
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # Return an open file handle on the file that actually stores the
        # revision data: the index file for inline revlogs, the separate
        # data file otherwise.
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        # read each segment individually, letting the revlog open files
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        # read each segment individually, reusing one file handle
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        # read the whole revision span in a single request
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        # single-request read, reusing one file handle
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        # read + decompress each chunk individually
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        # recompress the chunks captured by dochunkbatch() with `compressor`
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    # NOTE: the compress benchmarks rely on chunks[0] being populated by the
    # earlier 'chunk batch' run, so they must come after it in the list.
    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append(
            (
                functools.partial(docompress, compressor),
                b'compress w/ %s' % engine,
            )
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3034 3041
3035 3042
@command(
    b'perfrevlogrevision',
    revlogopts
    + formatteropts
    + [(b'', b'cache', False, b'use caches instead of clearing')],
    b'-c|-m|FILE REV',
)
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # with -c/-m the first positional argument is the revision itself
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # Slice each read segment back into per-revision compressed chunks.
        # Hoist attribute lookups out of the loop; this is a measured path.
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with the data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        # older versions kept this helper directly on the revlog module
        slicechunk = getattr(revlog, '_slicechunk', None)

    # Precompute the inputs of each phase once, so every benchmark below
    # measures only its own phase.
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend(
        [
            (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
            (lambda: dodecompress(rawchunks), b'decompress'),
            (lambda: dopatch(text, bins), b'patch'),
            (lambda: dohash(text), b'hash'),
        ]
    )

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
3177 3184
3178 3185
@command(
    b'perfrevset',
    [
        (b'C', b'clear', False, b'clear volatile cache between each call.'),
        (b'', b'contexts', False, b'obtain changectx for each revision'),
    ]
    + formatteropts,
    b"REVSET",
)
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if need to evaluate the impact of build volatile
    revisions set cache on the revset execution. Volatile cache hold filtered
    and obsolete related cache."""
    # NOTE: the documented option is --clear (-C), matching the declaration
    # above; the docstring previously said "--clean" which does not exist.
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def d():
        if clear:
            # drop filtering/obsolescence caches so each run rebuilds them
            repo.invalidatevolatilesets()
        if contexts:
            # materialize full changectx objects for each result
            for ctx in repo.set(expr):
                pass
        else:
            # bare revision numbers only
            for r in repo.revs(expr):
                pass

    timer(d)
    fm.end()
3210 3217
3211 3218
@command(
    b'perfvolatilesets',
    [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),]
    + formatteropts,
)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def makebench(compute, setname):
        """build a benchmark callable recomputing `setname` from scratch"""

        def bench():
            # invalidate volatile caches (and optionally the obsstore
            # filecache entry) so the computation is not served from cache
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            compute(setname)

        return bench

    def selected(candidates):
        # honor the optional positional name filter, keeping sorted order
        return [n for n in sorted(candidates) if not names or n in names]

    # obsolescence-related sets first...
    for setname in selected(obsolete.cachefuncs):
        timer(
            makebench(lambda n: obsolete.getrevs(repo, n), setname),
            title=setname,
        )

    # ...then the repoview filter sets
    for setname in selected(repoview.filtertable):
        timer(
            makebench(lambda n: repoview.filterrevs(repo, n), setname),
            title=setname,
        )
    fm.end()
3257 3264
3258 3265
@command(
    b'perfbranchmap',
    [
        (b'f', b'full', False, b'Includes build time of subset'),
        (
            b'',
            b'clear-revbranch',
            False,
            b'purge the revbranch cache between computation',
        ),
    ]
    + formatteropts,
)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)

    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches

        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # drop every cached branchmap: measure a full rebuild
                view._branchcaches.clear()
            else:
                # drop only this filter level: measure an incremental update
                filtered.pop(filtername, None)
            view.branchmap()

        return d

    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        # pick a filter whose subset is not itself pending, so that subsets
        # are always benchmarked (and warmed) before their supersets
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable on-disk branchmap reading/writing so only the in-memory
    # computation is measured; restored in the finally block below
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=str(printname))
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
3348 3355
3349 3356
@command(
    b'perfbranchmapupdate',
    [
        (b'', b'base', [], b'subset of revision to start from'),
        (b'', b'target', [], b'subset of revision to end with'),
        (b'', b'clear-caches', False, b'clear cache between each runs'),
    ]
    + formatteropts,
)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from for <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    # update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    # (the second example's comment line previously started with '$',
    # which made it read as a command; it is a comment like the first)
    from mercurial import branchmap
    from mercurial import repoview

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None]  # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    # the revisions the benchmarked update will have to process
    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        # register two temporary repoview filters exposing exactly the base
        # and target revision sets; unregistered in the finally block
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset were found
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            # each run starts from a fresh copy of the base branchmap
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3458 3465
3459 3466
@command(
    b'perfbranchmapload',
    [
        (b'f', b'filter', b'', b'Specify repoview filter'),
        (b'', b'list', False, b'List branchmap filter caches'),
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ]
    + formatteropts,
)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
    # (fixed help-string typo: "brachmap" -> "branchmap")
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # --list: just enumerate on-disk branchmap cache files and sizes
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(
                    b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
                )
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap()  # make sure we have a relevant, up to date branchmap

    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached
    while fromfile(repo) is None:
        # fall back to the nearest cached subset filter
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(
                b'No branchmap cached for %s repo' % (filter or b'unfiltered')
            )
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)

    def setup():
        if clearrevlogs:
            clearchangelog(repo)

    def bench():
        fromfile(repo)

    timer(bench, setup=setup)
    fm.end()
3518 3525
3519 3526
@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)

    def countmarkers():
        # instantiating the obsstore parses every marker from disk
        return len(obsolete.obsstore(svfs))

    timer(countmarkers)
    fm.end()
3529 3536
3530 3537
@command(
    b'perflrucachedict',
    formatteropts
    + [
        (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
        (b'', b'mincost', 0, b'smallest cost of items in cache'),
        (b'', b'maxcost', 100, b'maximum cost of items in cache'),
        (b'', b'size', 4, b'size of cache'),
        (b'', b'gets', 10000, b'number of key lookups'),
        (b'', b'sets', 10000, b'number of key sets'),
        (b'', b'mixed', 10000, b'number of mixed mode operations'),
        (
            b'',
            b'mixedgetfreq',
            50,
            b'frequency of get vs set ops in mixed mode',
        ),
    ],
    norepo=True,
)
def perflrucache(
    ui,
    mincost=0,
    maxcost=100,
    costlimit=0,
    size=4,
    gets=10000,
    sets=10000,
    mixed=10000,
    mixedgetfreq=50,
    **opts
):
    """benchmark the util.lrucachedict implementation

    Measures creation, gets, inserts/sets and a mixed workload, either
    with a total cost limit (--costlimit) or without one."""
    opts = _byteskwargs(opts)

    def doinit():
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value # silence pyflakes warning

    def dogetscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value # silence pyflakes warning
            except KeyError:
                # the cost limit may have evicted this key
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0  # get
        else:
            op = 1  # set

        mixedops.append(
            (op, random.randint(0, size * 2), random.choice(costrange))
        )

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    # cost-aware and cost-free variants are mutually exclusive benchmarks
    if costlimit:
        benches.extend(
            [
                (dogetscost, b'gets w/ cost limit'),
                (doinsertscost, b'inserts w/ cost limit'),
                (domixedcost, b'mixed w/ cost limit'),
            ]
        )
    else:
        benches.extend(
            [
                (dogets, b'gets'),
                (doinserts, b'inserts'),
                (dosets, b'sets'),
                (domixed, b'mixed'),
            ]
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3685 3692
3686 3693
@command(b'perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write
    """
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def bench():
        # writenoi18n bypasses translation; 100k lines stress raw output
        line = b'Testing write performance\n'
        for _ in range(100000):
            ui.writenoi18n(line)

    timer(bench)
    fm.end()
3701 3708
3702 3709
def uisetup(ui):
    """extension setup hook

    On old Mercurial versions, wrap cmdutil.openrevlog() so that the
    unsupported '--dir' option fails with a clear message instead of
    being silently ignored.
    """
    if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
        commands, b'debugrevlogopts'
    ):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(
                    b"This version doesn't support --dir option",
                    hint=b"use 3.5 or later",
                )
            return orig(repo, cmd, file_, opts)

        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
3721 3728
3722 3729
@command(
    b'perfprogress',
    formatteropts
    + [
        (b'', b'topic', b'topic', b'topic for progress messages'),
        (b'c', b'total', 1000000, b'total value we are progressing to'),
    ],
    norepo=True,
)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def bench():
        # drive one full progress bar from 0 to `total`
        with ui.makeprogress(topic, total=total) as prog:
            for _ in _xrange(total):
                prog.increment()

    timer(bench)
    fm.end()
@@ -1,396 +1,396 b''
1 1 #require test-repo
2 2
3 3 Set vars:
4 4
5 5 $ . "$TESTDIR/helpers-testrepo.sh"
6 6 $ CONTRIBDIR="$TESTDIR/../contrib"
7 7
8 8 Prepare repo:
9 9
10 10 $ hg init
11 11
12 12 $ echo this is file a > a
13 13 $ hg add a
14 14 $ hg commit -m first
15 15
16 16 $ echo adding to file a >> a
17 17 $ hg commit -m second
18 18
19 19 $ echo adding more to file a >> a
20 20 $ hg commit -m third
21 21
22 22 $ hg up -r 0
23 23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 24 $ echo merge-this >> a
25 25 $ hg commit -m merge-able
26 26 created new head
27 27
28 28 $ hg up -r 2
29 29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
30 30
31 31 perfstatus
32 32
33 33 $ cat >> $HGRCPATH << EOF
34 34 > [extensions]
35 35 > perf=$CONTRIBDIR/perf.py
36 36 > [perf]
37 37 > presleep=0
38 38 > stub=on
39 39 > parentscount=1
40 40 > EOF
41 41 $ hg help -e perf
42 42 perf extension - helper extension to measure performance
43 43
44 44 Configurations
45 45 ==============
46 46
47 47 "perf"
48 48 ------
49 49
50 50 "all-timing"
51 51 When set, additional statistics will be reported for each benchmark: best,
52 52 worst, median average. If not set only the best timing is reported
53 53 (default: off).
54 54
55 55 "presleep"
56 56 number of second to wait before any group of runs (default: 1)
57 57
58 58 "pre-run"
59 59 number of run to perform before starting measurement.
60 60
61 61 "profile-benchmark"
62 62 Enable profiling for the benchmarked section. (The first iteration is
63 63 benchmarked)
64 64
65 65 "run-limits"
66 66 Control the number of runs each benchmark will perform. The option value
67 67 should be a list of '<time>-<numberofrun>' pairs. After each run the
68 68 conditions are considered in order with the following logic:
69 69
70 70 If benchmark has been running for <time> seconds, and we have performed
71 71 <numberofrun> iterations, stop the benchmark,
72 72
73 73 The default value is: '3.0-100, 10.0-3'
74 74
75 75 "stub"
76 76 When set, benchmarks will only be run once, useful for testing (default:
77 77 off)
78 78
79 79 list of commands:
80 80
81 81 perfaddremove
82 82 (no help text available)
83 83 perfancestors
84 84 (no help text available)
85 85 perfancestorset
86 86 (no help text available)
87 87 perfannotate (no help text available)
88 88 perfbdiff benchmark a bdiff between revisions
89 89 perfbookmarks
90 90 benchmark parsing bookmarks from disk to memory
91 91 perfbranchmap
92 92 benchmark the update of a branchmap
93 93 perfbranchmapload
94 94 benchmark reading the branchmap
95 95 perfbranchmapupdate
96 96 benchmark branchmap update from for <base> revs to <target>
97 97 revs
98 98 perfbundleread
99 99 Benchmark reading of bundle files.
100 100 perfcca (no help text available)
101 101 perfchangegroupchangelog
102 102 Benchmark producing a changelog group for a changegroup.
103 103 perfchangeset
104 104 (no help text available)
105 105 perfctxfiles (no help text available)
106 106 perfdiffwd Profile diff of working directory changes
107 107 perfdirfoldmap
108 108 (no help text available)
109 109 perfdirs (no help text available)
110 110 perfdirstate (no help text available)
111 111 perfdirstatedirs
112 112 (no help text available)
113 113 perfdirstatefoldmap
114 114 (no help text available)
115 115 perfdirstatewrite
116 116 (no help text available)
117 117 perfdiscovery
118 118 benchmark discovery between local repo and the peer at given
119 119 path
120 120 perffncacheencode
121 121 (no help text available)
122 122 perffncacheload
123 123 (no help text available)
124 124 perffncachewrite
125 125 (no help text available)
126 126 perfheads benchmark the computation of a changelog heads
127 127 perfhelper-mergecopies
128 128 find statistics about potential parameters for
129 129 'perfmergecopies'
130 130 perfhelper-pathcopies
131 131 find statistic about potential parameters for the
132 132 'perftracecopies'
133 133 perfignore benchmark operation related to computing ignore
134 134 perfindex benchmark index creation time followed by a lookup
135 135 perflinelogedits
136 136 (no help text available)
137 137 perfloadmarkers
138 138 benchmark the time to parse the on-disk markers for a repo
139 139 perflog (no help text available)
140 140 perflookup (no help text available)
141 141 perflrucachedict
142 142 (no help text available)
143 143 perfmanifest benchmark the time to read a manifest from disk and return a
144 144 usable
145 145 perfmergecalculate
146 146 (no help text available)
147 147 perfmergecopies
148 148 measure runtime of 'copies.mergecopies'
149 149 perfmoonwalk benchmark walking the changelog backwards
150 150 perfnodelookup
151 151 (no help text available)
152 152 perfnodemap benchmark the time necessary to look up revision from a cold
153 153 nodemap
154 154 perfparents benchmark the time necessary to fetch one changeset's parents.
155 155 perfpathcopies
156 156 benchmark the copy tracing logic
157 157 perfphases benchmark phasesets computation
158 158 perfphasesremote
159 159 benchmark time needed to analyse phases of the remote server
160 160 perfprogress printing of progress bars
161 161 perfrawfiles (no help text available)
162 162 perfrevlogchunks
163 163 Benchmark operations on revlog chunks.
164 164 perfrevlogindex
165 165 Benchmark operations against a revlog index.
166 166 perfrevlogrevision
167 167 Benchmark obtaining a revlog revision.
168 168 perfrevlogrevisions
169 169 Benchmark reading a series of revisions from a revlog.
170 170 perfrevlogwrite
171 171 Benchmark writing a series of revisions to a revlog.
172 172 perfrevrange (no help text available)
173 173 perfrevset benchmark the execution time of a revset
174 174 perfstartup (no help text available)
175 perfstatus (no help text available)
175 perfstatus benchmark the performance of a single status call
176 176 perftags (no help text available)
177 177 perftemplating
178 178 test the rendering time of a given template
179 179 perfunidiff benchmark a unified diff between revisions
180 180 perfvolatilesets
181 181 benchmark the computation of various volatile set
182 182 perfwalk (no help text available)
183 183 perfwrite microbenchmark ui.write
184 184
185 185 (use 'hg help -v perf' to show built-in aliases and global options)
186 186 $ hg perfaddremove
187 187 $ hg perfancestors
188 188 $ hg perfancestorset 2
189 189 $ hg perfannotate a
190 190 $ hg perfbdiff -c 1
191 191 $ hg perfbdiff --alldata 1
192 192 $ hg perfunidiff -c 1
193 193 $ hg perfunidiff --alldata 1
194 194 $ hg perfbookmarks
195 195 $ hg perfbranchmap
196 196 $ hg perfbranchmapload
197 197 $ hg perfbranchmapupdate --base "not tip" --target "tip"
198 198 benchmark of branchmap with 3 revisions with 1 new ones
199 199 $ hg perfcca
200 200 $ hg perfchangegroupchangelog
201 201 $ hg perfchangegroupchangelog --cgversion 01
202 202 $ hg perfchangeset 2
203 203 $ hg perfctxfiles 2
204 204 $ hg perfdiffwd
205 205 $ hg perfdirfoldmap
206 206 $ hg perfdirs
207 207 $ hg perfdirstate
208 208 $ hg perfdirstatedirs
209 209 $ hg perfdirstatefoldmap
210 210 $ hg perfdirstatewrite
211 211 #if repofncache
212 212 $ hg perffncacheencode
213 213 $ hg perffncacheload
214 214 $ hg debugrebuildfncache
215 215 fncache already up to date
216 216 $ hg perffncachewrite
217 217 $ hg debugrebuildfncache
218 218 fncache already up to date
219 219 #endif
220 220 $ hg perfheads
221 221 $ hg perfignore
222 222 $ hg perfindex
223 223 $ hg perflinelogedits -n 1
224 224 $ hg perfloadmarkers
225 225 $ hg perflog
226 226 $ hg perflookup 2
227 227 $ hg perflrucache
228 228 $ hg perfmanifest 2
229 229 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
230 230 $ hg perfmanifest -m 44fe2c8352bb
231 231 abort: manifest revision must be integer or full node
232 232 [255]
233 233 $ hg perfmergecalculate -r 3
234 234 $ hg perfmoonwalk
235 235 $ hg perfnodelookup 2
236 236 $ hg perfpathcopies 1 2
237 237 $ hg perfprogress --total 1000
238 238 $ hg perfrawfiles 2
239 239 $ hg perfrevlogindex -c
240 240 #if reporevlogstore
241 241 $ hg perfrevlogrevisions .hg/store/data/a.i
242 242 #endif
243 243 $ hg perfrevlogrevision -m 0
244 244 $ hg perfrevlogchunks -c
245 245 $ hg perfrevrange
246 246 $ hg perfrevset 'all()'
247 247 $ hg perfstartup
248 248 $ hg perfstatus
249 249 $ hg perftags
250 250 $ hg perftemplating
251 251 $ hg perfvolatilesets
252 252 $ hg perfwalk
253 253 $ hg perfparents
254 254 $ hg perfdiscovery -q .
255 255
256 256 Test run control
257 257 ----------------
258 258
259 259 Simple single entry
260 260
261 261 $ hg perfparents --config perf.stub=no --config perf.run-limits='0.000000001-15'
262 262 ! wall * comb * user * sys * (best of 15) (glob)
263 263
264 264 Multiple entries
265 265
266 266 $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-5'
267 267 ! wall * comb * user * sys * (best of 5) (glob)
268 268
269 269 error cases are ignored
270 270
271 271 $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-5'
272 272 malformatted run limit entry, missing "-": 500
273 273 ! wall * comb * user * sys * (best of 5) (glob)
274 274 $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-12, 0.000000001-5'
275 275 malformatted run limit entry, could not convert string to float: aaa: aaa-12 (no-py3 !)
276 276 malformatted run limit entry, could not convert string to float: 'aaa': aaa-12 (py3 !)
277 277 ! wall * comb * user * sys * (best of 5) (glob)
278 278 $ hg perfparents --config perf.stub=no --config perf.run-limits='12-aaaaaa, 0.000000001-5'
279 279 malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 12-aaaaaa
280 280 ! wall * comb * user * sys * (best of 5) (glob)
281 281
282 282 test actual output
283 283 ------------------
284 284
285 285 normal output:
286 286
287 287 $ hg perfheads --config perf.stub=no
288 288 ! wall * comb * user * sys * (best of *) (glob)
289 289
290 290 detailed output:
291 291
292 292 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
293 293 ! wall * comb * user * sys * (best of *) (glob)
294 294 ! wall * comb * user * sys * (max of *) (glob)
295 295 ! wall * comb * user * sys * (avg of *) (glob)
296 296 ! wall * comb * user * sys * (median of *) (glob)
297 297
298 298 test json output
299 299 ----------------
300 300
301 301 normal output:
302 302
303 303 $ hg perfheads --template json --config perf.stub=no
304 304 [
305 305 {
306 306 "comb": *, (glob)
307 307 "count": *, (glob)
308 308 "sys": *, (glob)
309 309 "user": *, (glob)
310 310 "wall": * (glob)
311 311 }
312 312 ]
313 313
314 314 detailed output:
315 315
316 316 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
317 317 [
318 318 {
319 319 "avg.comb": *, (glob)
320 320 "avg.count": *, (glob)
321 321 "avg.sys": *, (glob)
322 322 "avg.user": *, (glob)
323 323 "avg.wall": *, (glob)
324 324 "comb": *, (glob)
325 325 "count": *, (glob)
326 326 "max.comb": *, (glob)
327 327 "max.count": *, (glob)
328 328 "max.sys": *, (glob)
329 329 "max.user": *, (glob)
330 330 "max.wall": *, (glob)
331 331 "median.comb": *, (glob)
332 332 "median.count": *, (glob)
333 333 "median.sys": *, (glob)
334 334 "median.user": *, (glob)
335 335 "median.wall": *, (glob)
336 336 "sys": *, (glob)
337 337 "user": *, (glob)
338 338 "wall": * (glob)
339 339 }
340 340 ]
341 341
342 342 Test pre-run feature
343 343 --------------------
344 344
345 345 (perf discovery has some spurious output)
346 346
347 347 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=0
348 348 ! wall * comb * user * sys * (best of 1) (glob)
349 349 searching for changes
350 350 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=1
351 351 ! wall * comb * user * sys * (best of 1) (glob)
352 352 searching for changes
353 353 searching for changes
354 354 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=3
355 355 ! wall * comb * user * sys * (best of 1) (glob)
356 356 searching for changes
357 357 searching for changes
358 358 searching for changes
359 359 searching for changes
360 360
361 361 test profile-benchmark option
362 362 ------------------------------
363 363
364 364 Function to check that statprof ran
365 365 $ statprofran () {
366 366 > egrep 'Sample count:|No samples recorded' > /dev/null
367 367 > }
368 368 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.profile-benchmark=yes 2>&1 | statprofran
369 369
370 370 Check perf.py for historical portability
371 371 ----------------------------------------
372 372
373 373 $ cd "$TESTDIR/.."
374 374
375 375 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
376 376 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
377 377 > "$TESTDIR"/check-perf-code.py contrib/perf.py
378 378 contrib/perf.py:\d+: (re)
379 379 > from mercurial import (
380 380 import newer module separately in try clause for early Mercurial
381 381 contrib/perf.py:\d+: (re)
382 382 > from mercurial import (
383 383 import newer module separately in try clause for early Mercurial
384 384 contrib/perf.py:\d+: (re)
385 385 > origindexpath = orig.opener.join(orig.indexfile)
386 386 use getvfs()/getsvfs() for early Mercurial
387 387 contrib/perf.py:\d+: (re)
388 388 > origdatapath = orig.opener.join(orig.datafile)
389 389 use getvfs()/getsvfs() for early Mercurial
390 390 contrib/perf.py:\d+: (re)
391 391 > vfs = vfsmod.vfs(tmpdir)
392 392 use getvfs()/getsvfs() for early Mercurial
393 393 contrib/perf.py:\d+: (re)
394 394 > vfs.options = getattr(orig.opener, 'options', None)
395 395 use getvfs()/getsvfs() for early Mercurial
396 396 [1]
General Comments 0
You need to be logged in to leave comments. Login now