##// END OF EJS Templates
phases: use a more generic way to trigger a phases computation for perf...
marmoute -
r52306:8fc92193 default
parent child Browse files
Show More
@@ -1,4637 +1,4638 b''
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance
3 3
4 4 Configurations
5 5 ==============
6 6
7 7 ``perf``
8 8 --------
9 9
10 10 ``all-timing``
  When set, additional statistics will be reported for each benchmark: best,
  worst, median, and average. If not set, only the best timing is reported
  (default: off).
14 14
15 15 ``presleep``
  number of seconds to wait before any group of runs (default: 1)
17 17
18 18 ``pre-run``
  number of runs to perform before starting measurement.
20 20
21 21 ``profile-benchmark``
22 22 Enable profiling for the benchmarked section.
23 23 (The first iteration is benchmarked)
24 24
25 25 ``run-limits``
26 26 Control the number of runs each benchmark will perform. The option value
27 27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 28 conditions are considered in order with the following logic:
29 29
30 30 If benchmark has been running for <time> seconds, and we have performed
31 31 <numberofrun> iterations, stop the benchmark,
32 32
33 33 The default value is: `3.0-100, 10.0-3`
34 34
35 35 ``stub``
36 36 When set, benchmarks will only be run once, useful for testing
37 37 (default: off)
38 38 '''
39 39
40 40 # "historical portability" policy of perf.py:
41 41 #
42 42 # We have to do:
43 43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 46 # - make historical perf command work correctly with as wide Mercurial
47 47 # version as possible
48 48 #
49 49 # We have to do, if possible with reasonable cost:
50 50 # - make recent perf command for historical feature work correctly
51 51 # with early Mercurial
52 52 #
53 53 # We don't have to do:
54 54 # - make perf command for recent feature work correctly with early
55 55 # Mercurial
56 56
57 57 import contextlib
58 58 import functools
59 59 import gc
60 60 import os
61 61 import random
62 62 import shutil
63 63 import struct
64 64 import sys
65 65 import tempfile
66 66 import threading
67 67 import time
68 68
69 69 import mercurial.revlog
70 70 from mercurial import (
71 71 changegroup,
72 72 cmdutil,
73 73 commands,
74 74 copies,
75 75 error,
76 76 extensions,
77 77 hg,
78 78 mdiff,
79 79 merge,
80 80 util,
81 81 )
82 82
83 83 # for "historical portability":
84 84 # try to import modules separately (in dict order), and ignore
85 85 # failure, because these aren't available with early Mercurial
86 86 try:
87 87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 88 except ImportError:
89 89 pass
90 90 try:
91 91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 92 except ImportError:
93 93 pass
94 94 try:
95 95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96 96
97 97 dir(registrar) # forcibly load it
98 98 except ImportError:
99 99 registrar = None
100 100 try:
101 101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 102 except ImportError:
103 103 pass
104 104 try:
105 105 from mercurial.utils import repoviewutil # since 5.0
106 106 except ImportError:
107 107 repoviewutil = None
108 108 try:
109 109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 110 except ImportError:
111 111 pass
112 112 try:
113 113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 114 except ImportError:
115 115 pass
116 116
117 117 try:
118 118 from mercurial import profiling
119 119 except ImportError:
120 120 profiling = None
121 121
try:
    # for "historical portability":
    # modern revlogs require a "target" (kind) argument; when the constants
    # module exists, tag everything we create with a perf-specific kind.
    from mercurial.revlogutils import constants as revlog_constants

    perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')

    def revlog(opener, *args, **kwargs):
        # wrapper injecting the mandatory revlog kind argument
        return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)


except (ImportError, AttributeError):
    # older Mercurial: revlog() takes no kind argument
    perf_rl_kind = None

    def revlog(opener, *args, **kwargs):
        return mercurial.revlog.revlog(opener, *args, **kwargs)
137 137
def identity(a):
    """Return *a* unchanged (fallback for missing pycompat helpers)."""
    return a
140 140
141 141
142 142 try:
143 143 from mercurial import pycompat
144 144
145 145 getargspec = pycompat.getargspec # added to module after 4.5
146 146 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
147 147 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
148 148 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
149 149 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
150 150 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
151 151 if pycompat.ispy3:
152 152 _maxint = sys.maxsize # per py3 docs for replacing maxint
153 153 else:
154 154 _maxint = sys.maxint
155 155 except (NameError, ImportError, AttributeError):
156 156 import inspect
157 157
158 158 getargspec = inspect.getargspec
159 159 _byteskwargs = identity
160 160 _bytestr = str
161 161 fsencode = identity # no py3 support
162 162 _maxint = sys.maxint # no py3 support
163 163 _sysstr = lambda x: x # no py3 support
164 164 _xrange = xrange
165 165
166 166 try:
167 167 # 4.7+
168 168 queue = pycompat.queue.Queue
169 169 except (NameError, AttributeError, ImportError):
170 170 # <4.7.
171 171 try:
172 172 queue = pycompat.queue
173 173 except (NameError, AttributeError, ImportError):
174 174 import Queue as queue
175 175
176 176 try:
177 177 from mercurial import logcmdutil
178 178
179 179 makelogtemplater = logcmdutil.maketemplater
180 180 except (AttributeError, ImportError):
181 181 try:
182 182 makelogtemplater = cmdutil.makelogtemplater
183 183 except (AttributeError, ImportError):
184 184 makelogtemplater = None
185 185
186 186 # for "historical portability":
187 187 # define util.safehasattr forcibly, because util.safehasattr has been
188 188 # available since 1.9.3 (or 94b200a11cf7)
# sentinel distinguishing "attribute missing" from any real attribute value
_undefined = object()


def safehasattr(thing, attr):
    # hasattr() replacement that accepts bytes attribute names (converted
    # through _sysstr for py2/py3 portability)
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined


setattr(util, 'safehasattr', safehasattr)

# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
elif os.name == b'nt':
    # NOTE(review): os.name is a str on py3, so this bytes comparison can
    # only match on py2; harmless on py3 where perf_counter always exists
    util.timer = time.clock
else:
    util.timer = time.time
207 207
208 208 # for "historical portability":
209 209 # use locally defined empty option list, if formatteropts isn't
210 210 # available, because commands.formatteropts has been available since
211 211 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
212 212 # available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(
    cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
)

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(
    cmdutil,
    "debugrevlogopts",
    getattr(
        commands,
        "debugrevlogopts",
        [
            (b'c', b'changelog', False, b'open changelog'),
            (b'm', b'manifest', False, b'open manifest'),
            (b'', b'dir', False, b'open directory manifest'),
        ],
    ),
)

# command table populated by the @command decorator defined below
cmdtable = {}
237 237
238 238
239 239 # for "historical portability":
240 240 # define parsealiases locally, because cmdutil.parsealiases has been
241 241 # available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a command declaration ``b'name|alias1|...'`` into its names.

    Defined locally because cmdutil.parsealiases has only been available
    since 1.5 (or 6252852b4332).
    """
    names = cmd.split(b"|")
    return names
244 244
245 245
# for "historical portability": pick the most capable @command decorator
# available, probing newest API first.
if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if 'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command

        def command(name, options=(), synopsis=None, norepo=False):
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)


else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func

        return decorator
277 277
278 278
# Register the perf.* config items when the registrar API is available;
# failures are tolerated because old Mercurial has no configitem at all,
# and the TypeError branch re-registers without the ``experimental``
# keyword for versions that predate it.
try:
    import mercurial.registrar
    import mercurial.configitems

    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'pre-run',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
except (ImportError, AttributeError):
    pass
except TypeError:
    # compatibility fix for a11fd395e83f
    # hg version: 5.2
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'pre-run',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
    )
365 365
366 366
def getlen(ui):
    """Return the length function benchmarks should use.

    In stub mode (``perf.stub``) every collection is reported as a single
    element so test runs stay cheap; otherwise the builtin ``len`` is
    returned unchanged.
    """
    stub = ui.configbool(b"perf", b"stub", False)
    if stub:
        return lambda seq: 1
    return len
371 371
372 372
class noop:
    """Do-nothing context manager (stand-in when profiling is disabled)."""

    def __enter__(self):
        return None

    def __exit__(self, *args):
        # returning None (falsy) lets any exception propagate
        return None


# shared do-nothing instance, reused wherever a context manager is optional
NOOPCTX = noop()
384 384
385 385
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands.

    The returned timer honours the ``perf.*`` configuration knobs
    (presleep, stub, all-timing, run-limits, pre-run, profile-benchmark)
    documented in the module docstring.
    """

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter:
            """Minimized composition of baseformatter and plainformatter"""

            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short

            def __nonzero__(self):
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    # BUGFIX: the default must be False ("off"), as documented in the module
    # docstring; it was erroneously True here.
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    # parse `<time>-<numberofrun>` pairs; malformed entries are warned
    # about and skipped rather than aborting the benchmark
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
            continue
        try:
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    # only the first measured iteration is profiled (see _timer)
    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
    )
    return t, fm
508 508
509 509
def stub_timer(fm, func, setup=None, title=None):
    """Degenerate timer used in stub mode: run ``setup`` (if given) then
    ``func`` exactly once, measuring nothing."""
    if setup is not None:
        setup()
    func()
514 514
515 515
@contextlib.contextmanager
def timeone():
    """Time the enclosed block.

    Yields a list that, after the block exits, holds a single
    (wall, user, sys) tuple for the block's execution.
    """
    result = []
    times_before = os.times()
    wall_before = util.timer()
    yield result
    wall_after = util.timer()
    times_after = os.times()
    result.append(
        (
            wall_after - wall_before,
            times_after[0] - times_before[0],
            times_after[1] - times_before[1],
        )
    )
526 526
527 527
# list of stop condition (elapsed time, minimal run count)
# i.e. stop after 3 seconds once 100 runs are done, or after 10 seconds
# once 3 runs are done; mirrors the documented perf.run-limits default
# `3.0-100, 10.0-3`.
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)
533 533
534 534
@contextlib.contextmanager
def noop_context():
    """Context manager equivalent of doing nothing at all (default wrapper
    for each timed iteration in _timer)."""
    yield
538 538
539 539
def _timer(
    fm,
    func,
    setup=None,
    context=noop_context,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
):
    """Repeatedly run ``func`` and report its timings through ``fm``.

    ``setup`` runs before every iteration (untimed); ``context`` wraps each
    iteration; ``limits`` is a sequence of (elapsed-seconds, min-run-count)
    stop conditions; ``prerun`` warm-up iterations are executed but not
    measured; only the first measured iteration runs under ``profiler``.
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    for i in range(prerun):
        # warm-up runs: executed, never measured
        if setup is not None:
            setup()
        with context():
            func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with context():
            with profiler:
                with timeone() as item:
                    r = func()
        # profile only the first measured iteration
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    formatone(fm, results, title=title, result=r, displayall=displayall)
582 582
583 583
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's timings through formatter ``fm``.

    ``timings`` is a list of (wall, user, sys) tuples as produced by
    timeone().  The best entry is always shown; with ``displayall`` the
    max, average and median entries are shown as well.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def display(role, entry):
        # non-"best" roles are namespaced, e.g. "max.wall"
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    # sorting (by wall time first) makes [0] the best and [-1] the worst run
    timings.sort()
    min_val = timings[0]
    display(b'best', min_val)
    if displayall:
        max_val = timings[-1]
        display(b'max', max_val)
        avg = tuple([sum(x) / count for x in zip(*timings)])
        display(b'avg', avg)
        median = timings[len(timings) // 2]
        display(b'median', median)
616 616
617 617
618 618 # utilities for historical portability
619 619
620 620
def getint(ui, section, name, default):
    """Read an integer config value, falling back to ``default``.

    for "historical portability":
    ui.configint has been available since 1.9 (or fa2b596db182), so parse
    the raw value ourselves.
    """
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError(
            b"%s.%s is not an integer ('%s')" % (section, name, raw)
        )
633 633
634 634
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure ``obj`` exposes attribute ``name`` and return a helper for it.

    Aborts when ``obj`` lacks ``name`` at runtime, so that silent removal
    of an attribute a benchmark relies on is detected instead of skewing
    the measurement.  The returned helper can (1) assign a new value and
    (2) restore the original value of the attribute.

    With ``ignoremissing=True`` a missing attribute yields None instead of
    an abort, which is handy for attributes that only exist in some
    Mercurial versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort(
            (
                b"missing attribute %s of %s might break assumption"
                b" of performance measurement"
            )
            % (name, obj)
        )

    attrname = _sysstr(name)
    origvalue = getattr(obj, attrname)

    class attrutil:
        # setter/restorer closed over ``obj`` and the captured original value
        def set(self, newvalue):
            setattr(obj, attrname, newvalue)

        def restore(self):
            setattr(obj, attrname, origvalue)

    return attrutil()
671 671
672 672
673 673 # utilities to examine each internal API changes
674 674
675 675
def getbranchmapsubsettable():
    """Return the ``subsettable`` mapping used by branchmap caches.

    for "historical portability", subsettable is defined in:
    - branchmap since 2.9 (or 175c6fd8cacc)
    - repoview since 2.5 (or 59a9f18d4587)
    - repoviewutil since 5.0
    """
    # BUGFIX: ``branchmap`` and ``repoview`` are imported in try/except
    # blocks above and may be entirely unbound on very old Mercurial, in
    # which case naming them directly raised NameError before the friendly
    # Abort below could trigger.  Looking them up through globals() lets a
    # missing module degrade to the Abort instead.
    for modname in ('branchmap', 'repoview', 'repoviewutil'):
        mod = globals().get(modname)
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )
694 694
695 695
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store

    for "historical portability":
    repo.svfs has been available since 2.3 (or 7034365089bf); older
    versions expose ``sopener`` instead.
    """
    store_vfs = getattr(repo, 'svfs', None)
    return store_vfs if store_vfs else getattr(repo, 'sopener')
705 705
706 706
def getvfs(repo):
    """Return appropriate object to access files under .hg

    for "historical portability":
    repo.vfs has been available since 2.3 (or 7034365089bf); older
    versions expose ``opener`` instead.
    """
    working_vfs = getattr(repo, 'vfs', None)
    return working_vfs if working_vfs else getattr(repo, 'opener')
716 716
717 717
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API

    Probes the newest tags-cache attribute first and falls back through
    older ones; aborts if no known API is found.
    """
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if '_tagscache' in vars(repo):
                del repo.__dict__['_tagscache']

        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")
746 746
747 747
748 748 # utilities to clear cache
749 749
750 750
def clearfilecache(obj, attrname):
    """Drop the filecache-backed property ``attrname`` from ``obj``,
    operating on ``obj.unfiltered()`` when that accessor exists."""
    unfilter = getattr(obj, 'unfiltered', None)
    if unfilter is not None:
        obj = unfilter()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
758 758
759 759
def clearchangelog(repo):
    """Invalidate cached changelog data on ``repo`` and its unfiltered view."""
    unfi = repo.unfiltered()
    if repo is not unfi:
        object.__setattr__(repo, '_clcachekey', None)
        object.__setattr__(repo, '_clcache', None)
    clearfilecache(unfi, 'changelog')
765 765
766 766
767 767 # perf commands
768 768
769 769
@command(b'perf::walk|perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    """benchmark walking the dirstate for files matching PATTERNS"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    timer(
        lambda: len(
            list(
                repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
            )
        )
    )
    fm.end()
783 783
784 784
@command(b'perf::annotate|perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    """benchmark annotating file F at the working directory parent"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    timer(lambda: len(fc.annotate(True)))
    fm.end()
792 792
793 793
@command(
    b'perf::status|perfstatus',
    [
        (b'u', b'unknown', False, b'ask status to look for unknown files'),
        (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
    ]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call

    The repository data are preserved between each call.

    By default, only the status of the tracked file are requested. If
    `--unknown` is passed, the "unknown" files are also tracked.
    """
    opts = _byteskwargs(opts)
    # m = match.always(repo.root, repo.getcwd())
    # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    # False))))
    timer, fm = gettimer(ui, opts)
    if opts[b'dirstate']:
        # benchmark the low-level dirstate.status() call directly
        dirstate = repo.dirstate
        m = scmutil.matchall(repo)
        unknown = opts[b'unknown']

        def status_dirstate():
            s = dirstate.status(
                m, subrepos=[], ignored=False, clean=False, unknown=unknown
            )
            # force evaluation of every status field
            sum(map(bool, s))

        if util.safehasattr(dirstate, 'running_status'):
            with dirstate.running_status(repo):
                timer(status_dirstate)
                dirstate.invalidate()
        else:
            timer(status_dirstate)
    else:
        timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
835 835
836 836
@command(b'perf::addremove|perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    """benchmark a dry-run addremove over the whole repository"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    try:
        oldquiet = repo.ui.quiet
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        # dry-run so the repository is never actually modified
        opts[b'dry_run'] = True
        if 'uipathfn' in getargspec(scmutil.addremove).args:
            # newer addremove signature takes an extra uipathfn argument
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
854 854
855 855
def clearcaches(cl):
    """Drop the in-memory caches of changelog/revlog ``cl``."""
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # <= hg-5.2
        from mercurial.node import nullid, nullrev

        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
866 866
867 867
@command(b'perf::heads|perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def s():
        # setup: drop cached data so every run recomputes from scratch
        clearcaches(cl)

    def d():
        len(cl.headrevs())

    timer(d, setup=s)
    fm.end()
883 883
884 884
def _default_clear_on_disk_tags_cache(repo):
    """Fallback used when ``tags.clear_cache_on_disk`` is missing: unlink
    the on-disk tags cache file of ``repo``."""
    from mercurial import tags

    repo.cachevfs.tryunlink(tags._filename(repo))
889 889
890 890
def _default_clear_on_disk_tags_fnodes_cache(repo):
    """Fallback used when ``tags.clear_cache_fnodes`` is unavailable:
    unlink the on-disk filenode cache file of ``repo``."""
    from mercurial import tags

    repo.cachevfs.tryunlink(tags._fnodescachefile)
895 895
896 896
def _default_forget_fnodes(repo, revs):
    """function used by the perf extension to prune some entries from the
    fnodes cache"""
    from mercurial import tags

    # all-ones bytes mark a record as "missing"
    # (4-byte + 20-byte fields per record — assumes the hgtagsfnodescache
    # record layout; TODO confirm against mercurial.tags)
    missing_1 = b'\xff' * 4
    missing_2 = b'\xff' * 20
    cache = tags.hgtagsfnodescache(repo.unfiltered())
    for r in revs:
        cache._writeentry(r * tags._fnodesrecsize, missing_1, missing_2)
    cache.write()
908 908
909 909
@command(
    b'perf::tags|perftags',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
        (
            b'',
            b'clear-on-disk-cache',
            False,
            b'clear on disk tags cache (DESTRUCTIVE)',
        ),
        (
            b'',
            b'clear-fnode-cache-all',
            False,
            b'clear on disk file node cache (DESTRUCTIVE),',
        ),
        (
            b'',
            b'clear-fnode-cache-rev',
            [],
            b'clear on disk file node cache (DESTRUCTIVE),',
            b'REVS',
        ),
        (
            b'',
            b'update-last',
            b'',
            b'simulate an update over the last N revisions (DESTRUCTIVE),',
            b'N',
        ),
    ],
)
def perftags(ui, repo, **opts):
    """Benchmark tags retrieval in various situation

    The option marked as (DESTRUCTIVE) will alter the on-disk cache, possibly
    altering performance after the command was run. However, it does not
    destroy any stored data.
    """
    from mercurial import tags

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']
    clear_disk = opts[b'clear_on_disk_cache']
    clear_fnode = opts[b'clear_fnode_cache_all']

    clear_fnode_revs = opts[b'clear_fnode_cache_rev']
    update_last_str = opts[b'update_last']
    update_last = None
    if update_last_str:
        try:
            update_last = int(update_last_str)
        except ValueError:
            msg = b'could not parse value for update-last: "%s"'
            msg %= update_last_str
            hint = b'value should be an integer'
            raise error.Abort(msg, hint=hint)

    # prefer in-tree helpers when this Mercurial provides them, otherwise
    # fall back to the local _default_* implementations above
    clear_disk_fn = getattr(
        tags,
        "clear_cache_on_disk",
        _default_clear_on_disk_tags_cache,
    )
    if getattr(tags, 'clear_cache_fnodes_is_working', False):
        clear_fnodes_fn = tags.clear_cache_fnodes
    else:
        clear_fnodes_fn = _default_clear_on_disk_tags_fnodes_cache
    clear_fnodes_rev_fn = getattr(
        tags,
        "forget_fnodes",
        _default_forget_fnodes,
    )

    clear_revs = []
    if clear_fnode_revs:
        clear_revs.extend(scmutil.revrange(repo, clear_fnode_revs))

    if update_last:
        # build a filtered view hiding the last N revisions and warm its
        # tags cache, so each run can simulate updating over those revisions
        revset = b'last(all(), %d)' % update_last
        last_revs = repo.unfiltered().revs(revset)
        clear_revs.extend(last_revs)

        from mercurial import repoview

        rev_filter = {(b'experimental', b'extra-filter-revs'): revset}
        with repo.ui.configoverride(rev_filter, source=b"perf"):
            filter_id = repoview.extrafilter(repo.ui)

        filter_name = b'%s%%%s' % (repo.filtername, filter_id)
        pre_repo = repo.filtered(filter_name)
        pre_repo.tags()  # warm the cache
        old_tags_path = repo.cachevfs.join(tags._filename(pre_repo))
        new_tags_path = repo.cachevfs.join(tags._filename(repo))

    clear_revs = sorted(set(clear_revs))

    def s():
        # setup: put the caches back into the requested state before each run
        if update_last:
            util.copyfile(old_tags_path, new_tags_path)
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        if clear_disk:
            clear_disk_fn(repo)
        if clear_fnode:
            clear_fnodes_fn(repo)
        elif clear_revs:
            clear_fnodes_rev_fn(repo, clear_revs)
        repocleartagscache()

    def t():
        len(repo.tags())

    timer(t, setup=s)
    fm.end()
1028 1028
1029 1029
@command(b'perf::ancestors|perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    """benchmark walking every ancestor of the current repository heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    head_revs = repo.changelog.headrevs()

    def run():
        # exhausting the iterator is the traversal work being measured
        for _rev in repo.changelog.ancestors(head_revs):
            pass

    timer(run)
    fm.end()
1042 1042
1043 1043
@command(b'perf::ancestorset|perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    """benchmark membership tests of REVSET revs in a lazy ancestor set"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    target_revs = repo.revs(revset)
    head_revs = repo.changelog.headrevs()

    def run():
        # build the lazy set fresh each run, then probe every target rev
        ancestors = repo.changelog.ancestors(head_revs)
        for r in target_revs:
            r in ancestors

    timer(run)
    fm.end()
1058 1058
1059 1059
@command(
    b'perf::delta-find',
    revlogopts + formatteropts,
    b'-c|-m|FILE REV',
)
def perf_delta_find(ui, repo, arg_1, arg_2=None, **opts):
    """benchmark the process of finding a valid delta for a revlog revision

    When a revlog receives a new revision (e.g. from a commit, or from an
    incoming bundle), it searches for a suitable delta-base to produce a delta.
    This perf command measures how much time we spend in this process. It
    operates on an already stored revision.

    See `hg help debug-delta-find` for another related command.
    """
    from mercurial import revlogutils
    import mercurial.revlogutils.deltas as deltautil

    opts = _byteskwargs(opts)
    # one positional argument means "REV" alone; two mean "FILE REV"
    if arg_2 is None:
        file_ = None
        rev = arg_1
    else:
        file_ = arg_1
        rev = arg_2

    repo = repo.unfiltered()

    timer, fm = gettimer(ui, opts)

    rev = int(rev)

    revlog = cmdutil.openrevlog(repo, b'perf::delta-find', file_, opts)

    deltacomputer = deltautil.deltacomputer(revlog)

    # gather everything the delta computer needs about the target revision
    # outside of the timed section
    node = revlog.node(rev)
    p1r, p2r = revlog.parentrevs(rev)
    p1 = revlog.node(p1r)
    p2 = revlog.node(p2r)
    full_text = revlog.revision(rev)
    textlen = len(full_text)
    cachedelta = None
    flags = revlog.flags(rev)

    revinfo = revlogutils.revisioninfo(
        node,
        p1,
        p2,
        [full_text],  # btext
        textlen,
        cachedelta,
        flags,
    )

    # Note: we should probably purge the potential caches (like the full
    # manifest cache) between runs.
    def find_one():
        with revlog._datafp() as fh:
            deltacomputer.finddeltainfo(revinfo, fh, target_rev=rev)

    timer(find_one)
    fm.end()
1123 1123
1124 1124
@command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path"""
    pair = [repo, None]
    timer, fm = gettimer(ui, opts)

    # resolve the destination path, coping with several Mercurial vintages
    try:
        from mercurial.utils.urlutil import get_unique_pull_path_obj

        path = get_unique_pull_path_obj(b'perfdiscovery', ui, path)
    except ImportError:
        try:
            from mercurial.utils.urlutil import get_unique_pull_path

            path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
        except ImportError:
            path = ui.expandpath(path)

    def setup():
        # build a fresh peer per run, outside the timed section
        pair[1] = hg.peer(ui, opts, path)

    def run():
        setdiscovery.findcommonheads(ui, *pair)

    timer(run, setup=setup)
    fm.end()
1151 1151
1152 1152
@command(
    b'perf::bookmarks|perfbookmarks',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    refresh_revlogs = opts[b'clear_revlogs']

    def setup():
        # drop cached state so every run re-parses the bookmarks file
        if refresh_revlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')

    def run():
        repo._bookmarks

    timer(run, setup=setup)
    fm.end()
1177 1177
1178 1178
@command(
    b'perf::bundle',
    [
        (
            b'r',
            b'rev',
            [],
            b'changesets to bundle',
            b'REV',
        ),
        (
            b't',
            b'type',
            b'none',
            b'bundlespec to use (see `hg help bundlespec`)',
            b'TYPE',
        ),
    ]
    + formatteropts,
    b'REVS',
)
def perfbundle(ui, repo, *revs, **opts):
    """benchmark the creation of a bundle from a repository

    For now, this only supports "none" compression.
    """
    # `parsebundlespec` moved to `bundlecaches`; fall back to its historical
    # home in `exchange` for older Mercurial versions
    try:
        from mercurial import bundlecaches

        parsebundlespec = bundlecaches.parsebundlespec
    except ImportError:
        from mercurial import exchange

        parsebundlespec = exchange.parsebundlespec

    from mercurial import discovery
    from mercurial import bundle2

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    cl = repo.changelog
    revs = list(revs)
    revs.extend(opts.get(b'rev', ()))
    revs = scmutil.revrange(repo, revs)
    if not revs:
        # fixed message typo: was b"not revision specified"
        raise error.Abort(b"no revision specified")
    # make it a consistent set (ie: without topological gaps)
    old_len = len(revs)
    revs = list(repo.revs(b"%ld::%ld", revs, revs))
    if old_len != len(revs):
        new_count = len(revs) - old_len
        msg = b"add %d new revisions to make it a consistent set\n"
        ui.write_err(msg % new_count)

    targets = [cl.node(r) for r in repo.revs(b"heads(::%ld)", revs)]
    bases = [cl.node(r) for r in repo.revs(b"heads(::%ld - %ld)", revs, revs)]
    outgoing = discovery.outgoing(repo, bases, targets)

    bundle_spec = opts.get(b'type')

    bundle_spec = parsebundlespec(repo, bundle_spec, strict=False)

    # map the bundle spec to a changegroup version and wire format header
    cgversion = bundle_spec.params.get(b"cg.version")
    if cgversion is None:
        if bundle_spec.version == b'v1':
            cgversion = b'01'
        if bundle_spec.version == b'v2':
            cgversion = b'02'
    if cgversion not in changegroup.supportedoutgoingversions(repo):
        err = b"repository does not support bundle version %s"
        raise error.Abort(err % cgversion)

    if cgversion == b'01':  # bundle1
        bversion = b'HG10' + bundle_spec.wirecompression
        bcompression = None
    elif cgversion in (b'02', b'03'):
        bversion = b'HG20'
        bcompression = bundle_spec.wirecompression
    else:
        err = b'perf::bundle: unexpected changegroup version %s'
        raise error.ProgrammingError(err % cgversion)

    if bcompression is None:
        bcompression = b'UN'

    if bcompression != b'UN':
        err = b'perf::bundle: compression currently unsupported: %s'
        raise error.ProgrammingError(err % bcompression)

    def do_bundle():
        # write to os.devnull: we benchmark bundle generation, not disk I/O
        bundle2.writenewbundle(
            ui,
            repo,
            b'perf::bundle',
            os.devnull,
            bversion,
            outgoing,
            bundle_spec.params,
        )

    timer(do_bundle)
    fm.end()
1282 1282
1283 1283
@command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    # benchmark factory: open the bundle and apply `fn` to it
    def makebench(fn):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    # benchmark factory: read the whole bundle in `size`-byte chunks
    def makereadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    # benchmark factory: raw file reads, bypassing bundle parsing (baseline)
    def makestdioread(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    # benchmark factory: read every bundle2 part in `size`-byte chunks
    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # open the bundle once to detect its format and pick matching benchmarks
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend(
                [
                    (makebench(deltaiter), b'cg1 deltaiter()'),
                    (makebench(iterchunks), b'cg1 getchunks()'),
                    (makereadnbytes(8192), b'cg1 read(8k)'),
                    (makereadnbytes(16384), b'cg1 read(16k)'),
                    (makereadnbytes(32768), b'cg1 read(32k)'),
                    (makereadnbytes(131072), b'cg1 read(128k)'),
                ]
            )
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend(
                [
                    (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                    (makebench(iterparts), b'bundle2 iterparts()'),
                    (
                        makebench(iterpartsseekable),
                        b'bundle2 iterparts() seekable',
                    ),
                    (makebench(seek), b'bundle2 part seek()'),
                    (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                    (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                    (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                    (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
                ]
            )
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1408 1408
1409 1409
@command(
    b'perf::changegroupchangelog|perfchangegroupchangelog',
    formatteropts
    + [
        (b'', b'cgversion', b'02', b'changegroup version'),
        (b'r', b'rev', b'', b'revisions to add to changegroup'),
    ],
)
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def run():
        state, chunks = bundler._generatechangelog(cl, nodes)
        # consuming the generator is the work being measured
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(run)

    fm.end()
1445 1445
1446 1446
@command(b'perf::dirs|perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark rebuilding the dirstate directory map from scratch"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate itself outside of the timed section
    b'a' in dirstate

    def run():
        dirstate.hasdir(b'a')
        try:
            # drop the cached `_dirs` map so the next run recomputes it
            del dirstate._map._dirs
        except AttributeError:
            pass

    timer(run)
    fm.end()
1463 1463
1464 1464
@command(
    b'perf::dirstate|perfdirstate',
    [
        (
            b'',
            b'iteration',
            None,
            b'benchmark a full iteration for the dirstate',
        ),
        (
            b'',
            b'contains',
            None,
            b'benchmark a large amount of `nf in dirstate` calls',
        ),
    ]
    + formatteropts,
)
def perfdirstate(ui, repo, **opts):
    """benchmark the time of various dirstate operations

    By default benchmark the time necessary to load a dirstate from scratch.
    The dirstate is loaded to the point where a "contains" request can be
    answered.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # warm the dirstate before timing anything
    b"a" in repo.dirstate

    if opts[b'iteration'] and opts[b'contains']:
        msg = b'only specify one of --iteration or --contains'
        raise error.Abort(msg)

    if opts[b'iteration']:
        setup = None
        dirstate = repo.dirstate

        def d():
            for f in dirstate:
                pass

    elif opts[b'contains']:
        setup = None
        dirstate = repo.dirstate
        allfiles = list(dirstate)
        # also add file path that will be "missing" from the dirstate
        allfiles.extend([f[::-1] for f in allfiles])

        def d():
            for f in allfiles:
                f in dirstate

    else:
        # default mode: time a from-scratch load, invalidating between runs

        def setup():
            repo.dirstate.invalidate()

        def d():
            b"a" in repo.dirstate

    timer(d, setup=setup)
    fm.end()
1527 1527
1528 1528
@command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark a 'dirstate.hasdir' call from an empty `dirs` cache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # prime the dirstate outside of the timed section
    repo.dirstate.hasdir(b"a")

    def setup():
        # drop the cached directory map so each run recomputes it
        try:
            del repo.dirstate._map._dirs
        except AttributeError:
            pass

    def d():
        repo.dirstate.hasdir(b"a")

    timer(d, setup=setup)
    fm.end()
1547 1547
1548 1548
@command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.filefoldmap.get()` request

    The dirstate filefoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # an initial lookup warms everything except the foldmap itself
    dirstate._map.filefoldmap.get(b'a')

    def reset():
        del dirstate._map.filefoldmap

    def lookup():
        dirstate._map.filefoldmap.get(b'a')

    timer(lookup, setup=reset)
    fm.end()
1568 1568
1569 1569
@command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.dirfoldmap.get()` request

    The dirstate dirfoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # an initial lookup warms everything except the caches we drop below
    dirstate._map.dirfoldmap.get(b'a')

    def reset():
        del dirstate._map.dirfoldmap
        try:
            # also drop the `_dirs` map the dirfoldmap is derived from
            del dirstate._map._dirs
        except AttributeError:
            pass

    def lookup():
        dirstate._map.dirfoldmap.get(b'a')

    timer(lookup, setup=reset)
    fm.end()
1593 1593
1594 1594
@command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark the time it takes to write a dirstate on disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # prime the dirstate outside of the timed section
    b"a" in ds

    def setup():
        # force the dirstate to consider itself modified so write() acts
        ds._dirty = True

    def d():
        ds.write(repo.currenttransaction())

    # writing requires the working-copy lock
    with repo.wlock():
        timer(d, setup=setup)
    fm.end()
1612 1612
1613 1613
def _getmergerevs(repo, opts):
    """parse command argument to return rev involved in merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        fromrev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[fromrev]
    else:
        # no explicit base: use the common ancestor of the two sides
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
1635 1635
1636 1636
@command(
    b'perf::mergecalculate|perfmergecalculate',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecalculate(ui, repo, **opts):
    """benchmark `merge.calculateupdates` between two revisions"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    local_ctx, other_ctx, base_ctx = _getmergerevs(repo, opts)

    def run():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(
            repo,
            local_ctx,
            other_ctx,
            [base_ctx],
            branchmerge=False,
            force=False,
            acceptremote=True,
            followcopies=True,
        )

    timer(run)
    fm.end()
1668 1668
1669 1669
@command(
    b'perf::mergecopies|perfmergecopies',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    local_ctx, other_ctx, base_ctx = _getmergerevs(repo, opts)

    def run():
        copies.mergecopies(repo, local_ctx, other_ctx, base_ctx)

    timer(run)
    fm.end()
1692 1692
1693 1693
@command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ctx_a = scmutil.revsingle(repo, rev1, rev1)
    ctx_b = scmutil.revsingle(repo, rev2, rev2)

    def run():
        copies.pathcopies(ctx_a, ctx_b)

    timer(run)
    fm.end()
1707 1707
1708 1708
@command(
    b'perf::phases|perfphases',
    [
        (b'', b'full', False, b'include file reading time too'),
    ],
    b"",
)
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')
    tip_rev = repo.changelog.tiprev()

    def d():
        phases = _phases
        if full:
            # also drop the on-disk cache object so file reading is included
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        # asking for the phase of tip forces the phase data to be
        # (re)computed after the invalidation above
        phases.phase(repo, tip_rev)

    timer(d)
    fm.end()
1733 1734
1734 1735
@command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import bin
    from mercurial import (
        exchange,
        hg,
        phases,
    )

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            b'default repository not configured!',
            hint=b"see 'hg help config.paths'",
        )
    # newer Mercurial exposes a rich path object; older versions use
    # the `pushloc`/`loc` attributes directly
    if util.safehasattr(path, 'main_path'):
        path = path.get_push_variant()
        dest = path.loc
    else:
        dest = path.pushloc or path.loc
    ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(
            b'listkeys', {b'namespace': b'phases'}
        ).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.statusnoi18n(b'publishing: yes\n')
    else:
        ui.statusnoi18n(b'publishing: no\n')

    # `has_node` replaced direct nodemap containment in newer versions
    has_node = getattr(repo.changelog.index, 'has_node', None)
    if has_node is None:
        has_node = repo.changelog.nodemap.__contains__
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if has_node(node) and int(phase):
            nonpublishroots += 1
    ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
    ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)

    def d():
        phases.remotephasessummary(repo, remotesubset, remotephases)

    timer(d)
    fm.end()
1797 1798
1798 1799
@command(
    b'perf::manifest|perfmanifest',
    [
        (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
        (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ]
    + formatteropts,
    b'REV|NODE',
)
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # REV names a changeset; use the manifest node it links to
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # full hexadecimal manifest node id
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    # older Mercurial exposed the manifest revlog directly
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(
                    b'manifest revision must be integer or full node'
                )

    def d():
        # drop in-memory (and optionally on-disk) caches, then re-read
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()

    timer(d)
    fm.end()
1842 1843
1843 1844
@command(b'perf::changeset|perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading a single changeset from the changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    node = scmutil.revsingle(repo, rev).node()

    def run():
        repo.changelog.read(node)
        # repo.changelog._cache = None

    timer(run)
    fm.end()
1856 1857
1857 1858
@command(b'perf::ignore|perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def reset():
        # invalidate the dirstate and drop its cached ignore matcher
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def load():
        dirstate._ignore

    timer(load, setup=reset, title=b"load")
    fm.end()
1874 1875
1875 1876
@command(
    b'perf::index|perfindex',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'no-lookup', None, b'do not revision lookup post creation'),
    ]
    + formatteropts,
)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:

    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    # resolve the nodes to look up before timing starts
    if opts[b'no_lookup']:
        if opts['rev']:
            raise error.Abort('--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func

    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)

    def d():
        # rebuild the changelog (index creation), then resolve each node
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)

    timer(d, setup=setup)
    fm.end()
1938 1939
1939 1940
@command(
    b'perf::nodemap|perfnodemap',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ]
    + formatteropts,
)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]

    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        cl = makecl(unfi)
        # newer indexes expose `get_rev`; fall back to the nodemap otherwise
        if util.safehasattr(cl.index, 'get_rev'):
            nodeget[0] = cl.index.get_rev
        else:
            nodeget[0] = cl.nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:

        def setup():
            setnodeget()

    else:
        setnodeget()
        d()  # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
2010 2011
2011 2012
@command(b'perf::startup|perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark the startup time of a fresh `hg` process"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def run():
        # spawn the same hg binary that is running this command
        if os.name != 'nt':
            hg_bin = fsencode(sys.argv[0])
            os.system(b"HGRCPATH= %s version -q > /dev/null" % hg_bin)
        else:
            os.environ['HGRCPATH'] = r' '
            os.system("%s version -q > NUL" % sys.argv[0])

    timer(run)
    fm.end()
2028 2029
2029 2030
def _find_stream_generator(version):
    """find the proper generator function for this stream version

    ``version`` is a bytes identifier (``b"v1"``, ``b"v2"``, ``b"v3-exp"`` or
    ``b"latest"``).  Return a callable taking a repository and returning an
    iterable of stream-clone chunks.  Raise ``error.Abort`` when the requested
    version is unknown or unsupported by the running Mercurial.
    """
    import mercurial.streamclone

    available = {}

    # try to fetch a v1 generator
    generatev1 = getattr(mercurial.streamclone, "generatev1", None)
    if generatev1 is not None:

        def generate_v1(repo):
            # generatev1 returns (entry-count, total-bytes, chunk-iterator);
            # only the chunk iterator is relevant here.  (The original code
            # registered the raw generatev1, whose 3-tuple return value is
            # not a chunk stream, and left a dead wrapper calling generatev2.)
            entries, bytes, data = generatev1(repo)
            return data

        available[b'v1'] = generate_v1
    # try to fetch a v2 generator
    generatev2 = getattr(mercurial.streamclone, "generatev2", None)
    if generatev2 is not None:

        def generate_v2(repo):
            entries, bytes, data = generatev2(repo, None, None, True)
            return data

        available[b'v2'] = generate_v2
    # try to fetch a v3 generator
    generatev3 = getattr(mercurial.streamclone, "generatev3", None)
    if generatev3 is not None:

        def generate_v3(repo):
            entries, bytes, data = generatev3(repo, None, None, True)
            return data

        available[b'v3-exp'] = generate_v3

    # resolve the request
    if version == b"latest":
        # latest is the highest non experimental version
        latest_key = max(v for v in available if b'-exp' not in v)
        return available[latest_key]
    elif version in available:
        return available[version]
    else:
        # fix: the message used to read "unkown"
        msg = b"unknown or unavailable version: %s"
        msg %= version
        hint = b"available versions: %s"
        hint %= b', '.join(sorted(available))
        raise error.Abort(msg, hint=hint)
2077 2078
2078 2079
@command(
    b'perf::stream-locked-section',
    [
        (
            b'',
            b'stream-version',
            b'latest',
            b'stream version to use ("v1", "v2", "v3" or "latest", (the default))',
        ),
    ]
    + formatteropts,
)
def perf_stream_clone_scan(ui, repo, stream_version, **opts):
    """benchmark the initial, repo-locked, section of a stream-clone"""

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # hold the produced generator in a list: dropping the reference may
    # trigger cleanup work that must stay out of the measurement
    holder = [None]

    def reset():
        holder[0] = None

    generate = _find_stream_generator(stream_version)

    def run():
        # the lock is held for the duration the initialisation
        holder[0] = generate(repo)

    timer(run, setup=reset, title=b"load")
    fm.end()
2112 2113
2113 2114
@command(
    b'perf::stream-generate',
    [
        (
            b'',
            b'stream-version',
            b'latest',
            b'stream version to us ("v1", "v2" or "latest", (the default))',
        ),
    ]
    + formatteropts,
)
def perf_stream_clone_generate(ui, repo, stream_version, **opts):
    """benchmark the full generation of a stream clone"""

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # unlike perf::stream-locked-section, the whole stream is consumed
    # inside the timed run, so no generator reference needs to survive it

    generate = _find_stream_generator(stream_version)

    def consume_stream():
        # the lock is held for the duration the initialisation
        for _chunk in generate(repo):
            pass

    timer(consume_stream, title=b"generate")
    fm.end()
2144 2145
2145 2146
@command(
    b'perf::stream-consume',
    formatteropts,
)
def perf_stream_clone_consume(ui, repo, filename, **opts):
    """benchmark the full application of a stream clone

    This include the creation of the repository
    """
    # try except to appease check code
    msg = b"mercurial too old, missing necessary module: %s"
    try:
        from mercurial import bundle2
    except ImportError as exc:
        msg %= _bytestr(exc)
        raise error.Abort(msg)
    try:
        from mercurial import exchange
    except ImportError as exc:
        msg %= _bytestr(exc)
        raise error.Abort(msg)
    try:
        from mercurial import hg
    except ImportError as exc:
        msg %= _bytestr(exc)
        raise error.Abort(msg)
    try:
        from mercurial import localrepo
    except ImportError as exc:
        msg %= _bytestr(exc)
        raise error.Abort(msg)

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # deletion of the generator may trigger some cleanup that we do not want to
    # measure
    if not (os.path.isfile(filename) and os.access(filename, os.R_OK)):
        raise error.Abort("not a readable file: %s" % filename)

    # shared slots used to hand the open bundle file and the fresh target
    # directory from the per-run context manager to the timed function
    run_variables = [None, None]

    @contextlib.contextmanager
    def context():
        # a new temporary directory (and a rewound bundle file) per run, so
        # each timed application starts from a clean slate
        with open(filename, mode='rb') as bundle:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tmp_dir = fsencode(tmp_dir)
                run_variables[0] = bundle
                run_variables[1] = tmp_dir
                yield
                run_variables[0] = None
                run_variables[1] = None

    def runone():
        bundle = run_variables[0]
        tmp_dir = run_variables[1]
        # only pass ui when no srcrepo
        localrepo.createrepository(
            repo.ui, tmp_dir, requirements=repo.requirements
        )
        target = hg.repository(repo.ui, tmp_dir)
        gen = exchange.readbundle(target.ui, bundle, bundle.name)
        # stream v1
        if util.safehasattr(gen, 'apply'):
            gen.apply(target)
        else:
            with target.transaction(b"perf::stream-consume") as tr:
                bundle2.applybundle(
                    target,
                    gen,
                    tr,
                    source=b'unbundle',
                    url=filename,
                )

    timer(runone, context=context, title=b"consume")
    fm.end()
2223 2224
2224 2225
@command(b'perf::parents|perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nodes = [repo.changelog.node(i) for i in _xrange(count)]

    def fetch_parents():
        # go through `repo.changelog` on every iteration on purpose: the
        # benchmark is about traversing all object layers
        for node in nodes:
            repo.changelog.parents(node)

    timer(fetch_parents)
    fm.end()
2250 2251
2251 2252
@command(b'perf::ctxfiles|perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark fetching a changeset's file list through the context API"""
    opts = _byteskwargs(opts)
    target_rev = int(x)
    timer, fm = gettimer(ui, opts)

    def read_files():
        len(repo[target_rev].files())

    timer(read_files)
    fm.end()
2263 2264
2264 2265
@command(b'perf::rawfiles|perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark fetching a changeset's file list straight from the changelog"""
    opts = _byteskwargs(opts)
    target_rev = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def read_raw():
        # field 3 of a parsed changelog entry is the list of touched files
        len(cl.read(target_rev)[3])

    timer(read_raw)
    fm.end()
2277 2278
2278 2279
@command(b'perf::lookup|perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision specifier via repo.lookup"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def resolve():
        return len(repo.lookup(rev))

    timer(resolve)
    fm.end()
2285 2286
2286 2287
@command(
    b'perf::linelogedits|perflinelogedits',
    [
        (b'n', b'edits', 10000, b'number of edits'),
        (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
    ],
    norepo=True,
)
def perflinelogedits(ui, **opts):
    """benchmark replaying a series of pseudo-random edits into a linelog"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed: the generated edit sequence is reproducible across runs
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        # the four randint() calls stay in this exact order so the
        # pseudo-random stream is consumed identically every time
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def replay_edits():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(replay_edits)
    fm.end()
2324 2325
2325 2326
@command(b'perf::revrange|perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving revset specs into a revision range"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # hoist the attribute lookup so it is not part of the timed call
    resolve = scmutil.revrange

    def run():
        return len(resolve(repo, specs))

    timer(run)
    fm.end()
2333 2334
2334 2335
@command(b'perf::nodelookup|perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark a node lookup in a changelog whose caches are cleared between runs"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog

    # disable lazy parser in old hg
    mercurial.revlog._prereadsize = 2 ** 24
    node = scmutil.revsingle(repo, rev).node()

    # the revlog constructor changed over time: recent versions take a
    # `radix`, older ones an explicit `indexfile`
    try:
        cl = revlog(getsvfs(repo), radix=b"00changelog")
    except TypeError:
        cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")

    def lookup_once():
        cl.rev(node)
        clearcaches(cl)

    timer(lookup_once)
    fm.end()
2355 2356
2356 2357
@command(
    b'perf::log|perflog',
    [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
)
def perflog(ui, repo, rev=None, **opts):
    """benchmark running `hg log`, capturing its output in a buffer"""
    opts = _byteskwargs(opts)
    revs = [] if rev is None else rev
    timer, fm = gettimer(ui, opts)
    # collect command output into a buffer instead of writing it out
    ui.pushbuffer()

    def run_log():
        commands.log(
            ui, repo, rev=revs, date=b'', user=b'', copies=opts.get(b'rename')
        )

    timer(run_log)
    ui.popbuffer()
    fm.end()
2374 2375
2375 2376
@command(b'perf::moonwalk|perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def walk_backwards():
        for rev in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            # read changelog data (in addition to the index)
            repo[rev].branch()

    timer(walk_backwards)
    fm.end()
2392 2393
2393 2394
@command(
    b'perf::templating|perftemplating',
    [
        (b'r', b'rev', [], b'revisions to run the template on'),
    ]
    + formatteropts,
)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template

    Render every requested revision (default: ``all()``) with the template
    (default: a short, log-like template) and time the whole loop.
    """
    if makelogtemplater is None:
        raise error.Abort(
            b"perftemplating not available with this Mercurial",
            hint=b"use 4.3 or later",
        )

    opts = _byteskwargs(opts)

    nullui = ui.copy()
    # send rendered output to the bit bucket so writing is cheap and does
    # not pollute the benchmark output
    nullui.fout = open(os.devnull, 'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (
        b'{date|shortdate} [{rev}:{node|short}]'
        b' {author|person}: {desc|firstline}\n'
    )
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)

    def format():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    try:
        timer, fm = gettimer(ui, opts)
        timer(format)
        fm.end()
    finally:
        # fix: the devnull file object used to be leaked
        nullui.fout.close()
2436 2437
2437 2438
2438 2439 def _displaystats(ui, opts, entries, data):
2439 2440 # use a second formatter because the data are quite different, not sure
2440 2441 # how it flies with the templater.
2441 2442 fm = ui.formatter(b'perf-stats', opts)
2442 2443 for key, title in entries:
2443 2444 values = data[key]
2444 2445 nbvalues = len(data)
2445 2446 values.sort()
2446 2447 stats = {
2447 2448 'key': key,
2448 2449 'title': title,
2449 2450 'nbitems': len(values),
2450 2451 'min': values[0][0],
2451 2452 '10%': values[(nbvalues * 10) // 100][0],
2452 2453 '25%': values[(nbvalues * 25) // 100][0],
2453 2454 '50%': values[(nbvalues * 50) // 100][0],
2454 2455 '75%': values[(nbvalues * 75) // 100][0],
2455 2456 '80%': values[(nbvalues * 80) // 100][0],
2456 2457 '85%': values[(nbvalues * 85) // 100][0],
2457 2458 '90%': values[(nbvalues * 90) // 100][0],
2458 2459 '95%': values[(nbvalues * 95) // 100][0],
2459 2460 '99%': values[(nbvalues * 99) // 100][0],
2460 2461 'max': values[-1][0],
2461 2462 }
2462 2463 fm.startitem()
2463 2464 fm.data(**stats)
2464 2465 # make node pretty for the human output
2465 2466 fm.plain('### %s (%d items)\n' % (title, len(values)))
2466 2467 lines = [
2467 2468 'min',
2468 2469 '10%',
2469 2470 '25%',
2470 2471 '50%',
2471 2472 '75%',
2472 2473 '80%',
2473 2474 '85%',
2474 2475 '90%',
2475 2476 '95%',
2476 2477 '99%',
2477 2478 'max',
2478 2479 ]
2479 2480 for l in lines:
2480 2481 fm.plain('%s: %s\n' % (l, stats[l]))
2481 2482 fm.end()
2482 2483
2483 2484
@command(
    b'perf::helper-mergecopies|perfhelper-mergecopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelpermergecopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perfmergecopies`

    This command find (base, p1, p2) triplet relevant for copytracing
    benchmarking in the context of a merge. It reports values for some of the
    parameters that impact merge copy tracing time during merge.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision triplets are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # column layout for the human-readable table; timing/rename columns are
    # filtered out below when --timing is not set
    output_template = [
        ("base", "%(base)12s"),
        ("p1", "%(p1.node)12s"),
        ("p2", "%(p2.node)12s"),
        ("p1.nb-revs", "%(p1.nbrevs)12d"),
        ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
        ("p1.renames", "%(p1.renamedfiles)12d"),
        ("p1.time", "%(p1.time)12.3f"),
        ("p2.nb-revs", "%(p2.nbrevs)12d"),
        ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
        ("p2.renames", "%(p2.renamedfiles)12d"),
        ("p2.time", "%(p2.time)12.3f"),
        ("renames", "%(nbrenamedfiles)12d"),
        ("total.time", "%(time)12.3f"),
    ]
    if not dotiming:
        output_template = [
            i
            for i in output_template
            if not ('time' in i[0] or 'renames' in i[0])
        ]
    header_names = [h for (h, v) in output_template]
    output = ' '.join([v for (h, v) in output_template]) + '\n'
    header = ' '.join(['%12s'] * len(header_names)) + '\n'
    fm.plain(header % tuple(header_names))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        # raw samples accumulated per metric, later fed to _displaystats
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['parentnbrenames'] = []
            alldata['totalnbrenames'] = []
            alldata['parenttime'] = []
            alldata['totaltime'] = []

    # only merge commits within the requested revisions are of interest
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1()
        p2 = ctx.p2()
        bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
        for b in bases:
            b = repo[b]
            p1missing = copies._computeforwardmissing(b, p1)
            p2missing = copies._computeforwardmissing(b, p2)
            # NOTE(review): the dict is built with bytes keys but read back
            # below with str keys ('p1.nbrevs', 'time', ...) and expanded
            # via fm.data(**data) — confirm this path still works on py3
            data = {
                b'base': b.hex(),
                b'p1.node': p1.hex(),
                b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
                b'p1.nbmissingfiles': len(p1missing),
                b'p2.node': p2.hex(),
                b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
                b'p2.nbmissingfiles': len(p2missing),
            }
            if dostats:
                if p1missing:
                    alldata['nbrevs'].append(
                        (data['p1.nbrevs'], b.hex(), p1.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p1.nbmissingfiles'], b.hex(), p1.hex())
                    )
                if p2missing:
                    alldata['nbrevs'].append(
                        (data['p2.nbrevs'], b.hex(), p2.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p2.nbmissingfiles'], b.hex(), p2.hex())
                    )
            if dotiming:
                begin = util.timer()
                mergedata = copies.mergecopies(repo, p1, p2, b)
                end = util.timer()
                # not very stable timing since we did only one run
                data['time'] = end - begin
                # mergedata contains five dicts: "copy", "movewithdir",
                # "diverge", "renamedelete" and "dirmove".
                # The first 4 are about renamed file so lets count that.
                renames = len(mergedata[0])
                renames += len(mergedata[1])
                renames += len(mergedata[2])
                renames += len(mergedata[3])
                data['nbrenamedfiles'] = renames
                begin = util.timer()
                p1renames = copies.pathcopies(b, p1)
                end = util.timer()
                data['p1.time'] = end - begin
                begin = util.timer()
                p2renames = copies.pathcopies(b, p2)
                end = util.timer()
                data['p2.time'] = end - begin
                data['p1.renamedfiles'] = len(p1renames)
                data['p2.renamedfiles'] = len(p2renames)

                if dostats:
                    if p1missing:
                        alldata['parentnbrenames'].append(
                            (data['p1.renamedfiles'], b.hex(), p1.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p1.time'], b.hex(), p1.hex())
                        )
                    if p2missing:
                        alldata['parentnbrenames'].append(
                            (data['p2.renamedfiles'], b.hex(), p2.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p2.time'], b.hex(), p2.hex())
                        )
                    if p1missing or p2missing:
                        alldata['totalnbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                b.hex(),
                                p1.hex(),
                                p2.hex(),
                            )
                        )
                        alldata['totaltime'].append(
                            (data['time'], b.hex(), p1.hex(), p2.hex())
                        )
            fm.startitem()
            fm.data(**data)
            # make node pretty for the human output
            out = data.copy()
            out['base'] = fm.hexfunc(b.node())
            out['p1.node'] = fm.hexfunc(p1.node())
            out['p2.node'] = fm.hexfunc(p2.node())
            fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(
                ('parentnbrenames', 'rename from one parent to base')
            )
            entries.append(('totalnbrenames', 'total number of renames'))
            entries.append(('parenttime', 'time for one parent'))
            entries.append(('totaltime', 'time for both parents'))
        _displaystats(ui, opts, entries, alldata)
2665 2666
2666 2667
@command(
    b'perf::helper-pathcopies|perfhelper-pathcopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # table layout: timing adds two extra columns
    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d "
            "%(nbrenamedfiles)12d %(time)18.5f\n"
        )
        header_names = (
            "source",
            "destination",
            "nb-revs",
            "nb-files",
            "nb-renames",
            "time",
        )
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d\n"
        )
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        # raw samples accumulated per metric, later fed to _displaystats
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['nbrenames'] = []
            alldata['time'] = []

    # only merge commits within the requested revisions are of interest
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        # measure each (base, parent) combination independently
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                # NOTE(review): bytes keys here but str-key reads below
                # ('nbrevs', 'time', ...) plus fm.data(**data) — confirm
                # this path still works on py3
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
                    b'nbmissingfiles': len(missing),
                }
                if dostats:
                    alldata['nbrevs'].append(
                        (
                            data['nbrevs'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                    alldata['nbmissingfiles'].append(
                        (
                            data['nbmissingfiles'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                    if dostats:
                        alldata['time'].append(
                            (
                                data['time'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                        alldata['nbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                fm.startitem()
                fm.data(**data)
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
    if dostats:
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('nbrenames', 'renamed files'))
            entries.append(('time', 'time'))
        _displaystats(ui, opts, entries, alldata)
2805 2806
2806 2807
@command(b'perf::cca|perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark constructing a case-collision auditor over the dirstate"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def build_auditor():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(build_auditor)
    fm.end()
2813 2814
2814 2815
@command(b'perf::fncacheload|perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache file from the store"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store

    def load_fncache():
        store.fncache._load()

    timer(load_fncache)
    fm.end()
2826 2827
2827 2828
@command(b'perf::fncachewrite|perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark writing the fncache file

    The cache is flagged dirty before every run so a full rewrite is timed.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    try:
        s.fncache._load()
        tr = repo.transaction(b'perffncachewrite')
        try:
            tr.addbackup(b'fncache')

            def d():
                s.fncache._dirty = True
                s.fncache.write(tr)

            timer(d)
            tr.close()
        finally:
            # fix: the transaction (and below, the lock) used to leak when
            # the benchmark raised; the canonical close/release pattern
            # aborts the transaction if it was not closed
            tr.release()
    finally:
        lock.release()
    fm.end()
2846 2847
2847 2848
@command(b'perf::fncacheencode|perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark encoding every path currently stored in the fncache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    store.fncache._load()

    def encode_all():
        for path in store.fncache.entries:
            store.encode(path)

    timer(encode_all)
    fm.end()
2861 2862
2862 2863
def _bdiffworker(q, blocks, xdiff, ready, done):
    # Worker loop for the threaded variant of `perfbdiff`.
    #
    # Text pairs are pulled from `q` and diffed until a `None` sentinel is
    # seen; the worker then parks on the `ready` condition until the main
    # thread queues more work (or sets `done` to shut everything down).
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            # pick the same diff flavour the non-threaded path would use
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done()  # for the None one
        with ready:
            ready.wait()
2878 2879
2879 2880
def _manifestrevision(repo, mnode):
    """return the raw manifest text for node ``mnode``

    Works across Mercurial versions: use the ``getstorage`` accessor when
    present, otherwise fall back to the older ``_revlog`` attribute.
    """
    manifestlog = repo.manifestlog

    if not util.safehasattr(manifestlog, b'getstorage'):
        store = manifestlog._revlog
    else:
        store = manifestlog.getstorage(b'')

    return store.revision(mnode)
2889 2890
2890 2891
@command(
    b'perf::bdiff|perfbdiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
        (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
        (b'', b'blocks', False, b'test computing diffs into blocks'),
        (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],
    b'-c|-m|FILE REV',
)
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    # with --changelog/--manifest the positional argument is the revision,
    # not a file
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    # all text pairs are gathered up front so only diffing is timed
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:

        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)

    else:
        # threaded variant: start the workers, then drain the initial None
        # sentinels (one per worker) before the measurement begins
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(
                target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
            ).start()
        q.join()

        def d():
            # feed all pairs, terminate each worker's batch with a None
            # sentinel, wake everyone, and wait for completion
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # shut the workers down: flag `done`, feed one last sentinel per
        # worker and wake them so they observe the flag and exit
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
3005 3006
3006 3007
@command(
    b'perf::unbundle',
    formatteropts,
    b'BUNDLE_FILE',
)
def perf_unbundle(ui, repo, fname, **opts):
    """benchmark application of a bundle in a repository.

    This does not include the final transaction processing"""

    from mercurial import exchange
    from mercurial import bundle2
    from mercurial import transaction

    opts = _byteskwargs(opts)

    ### some compatibility hotfix
    #
    # the data attribute is dropped in 63edc384d3b7 a changeset introducing a
    # critical regression that break transaction rollback for files that are
    # de-inlined.
    method = transaction.transaction._addentry
    pre_63edc384d3b7 = "data" in getargspec(method).args
    # the `detailed_exit_code` attribute is introduced in 33c0c25d0b0f
    # a changeset that is a close descendant of 18415fc918a1, the changeset
    # that conclude the fix run for the bug introduced in 63edc384d3b7.
    args = getargspec(error.Abort.__init__).args
    post_18415fc918a1 = "detailed_exit_code" in args

    old_max_inline = None
    try:
        if not (pre_63edc384d3b7 or post_18415fc918a1):
            # disable inlining
            old_max_inline = mercurial.revlog._maxinline
            # large enough to never happen
            mercurial.revlog._maxinline = 2 ** 50

        with repo.lock():
            bundle = [None, None]
            orig_quiet = repo.ui.quiet
            try:
                repo.ui.quiet = True
                with open(fname, mode="rb") as f:

                    def noop_report(*args, **kwargs):
                        pass

                    def setup():
                        # abort any transaction left over from the previous
                        # timed iteration before creating a fresh one
                        gen, tr = bundle
                        if tr is not None:
                            tr.abort()
                        bundle[:] = [None, None]
                        f.seek(0)
                        bundle[0] = exchange.readbundle(ui, f, fname)
                        bundle[1] = repo.transaction(b'perf::unbundle')
                        # silence the transaction
                        bundle[1]._report = noop_report

                    def apply():
                        gen, tr = bundle
                        bundle2.applybundle(
                            repo,
                            gen,
                            tr,
                            source=b'perf::unbundle',
                            url=fname,
                        )

                    timer, fm = gettimer(ui, opts)
                    timer(apply, setup=setup)
                    fm.end()
            finally:
                # restore the original verbosity (this used to be a no-op
                # `==` comparison instead of an assignment)
                repo.ui.quiet = orig_quiet
                gen, tr = bundle
                if tr is not None:
                    tr.abort()
    finally:
        if old_max_inline is not None:
            mercurial.revlog._maxinline = old_max_inline
3086 3087
3087 3088
@command(
    b'perf::unidiff|perfunidiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ],
    b'-c|-m|FILE REV',
)
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    # --alldata implies operating on the changelog
    if opts[b'alldata']:
        opts[b'changelog'] = True

    # with -c/-m the sole positional argument is the revision, not a file
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    # (left, right) fulltext pairs, gathered up-front so that only the
    # diffing itself is timed below
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    # NOTE(review): range() excludes its stop value, so the revlog tip
    # (len(r) - 1) is never benchmarked -- confirm this is intentional
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                # a falsy node (file absent on one side) maps to -1,
                # presumably the null revision -- yields the empty text
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            # default mode: diff each revision against its delta parent
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False
            )
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
3166 3167
3167 3168
@command(b'perf::diffwd|perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # map each whitespace flag to the diff option it enables
    flag_to_option = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    # benchmark the plain diff plus every interesting whitespace variant
    for flags in ('', 'w', 'b', 'B', 'wB'):
        diff_kwargs = {flag_to_option[flag]: b'1' for flag in flags}

        def run():
            # buffer the output so printing does not pollute the timing
            ui.pushbuffer()
            commands.diff(ui, repo, **diff_kwargs)
            ui.popbuffer()

        encoded = flags.encode('ascii')
        if encoded:
            title = b'diffopts: %s' % (b'-' + encoded)
        else:
            title = b'diffopts: none'
        timer(run, title=title)
    fm.end()
3191 3192
3192 3193
@command(
    b'perf::revlogindex|perfrevlogindex',
    revlogopts + formatteropts,
    b'-c|-m|FILE',
)
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    # compat with hg <= 5.8
    radix = getattr(rl, 'radix', None)
    indexfile = getattr(rl, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(rl, 'indexfile')
    # raw index bytes, re-parsed by each benchmark so every run starts cold
    data = opener.read(indexfile)

    # first 4 bytes: flags in the upper 16 bits, version in the lower 16
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        inline = header & (1 << 16)
    else:
        raise error.Abort(b'unsupported revlog version: %d' % version)

    # parse_index_v1 is module-level on modern hg; older versions expose the
    # parser through revlogio().parseindex instead
    parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
    if parse_index_v1 is None:
        parse_index_v1 = mercurial.revlog.revlogio().parseindex

    rllen = len(rl)

    # sample nodes at fixed fractions of the revlog for the lookup benchmarks
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        if radix is not None:
            revlog(opener, radix=radix)
        else:
            # hg <= 5.8
            revlog(opener, indexfile=indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        parse_index_v1(data, inline)

    def getentry(revornode):
        # re-parse so the lookup starts from a fresh index object
        index = parse_index_v1(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = parse_index_v1(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        index = parse_index_v1(data, inline)[0]
        # modern indexes expose rev(); older ones only a nodemap mapping
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        # missing nodes raise; swallow so the miss benchmark measures lookup
        try:
            rev(node)
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        index = parse_index_v1(data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        for i in range(count):
            for node in nodes:
                try:
                    rev(node)
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
        (
            lambda: resolvenodes(allnodesrev, 2),
            b'look up all nodes 2x (reverse)',
        ),
        (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
        (
            lambda: getentries(allrevs, 2),
            b'retrieve all index entries 2x (forward)',
        ),
        (
            lambda: getentries(allrevsrev),
            b'retrieve all index entries (reverse)',
        ),
        (
            lambda: getentries(allrevsrev, 2),
            b'retrieve all index entries 2x (reverse)',
        ),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3338 3339
3339 3340
@command(
    b'perf::revlogrevisions|perfrevlogrevisions',
    revlogopts
    + formatteropts
    + [
        (b'd', b'dist', 100, b'distance between the revisions'),
        (b's', b'startrev', 0, b'revision to start reading at'),
        (b'', b'reverse', False, b'read in reverse'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogrevisions(
    ui, repo, file_=None, startrev=0, reverse=False, **opts
):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # a negative start revision counts back from the end of the revlog
    if startrev < 0:
        startrev += rllen

    def read_revisions():
        rl.clearcaches()

        step = opts[b'dist']
        if reverse:
            # walk the same span, tip-ward end first
            first, last = rllen - 1, startrev - 1
            step = -step
        else:
            first, last = startrev, rllen

        for rev in _xrange(first, last, step):
            # Old revisions don't support passing int.
            rl.revision(rl.node(rev))

    timer, fm = gettimer(ui, opts)
    timer(read_revisions)
    fm.end()
3388 3389
3389 3390
@command(
    b'perf::revlogwrite|perfrevlogwrite',
    revlogopts
    + formatteropts
    + [
        (b's', b'startrev', 1000, b'revision to start writing at'),
        (b'', b'stoprev', -1, b'last revision to write'),
        (b'', b'count', 3, b'number of passes to perform'),
        (b'', b'details', False, b'print timing for every revisions tested'),
        (b'', b'source', b'full', b'the kind of data feed in the revlog'),
        (b'', b'lazydeltabase', True, b'try the provided delta first'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
      (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    Note: This performance command measures performance in a custom way. As a
    result some of the global configuration of the 'perf' command does not
    apply to it:

    * ``pre-run``: disabled

    * ``profile-benchmark``: disabled

    * ``run-limits``: disabled use --count instead
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative revisions count back from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (
        b'full',
        b'parent-1',
        b'parent-2',
        b'parent-smallest',
        b'storage',
    )
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        raise error.Abort('invalid run count: %d' % count)
    # one list of (rev, timing) per pass
    allresults = []
    for c in range(count):
        timing = _timeonewrite(
            ui,
            rl,
            source,
            startrev,
            stoprev,
            c + 1,
            lazydeltabase=lazydeltabase,
            clearcaches=clearcaches,
        )
        allresults.append(timing)

    ### consolidate the results in a single list
    # regroup the per-pass lists as [(rev, [timing-per-pass, ...]), ...]
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", True)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # this row used to compute the 70th percentile despite its label
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append(
            (
                sum(x[1][0] for x in item),
                sum(x[1][1] for x in item),
                sum(x[1][2] for x in item),
            )
        )
    formatone(
        fm,
        totaltime,
        title="total time (%d revs)" % resultcount,
        displayall=displayall,
    )
    fm.end()
3531 3532
3532 3533
class _faketr:
    """Transaction stand-in that accepts journal entries and discards them.

    Lets the write benchmarks call addrawrevision() without touching a
    real journal.
    """

    def add(self, x, y, z=None):
        # deliberately a no-op
        return None
3536 3537
3537 3538
def _timeonewrite(
    ui,
    orig,
    source,
    startrev,
    stoprev,
    runidx=None,
    lazydeltabase=True,
    clearcaches=True,
):
    """Re-insert revisions [startrev, stoprev] of `orig` into a temporary
    copy, timing each insertion.

    Returns a list of (rev, timing) pairs, one per inserted revision, where
    `timing` is the sample produced by timeone(). `source` selects how the
    revision payload is built (see perfrevlogwrite). `runidx`, when given,
    only labels the progress bar.
    """
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        # the lazy-delta-base switch moved into delta_config on modern hg
        if hasattr(dest, "delta_config"):
            dest.delta_config.lazy_delta_base = lazydeltabase
        else:
            dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)

            def updateprogress(pos):
                progress.update(pos)

            def completeprogress():
                progress.complete()

        else:

            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)

            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            # build the payload outside the timed section
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                # also drop index-level caches so each insertion starts cold
                dest.index.clearcaches()
                dest.clearcaches()
            # only the actual insertion is timed
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
3590 3591
3591 3592
def _getrevisionseed(orig, rev, tr, source):
    """Build the (args, kwargs) pair for addrawrevision() reproducing `rev`.

    `source` selects how the payload is expressed: a full text (b'full'),
    a delta against one of the parents (b'parent-1', b'parent-2',
    b'parent-smallest'), or the delta already stored (b'storage').
    """
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        cachedelta = (orig.rev(p1), orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to the first parent when there is no second one
        base = p1 if p2 == nullid else p2
        cachedelta = (orig.rev(base), orig.revdiff(base, rev))
    elif source == b'parent-smallest':
        candidates = [(orig.revdiff(p1, rev), p1)]
        if p2 != nullid:
            candidates.append((orig.revdiff(p2, rev), p2))
        # shortest delta wins; ties keep p1 since it is listed first
        delta, base = min(candidates, key=lambda c: len(c[0]))
        cachedelta = (orig.rev(base), delta)
    elif source == b'storage':
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return (
        (text, tr, linkrev, p1, p2),
        {'node': node, 'flags': flags, 'cachedelta': cachedelta},
    )
3632 3633
3633 3634
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Yield a writable copy of revlog `orig`, truncated to `truncaterev`,
    backed by files in a temporary directory.

    The temporary directory is removed on exit. Inline revlogs are not
    supported.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    k = 'upperboundcomp'
    # forward upperboundcomp when the source revlog version carries one
    if util.safehasattr(orig, k):
        revlogkwargs[k] = getattr(orig, k)

    indexfile = getattr(orig, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(orig, 'indexfile')
    origindexpath = orig.opener.join(indexfile)

    # NOTE(review): the fallback getattr is evaluated eagerly, so this raises
    # if `orig` has `_indexfile`/`_datafile` but no legacy `datafile`
    # attribute -- confirm both coexist on the supported versions
    datafile = getattr(orig, '_datafile', getattr(orig, 'datafile'))
    origdatapath = orig.opener.join(datafile)
    radix = b'revlog'
    indexname = b'revlog.i'
    dataname = b'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        try:
            # modern hg: revlog files are addressed by radix
            dest = revlog(vfs, radix=radix, **revlogkwargs)
        except TypeError:
            # older hg: explicit index/data file names
            dest = revlog(
                vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
            )
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
3694 3695
3695 3696
@command(
    b'perf::revlogchunks|perfrevlogchunks',
    revlogopts
    + formatteropts
    + [
        (b'e', b'engines', b'', b'compression engines to use'),
        (b's', b'startrev', 0, b'revision to start at'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # - _chunkraw was renamed to _getsegmentforrevs
    # - _getsegmentforrevs was moved on the inner object
    try:
        segmentforrevs = rl._inner.get_segment_for_revs
    except AttributeError:
        try:
            segmentforrevs = rl._getsegmentforrevs
        except AttributeError:
            segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = {e.strip() for e in engines.split(b',')}
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # no explicit engines: keep every available engine that can
        # actually compress revlog data
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    # yields an open file handle on old hg (for the df= argument below) or
    # None on versions exposing the reading() context manager
    @contextlib.contextmanager
    def reading(rl):
        if getattr(rl, 'reading', None) is not None:
            with rl.reading():
                yield None
        elif rl._inline:
            indexfile = getattr(rl, '_indexfile', None)
            if indexfile is None:
                # compatibility with <= hg-5.8
                indexfile = getattr(rl, 'indexfile')
            yield getsvfs(repo)(indexfile)
        else:
            # NOTE(review): both lookups target the same 'datafile'
            # attribute; presumably the first was meant to be '_datafile'
            # -- confirm against the version matrix
            datafile = getattr(rl, 'datafile', getattr(rl, 'datafile'))
            yield getsvfs(repo)(datafile)

    if getattr(rl, 'reading', None) is not None:

        @contextlib.contextmanager
        def lazy_reading(rl):
            with rl.reading():
                yield

    else:

        # old hg has no reading() context; nothing to enter
        @contextlib.contextmanager
        def lazy_reading(rl):
            yield

    def doread():
        rl.clearcaches()
        for rev in revs:
            with lazy_reading(rl):
                segmentforrevs(rev, rev)

    def doreadcachedfh():
        rl.clearcaches()
        with reading(rl) as fh:
            if fh is not None:
                for rev in revs:
                    segmentforrevs(rev, rev, df=fh)
            else:
                for rev in revs:
                    segmentforrevs(rev, rev)

    def doreadbatch():
        rl.clearcaches()
        with lazy_reading(rl):
            segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        with reading(rl) as fh:
            if fh is not None:
                segmentforrevs(revs[0], revs[-1], df=fh)
            else:
                segmentforrevs(revs[0], revs[-1])

    def dochunk():
        rl.clearcaches()
        # chunk used to be available directly on the revlog
        _chunk = getattr(rl, '_inner', rl)._chunk
        with reading(rl) as fh:
            if fh is not None:
                for rev in revs:
                    _chunk(rev, df=fh)
            else:
                for rev in revs:
                    _chunk(rev)

    # holder for the decompressed chunks fed to the compression benchmarks
    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        _chunks = getattr(rl, '_inner', rl)._chunks
        with reading(rl) as fh:
            if fh is not None:
                # Save chunks as a side-effect.
                chunks[0] = _chunks(revs, df=fh)
            else:
                # Save chunks as a side-effect.
                chunks[0] = _chunks(revs)

    def docompress(compressor):
        rl.clearcaches()

        compressor_holder = getattr(rl, '_inner', rl)

        try:
            # Swap in the requested compression engine.
            oldcompressor = compressor_holder._compressor
            compressor_holder._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            compressor_holder._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    # the compression benchmarks rely on chunks[0] being populated by the
    # earlier 'chunk batch' run
    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append(
            (
                functools.partial(docompress, compressor),
                b'compress w/ %s' % engine,
            )
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3871 3872
3872 3873
@command(
    b'perf::revlogrevision|perfrevlogrevision',
    revlogopts
    + formatteropts
    + [(b'', b'cache', False, b'use caches instead of clearing')],
    b'-c|-m|FILE REV',
)
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    # with -c/-m the positional FILE slot actually holds the revision
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._inner.get_segment_for_revs
    except AttributeError:
        try:
            segmentforrevs = r._getsegmentforrevs
        except AttributeError:
            segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    # newer revlogs expose an explicit reading context; fall back to a
    # no-op context manager on older versions
    if getattr(r, 'reading', None) is not None:

        @contextlib.contextmanager
        def lazy_reading(r):
            with r.reading():
                yield

    else:

        @contextlib.contextmanager
        def lazy_reading(r):
            yield

    def getrawchunks(data, chain):
        """Slice the raw segment data back into one chunk per revision."""
        start = r.start
        length = r.length
        inline = r._inline
        # the index entry size attribute moved between versions
        try:
            iosize = r.index.entry_size
        except AttributeError:
            iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with the data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    # the do* closures below each benchmark a single phase; unless --cache
    # is passed, caches are cleared first so each run starts cold

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            with lazy_reading(r):
                segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    # slicechunk moved modules across versions
    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    size = r.length(rev)
    chain = r._deltachain(rev)[0]

    with_sparse_read = False
    if hasattr(r, 'data_config'):
        with_sparse_read = r.data_config.with_sparse_read
    elif hasattr(r, '_withsparseread'):
        with_sparse_read = r._withsparseread
    if with_sparse_read:
        # with sparse-read, slicing is benchmarked separately below
        # ('slice-sparse-chain'), so keep the chain whole here
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    # pre-compute the intermediate results each phase benchmark needs as
    # its input, so every bench measures only its own phase
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._inner._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if with_sparse_read:
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend(
        [
            (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
            (lambda: dodecompress(rawchunks), b'decompress'),
            (lambda: dopatch(text, bins), b'patch'),
            (lambda: dohash(text), b'hash'),
        ]
    )

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
4040 4041
4041 4042
@command(
    b'perf::revset|perfrevset',
    [
        (b'C', b'clear', False, b'clear volatile cache between each call.'),
        (b'', b'contexts', False, b'obtain changectx for each revision'),
    ]
    + formatteropts,
    b"REVSET",
)
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of building
    the volatile revision set caches on revset execution. The volatile
    caches hold filtering and obsolescence related data."""
    # NOTE: the docstring previously referenced a nonexistent --clean
    # option; the actual flag is -C/--clear.
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def d():
        if clear:
            # drop filtering/obsolescence caches so they are rebuilt
            repo.invalidatevolatilesets()
        if contexts:
            # also pay the cost of building a changectx per revision
            for ctx in repo.set(expr):
                pass
        else:
            for r in repo.revs(expr):
                pass

    timer(d)
    fm.end()
4073 4074
4074 4075
@command(
    b'perf::volatilesets|perfvolatilesets',
    [
        (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
    ]
    + formatteropts,
)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def make_obs_bench(setname):
        """Build a benchmark callable for one obsolescence set."""

        def run():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            obsolete.getrevs(repo, setname)

        return run

    obs_names = sorted(obsolete.cachefuncs)
    if names:
        obs_names = [n for n in obs_names if n in names]

    for setname in obs_names:
        timer(make_obs_bench(setname), title=setname)

    def make_filter_bench(filtername):
        """Build a benchmark callable for one repoview filter."""

        def run():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            repoview.filterrevs(repo, filtername)

        return run

    filter_names = sorted(repoview.filtertable)
    if names:
        filter_names = [n for n in filter_names if n in names]

    for filtername in filter_names:
        timer(make_filter_bench(filtername), title=filtername)
    fm.end()
4122 4123
4123 4124
@command(
    b'perf::branchmap|perfbranchmap',
    [
        (b'f', b'full', False, b'Includes build time of subset'),
        (
            b'',
            b'clear-revbranch',
            False,
            b'purge the revbranch cache between computation',
        ),
    ]
    + formatteropts,
)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)

    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        # newer versions keep one cache per filter level in _per_filter
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches

        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # drop every cached branchmap so the run rebuilds everything
                view._branchcaches.clear()
            else:
                # only drop this filter level; subset caches stay warm
                filtered.pop(filtername, None)
            view.branchmap()

        return d

    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    # topologically order filters so each one's subset is benchmarked first
    while possiblefilters:
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable on-disk reading and writing of the branchmap so only the
    # in-memory computation is timed
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=printname)
    finally:
        # always restore the patched read/write methods
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
4213 4214
4214 4215
@command(
    b'perf::branchmapupdate|perfbranchmapupdate',
    [
        (b'', b'base', [], b'subset of revision to start from'),
        (b'', b'target', [], b'subset of revision to end with'),
        (b'', b'clear-caches', False, b'clear cache between each runs'),
    ]
    + formatteropts,
)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from for <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    # update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None]  # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    # the revisions the benchmarked update will have to process
    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    # repoview filter functions hiding everything outside base/target
    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        # register temporary repoview filters matching the two subsets
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                # bring the reused branchmap up to the base state
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset were found, build one from scratch
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            # give each run a fresh copy of the base branchmap
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        # always unregister the temporary filters
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
4323 4324
4324 4325
@command(
    b'perf::branchmapload|perfbranchmapload',
    [
        (b'f', b'filter', b'', b'Specify repoview filter'),
        (b'', b'list', False, b'List branchmap filter caches'),
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ]
    + formatteropts,
)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
    # NOTE: fixed a typo in the --list help string ("brachmap").
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    # --list only reports existing on-disk branchmap caches; no timing
    if list:
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(
                    b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
                )
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap()  # make sure we have a relevant, up to date branchmap

    # branchmap.read was renamed to branchcache.fromfile
    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached
    while fromfile(repo) is None:
        # walk up the subset chain until a cached branchmap is found
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(
                b'No branchmap cached for %s repo' % (filter or b'unfiltered')
            )
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)

    def setup():
        if clearrevlogs:
            clearchangelog(repo)

    def bench():
        fromfile(repo)

    timer(bench, setup=setup)
    fm.end()
4383 4384
4384 4385
@command(b'perf::loadmarkers|perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    store_vfs = getsvfs(repo)

    def load():
        # constructing the obsstore parses the on-disk markers; len()
        # reports how many were found
        return len(obsolete.obsstore(repo, store_vfs))

    timer(load)
    fm.end()
4394 4395
4395 4396
@command(
    b'perf::lrucachedict|perflrucachedict',
    formatteropts
    + [
        (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
        (b'', b'mincost', 0, b'smallest cost of items in cache'),
        (b'', b'maxcost', 100, b'maximum cost of items in cache'),
        (b'', b'size', 4, b'size of cache'),
        (b'', b'gets', 10000, b'number of key lookups'),
        (b'', b'sets', 10000, b'number of key sets'),
        (b'', b'mixed', 10000, b'number of mixed mode operations'),
        (
            b'',
            b'mixedgetfreq',
            50,
            b'frequency of get vs set ops in mixed mode',
        ),
    ],
    norepo=True,
)
def perflrucache(
    ui,
    mincost=0,
    maxcost=100,
    costlimit=0,
    size=4,
    gets=10000,
    sets=10000,
    mixed=10000,
    mixedgetfreq=50,
    **opts
):
    # Benchmark util.lrucachedict in several access patterns.
    # NOTE: intentionally no docstring -- the "(no help text available)"
    # line in `hg help -e perf` output is asserted by the test suite.
    opts = _byteskwargs(opts)

    def doinit():
        # construction cost of an empty cache
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value  # silence pyflakes warning

    def dogetscost():
        # `costs` is populated below, before this closure is ever called
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value  # silence pyflakes warning
            except KeyError:
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0  # get
        else:
            op = 1  # set

        mixedops.append(
            (op, random.randint(0, size * 2), random.choice(costrange))
        )

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    # cost-limited variants are only meaningful when a limit is set
    if costlimit:
        benches.extend(
            [
                (dogetscost, b'gets w/ cost limit'),
                (doinsertscost, b'inserts w/ cost limit'),
                (domixedcost, b'mixed w/ cost limit'),
            ]
        )
    else:
        benches.extend(
            [
                (dogets, b'gets'),
                (doinserts, b'inserts'),
                (dosets, b'sets'),
                (domixed, b'mixed'),
            ]
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
4550 4551
4551 4552
@command(
    b'perf::write|perfwrite',
    formatteropts
    + [
        (b'', b'write-method', b'write', b'ui write method'),
        (b'', b'nlines', 100, b'number of lines'),
        (b'', b'nitems', 100, b'number of items (per line)'),
        (b'', b'item', b'x', b'item that is written'),
        (b'', b'batch-line', None, b'pass whole line to write method at once'),
        (b'', b'flush-line', None, b'flush after each line'),
    ],
)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write (and others)

    Writes nlines lines of nitems items each, either item by item or as a
    pre-built line with --batch-line, optionally flushing per line.
    """
    opts = _byteskwargs(opts)

    # resolve the ui method to benchmark (write, status, ...)
    write = getattr(ui, _sysstr(opts[b'write_method']))
    nlines = int(opts[b'nlines'])
    nitems = int(opts[b'nitems'])
    item = opts[b'item']
    batch_line = opts.get(b'batch_line')
    flush_line = opts.get(b'flush_line')

    if batch_line:
        # build the full line once, outside the timed section
        line = item * nitems + b'\n'

    def benchmark():
        # use distinct `_` names: the original inner loop shadowed the
        # outer loop variable `i` (both were unused)
        for _ in pycompat.xrange(nlines):
            if batch_line:
                write(line)
            else:
                for _ in pycompat.xrange(nitems):
                    write(item)
                write(b'\n')
            if flush_line:
                ui.flush()
        ui.flush()

    timer, fm = gettimer(ui, opts)
    timer(benchmark)
    fm.end()
4593 4594
4594 4595
def uisetup(ui):
    """Extension load hook: patch older Mercurial for compatibility."""
    if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
        commands, b'debugrevlogopts'
    ):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            # fail cleanly instead of crashing deep inside openrevlog()
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(
                    b"This version doesn't support --dir option",
                    hint=b"use 3.5 or later",
                )
            return orig(repo, cmd, file_, opts)

        name = _sysstr(b'openrevlog')
        extensions.wrapfunction(cmdutil, name, openrevlog)
4614 4615
4615 4616
@command(
    b'perf::progress|perfprogress',
    formatteropts
    + [
        (b'', b'topic', b'topic', b'topic for progress messages'),
        (b'c', b'total', 1000000, b'total value we are progressing to'),
    ],
    norepo=True,
)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def bench():
        # drive the progress bar from 0 to `total`, one tick at a time
        with ui.makeprogress(topic, total=total) as progress:
            for _ in _xrange(total):
                progress.increment()

    timer(bench)
    fm.end()
@@ -1,483 +1,484 b''
1 1 #require test-repo
2 2
3 3 Set vars:
4 4
5 5 $ . "$TESTDIR/helpers-testrepo.sh"
6 6 $ CONTRIBDIR="$TESTDIR/../contrib"
7 7
8 8 Prepare repo:
9 9
10 10 $ hg init
11 11
12 12 $ echo this is file a > a
13 13 $ hg add a
14 14 $ hg commit -m first
15 15
16 16 $ echo adding to file a >> a
17 17 $ hg commit -m second
18 18
19 19 $ echo adding more to file a >> a
20 20 $ hg commit -m third
21 21
22 22 $ hg up -r 0
23 23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 24 $ echo merge-this >> a
25 25 $ hg commit -m merge-able
26 26 created new head
27 27
28 28 $ hg up -r 2
29 29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
30 30
31 31 perfstatus
32 32
33 33 $ cat >> $HGRCPATH << EOF
34 34 > [extensions]
35 35 > perf=$CONTRIBDIR/perf.py
36 36 > [perf]
37 37 > presleep=0
38 38 > stub=on
39 39 > parentscount=1
40 40 > EOF
41 41 $ hg help -e perf
42 42 perf extension - helper extension to measure performance
43 43
44 44 Configurations
45 45 ==============
46 46
47 47 "perf"
48 48 ------
49 49
50 50 "all-timing"
51 51 When set, additional statistics will be reported for each benchmark: best,
52 52 worst, median average. If not set only the best timing is reported
53 53 (default: off).
54 54
55 55 "presleep"
56 56 number of second to wait before any group of runs (default: 1)
57 57
58 58 "pre-run"
59 59 number of run to perform before starting measurement.
60 60
61 61 "profile-benchmark"
62 62 Enable profiling for the benchmarked section. (The first iteration is
63 63 benchmarked)
64 64
65 65 "run-limits"
66 66 Control the number of runs each benchmark will perform. The option value
67 67 should be a list of '<time>-<numberofrun>' pairs. After each run the
68 68 conditions are considered in order with the following logic:
69 69
70 70 If benchmark has been running for <time> seconds, and we have performed
71 71 <numberofrun> iterations, stop the benchmark,
72 72
73 73 The default value is: '3.0-100, 10.0-3'
74 74
75 75 "stub"
76 76 When set, benchmarks will only be run once, useful for testing (default:
77 77 off)
78 78
79 79 list of commands:
80 80
81 81 perf::addremove
82 82 (no help text available)
83 83 perf::ancestors
84 84 (no help text available)
85 85 perf::ancestorset
86 86 (no help text available)
87 87 perf::annotate
88 88 (no help text available)
89 89 perf::bdiff benchmark a bdiff between revisions
90 90 perf::bookmarks
91 91 benchmark parsing bookmarks from disk to memory
92 92 perf::branchmap
93 93 benchmark the update of a branchmap
94 94 perf::branchmapload
95 95 benchmark reading the branchmap
96 96 perf::branchmapupdate
97 97 benchmark branchmap update from for <base> revs to <target>
98 98 revs
99 99 perf::bundle benchmark the creation of a bundle from a repository
100 100 perf::bundleread
101 101 Benchmark reading of bundle files.
102 102 perf::cca (no help text available)
103 103 perf::changegroupchangelog
104 104 Benchmark producing a changelog group for a changegroup.
105 105 perf::changeset
106 106 (no help text available)
107 107 perf::ctxfiles
108 108 (no help text available)
109 109 perf::delta-find
110 110 benchmark the process of finding a valid delta for a revlog
111 111 revision
112 112 perf::diffwd Profile diff of working directory changes
113 113 perf::dirfoldmap
114 114 benchmap a 'dirstate._map.dirfoldmap.get()' request
115 115 perf::dirs (no help text available)
116 116 perf::dirstate
117 117 benchmap the time of various distate operations
118 118 perf::dirstatedirs
119 119 benchmap a 'dirstate.hasdir' call from an empty 'dirs' cache
120 120 perf::dirstatefoldmap
121 121 benchmap a 'dirstate._map.filefoldmap.get()' request
122 122 perf::dirstatewrite
123 123 benchmap the time it take to write a dirstate on disk
124 124 perf::discovery
125 125 benchmark discovery between local repo and the peer at given
126 126 path
127 127 perf::fncacheencode
128 128 (no help text available)
129 129 perf::fncacheload
130 130 (no help text available)
131 131 perf::fncachewrite
132 132 (no help text available)
133 133 perf::heads benchmark the computation of a changelog heads
134 134 perf::helper-mergecopies
135 135 find statistics about potential parameters for
136 136 'perfmergecopies'
137 137 perf::helper-pathcopies
138 138 find statistic about potential parameters for the
139 139 'perftracecopies'
140 140 perf::ignore benchmark operation related to computing ignore
141 141 perf::index benchmark index creation time followed by a lookup
142 142 perf::linelogedits
143 143 (no help text available)
144 144 perf::loadmarkers
145 145 benchmark the time to parse the on-disk markers for a repo
146 146 perf::log (no help text available)
147 147 perf::lookup (no help text available)
148 148 perf::lrucachedict
149 149 (no help text available)
150 150 perf::manifest
151 151 benchmark the time to read a manifest from disk and return a
152 152 usable
153 153 perf::mergecalculate
154 154 (no help text available)
155 155 perf::mergecopies
156 156 measure runtime of 'copies.mergecopies'
157 157 perf::moonwalk
158 158 benchmark walking the changelog backwards
159 159 perf::nodelookup
160 160 (no help text available)
161 161 perf::nodemap
162 162 benchmark the time necessary to look up revision from a cold
163 163 nodemap
164 164 perf::parents
165 165 benchmark the time necessary to fetch one changeset's parents.
166 166 perf::pathcopies
167 167 benchmark the copy tracing logic
168 168 perf::phases benchmark phasesets computation
169 169 perf::phasesremote
170 170 benchmark time needed to analyse phases of the remote server
171 171 perf::progress
172 172 printing of progress bars
173 173 perf::rawfiles
174 174 (no help text available)
175 175 perf::revlogchunks
176 176 Benchmark operations on revlog chunks.
177 177 perf::revlogindex
178 178 Benchmark operations against a revlog index.
179 179 perf::revlogrevision
180 180 Benchmark obtaining a revlog revision.
181 181 perf::revlogrevisions
182 182 Benchmark reading a series of revisions from a revlog.
183 183 perf::revlogwrite
184 184 Benchmark writing a series of revisions to a revlog.
185 185 perf::revrange
186 186 (no help text available)
187 187 perf::revset benchmark the execution time of a revset
188 188 perf::startup
189 189 (no help text available)
190 190 perf::status benchmark the performance of a single status call
191 191 perf::stream-consume
192 192 benchmark the full application of a stream clone
193 193 perf::stream-generate
194 194 benchmark the full generation of a stream clone
195 195 perf::stream-locked-section
196 196 benchmark the initial, repo-locked, section of a stream-clone
197 197 perf::tags Benchmark tags retrieval in various situation
198 198 perf::templating
199 199 test the rendering time of a given template
200 200 perf::unbundle
201 201 benchmark application of a bundle in a repository.
202 202 perf::unidiff
203 203 benchmark a unified diff between revisions
204 204 perf::volatilesets
205 205 benchmark the computation of various volatile set
206 206 perf::walk (no help text available)
207 207 perf::write microbenchmark ui.write (and others)
208 208
209 209 (use 'hg help -v perf' to show built-in aliases and global options)
210 210
211 211 $ hg help perfaddremove
212 212 hg perf::addremove
213 213
214 214 aliases: perfaddremove
215 215
216 216 (no help text available)
217 217
218 218 options:
219 219
220 220 -T --template TEMPLATE display with template
221 221
222 222 (some details hidden, use --verbose to show complete help)
223 223
224 224 $ hg perfaddremove
225 225 $ hg perfancestors
226 226 $ hg perfancestorset 2
227 227 $ hg perfannotate a
228 228 $ hg perfbdiff -c 1
229 229 $ hg perfbdiff --alldata 1
230 230 $ hg perfunidiff -c 1
231 231 $ hg perfunidiff --alldata 1
232 232 $ hg perfbookmarks
233 233 $ hg perfbranchmap
234 234 $ hg perfbranchmapload
235 235 $ hg perfbranchmapupdate --base "not tip" --target "tip"
236 236 benchmark of branchmap with 3 revisions with 1 new ones
237 237 $ hg perfcca
238 238 $ hg perfchangegroupchangelog
239 239 $ hg perfchangegroupchangelog --cgversion 01
240 240 $ hg perfchangeset 2
241 241 $ hg perfctxfiles 2
242 242 $ hg perfdiffwd
243 243 $ hg perfdirfoldmap
244 244 $ hg perfdirs
245 245 $ hg perfdirstate
246 246 $ hg perfdirstate --contains
247 247 $ hg perfdirstate --iteration
248 248 $ hg perfdirstatedirs
249 249 $ hg perfdirstatefoldmap
250 250 $ hg perfdirstatewrite
251 251 #if repofncache
252 252 $ hg perffncacheencode
253 253 $ hg perffncacheload
254 254 $ hg debugrebuildfncache
255 255 fncache already up to date
256 256 $ hg perffncachewrite
257 257 $ hg debugrebuildfncache
258 258 fncache already up to date
259 259 #endif
260 260 $ hg perfheads
261 261 $ hg perfignore
262 262 $ hg perfindex
263 263 $ hg perflinelogedits -n 1
264 264 $ hg perfloadmarkers
265 265 $ hg perflog
266 266 $ hg perflookup 2
267 267 $ hg perflrucache
268 268 $ hg perfmanifest 2
269 269 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
270 270 $ hg perfmanifest -m 44fe2c8352bb
271 271 abort: manifest revision must be integer or full node
272 272 [255]
273 273 $ hg perfmergecalculate -r 3
274 274 $ hg perfmoonwalk
275 275 $ hg perfnodelookup 2
276 276 $ hg perfpathcopies 1 2
277 277 $ hg perfprogress --total 1000
278 278 $ hg perfrawfiles 2
279 279 $ hg perfrevlogindex -c
280 280 #if reporevlogstore
281 281 $ hg perfrevlogrevisions .hg/store/data/a.i
282 282 #endif
283 283 $ hg perfrevlogrevision -m 0
284 284 $ hg perfrevlogchunks -c
285 285 $ hg perfrevrange
286 286 $ hg perfrevset 'all()'
287 287 $ hg perfstartup
288 288 $ hg perfstatus
289 289 $ hg perfstatus --dirstate
290 290 $ hg perftags
291 291 $ hg perftemplating
292 292 $ hg perfvolatilesets
293 293 $ hg perfwalk
294 294 $ hg perfparents
295 295 $ hg perfdiscovery -q .
296 $ hg perf::phases
296 297
297 298 Test run control
298 299 ----------------
299 300
300 301 Simple single entry
301 302
302 303 $ hg perfparents --config perf.stub=no --config perf.run-limits='0.000000001-15'
303 304 ! wall * comb * user * sys * (best of 15) (glob)
304 305 ! wall * comb * user * sys * (max of 15) (glob)
305 306 ! wall * comb * user * sys * (avg of 15) (glob)
306 307 ! wall * comb * user * sys * (median of 15) (glob)
307 308
308 309 Multiple entries
309 310
310 311 $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-50'
311 312 ! wall * comb * user * sys * (best of 50) (glob)
312 313 ! wall * comb * user * sys * (max of 50) (glob)
313 314 ! wall * comb * user * sys * (avg of 50) (glob)
314 315 ! wall * comb * user * sys * (median of 50) (glob)
315 316
316 317 error case are ignored
317 318
318 319 $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-50'
319 320 malformatted run limit entry, missing "-": 500
320 321 ! wall * comb * user * sys * (best of 50) (glob)
321 322 ! wall * comb * user * sys * (max of 50) (glob)
322 323 ! wall * comb * user * sys * (avg of 50) (glob)
323 324 ! wall * comb * user * sys * (median of 50) (glob)
324 325 $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-120, 0.000000001-50'
325 326 malformatted run limit entry, could not convert string to float: 'aaa': aaa-120
326 327 ! wall * comb * user * sys * (best of 50) (glob)
327 328 ! wall * comb * user * sys * (max of 50) (glob)
328 329 ! wall * comb * user * sys * (avg of 50) (glob)
329 330 ! wall * comb * user * sys * (median of 50) (glob)
330 331 $ hg perfparents --config perf.stub=no --config perf.run-limits='120-aaaaaa, 0.000000001-50'
331 332 malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 120-aaaaaa
332 333 ! wall * comb * user * sys * (best of 50) (glob)
333 334 ! wall * comb * user * sys * (max of 50) (glob)
334 335 ! wall * comb * user * sys * (avg of 50) (glob)
335 336 ! wall * comb * user * sys * (median of 50) (glob)
336 337
337 338 test actual output
338 339 ------------------
339 340
340 341 normal output:
341 342
342 343 $ hg perfheads --config perf.stub=no
343 344 ! wall * comb * user * sys * (best of *) (glob)
344 345 ! wall * comb * user * sys * (max of *) (glob)
345 346 ! wall * comb * user * sys * (avg of *) (glob)
346 347 ! wall * comb * user * sys * (median of *) (glob)
347 348
348 349 detailed output:
349 350
350 351 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
351 352 ! wall * comb * user * sys * (best of *) (glob)
352 353 ! wall * comb * user * sys * (max of *) (glob)
353 354 ! wall * comb * user * sys * (avg of *) (glob)
354 355 ! wall * comb * user * sys * (median of *) (glob)
355 356
356 357 test json output
357 358 ----------------
358 359
359 360 normal output:
360 361
361 362 $ hg perfheads --template json --config perf.stub=no
362 363 [
363 364 {
364 365 "avg.comb": *, (glob)
365 366 "avg.count": *, (glob)
366 367 "avg.sys": *, (glob)
367 368 "avg.user": *, (glob)
368 369 "avg.wall": *, (glob)
369 370 "comb": *, (glob)
370 371 "count": *, (glob)
371 372 "max.comb": *, (glob)
372 373 "max.count": *, (glob)
373 374 "max.sys": *, (glob)
374 375 "max.user": *, (glob)
375 376 "max.wall": *, (glob)
376 377 "median.comb": *, (glob)
377 378 "median.count": *, (glob)
378 379 "median.sys": *, (glob)
379 380 "median.user": *, (glob)
380 381 "median.wall": *, (glob)
381 382 "sys": *, (glob)
382 383 "user": *, (glob)
383 384 "wall": * (glob)
384 385 }
385 386 ]
386 387
387 388 detailed output:
388 389
389 390 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
390 391 [
391 392 {
392 393 "avg.comb": *, (glob)
393 394 "avg.count": *, (glob)
394 395 "avg.sys": *, (glob)
395 396 "avg.user": *, (glob)
396 397 "avg.wall": *, (glob)
397 398 "comb": *, (glob)
398 399 "count": *, (glob)
399 400 "max.comb": *, (glob)
400 401 "max.count": *, (glob)
401 402 "max.sys": *, (glob)
402 403 "max.user": *, (glob)
403 404 "max.wall": *, (glob)
404 405 "median.comb": *, (glob)
405 406 "median.count": *, (glob)
406 407 "median.sys": *, (glob)
407 408 "median.user": *, (glob)
408 409 "median.wall": *, (glob)
409 410 "sys": *, (glob)
410 411 "user": *, (glob)
411 412 "wall": * (glob)
412 413 }
413 414 ]
414 415
415 416 Test pre-run feature
416 417 --------------------
417 418
418 419 (perf discovery has some spurious output)
419 420
420 421 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=0
421 422 ! wall * comb * user * sys * (best of 1) (glob)
422 423 ! wall * comb * user * sys * (max of 1) (glob)
423 424 ! wall * comb * user * sys * (avg of 1) (glob)
424 425 ! wall * comb * user * sys * (median of 1) (glob)
425 426 searching for changes
426 427 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=1
427 428 ! wall * comb * user * sys * (best of 1) (glob)
428 429 ! wall * comb * user * sys * (max of 1) (glob)
429 430 ! wall * comb * user * sys * (avg of 1) (glob)
430 431 ! wall * comb * user * sys * (median of 1) (glob)
431 432 searching for changes
432 433 searching for changes
433 434 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=3
434 435 ! wall * comb * user * sys * (best of 1) (glob)
435 436 ! wall * comb * user * sys * (max of 1) (glob)
436 437 ! wall * comb * user * sys * (avg of 1) (glob)
437 438 ! wall * comb * user * sys * (median of 1) (glob)
438 439 searching for changes
439 440 searching for changes
440 441 searching for changes
441 442 searching for changes
442 443 $ hg perf::bundle 'last(all(), 5)'
443 444 $ hg bundle --exact --rev 'last(all(), 5)' last-5.hg
444 445 4 changesets found
445 446 $ hg perf::unbundle last-5.hg
446 447
447 448
448 449 test profile-benchmark option
449 450 ------------------------------
450 451
451 452 Function to check that statprof ran
452 453 $ statprofran () {
453 454 > grep -E 'Sample count:|No samples recorded' > /dev/null
454 455 > }
455 456 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.profile-benchmark=yes 2>&1 | statprofran
456 457
457 458 Check perf.py for historical portability
458 459 ----------------------------------------
459 460
460 461 $ cd "$TESTDIR/.."
461 462
462 463 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
463 464 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
464 465 > "$TESTDIR"/check-perf-code.py contrib/perf.py
465 466 contrib/perf.py:\d+: (re)
466 467 > from mercurial import (
467 468 import newer module separately in try clause for early Mercurial
468 469 contrib/perf.py:\d+: (re)
469 470 > from mercurial import (
470 471 import newer module separately in try clause for early Mercurial
471 472 contrib/perf.py:\d+: (re)
472 473 > origindexpath = orig.opener.join(indexfile)
473 474 use getvfs()/getsvfs() for early Mercurial
474 475 contrib/perf.py:\d+: (re)
475 476 > origdatapath = orig.opener.join(datafile)
476 477 use getvfs()/getsvfs() for early Mercurial
477 478 contrib/perf.py:\d+: (re)
478 479 > vfs = vfsmod.vfs(tmpdir)
479 480 use getvfs()/getsvfs() for early Mercurial
480 481 contrib/perf.py:\d+: (re)
481 482 > vfs.options = getattr(orig.opener, 'options', None)
482 483 use getvfs()/getsvfs() for early Mercurial
483 484 [1]
General Comments 0
You need to be logged in to leave comments. Login now