perf: allow profiling of more than one run...
marmoute
r52482:90ef3e04 default
@@ -1,4689 +1,4705 @@
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance
3 3
4 4 Configurations
5 5 ==============
6 6
7 7 ``perf``
8 8 --------
9 9
10 10 ``all-timing``
11 11 When set, additional statistics will be reported for each benchmark: best,
12 12 worst, median, and average. If not set, only the best timing is reported
13 13 (default: off).
14 14
15 15 ``presleep``
16 16 number of seconds to wait before any group of runs (default: 1)
17 17
18 18 ``pre-run``
19 19 number of runs to perform before starting measurement.
20 20
21 21 ``profile-benchmark``
22 22 Enable profiling for the benchmarked section.
23 (The first iteration is benchmarked)
23 (by default, the first iteration is benchmarked)
24
25 ``profiled-runs``
26 list of iterations to profile (starting from 0)
24 27
25 28 ``run-limits``
26 29 Control the number of runs each benchmark will perform. The option value
27 30 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 31 conditions are considered in order with the following logic:
29 32
30 33 If the benchmark has been running for <time> seconds, and we have performed
31 34 <numberofrun> iterations, stop the benchmark.
32 35
33 36 The default value is: `3.0-100, 10.0-3`
34 37
35 38 ``stub``
36 39 When set, benchmarks will only be run once; useful for testing
37 40 (default: off)
38 41 '''
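For reference, a configuration exercising the options documented above might look like the following hgrc snippet; the values are illustrative, not defaults:

    [perf]
    all-timing = yes
    pre-run = 2
    profile-benchmark = yes
    # profile the second and third iterations rather than just the first
    profiled-runs = 1, 2
    run-limits = 3.0-100, 10.0-3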
39 42
40 43 # "historical portability" policy of perf.py:
41 44 #
42 45 # We have to do:
43 46 # - make perf.py "loadable" with as wide Mercurial version as possible
44 47 # This doesn't mean that perf commands work correctly with that Mercurial.
45 48 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 49 # - make historical perf command work correctly with as wide Mercurial
47 50 # version as possible
48 51 #
49 52 # We have to do, if possible with reasonable cost:
50 53 # - make recent perf command for historical feature work correctly
51 54 # with early Mercurial
52 55 #
53 56 # We don't have to do:
54 57 # - make perf command for recent feature work correctly with early
55 58 # Mercurial
56 59
57 60 import contextlib
58 61 import functools
59 62 import gc
60 63 import os
61 64 import random
62 65 import shutil
63 66 import struct
64 67 import sys
65 68 import tempfile
66 69 import threading
67 70 import time
68 71
69 72 import mercurial.revlog
70 73 from mercurial import (
71 74 changegroup,
72 75 cmdutil,
73 76 commands,
74 77 copies,
75 78 error,
76 79 extensions,
77 80 hg,
78 81 mdiff,
79 82 merge,
80 83 util,
81 84 )
82 85
83 86 # for "historical portability":
84 87 # try to import modules separately (in dict order), and ignore
85 88 # failure, because these aren't available with early Mercurial
86 89 try:
87 90 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 91 except ImportError:
89 92 pass
90 93 try:
91 94 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 95 except ImportError:
93 96 pass
94 97 try:
95 98 from mercurial import registrar # since 3.7 (or 37d50250b696)
96 99
97 100 dir(registrar) # forcibly load it
98 101 except ImportError:
99 102 registrar = None
100 103 try:
101 104 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 105 except ImportError:
103 106 pass
104 107 try:
105 108 from mercurial.utils import repoviewutil # since 5.0
106 109 except ImportError:
107 110 repoviewutil = None
108 111 try:
109 112 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 113 except ImportError:
111 114 pass
112 115 try:
113 116 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 117 except ImportError:
115 118 pass
116 119
117 120 try:
118 121 from mercurial import profiling
119 122 except ImportError:
120 123 profiling = None
121 124
122 125 try:
123 126 from mercurial.revlogutils import constants as revlog_constants
124 127
125 128 perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')
126 129
127 130 def revlog(opener, *args, **kwargs):
128 131 return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)
129 132
130 133
131 134 except (ImportError, AttributeError):
132 135 perf_rl_kind = None
133 136
134 137 def revlog(opener, *args, **kwargs):
135 138 return mercurial.revlog.revlog(opener, *args, **kwargs)
136 139
137 140
138 141 def identity(a):
139 142 return a
140 143
141 144
142 145 try:
143 146 from mercurial import pycompat
144 147
145 148 getargspec = pycompat.getargspec # added to module after 4.5
146 149 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
147 150 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
148 151 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
149 152 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
150 153 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
151 154 if pycompat.ispy3:
152 155 _maxint = sys.maxsize # per py3 docs for replacing maxint
153 156 else:
154 157 _maxint = sys.maxint
155 158 except (NameError, ImportError, AttributeError):
156 159 import inspect
157 160
158 161 getargspec = inspect.getargspec
159 162 _byteskwargs = identity
160 163 _bytestr = str
161 164 fsencode = identity # no py3 support
162 165 _maxint = sys.maxint # no py3 support
163 166 _sysstr = lambda x: x # no py3 support
164 167 _xrange = xrange
165 168
166 169 try:
167 170 # 4.7+
168 171 queue = pycompat.queue.Queue
169 172 except (NameError, AttributeError, ImportError):
170 173 # <4.7.
171 174 try:
172 175 queue = pycompat.queue
173 176 except (NameError, AttributeError, ImportError):
174 177 import Queue as queue
175 178
176 179 try:
177 180 from mercurial import logcmdutil
178 181
179 182 makelogtemplater = logcmdutil.maketemplater
180 183 except (AttributeError, ImportError):
181 184 try:
182 185 makelogtemplater = cmdutil.makelogtemplater
183 186 except (AttributeError, ImportError):
184 187 makelogtemplater = None
185 188
186 189 # for "historical portability":
187 190 # define util.safehasattr forcibly, because util.safehasattr has been
188 191 # available since 1.9.3 (or 94b200a11cf7)
189 192 _undefined = object()
190 193
191 194
192 195 def safehasattr(thing, attr):
193 196 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
194 197
195 198
196 199 setattr(util, 'safehasattr', safehasattr)
197 200
198 201 # for "historical portability":
199 202 # define util.timer forcibly, because util.timer has been available
200 203 # since ae5d60bb70c9
201 204 if safehasattr(time, 'perf_counter'):
202 205 util.timer = time.perf_counter
203 206 elif os.name == b'nt':
204 207 util.timer = time.clock
205 208 else:
206 209 util.timer = time.time
207 210
208 211 # for "historical portability":
209 212 # use locally defined empty option list, if formatteropts isn't
210 213 # available, because commands.formatteropts has been available since
211 214 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
212 215 # available since 2.2 (or ae5f92e154d3)
213 216 formatteropts = getattr(
214 217 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
215 218 )
216 219
217 220 # for "historical portability":
218 221 # use locally defined option list, if debugrevlogopts isn't available,
219 222 # because commands.debugrevlogopts has been available since 3.7 (or
220 223 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
221 224 # since 1.9 (or a79fea6b3e77).
222 225 revlogopts = getattr(
223 226 cmdutil,
224 227 "debugrevlogopts",
225 228 getattr(
226 229 commands,
227 230 "debugrevlogopts",
228 231 [
229 232 (b'c', b'changelog', False, b'open changelog'),
230 233 (b'm', b'manifest', False, b'open manifest'),
231 234 (b'', b'dir', False, b'open directory manifest'),
232 235 ],
233 236 ),
234 237 )
235 238
236 239 cmdtable = {}
237 240
238 241
239 242 # for "historical portability":
240 243 # define parsealiases locally, because cmdutil.parsealiases has been
241 244 # available since 1.5 (or 6252852b4332)
242 245 def parsealiases(cmd):
243 246 return cmd.split(b"|")
244 247
245 248
246 249 if safehasattr(registrar, 'command'):
247 250 command = registrar.command(cmdtable)
248 251 elif safehasattr(cmdutil, 'command'):
249 252 command = cmdutil.command(cmdtable)
250 253 if 'norepo' not in getargspec(command).args:
251 254 # for "historical portability":
252 255 # wrap original cmdutil.command, because "norepo" option has
253 256 # been available since 3.1 (or 75a96326cecb)
254 257 _command = command
255 258
256 259 def command(name, options=(), synopsis=None, norepo=False):
257 260 if norepo:
258 261 commands.norepo += b' %s' % b' '.join(parsealiases(name))
259 262 return _command(name, list(options), synopsis)
260 263
261 264
262 265 else:
263 266 # for "historical portability":
264 267 # define "@command" annotation locally, because cmdutil.command
265 268 # has been available since 1.9 (or 2daa5179e73f)
266 269 def command(name, options=(), synopsis=None, norepo=False):
267 270 def decorator(func):
268 271 if synopsis:
269 272 cmdtable[name] = func, list(options), synopsis
270 273 else:
271 274 cmdtable[name] = func, list(options)
272 275 if norepo:
273 276 commands.norepo += b' %s' % b' '.join(parsealiases(name))
274 277 return func
275 278
276 279 return decorator
277 280
278 281
279 282 try:
280 283 import mercurial.registrar
281 284 import mercurial.configitems
282 285
283 286 configtable = {}
284 287 configitem = mercurial.registrar.configitem(configtable)
285 288 configitem(
286 289 b'perf',
287 290 b'presleep',
288 291 default=mercurial.configitems.dynamicdefault,
289 292 experimental=True,
290 293 )
291 294 configitem(
292 295 b'perf',
293 296 b'stub',
294 297 default=mercurial.configitems.dynamicdefault,
295 298 experimental=True,
296 299 )
297 300 configitem(
298 301 b'perf',
299 302 b'parentscount',
300 303 default=mercurial.configitems.dynamicdefault,
301 304 experimental=True,
302 305 )
303 306 configitem(
304 307 b'perf',
305 308 b'all-timing',
306 309 default=mercurial.configitems.dynamicdefault,
307 310 experimental=True,
308 311 )
309 312 configitem(
310 313 b'perf',
311 314 b'pre-run',
312 315 default=mercurial.configitems.dynamicdefault,
313 316 )
314 317 configitem(
315 318 b'perf',
316 319 b'profile-benchmark',
317 320 default=mercurial.configitems.dynamicdefault,
318 321 )
319 322 configitem(
320 323 b'perf',
324 b'profiled-runs',
325 default=mercurial.configitems.dynamicdefault,
326 )
327 configitem(
328 b'perf',
321 329 b'run-limits',
322 330 default=mercurial.configitems.dynamicdefault,
323 331 experimental=True,
324 332 )
325 333 except (ImportError, AttributeError):
326 334 pass
327 335 except TypeError:
328 336 # compatibility fix for a11fd395e83f
329 337 # hg version: 5.2
330 338 configitem(
331 339 b'perf',
332 340 b'presleep',
333 341 default=mercurial.configitems.dynamicdefault,
334 342 )
335 343 configitem(
336 344 b'perf',
337 345 b'stub',
338 346 default=mercurial.configitems.dynamicdefault,
339 347 )
340 348 configitem(
341 349 b'perf',
342 350 b'parentscount',
343 351 default=mercurial.configitems.dynamicdefault,
344 352 )
345 353 configitem(
346 354 b'perf',
347 355 b'all-timing',
348 356 default=mercurial.configitems.dynamicdefault,
349 357 )
350 358 configitem(
351 359 b'perf',
352 360 b'pre-run',
353 361 default=mercurial.configitems.dynamicdefault,
354 362 )
355 363 configitem(
356 364 b'perf',
357 b'profile-benchmark',
365 b'profiled-runs',
358 366 default=mercurial.configitems.dynamicdefault,
359 367 )
360 368 configitem(
361 369 b'perf',
362 370 b'run-limits',
363 371 default=mercurial.configitems.dynamicdefault,
364 372 )
365 373
366 374
367 375 def getlen(ui):
368 376 if ui.configbool(b"perf", b"stub", False):
369 377 return lambda x: 1
370 378 return len
371 379
372 380
373 381 class noop:
374 382 """dummy context manager"""
375 383
376 384 def __enter__(self):
377 385 pass
378 386
379 387 def __exit__(self, *args):
380 388 pass
381 389
382 390
383 391 NOOPCTX = noop()
384 392
385 393
386 394 def gettimer(ui, opts=None):
387 395 """return a timer function and formatter: (timer, formatter)
388 396
389 397 This function exists to gather the creation of formatter in a single
390 398 place instead of duplicating it in all performance commands."""
391 399
392 400 # enforce an idle period before execution to counteract power management
393 401 # experimental config: perf.presleep
394 402 time.sleep(getint(ui, b"perf", b"presleep", 1))
395 403
396 404 if opts is None:
397 405 opts = {}
398 406 # redirect all to stderr unless buffer api is in use
399 407 if not ui._buffers:
400 408 ui = ui.copy()
401 409 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
402 410 if uifout:
403 411 # for "historical portability":
404 412 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
405 413 uifout.set(ui.ferr)
406 414
407 415 # get a formatter
408 416 uiformatter = getattr(ui, 'formatter', None)
409 417 if uiformatter:
410 418 fm = uiformatter(b'perf', opts)
411 419 else:
412 420 # for "historical portability":
413 421 # define formatter locally, because ui.formatter has been
414 422 # available since 2.2 (or ae5f92e154d3)
415 423 from mercurial import node
416 424
417 425 class defaultformatter:
418 426 """Minimized composition of baseformatter and plainformatter"""
419 427
420 428 def __init__(self, ui, topic, opts):
421 429 self._ui = ui
422 430 if ui.debugflag:
423 431 self.hexfunc = node.hex
424 432 else:
425 433 self.hexfunc = node.short
426 434
427 435 def __nonzero__(self):
428 436 return False
429 437
430 438 __bool__ = __nonzero__
431 439
432 440 def startitem(self):
433 441 pass
434 442
435 443 def data(self, **data):
436 444 pass
437 445
438 446 def write(self, fields, deftext, *fielddata, **opts):
439 447 self._ui.write(deftext % fielddata, **opts)
440 448
441 449 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
442 450 if cond:
443 451 self._ui.write(deftext % fielddata, **opts)
444 452
445 453 def plain(self, text, **opts):
446 454 self._ui.write(text, **opts)
447 455
448 456 def end(self):
449 457 pass
450 458
451 459 fm = defaultformatter(ui, b'perf', opts)
452 460
453 461 # stub function, runs code only once instead of in a loop
454 462 # experimental config: perf.stub
455 463 if ui.configbool(b"perf", b"stub", False):
456 464 return functools.partial(stub_timer, fm), fm
457 465
458 466 # experimental config: perf.all-timing
459 467 displayall = ui.configbool(b"perf", b"all-timing", True)
460 468
461 469 # experimental config: perf.run-limits
462 470 limitspec = ui.configlist(b"perf", b"run-limits", [])
463 471 limits = []
464 472 for item in limitspec:
465 473 parts = item.split(b'-', 1)
466 474 if len(parts) < 2:
467 475 ui.warn((b'malformed run limit entry, missing "-": %s\n' % item))
468 476 continue
469 477 try:
470 478 time_limit = float(_sysstr(parts[0]))
471 479 except ValueError as e:
472 480 ui.warn(
473 481 (
474 482 b'malformed run limit entry, %s: %s\n'
475 483 % (_bytestr(e), item)
476 484 )
477 485 )
478 486 continue
479 487 try:
480 488 run_limit = int(_sysstr(parts[1]))
481 489 except ValueError as e:
482 490 ui.warn(
483 491 (
484 492 b'malformed run limit entry, %s: %s\n'
485 493 % (_bytestr(e), item)
486 494 )
487 495 )
488 496 continue
489 497 limits.append((time_limit, run_limit))
490 498 if not limits:
491 499 limits = DEFAULTLIMITS
492 500
493 501 profiler = None
502 profiled_runs = set()
494 503 if profiling is not None:
495 504 if ui.configbool(b"perf", b"profile-benchmark", False):
496 profiler = profiling.profile(ui)
505 profiler = lambda: profiling.profile(ui)
506 for run in ui.configlist(b"perf", b"profiled-runs", [0]):
507 profiled_runs.add(int(run))
497 508
498 509 prerun = getint(ui, b"perf", b"pre-run", 0)
499 510 t = functools.partial(
500 511 _timer,
501 512 fm,
502 513 displayall=displayall,
503 514 limits=limits,
504 515 prerun=prerun,
505 516 profiler=profiler,
517 profiled_runs=profiled_runs,
506 518 )
507 519 return t, fm
508 520
509 521
510 522 def stub_timer(fm, func, setup=None, title=None):
511 523 if setup is not None:
512 524 setup()
513 525 func()
514 526
515 527
516 528 @contextlib.contextmanager
517 529 def timeone():
518 530 r = []
519 531 ostart = os.times()
520 532 cstart = util.timer()
521 533 yield r
522 534 cstop = util.timer()
523 535 ostop = os.times()
524 536 a, b = ostart, ostop
525 537 r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
526 538
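A minimal usage sketch of the `timeone()` context manager above; the yielded list receives a single `(wall, user, sys)` tuple once the block exits:

    with timeone() as res:
        sum(range(10 ** 6))  # the code under measurement
    wall, user_cpu, sys_cpu = res[0]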
527 539
528 540 # list of stop conditions (elapsed time, minimal run count)
529 541 DEFAULTLIMITS = (
530 542 (3.0, 100),
531 543 (10.0, 3),
532 544 )
533 545
534 546
535 547 @contextlib.contextmanager
536 548 def noop_context():
537 549 yield
538 550
539 551
540 552 def _timer(
541 553 fm,
542 554 func,
543 555 setup=None,
544 556 context=noop_context,
545 557 title=None,
546 558 displayall=False,
547 559 limits=DEFAULTLIMITS,
548 560 prerun=0,
549 561 profiler=None,
562 profiled_runs=(0,),
550 563 ):
551 564 gc.collect()
552 565 results = []
553 566 begin = util.timer()
554 567 count = 0
555 568 if profiler is None:
556 profiler = NOOPCTX
569 profiler = lambda: NOOPCTX
557 570 for i in range(prerun):
558 571 if setup is not None:
559 572 setup()
560 573 with context():
561 574 func()
562 575 keepgoing = True
563 576 while keepgoing:
577 if count in profiled_runs:
578 prof = profiler()
579 else:
580 prof = NOOPCTX
564 581 if setup is not None:
565 582 setup()
566 583 with context():
567 with profiler:
584 with prof:
568 585 with timeone() as item:
569 586 r = func()
570 profiler = NOOPCTX
571 587 count += 1
572 588 results.append(item[0])
573 589 cstop = util.timer()
574 590 # Look for a stop condition.
575 591 elapsed = cstop - begin
576 592 for t, mincount in limits:
577 593 if elapsed >= t and count >= mincount:
578 594 keepgoing = False
579 595 break
580 596
581 597 formatone(fm, results, title=title, result=r, displayall=displayall)
582 598
583 599
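The core of this change is the selective-profiling pattern in `_timer`: the profiler argument is now a factory, and a fresh profiling context is created only for iterations whose index is in `profiled_runs`. A self-contained sketch of the same idea, with a hypothetical `fake_profiler` standing in for `profiling.profile(ui)`:

    import contextlib

    @contextlib.contextmanager
    def fake_profiler():
        # stand-in for profiling.profile(ui); prints instead of profiling
        print('profiling on')
        try:
            yield
        finally:
            print('profiling off')

    def bench(func, runs=5, profiled_runs=(0, 2)):
        for i in range(runs):
            # wrap only the selected iterations in a profiler
            ctx = fake_profiler() if i in profiled_runs else contextlib.nullcontext()
            with ctx:
                func()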
584 600 def formatone(fm, timings, title=None, result=None, displayall=False):
585 601 count = len(timings)
586 602
587 603 fm.startitem()
588 604
589 605 if title:
590 606 fm.write(b'title', b'! %s\n', title)
591 607 if result:
592 608 fm.write(b'result', b'! result: %s\n', result)
593 609
594 610 def display(role, entry):
595 611 prefix = b''
596 612 if role != b'best':
597 613 prefix = b'%s.' % role
598 614 fm.plain(b'!')
599 615 fm.write(prefix + b'wall', b' wall %f', entry[0])
600 616 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
601 617 fm.write(prefix + b'user', b' user %f', entry[1])
602 618 fm.write(prefix + b'sys', b' sys %f', entry[2])
603 619 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
604 620 fm.plain(b'\n')
605 621
606 622 timings.sort()
607 623 min_val = timings[0]
608 624 display(b'best', min_val)
609 625 if displayall:
610 626 max_val = timings[-1]
611 627 display(b'max', max_val)
612 628 avg = tuple([sum(x) / count for x in zip(*timings)])
613 629 display(b'avg', avg)
614 630 median = timings[len(timings) // 2]
615 631 display(b'median', median)
616 632
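For orientation, with `all-timing` enabled this formatter emits one line per statistic, roughly as below (the timings are made up for illustration; comb is user plus sys):

    ! wall 0.000512 comb 0.010000 user 0.010000 sys 0.000000 (best of 100)
    ! wall 0.001024 comb 0.010000 user 0.010000 sys 0.000000 (max of 100)
    ! wall 0.000618 comb 0.010000 user 0.010000 sys 0.000000 (avg of 100)
    ! wall 0.000598 comb 0.010000 user 0.010000 sys 0.000000 (median of 100)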
617 633
618 634 # utilities for historical portability
619 635
620 636
621 637 def getint(ui, section, name, default):
622 638 # for "historical portability":
623 639 # ui.configint has been available since 1.9 (or fa2b596db182)
624 640 v = ui.config(section, name, None)
625 641 if v is None:
626 642 return default
627 643 try:
628 644 return int(v)
629 645 except ValueError:
630 646 raise error.ConfigError(
631 647 b"%s.%s is not an integer ('%s')" % (section, name, v)
632 648 )
633 649
634 650
635 651 def safeattrsetter(obj, name, ignoremissing=False):
636 652 """Ensure that 'obj' has 'name' attribute before subsequent setattr
637 653
638 654 This function is aborted, if 'obj' doesn't have 'name' attribute
639 655 at runtime. This avoids overlooking removal of an attribute, which
640 656 breaks assumption of performance measurement, in the future.
641 657
642 658 This function returns the object to (1) assign a new value, and
643 659 (2) restore an original value to the attribute.
644 660
645 661 If 'ignoremissing' is true, missing 'name' attribute doesn't cause
646 662 abortion, and this function returns None. This is useful to
647 663 examine an attribute, which isn't ensured in all Mercurial
648 664 versions.
649 665 """
650 666 if not util.safehasattr(obj, name):
651 667 if ignoremissing:
652 668 return None
653 669 raise error.Abort(
654 670 (
655 671 b"missing attribute %s of %s might break assumption"
656 672 b" of performance measurement"
657 673 )
658 674 % (name, obj)
659 675 )
660 676
661 677 origvalue = getattr(obj, _sysstr(name))
662 678
663 679 class attrutil:
664 680 def set(self, newvalue):
665 681 setattr(obj, _sysstr(name), newvalue)
666 682
667 683 def restore(self):
668 684 setattr(obj, _sysstr(name), origvalue)
669 685
670 686 return attrutil()
671 687
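A usage sketch of the set/restore protocol returned by `safeattrsetter` (this mirrors how `gettimer` redirects `ui.fout` above; assumes a `ui` object is in scope):

    saver = safeattrsetter(ui, b'fout', ignoremissing=True)
    if saver is not None:
        saver.set(ui.ferr)  # temporarily point fout at stderr
        try:
            pass  # run code whose output must go to stderr
        finally:
            saver.restore()  # put the original fout back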
672 688
673 689 # utilities to examine each internal API changes
674 690
675 691
676 692 def getbranchmapsubsettable():
677 693 # for "historical portability":
678 694 # subsettable is defined in:
679 695 # - branchmap since 2.9 (or 175c6fd8cacc)
680 696 # - repoview since 2.5 (or 59a9f18d4587)
681 697 # - repoviewutil since 5.0
682 698 for mod in (branchmap, repoview, repoviewutil):
683 699 subsettable = getattr(mod, 'subsettable', None)
684 700 if subsettable:
685 701 return subsettable
686 702
687 703 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
688 704 # branchmap and repoview modules exist, but subsettable attribute
689 705 # doesn't)
690 706 raise error.Abort(
691 707 b"perfbranchmap not available with this Mercurial",
692 708 hint=b"use 2.5 or later",
693 709 )
694 710
695 711
696 712 def getsvfs(repo):
697 713 """Return appropriate object to access files under .hg/store"""
698 714 # for "historical portability":
699 715 # repo.svfs has been available since 2.3 (or 7034365089bf)
700 716 svfs = getattr(repo, 'svfs', None)
701 717 if svfs:
702 718 return svfs
703 719 else:
704 720 return getattr(repo, 'sopener')
705 721
706 722
707 723 def getvfs(repo):
708 724 """Return appropriate object to access files under .hg"""
709 725 # for "historical portability":
710 726 # repo.vfs has been available since 2.3 (or 7034365089bf)
711 727 vfs = getattr(repo, 'vfs', None)
712 728 if vfs:
713 729 return vfs
714 730 else:
715 731 return getattr(repo, 'opener')
716 732
717 733
718 734 def repocleartagscachefunc(repo):
719 735 """Return the function to clear tags cache according to repo internal API"""
720 736 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
721 737 # in this case, setattr(repo, '_tagscache', None) or so isn't
722 738 # correct way to clear tags cache, because existing code paths
723 739 # expect _tagscache to be a structured object.
724 740 def clearcache():
725 741 # _tagscache has been filteredpropertycache since 2.5 (or
726 742 # 98c867ac1330), and delattr() can't work in such case
727 743 if '_tagscache' in vars(repo):
728 744 del repo.__dict__['_tagscache']
729 745
730 746 return clearcache
731 747
732 748 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
733 749 if repotags: # since 1.4 (or 5614a628d173)
734 750 return lambda: repotags.set(None)
735 751
736 752 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
737 753 if repotagscache: # since 0.6 (or d7df759d0e97)
738 754 return lambda: repotagscache.set(None)
739 755
740 756 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
741 757 # this point, but it isn't so problematic, because:
742 758 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
743 759 # in perftags() causes failure soon
744 760 # - perf.py itself has been available since 1.1 (or eb240755386d)
745 761 raise error.Abort(b"tags API of this hg command is unknown")
746 762
747 763
748 764 # utilities to clear cache
749 765
750 766
751 767 def clearfilecache(obj, attrname):
752 768 unfiltered = getattr(obj, 'unfiltered', None)
753 769 if unfiltered is not None:
754 770 obj = obj.unfiltered()
755 771 if attrname in vars(obj):
756 772 delattr(obj, attrname)
757 773 obj._filecache.pop(attrname, None)
758 774
759 775
760 776 def clearchangelog(repo):
761 777 if repo is not repo.unfiltered():
762 778 object.__setattr__(repo, '_clcachekey', None)
763 779 object.__setattr__(repo, '_clcache', None)
764 780 clearfilecache(repo.unfiltered(), 'changelog')
765 781
766 782
767 783 # perf commands
768 784
769 785
770 786 @command(b'perf::walk|perfwalk', formatteropts)
771 787 def perfwalk(ui, repo, *pats, **opts):
772 788 opts = _byteskwargs(opts)
773 789 timer, fm = gettimer(ui, opts)
774 790 m = scmutil.match(repo[None], pats, {})
775 791 timer(
776 792 lambda: len(
777 793 list(
778 794 repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
779 795 )
780 796 )
781 797 )
782 798 fm.end()
783 799
784 800
785 801 @command(b'perf::annotate|perfannotate', formatteropts)
786 802 def perfannotate(ui, repo, f, **opts):
787 803 opts = _byteskwargs(opts)
788 804 timer, fm = gettimer(ui, opts)
789 805 fc = repo[b'.'][f]
790 806 timer(lambda: len(fc.annotate(True)))
791 807 fm.end()
792 808
793 809
794 810 @command(
795 811 b'perf::status|perfstatus',
796 812 [
797 813 (b'u', b'unknown', False, b'ask status to look for unknown files'),
798 814 (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
799 815 ]
800 816 + formatteropts,
801 817 )
802 818 def perfstatus(ui, repo, **opts):
803 819 """benchmark the performance of a single status call
804 820
805 821 The repository data are preserved between each call.
806 822
807 823 By default, only the status of the tracked files is requested. If
808 824 `--unknown` is passed, the "unknown" files are also requested.
809 825 """
810 826 opts = _byteskwargs(opts)
811 827 # m = match.always(repo.root, repo.getcwd())
812 828 # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
813 829 # False))))
814 830 timer, fm = gettimer(ui, opts)
815 831 if opts[b'dirstate']:
816 832 dirstate = repo.dirstate
817 833 m = scmutil.matchall(repo)
818 834 unknown = opts[b'unknown']
819 835
820 836 def status_dirstate():
821 837 s = dirstate.status(
822 838 m, subrepos=[], ignored=False, clean=False, unknown=unknown
823 839 )
824 840 sum(map(bool, s))
825 841
826 842 if util.safehasattr(dirstate, 'running_status'):
827 843 with dirstate.running_status(repo):
828 844 timer(status_dirstate)
829 845 dirstate.invalidate()
830 846 else:
831 847 timer(status_dirstate)
832 848 else:
833 849 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
834 850 fm.end()
835 851
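Typical invocations, using the aliases registered above:

    $ hg perf::status
    $ hg perf::status --unknown
    $ hg perfstatus --dirstate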
836 852
837 853 @command(b'perf::addremove|perfaddremove', formatteropts)
838 854 def perfaddremove(ui, repo, **opts):
839 855 opts = _byteskwargs(opts)
840 856 timer, fm = gettimer(ui, opts)
841 857 try:
842 858 oldquiet = repo.ui.quiet
843 859 repo.ui.quiet = True
844 860 matcher = scmutil.match(repo[None])
845 861 opts[b'dry_run'] = True
846 862 if 'uipathfn' in getargspec(scmutil.addremove).args:
847 863 uipathfn = scmutil.getuipathfn(repo)
848 864 timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
849 865 else:
850 866 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
851 867 finally:
852 868 repo.ui.quiet = oldquiet
853 869 fm.end()
854 870
855 871
856 872 def clearcaches(cl):
857 873 # behave somewhat consistently across internal API changes
858 874 if util.safehasattr(cl, b'clearcaches'):
859 875 cl.clearcaches()
860 876 elif util.safehasattr(cl, b'_nodecache'):
861 877 # <= hg-5.2
862 878 from mercurial.node import nullid, nullrev
863 879
864 880 cl._nodecache = {nullid: nullrev}
865 881 cl._nodepos = None
866 882
867 883
868 884 @command(b'perf::heads|perfheads', formatteropts)
869 885 def perfheads(ui, repo, **opts):
870 886 """benchmark the computation of a changelog heads"""
871 887 opts = _byteskwargs(opts)
872 888 timer, fm = gettimer(ui, opts)
873 889 cl = repo.changelog
874 890
875 891 def s():
876 892 clearcaches(cl)
877 893
878 894 def d():
879 895 len(cl.headrevs())
880 896
881 897 timer(d, setup=s)
882 898 fm.end()
883 899
884 900
885 901 def _default_clear_on_disk_tags_cache(repo):
886 902 from mercurial import tags
887 903
888 904 repo.cachevfs.tryunlink(tags._filename(repo))
889 905
890 906
891 907 def _default_clear_on_disk_tags_fnodes_cache(repo):
892 908 from mercurial import tags
893 909
894 910 repo.cachevfs.tryunlink(tags._fnodescachefile)
895 911
896 912
897 913 def _default_forget_fnodes(repo, revs):
898 914 """function used by the perf extension to prune some entries from the
899 915 fnodes cache"""
900 916 from mercurial import tags
901 917
902 918 missing_1 = b'\xff' * 4
903 919 missing_2 = b'\xff' * 20
904 920 cache = tags.hgtagsfnodescache(repo.unfiltered())
905 921 for r in revs:
906 922 cache._writeentry(r * tags._fnodesrecsize, missing_1, missing_2)
907 923 cache.write()
908 924
909 925
910 926 @command(
911 927 b'perf::tags|perftags',
912 928 formatteropts
913 929 + [
914 930 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
915 931 (
916 932 b'',
917 933 b'clear-on-disk-cache',
918 934 False,
919 935 b'clear on disk tags cache (DESTRUCTIVE)',
920 936 ),
921 937 (
922 938 b'',
923 939 b'clear-fnode-cache-all',
924 940 False,
925 941 b'clear on disk file node cache (DESTRUCTIVE)',
926 942 ),
927 943 (
928 944 b'',
929 945 b'clear-fnode-cache-rev',
930 946 [],
931 947 b'clear on disk file node cache (DESTRUCTIVE)',
932 948 b'REVS',
933 949 ),
934 950 (
935 951 b'',
936 952 b'update-last',
937 953 b'',
938 954 b'simulate an update over the last N revisions (DESTRUCTIVE)',
939 955 b'N',
940 956 ),
941 957 ],
942 958 )
943 959 def perftags(ui, repo, **opts):
944 960 """Benchmark tags retrieval in various situation
945 961
946 962 The options marked as (DESTRUCTIVE) will alter the on-disk cache, possibly
947 963 altering performance after the command has run. However, they do not
948 964 destroy any stored data.
949 965 """
950 966 from mercurial import tags
951 967
952 968 opts = _byteskwargs(opts)
953 969 timer, fm = gettimer(ui, opts)
954 970 repocleartagscache = repocleartagscachefunc(repo)
955 971 clearrevlogs = opts[b'clear_revlogs']
956 972 clear_disk = opts[b'clear_on_disk_cache']
957 973 clear_fnode = opts[b'clear_fnode_cache_all']
958 974
959 975 clear_fnode_revs = opts[b'clear_fnode_cache_rev']
960 976 update_last_str = opts[b'update_last']
961 977 update_last = None
962 978 if update_last_str:
963 979 try:
964 980 update_last = int(update_last_str)
965 981 except ValueError:
966 982 msg = b'could not parse value for update-last: "%s"'
967 983 msg %= update_last_str
968 984 hint = b'value should be an integer'
969 985 raise error.Abort(msg, hint=hint)
970 986
971 987 clear_disk_fn = getattr(
972 988 tags,
973 989 "clear_cache_on_disk",
974 990 _default_clear_on_disk_tags_cache,
975 991 )
976 992 if getattr(tags, 'clear_cache_fnodes_is_working', False):
977 993 clear_fnodes_fn = tags.clear_cache_fnodes
978 994 else:
979 995 clear_fnodes_fn = _default_clear_on_disk_tags_fnodes_cache
980 996 clear_fnodes_rev_fn = getattr(
981 997 tags,
982 998 "forget_fnodes",
983 999 _default_forget_fnodes,
984 1000 )
985 1001
986 1002 clear_revs = []
987 1003 if clear_fnode_revs:
988 1004 clear_revs.extend(scmutil.revrange(repo, clear_fnode_revs))
989 1005
990 1006 if update_last:
991 1007 revset = b'last(all(), %d)' % update_last
992 1008 last_revs = repo.unfiltered().revs(revset)
993 1009 clear_revs.extend(last_revs)
994 1010
995 1011 from mercurial import repoview
996 1012
997 1013 rev_filter = {(b'experimental', b'extra-filter-revs'): revset}
998 1014 with repo.ui.configoverride(rev_filter, source=b"perf"):
999 1015 filter_id = repoview.extrafilter(repo.ui)
1000 1016
1001 1017 filter_name = b'%s%%%s' % (repo.filtername, filter_id)
1002 1018 pre_repo = repo.filtered(filter_name)
1003 1019 pre_repo.tags() # warm the cache
1004 1020 old_tags_path = repo.cachevfs.join(tags._filename(pre_repo))
1005 1021 new_tags_path = repo.cachevfs.join(tags._filename(repo))
1006 1022
1007 1023 clear_revs = sorted(set(clear_revs))
1008 1024
1009 1025 def s():
1010 1026 if update_last:
1011 1027 util.copyfile(old_tags_path, new_tags_path)
1012 1028 if clearrevlogs:
1013 1029 clearchangelog(repo)
1014 1030 clearfilecache(repo.unfiltered(), 'manifest')
1015 1031 if clear_disk:
1016 1032 clear_disk_fn(repo)
1017 1033 if clear_fnode:
1018 1034 clear_fnodes_fn(repo)
1019 1035 elif clear_revs:
1020 1036 clear_fnodes_rev_fn(repo, clear_revs)
1021 1037 repocleartagscache()
1022 1038
1023 1039 def t():
1024 1040 len(repo.tags())
1025 1041
1026 1042 timer(t, setup=s)
1027 1043 fm.end()
1028 1044
1029 1045
1030 1046 @command(b'perf::ancestors|perfancestors', formatteropts)
1031 1047 def perfancestors(ui, repo, **opts):
1032 1048 opts = _byteskwargs(opts)
1033 1049 timer, fm = gettimer(ui, opts)
1034 1050 heads = repo.changelog.headrevs()
1035 1051
1036 1052 def d():
1037 1053 for a in repo.changelog.ancestors(heads):
1038 1054 pass
1039 1055
1040 1056 timer(d)
1041 1057 fm.end()
1042 1058
1043 1059
1044 1060 @command(b'perf::ancestorset|perfancestorset', formatteropts)
1045 1061 def perfancestorset(ui, repo, revset, **opts):
1046 1062 opts = _byteskwargs(opts)
1047 1063 timer, fm = gettimer(ui, opts)
1048 1064 revs = repo.revs(revset)
1049 1065 heads = repo.changelog.headrevs()
1050 1066
1051 1067 def d():
1052 1068 s = repo.changelog.ancestors(heads)
1053 1069 for rev in revs:
1054 1070 rev in s
1055 1071
1056 1072 timer(d)
1057 1073 fm.end()
1058 1074
1059 1075
1060 1076 @command(
1061 1077 b'perf::delta-find',
1062 1078 revlogopts + formatteropts,
1063 1079 b'-c|-m|FILE REV',
1064 1080 )
1065 1081 def perf_delta_find(ui, repo, arg_1, arg_2=None, **opts):
1066 1082 """benchmark the process of finding a valid delta for a revlog revision
1067 1083
1068 1084 When a revlog receives a new revision (e.g. from a commit, or from an
1069 1085 incoming bundle), it searches for a suitable delta-base to produce a delta.
1070 1086 This perf command measures how much time we spend in this process. It
1071 1087 operates on an already stored revision.
1072 1088
1073 1089 See `hg help debug-delta-find` for another related command.
1074 1090 """
1075 1091 from mercurial import revlogutils
1076 1092 import mercurial.revlogutils.deltas as deltautil
1077 1093
1078 1094 opts = _byteskwargs(opts)
1079 1095 if arg_2 is None:
1080 1096 file_ = None
1081 1097 rev = arg_1
1082 1098 else:
1083 1099 file_ = arg_1
1084 1100 rev = arg_2
1085 1101
1086 1102 repo = repo.unfiltered()
1087 1103
1088 1104 timer, fm = gettimer(ui, opts)
1089 1105
1090 1106 rev = int(rev)
1091 1107
1092 1108 revlog = cmdutil.openrevlog(repo, b'perf::delta-find', file_, opts)
1093 1109
1094 1110 deltacomputer = deltautil.deltacomputer(revlog)
1095 1111
1096 1112 node = revlog.node(rev)
1097 1113 p1r, p2r = revlog.parentrevs(rev)
1098 1114 p1 = revlog.node(p1r)
1099 1115 p2 = revlog.node(p2r)
1100 1116 full_text = revlog.revision(rev)
1101 1117 textlen = len(full_text)
1102 1118 cachedelta = None
1103 1119 flags = revlog.flags(rev)
1104 1120
1105 1121 revinfo = revlogutils.revisioninfo(
1106 1122 node,
1107 1123 p1,
1108 1124 p2,
1109 1125 [full_text], # btext
1110 1126 textlen,
1111 1127 cachedelta,
1112 1128 flags,
1113 1129 )
1114 1130
1115 1131 # Note: we should probably purge the potential caches (like the full
1116 1132 # manifest cache) between runs.
1117 1133 def find_one():
1118 1134 with revlog._datafp() as fh:
1119 1135 deltacomputer.finddeltainfo(revinfo, fh, target_rev=rev)
1120 1136
1121 1137 timer(find_one)
1122 1138 fm.end()
1123 1139
1124 1140
1125 1141 @command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
1126 1142 def perfdiscovery(ui, repo, path, **opts):
1127 1143 """benchmark discovery between local repo and the peer at given path"""
1128 1144 repos = [repo, None]
1129 1145 timer, fm = gettimer(ui, opts)
1130 1146
1131 1147 try:
1132 1148 from mercurial.utils.urlutil import get_unique_pull_path_obj
1133 1149
1134 1150 path = get_unique_pull_path_obj(b'perfdiscovery', ui, path)
1135 1151 except ImportError:
1136 1152 try:
1137 1153 from mercurial.utils.urlutil import get_unique_pull_path
1138 1154
1139 1155 path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
1140 1156 except ImportError:
1141 1157 path = ui.expandpath(path)
1142 1158
1143 1159 def s():
1144 1160 repos[1] = hg.peer(ui, opts, path)
1145 1161
1146 1162 def d():
1147 1163 setdiscovery.findcommonheads(ui, *repos)
1148 1164
1149 1165 timer(d, setup=s)
1150 1166 fm.end()
1151 1167
1152 1168
1153 1169 @command(
1154 1170 b'perf::bookmarks|perfbookmarks',
1155 1171 formatteropts
1156 1172 + [
1157 1173 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
1158 1174 ],
1159 1175 )
1160 1176 def perfbookmarks(ui, repo, **opts):
1161 1177 """benchmark parsing bookmarks from disk to memory"""
1162 1178 opts = _byteskwargs(opts)
1163 1179 timer, fm = gettimer(ui, opts)
1164 1180
1165 1181 clearrevlogs = opts[b'clear_revlogs']
1166 1182
1167 1183 def s():
1168 1184 if clearrevlogs:
1169 1185 clearchangelog(repo)
1170 1186 clearfilecache(repo, b'_bookmarks')
1171 1187
1172 1188 def d():
1173 1189 repo._bookmarks
1174 1190
1175 1191 timer(d, setup=s)
1176 1192 fm.end()
1177 1193
1178 1194
1179 1195 @command(
1180 1196 b'perf::bundle',
1181 1197 [
1182 1198 (
1183 1199 b'r',
1184 1200 b'rev',
1185 1201 [],
1186 1202 b'changesets to bundle',
1187 1203 b'REV',
1188 1204 ),
1189 1205 (
1190 1206 b't',
1191 1207 b'type',
1192 1208 b'none',
1193 1209 b'bundlespec to use (see `hg help bundlespec`)',
1194 1210 b'TYPE',
1195 1211 ),
1196 1212 ]
1197 1213 + formatteropts,
1198 1214 b'REVS',
1199 1215 )
1200 1216 def perfbundle(ui, repo, *revs, **opts):
1201 1217 """benchmark the creation of a bundle from a repository
1202 1218
1203 1219 For now, this only supports "none" compression.
1204 1220 """
1205 1221 try:
1206 1222 from mercurial import bundlecaches
1207 1223
1208 1224 parsebundlespec = bundlecaches.parsebundlespec
1209 1225 except ImportError:
1210 1226 from mercurial import exchange
1211 1227
1212 1228 parsebundlespec = exchange.parsebundlespec
1213 1229
1214 1230 from mercurial import discovery
1215 1231 from mercurial import bundle2
1216 1232
1217 1233 opts = _byteskwargs(opts)
1218 1234 timer, fm = gettimer(ui, opts)
1219 1235
1220 1236 cl = repo.changelog
1221 1237 revs = list(revs)
1222 1238 revs.extend(opts.get(b'rev', ()))
1223 1239 revs = scmutil.revrange(repo, revs)
1224 1240 if not revs:
1225 1241 raise error.Abort(b"no revision specified")
1226 1242 # make it a consistent set (i.e. without topological gaps)
1227 1243 old_len = len(revs)
1228 1244 revs = list(repo.revs(b"%ld::%ld", revs, revs))
1229 1245 if old_len != len(revs):
1230 1246 new_count = len(revs) - old_len
1231 1247 msg = b"add %d new revisions to make it a consistent set\n"
1232 1248 ui.write_err(msg % new_count)
1233 1249
1234 1250 targets = [cl.node(r) for r in repo.revs(b"heads(::%ld)", revs)]
1235 1251 bases = [cl.node(r) for r in repo.revs(b"heads(::%ld - %ld)", revs, revs)]
1236 1252 outgoing = discovery.outgoing(repo, bases, targets)
1237 1253
1238 1254 bundle_spec = opts.get(b'type')
1239 1255
1240 1256 bundle_spec = parsebundlespec(repo, bundle_spec, strict=False)
1241 1257
1242 1258 cgversion = bundle_spec.params.get(b"cg.version")
1243 1259 if cgversion is None:
1244 1260 if bundle_spec.version == b'v1':
1245 1261 cgversion = b'01'
1246 1262 if bundle_spec.version == b'v2':
1247 1263 cgversion = b'02'
1248 1264 if cgversion not in changegroup.supportedoutgoingversions(repo):
1249 1265 err = b"repository does not support bundle version %s"
1250 1266 raise error.Abort(err % cgversion)
1251 1267
1252 1268 if cgversion == b'01': # bundle1
1253 1269 bversion = b'HG10' + bundle_spec.wirecompression
1254 1270 bcompression = None
1255 1271 elif cgversion in (b'02', b'03'):
1256 1272 bversion = b'HG20'
1257 1273 bcompression = bundle_spec.wirecompression
1258 1274 else:
1259 1275 err = b'perf::bundle: unexpected changegroup version %s'
1260 1276 raise error.ProgrammingError(err % cgversion)
1261 1277
1262 1278 if bcompression is None:
1263 1279 bcompression = b'UN'
1264 1280
1265 1281 if bcompression != b'UN':
1266 1282 err = b'perf::bundle: compression currently unsupported: %s'
1267 1283 raise error.ProgrammingError(err % bcompression)
1268 1284
1269 1285 def do_bundle():
1270 1286 bundle2.writenewbundle(
1271 1287 ui,
1272 1288 repo,
1273 1289 b'perf::bundle',
1274 1290 os.devnull,
1275 1291 bversion,
1276 1292 outgoing,
1277 1293 bundle_spec.params,
1278 1294 )
1279 1295
1280 1296 timer(do_bundle)
1281 1297 fm.end()
1282 1298
1283 1299
1284 1300 @command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
1285 1301 def perfbundleread(ui, repo, bundlepath, **opts):
1286 1302 """Benchmark reading of bundle files.
1287 1303
1288 1304 This command is meant to isolate the I/O part of bundle reading as
1289 1305 much as possible.
1290 1306 """
1291 1307 from mercurial import (
1292 1308 bundle2,
1293 1309 exchange,
1294 1310 streamclone,
1295 1311 )
1296 1312
1297 1313 opts = _byteskwargs(opts)
1298 1314
1299 1315 def makebench(fn):
1300 1316 def run():
1301 1317 with open(bundlepath, b'rb') as fh:
1302 1318 bundle = exchange.readbundle(ui, fh, bundlepath)
1303 1319 fn(bundle)
1304 1320
1305 1321 return run
1306 1322
1307 1323 def makereadnbytes(size):
1308 1324 def run():
1309 1325 with open(bundlepath, b'rb') as fh:
1310 1326 bundle = exchange.readbundle(ui, fh, bundlepath)
1311 1327 while bundle.read(size):
1312 1328 pass
1313 1329
1314 1330 return run
1315 1331
1316 1332 def makestdioread(size):
1317 1333 def run():
1318 1334 with open(bundlepath, b'rb') as fh:
1319 1335 while fh.read(size):
1320 1336 pass
1321 1337
1322 1338 return run
1323 1339
1324 1340 # bundle1
1325 1341
1326 1342 def deltaiter(bundle):
1327 1343 for delta in bundle.deltaiter():
1328 1344 pass
1329 1345
1330 1346 def iterchunks(bundle):
1331 1347 for chunk in bundle.getchunks():
1332 1348 pass
1333 1349
1334 1350 # bundle2
1335 1351
1336 1352 def forwardchunks(bundle):
1337 1353 for chunk in bundle._forwardchunks():
1338 1354 pass
1339 1355
1340 1356 def iterparts(bundle):
1341 1357 for part in bundle.iterparts():
1342 1358 pass
1343 1359
1344 1360 def iterpartsseekable(bundle):
1345 1361 for part in bundle.iterparts(seekable=True):
1346 1362 pass
1347 1363
1348 1364 def seek(bundle):
1349 1365 for part in bundle.iterparts(seekable=True):
1350 1366 part.seek(0, os.SEEK_END)
1351 1367
1352 1368 def makepartreadnbytes(size):
1353 1369 def run():
1354 1370 with open(bundlepath, b'rb') as fh:
1355 1371 bundle = exchange.readbundle(ui, fh, bundlepath)
1356 1372 for part in bundle.iterparts():
1357 1373 while part.read(size):
1358 1374 pass
1359 1375
1360 1376 return run
1361 1377
1362 1378 benches = [
1363 1379 (makestdioread(8192), b'read(8k)'),
1364 1380 (makestdioread(16384), b'read(16k)'),
1365 1381 (makestdioread(32768), b'read(32k)'),
1366 1382 (makestdioread(131072), b'read(128k)'),
1367 1383 ]
1368 1384
1369 1385 with open(bundlepath, b'rb') as fh:
1370 1386 bundle = exchange.readbundle(ui, fh, bundlepath)
1371 1387
1372 1388 if isinstance(bundle, changegroup.cg1unpacker):
1373 1389 benches.extend(
1374 1390 [
1375 1391 (makebench(deltaiter), b'cg1 deltaiter()'),
1376 1392 (makebench(iterchunks), b'cg1 getchunks()'),
1377 1393 (makereadnbytes(8192), b'cg1 read(8k)'),
1378 1394 (makereadnbytes(16384), b'cg1 read(16k)'),
1379 1395 (makereadnbytes(32768), b'cg1 read(32k)'),
1380 1396 (makereadnbytes(131072), b'cg1 read(128k)'),
1381 1397 ]
1382 1398 )
1383 1399 elif isinstance(bundle, bundle2.unbundle20):
1384 1400 benches.extend(
1385 1401 [
1386 1402 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
1387 1403 (makebench(iterparts), b'bundle2 iterparts()'),
1388 1404 (
1389 1405 makebench(iterpartsseekable),
1390 1406 b'bundle2 iterparts() seekable',
1391 1407 ),
1392 1408 (makebench(seek), b'bundle2 part seek()'),
1393 1409 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
1394 1410 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
1395 1411 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
1396 1412 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
1397 1413 ]
1398 1414 )
1399 1415 elif isinstance(bundle, streamclone.streamcloneapplier):
1400 1416 raise error.Abort(b'stream clone bundles not supported')
1401 1417 else:
1402 1418 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
1403 1419
1404 1420 for fn, title in benches:
1405 1421 timer, fm = gettimer(ui, opts)
1406 1422 timer(fn, title=title)
1407 1423 fm.end()
1408 1424
1409 1425
1410 1426 @command(
1411 1427 b'perf::changegroupchangelog|perfchangegroupchangelog',
1412 1428 formatteropts
1413 1429 + [
1414 1430 (b'', b'cgversion', b'02', b'changegroup version'),
1415 1431 (b'r', b'rev', b'', b'revisions to add to changegroup'),
1416 1432 ],
1417 1433 )
1418 1434 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
1419 1435 """Benchmark producing a changelog group for a changegroup.
1420 1436
1421 1437 This measures the time spent processing the changelog during a
1422 1438 bundle operation. This occurs during `hg bundle` and on a server
1423 1439 processing a `getbundle` wire protocol request (handles clones
1424 1440 and pull requests).
1425 1441
1426 1442 By default, all revisions are added to the changegroup.
1427 1443 """
1428 1444 opts = _byteskwargs(opts)
1429 1445 cl = repo.changelog
1430 1446 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
1431 1447 bundler = changegroup.getbundler(cgversion, repo)
1432 1448
1433 1449 def d():
1434 1450 state, chunks = bundler._generatechangelog(cl, nodes)
1435 1451 for chunk in chunks:
1436 1452 pass
1437 1453
1438 1454 timer, fm = gettimer(ui, opts)
1439 1455
1440 1456 # Terminal printing can interfere with timing. So disable it.
1441 1457 with ui.configoverride({(b'progress', b'disable'): True}):
1442 1458 timer(d)
1443 1459
1444 1460 fm.end()
1445 1461
1446 1462
1447 1463 @command(b'perf::dirs|perfdirs', formatteropts)
1448 1464 def perfdirs(ui, repo, **opts):
1449 1465 opts = _byteskwargs(opts)
1450 1466 timer, fm = gettimer(ui, opts)
1451 1467 dirstate = repo.dirstate
1452 1468 b'a' in dirstate
1453 1469
1454 1470 def d():
1455 1471 dirstate.hasdir(b'a')
1456 1472 try:
1457 1473 del dirstate._map._dirs
1458 1474 except AttributeError:
1459 1475 pass
1460 1476
1461 1477 timer(d)
1462 1478 fm.end()
1463 1479
1464 1480
1465 1481 @command(
1466 1482 b'perf::dirstate|perfdirstate',
1467 1483 [
1468 1484 (
1469 1485 b'',
1470 1486 b'iteration',
1471 1487 None,
1472 1488 b'benchmark a full iteration for the dirstate',
1473 1489 ),
1474 1490 (
1475 1491 b'',
1476 1492 b'contains',
1477 1493 None,
1478 1494 b'benchmark a large amount of `nf in dirstate` calls',
1479 1495 ),
1480 1496 ]
1481 1497 + formatteropts,
1482 1498 )
1483 1499 def perfdirstate(ui, repo, **opts):
1484 1500 """benchmap the time of various distate operations
1485 1501
1486 1502 By default, benchmark the time necessary to load a dirstate from scratch.
1487 1503 The dirstate is loaded to the point where a "contains" request can be
1488 1504 answered.
1489 1505 """
1490 1506 opts = _byteskwargs(opts)
1491 1507 timer, fm = gettimer(ui, opts)
1492 1508 b"a" in repo.dirstate
1493 1509
1494 1510 if opts[b'iteration'] and opts[b'contains']:
1495 1511 msg = b'only specify one of --iteration or --contains'
1496 1512 raise error.Abort(msg)
1497 1513
1498 1514 if opts[b'iteration']:
1499 1515 setup = None
1500 1516 dirstate = repo.dirstate
1501 1517
1502 1518 def d():
1503 1519 for f in dirstate:
1504 1520 pass
1505 1521
1506 1522 elif opts[b'contains']:
1507 1523 setup = None
1508 1524 dirstate = repo.dirstate
1509 1525 allfiles = list(dirstate)
1510 1526 # also add file path that will be "missing" from the dirstate
1511 1527 allfiles.extend([f[::-1] for f in allfiles])
1512 1528
1513 1529 def d():
1514 1530 for f in allfiles:
1515 1531 f in dirstate
1516 1532
1517 1533 else:
1518 1534
1519 1535 def setup():
1520 1536 repo.dirstate.invalidate()
1521 1537
1522 1538 def d():
1523 1539 b"a" in repo.dirstate
1524 1540
1525 1541 timer(d, setup=setup)
1526 1542 fm.end()
1527 1543
1528 1544
1529 1545 @command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
1530 1546 def perfdirstatedirs(ui, repo, **opts):
1531 1547 """benchmap a 'dirstate.hasdir' call from an empty `dirs` cache"""
1532 1548 opts = _byteskwargs(opts)
1533 1549 timer, fm = gettimer(ui, opts)
1534 1550 repo.dirstate.hasdir(b"a")
1535 1551
1536 1552 def setup():
1537 1553 try:
1538 1554 del repo.dirstate._map._dirs
1539 1555 except AttributeError:
1540 1556 pass
1541 1557
1542 1558 def d():
1543 1559 repo.dirstate.hasdir(b"a")
1544 1560
1545 1561 timer(d, setup=setup)
1546 1562 fm.end()
1547 1563
1548 1564
1549 1565 @command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
1550 1566 def perfdirstatefoldmap(ui, repo, **opts):
1551 1567 """benchmap a `dirstate._map.filefoldmap.get()` request
1552 1568
1553 1569 The dirstate filefoldmap cache is dropped between every request.
1554 1570 """
1555 1571 opts = _byteskwargs(opts)
1556 1572 timer, fm = gettimer(ui, opts)
1557 1573 dirstate = repo.dirstate
1558 1574 dirstate._map.filefoldmap.get(b'a')
1559 1575
1560 1576 def setup():
1561 1577 del dirstate._map.filefoldmap
1562 1578
1563 1579 def d():
1564 1580 dirstate._map.filefoldmap.get(b'a')
1565 1581
1566 1582 timer(d, setup=setup)
1567 1583 fm.end()
1568 1584
1569 1585
1570 1586 @command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
1571 1587 def perfdirfoldmap(ui, repo, **opts):
1572 1588 """benchmap a `dirstate._map.dirfoldmap.get()` request
1573 1589
1574 1590 The dirstate dirfoldmap cache is dropped between every request.
1575 1591 """
1576 1592 opts = _byteskwargs(opts)
1577 1593 timer, fm = gettimer(ui, opts)
1578 1594 dirstate = repo.dirstate
1579 1595 dirstate._map.dirfoldmap.get(b'a')
1580 1596
1581 1597 def setup():
1582 1598 del dirstate._map.dirfoldmap
1583 1599 try:
1584 1600 del dirstate._map._dirs
1585 1601 except AttributeError:
1586 1602 pass
1587 1603
1588 1604 def d():
1589 1605 dirstate._map.dirfoldmap.get(b'a')
1590 1606
1591 1607 timer(d, setup=setup)
1592 1608 fm.end()
1593 1609
1594 1610
1595 1611 @command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
1596 1612 def perfdirstatewrite(ui, repo, **opts):
1597 1613 """benchmap the time it take to write a dirstate on disk"""
1598 1614 opts = _byteskwargs(opts)
1599 1615 timer, fm = gettimer(ui, opts)
1600 1616 ds = repo.dirstate
1601 1617 b"a" in ds
1602 1618
1603 1619 def setup():
1604 1620 ds._dirty = True
1605 1621
1606 1622 def d():
1607 1623 ds.write(repo.currenttransaction())
1608 1624
1609 1625 with repo.wlock():
1610 1626 timer(d, setup=setup)
1611 1627 fm.end()
1612 1628
1613 1629
1614 1630 def _getmergerevs(repo, opts):
1615 1631 """parse command argument to return rev involved in merge
1616 1632
1617 1633 input: options dictionary with `rev`, `from` and `base`
1618 1634 output: (localctx, otherctx, basectx)
1619 1635 """
1620 1636 if opts[b'from']:
1621 1637 fromrev = scmutil.revsingle(repo, opts[b'from'])
1622 1638 wctx = repo[fromrev]
1623 1639 else:
1624 1640 wctx = repo[None]
1625 1641 # we don't want working dir files to be stat'd in the benchmark, so
1626 1642 # prime that cache
1627 1643 wctx.dirty()
1628 1644 rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
1629 1645 if opts[b'base']:
1630 1646 fromrev = scmutil.revsingle(repo, opts[b'base'])
1631 1647 ancestor = repo[fromrev]
1632 1648 else:
1633 1649 ancestor = wctx.ancestor(rctx)
1634 1650 return (wctx, rctx, ancestor)
1635 1651
1636 1652
1637 1653 @command(
1638 1654 b'perf::mergecalculate|perfmergecalculate',
1639 1655 [
1640 1656 (b'r', b'rev', b'.', b'rev to merge against'),
1641 1657 (b'', b'from', b'', b'rev to merge from'),
1642 1658 (b'', b'base', b'', b'the revision to use as base'),
1643 1659 ]
1644 1660 + formatteropts,
1645 1661 )
1646 1662 def perfmergecalculate(ui, repo, **opts):
1647 1663 opts = _byteskwargs(opts)
1648 1664 timer, fm = gettimer(ui, opts)
1649 1665
1650 1666 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1651 1667
1652 1668 def d():
1653 1669 # acceptremote is True because we don't want prompts in the middle of
1654 1670 # our benchmark
1655 1671 merge.calculateupdates(
1656 1672 repo,
1657 1673 wctx,
1658 1674 rctx,
1659 1675 [ancestor],
1660 1676 branchmerge=False,
1661 1677 force=False,
1662 1678 acceptremote=True,
1663 1679 followcopies=True,
1664 1680 )
1665 1681
1666 1682 timer(d)
1667 1683 fm.end()
1668 1684
1669 1685
1670 1686 @command(
1671 1687 b'perf::mergecopies|perfmergecopies',
1672 1688 [
1673 1689 (b'r', b'rev', b'.', b'rev to merge against'),
1674 1690 (b'', b'from', b'', b'rev to merge from'),
1675 1691 (b'', b'base', b'', b'the revision to use as base'),
1676 1692 ]
1677 1693 + formatteropts,
1678 1694 )
1679 1695 def perfmergecopies(ui, repo, **opts):
1680 1696 """measure runtime of `copies.mergecopies`"""
1681 1697 opts = _byteskwargs(opts)
1682 1698 timer, fm = gettimer(ui, opts)
1683 1699 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1684 1700
1685 1701 def d():
1686 1702 # acceptremote is True because we don't want prompts in the middle of
1687 1703 # our benchmark
1688 1704 copies.mergecopies(repo, wctx, rctx, ancestor)
1689 1705
1690 1706 timer(d)
1691 1707 fm.end()
1692 1708
1693 1709
1694 1710 @command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
1695 1711 def perfpathcopies(ui, repo, rev1, rev2, **opts):
1696 1712 """benchmark the copy tracing logic"""
1697 1713 opts = _byteskwargs(opts)
1698 1714 timer, fm = gettimer(ui, opts)
1699 1715 ctx1 = scmutil.revsingle(repo, rev1, rev1)
1700 1716 ctx2 = scmutil.revsingle(repo, rev2, rev2)
1701 1717
1702 1718 def d():
1703 1719 copies.pathcopies(ctx1, ctx2)
1704 1720
1705 1721 timer(d)
1706 1722 fm.end()
1707 1723
1708 1724
1709 1725 @command(
1710 1726 b'perf::phases|perfphases',
1711 1727 [
1712 1728 (b'', b'full', False, b'include file reading time too'),
1713 1729 ]
1714 1730 + formatteropts,
1715 1731 b"",
1716 1732 )
1717 1733 def perfphases(ui, repo, **opts):
1718 1734 """benchmark phasesets computation"""
1719 1735 opts = _byteskwargs(opts)
1720 1736 timer, fm = gettimer(ui, opts)
1721 1737 _phases = repo._phasecache
1722 1738 full = opts.get(b'full')
1723 1739 tip_rev = repo.changelog.tiprev()
1724 1740
1725 1741 def d():
1726 1742 phases = _phases
1727 1743 if full:
1728 1744 clearfilecache(repo, b'_phasecache')
1729 1745 phases = repo._phasecache
1730 1746 phases.invalidate()
1731 1747 phases.phase(repo, tip_rev)
1732 1748
1733 1749 timer(d)
1734 1750 fm.end()
1735 1751
1736 1752
1737 1753 @command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
1738 1754 def perfphasesremote(ui, repo, dest=None, **opts):
1739 1755 """benchmark time needed to analyse phases of the remote server"""
1740 1756 from mercurial.node import bin
1741 1757 from mercurial import (
1742 1758 exchange,
1743 1759 hg,
1744 1760 phases,
1745 1761 )
1746 1762
1747 1763 opts = _byteskwargs(opts)
1748 1764 timer, fm = gettimer(ui, opts)
1749 1765
1750 1766 path = ui.getpath(dest, default=(b'default-push', b'default'))
1751 1767 if not path:
1752 1768 raise error.Abort(
1753 1769 b'default repository not configured!',
1754 1770 hint=b"see 'hg help config.paths'",
1755 1771 )
1756 1772 if util.safehasattr(path, 'main_path'):
1757 1773 path = path.get_push_variant()
1758 1774 dest = path.loc
1759 1775 else:
1760 1776 dest = path.pushloc or path.loc
1761 1777 ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
1762 1778 other = hg.peer(repo, opts, dest)
1763 1779
1764 1780 # easier to perform discovery through the operation
1765 1781 op = exchange.pushoperation(repo, other)
1766 1782 exchange._pushdiscoverychangeset(op)
1767 1783
1768 1784 remotesubset = op.fallbackheads
1769 1785
1770 1786 with other.commandexecutor() as e:
1771 1787 remotephases = e.callcommand(
1772 1788 b'listkeys', {b'namespace': b'phases'}
1773 1789 ).result()
1774 1790 del other
1775 1791 publishing = remotephases.get(b'publishing', False)
1776 1792 if publishing:
1777 1793 ui.statusnoi18n(b'publishing: yes\n')
1778 1794 else:
1779 1795 ui.statusnoi18n(b'publishing: no\n')
1780 1796
1781 1797 has_node = getattr(repo.changelog.index, 'has_node', None)
1782 1798 if has_node is None:
1783 1799 has_node = repo.changelog.nodemap.__contains__
1784 1800 nonpublishroots = 0
1785 1801 for nhex, phase in remotephases.iteritems():
1786 1802 if nhex == b'publishing': # ignore data related to publish option
1787 1803 continue
1788 1804 node = bin(nhex)
1789 1805 if has_node(node) and int(phase):
1790 1806 nonpublishroots += 1
1791 1807 ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
1792 1808 ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)
1793 1809
1794 1810 def d():
1795 1811 phases.remotephasessummary(repo, remotesubset, remotephases)
1796 1812
1797 1813 timer(d)
1798 1814 fm.end()
1799 1815
1800 1816
1801 1817 @command(
1802 1818 b'perf::manifest|perfmanifest',
1803 1819 [
1804 1820 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
1805 1821 (b'', b'clear-disk', False, b'clear on-disk caches too'),
1806 1822 ]
1807 1823 + formatteropts,
1808 1824 b'REV|NODE',
1809 1825 )
1810 1826 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
1811 1827 """benchmark the time to read a manifest from disk and return a usable
1812 1828 dict-like object
1813 1829
1814 1830 Manifest caches are cleared before retrieval."""
1815 1831 opts = _byteskwargs(opts)
1816 1832 timer, fm = gettimer(ui, opts)
1817 1833 if not manifest_rev:
1818 1834 ctx = scmutil.revsingle(repo, rev, rev)
1819 1835 t = ctx.manifestnode()
1820 1836 else:
1821 1837 from mercurial.node import bin
1822 1838
1823 1839 if len(rev) == 40:
1824 1840 t = bin(rev)
1825 1841 else:
1826 1842 try:
1827 1843 rev = int(rev)
1828 1844
1829 1845 if util.safehasattr(repo.manifestlog, b'getstorage'):
1830 1846 t = repo.manifestlog.getstorage(b'').node(rev)
1831 1847 else:
1832 1848 t = repo.manifestlog._revlog.lookup(rev)
1833 1849 except ValueError:
1834 1850 raise error.Abort(
1835 1851 b'manifest revision must be integer or full node'
1836 1852 )
1837 1853
1838 1854 def d():
1839 1855 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
1840 1856 repo.manifestlog[t].read()
1841 1857
1842 1858 timer(d)
1843 1859 fm.end()
1844 1860
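# Illustrative invocations of the command above, with and without clearing the
# on-disk caches:
#
#   $ hg perf::manifest tip
#   $ hg perf::manifest --clear-disk tip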
1845 1861
1846 1862 @command(b'perf::changeset|perfchangeset', formatteropts)
1847 1863 def perfchangeset(ui, repo, rev, **opts):
1848 1864 opts = _byteskwargs(opts)
1849 1865 timer, fm = gettimer(ui, opts)
1850 1866 n = scmutil.revsingle(repo, rev).node()
1851 1867
1852 1868 def d():
1853 1869 repo.changelog.read(n)
1854 1870 # repo.changelog._cache = None
1855 1871
1856 1872 timer(d)
1857 1873 fm.end()
1858 1874
1859 1875
1860 1876 @command(b'perf::ignore|perfignore', formatteropts)
1861 1877 def perfignore(ui, repo, **opts):
1862 1878 """benchmark operation related to computing ignore"""
1863 1879 opts = _byteskwargs(opts)
1864 1880 timer, fm = gettimer(ui, opts)
1865 1881 dirstate = repo.dirstate
1866 1882
1867 1883 def setupone():
1868 1884 dirstate.invalidate()
1869 1885 clearfilecache(dirstate, b'_ignore')
1870 1886
1871 1887 def runone():
1872 1888 dirstate._ignore
1873 1889
1874 1890 timer(runone, setup=setupone, title=b"load")
1875 1891 fm.end()
1876 1892
1877 1893
1878 1894 @command(
1879 1895 b'perf::index|perfindex',
1880 1896 [
1881 1897 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1882 1898 (b'', b'no-lookup', None, b'do not revision lookup post creation'),
1883 1899 ]
1884 1900 + formatteropts,
1885 1901 )
1886 1902 def perfindex(ui, repo, **opts):
1887 1903 """benchmark index creation time followed by a lookup
1888 1904
1889 1905 The default is to look `tip` up. Depending on the index implementation,
1890 1906 the revision looked up can matter. For example, an implementation
1891 1907 scanning the index will have a faster lookup time for `--rev tip` than for
1892 1908 `--rev 0`. The number of revisions looked up and their order can also
1893 1909 matter.
1894 1910
1895 1911 Examples of useful sets to test:
1896 1912
1897 1913 * tip
1898 1914 * 0
1899 1915 * -10:
1900 1916 * :10
1901 1917 * -10: + :10
1902 1918 * :10: + -10:
1903 1919 * -10000:
1904 1920 * -10000: + 0
1905 1921
1906 1922 It is not currently possible to check for lookup of a missing node. For
1907 1923 deeper lookup benchmarking, check out the `perfnodemap` command."""
1908 1924 import mercurial.revlog
1909 1925
1910 1926 opts = _byteskwargs(opts)
1911 1927 timer, fm = gettimer(ui, opts)
1912 1928 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1913 1929 if opts[b'no_lookup']:
1914 1930 if opts['rev']:
1915 1931 raise error.Abort('--no-lookup and --rev are mutually exclusive')
1916 1932 nodes = []
1917 1933 elif not opts[b'rev']:
1918 1934 nodes = [repo[b"tip"].node()]
1919 1935 else:
1920 1936 revs = scmutil.revrange(repo, opts[b'rev'])
1921 1937 cl = repo.changelog
1922 1938 nodes = [cl.node(r) for r in revs]
1923 1939
1924 1940 unfi = repo.unfiltered()
1925 1941 # find the filecache func directly
1926 1942 # This avoids polluting the benchmark with the filecache logic
1927 1943 makecl = unfi.__class__.changelog.func
1928 1944
1929 1945 def setup():
1930 1946 # probably not necessary, but for good measure
1931 1947 clearchangelog(unfi)
1932 1948
1933 1949 def d():
1934 1950 cl = makecl(unfi)
1935 1951 for n in nodes:
1936 1952 cl.rev(n)
1937 1953
1938 1954 timer(d, setup=setup)
1939 1955 fm.end()
1940 1956
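# Illustrative invocations of the command above, using revsets suggested in
# its docstring:
#
#   $ hg perf::index --rev tip
#   $ hg perf::index --rev '-10:' --rev ':10'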
1941 1957
1942 1958 @command(
1943 1959 b'perf::nodemap|perfnodemap',
1944 1960 [
1945 1961 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1946 1962 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1947 1963 ]
1948 1964 + formatteropts,
1949 1965 )
1950 1966 def perfnodemap(ui, repo, **opts):
1951 1967 """benchmark the time necessary to look up revision from a cold nodemap
1952 1968
1953 1969 Depending on the implementation, the number and order of revisions we look
1954 1970 up can vary. Examples of useful sets to test:
1955 1971 * tip
1956 1972 * 0
1957 1973 * -10:
1958 1974 * :10
1959 1975 * -10: + :10
1960 1976 * :10: + -10:
1961 1977 * -10000:
1962 1978 * -10000: + 0
1963 1979
1964 1980 The command currently focuses on valid binary lookup. Benchmarking for
1965 1981 hexlookup, prefix lookup and missing lookup would also be valuable.
1966 1982 """
1967 1983 import mercurial.revlog
1968 1984
1969 1985 opts = _byteskwargs(opts)
1970 1986 timer, fm = gettimer(ui, opts)
1971 1987 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1972 1988
1973 1989 unfi = repo.unfiltered()
1974 1990 clearcaches = opts[b'clear_caches']
1975 1991 # find the filecache func directly
1976 1992 # This avoids polluting the benchmark with the filecache logic
1977 1993 makecl = unfi.__class__.changelog.func
1978 1994 if not opts[b'rev']:
1979 1995 raise error.Abort(b'use --rev to specify revisions to look up')
1980 1996 revs = scmutil.revrange(repo, opts[b'rev'])
1981 1997 cl = repo.changelog
1982 1998 nodes = [cl.node(r) for r in revs]
1983 1999
1984 2000 # use a list to pass a reference to the nodemap from one closure to the next
1985 2001 nodeget = [None]
1986 2002
1987 2003 def setnodeget():
1988 2004 # probably not necessary, but for good measure
1989 2005 clearchangelog(unfi)
1990 2006 cl = makecl(unfi)
1991 2007 if util.safehasattr(cl.index, 'get_rev'):
1992 2008 nodeget[0] = cl.index.get_rev
1993 2009 else:
1994 2010 nodeget[0] = cl.nodemap.get
1995 2011
1996 2012 def d():
1997 2013 get = nodeget[0]
1998 2014 for n in nodes:
1999 2015 get(n)
2000 2016
2001 2017 setup = None
2002 2018 if clearcaches:
2003 2019
2004 2020 def setup():
2005 2021 setnodeget()
2006 2022
2007 2023 else:
2008 2024 setnodeget()
2009 2025 d() # prewarm the data structure
2010 2026 timer(d, setup=setup)
2011 2027 fm.end()
2012 2028
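# Illustrative invocation of the command above (--rev is mandatory here):
#
#   $ hg perf::nodemap --rev tip --rev '-10:'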
2013 2029
2014 2030 @command(b'perf::startup|perfstartup', formatteropts)
2015 2031 def perfstartup(ui, repo, **opts):
2016 2032 opts = _byteskwargs(opts)
2017 2033 timer, fm = gettimer(ui, opts)
2018 2034
2019 2035 def d():
2020 2036 if os.name != 'nt':
2021 2037 os.system(
2022 2038 b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
2023 2039 )
2024 2040 else:
2025 2041 os.environ['HGRCPATH'] = r' '
2026 2042 os.system("%s version -q > NUL" % sys.argv[0])
2027 2043
2028 2044 timer(d)
2029 2045 fm.end()
2030 2046
2031 2047
2032 2048 def _find_stream_generator(version):
2033 2049 """find the proper generator function for this stream version"""
2034 2050 import mercurial.streamclone
2035 2051
2036 2052 available = {}
2037 2053
2038 2054 # try to fetch a v1 generator
2039 2055 generatev1 = getattr(mercurial.streamclone, "generatev1", None)
2040 2056 if generatev1 is not None:
2041 2057
2042 2058 def generate(repo):
2043 2059 entries, bytes, data = generatev1(repo, None, None, True)
2044 2060 return data
2045 2061
2046 2062 available[b'v1'] = generate  # the wrapper, for consistency with v2/v3
2047 2063 # try to fetch a v2 generator
2048 2064 generatev2 = getattr(mercurial.streamclone, "generatev2", None)
2049 2065 if generatev2 is not None:
2050 2066
2051 2067 def generate(repo):
2052 2068 entries, bytes, data = generatev2(repo, None, None, True)
2053 2069 return data
2054 2070
2055 2071 available[b'v2'] = generate
2056 2072 # try to fetch a v3 generator
2057 2073 generatev3 = getattr(mercurial.streamclone, "generatev3", None)
2058 2074 if generatev3 is not None:
2059 2075
2060 2076 def generate(repo):
2061 2077 return generatev3(repo, None, None, True)
2062 2078
2063 2079 available[b'v3-exp'] = generate
2064 2080
2065 2081 # resolve the request
2066 2082 if version == b"latest":
2067 2083 # latest is the highest non-experimental version
2068 2084 latest_key = max(v for v in available if b'-exp' not in v)
2069 2085 return available[latest_key]
2070 2086 elif version in available:
2071 2087 return available[version]
2072 2088 else:
2073 2089 msg = b"unkown or unavailable version: %s"
2074 2090 msg %= version
2075 2091 hint = b"available versions: %s"
2076 2092 hint %= b', '.join(sorted(available))
2077 2093 raise error.Abort(msg, hint=hint)
2078 2094
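# Resolution example for the helper above: on a Mercurial exposing all three
# generators, _find_stream_generator(b'latest') picks b'v2', because b'v3-exp'
# is filtered out as experimental and b'v2' sorts above b'v1'.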
2079 2095
2080 2096 @command(
2081 2097 b'perf::stream-locked-section',
2082 2098 [
2083 2099 (
2084 2100 b'',
2085 2101 b'stream-version',
2086 2102 b'latest',
2087 2103 b'stream version to use ("v1", "v2", "v3-exp" '
2088 2104 b'or "latest", (the default))',
2089 2105 ),
2090 2106 ]
2091 2107 + formatteropts,
2092 2108 )
2093 2109 def perf_stream_clone_scan(ui, repo, stream_version, **opts):
2094 2110 """benchmark the initial, repo-locked, section of a stream-clone"""
2095 2111
2096 2112 opts = _byteskwargs(opts)
2097 2113 timer, fm = gettimer(ui, opts)
2098 2114
2099 2115 # deletion of the generator may trigger some cleanup that we do not want to
2100 2116 # measure
2101 2117 result_holder = [None]
2102 2118
2103 2119 def setupone():
2104 2120 result_holder[0] = None
2105 2121
2106 2122 generate = _find_stream_generator(stream_version)
2107 2123
2108 2124 def runone():
2109 2125 # the lock is held for the duration of the initialisation
2110 2126 result_holder[0] = generate(repo)
2111 2127
2112 2128 timer(runone, setup=setupone, title=b"load")
2113 2129 fm.end()
2114 2130
2115 2131
2116 2132 @command(
2117 2133 b'perf::stream-generate',
2118 2134 [
2119 2135 (
2120 2136 b'',
2121 2137 b'stream-version',
2122 2138 b'latest',
2123 2139 b'stream version to use ("v1", "v2", "v3-exp" '
2124 2140 b'or "latest" (the default))',
2125 2141 ),
2126 2142 ]
2127 2143 + formatteropts,
2128 2144 )
2129 2145 def perf_stream_clone_generate(ui, repo, stream_version, **opts):
2130 2146 """benchmark the full generation of a stream clone"""
2131 2147
2132 2148 opts = _byteskwargs(opts)
2133 2149 timer, fm = gettimer(ui, opts)
2134 2150
2135 2151 # deletion of the generator may trigger some cleanup that we do not want to
2136 2152 # measure
2137 2153
2138 2154 generate = _find_stream_generator(stream_version)
2139 2155
2140 2156 def runone():
2141 2157 # the lock is held for the duration of the initialisation
2142 2158 for chunk in generate(repo):
2143 2159 pass
2144 2160
2145 2161 timer(runone, title=b"generate")
2146 2162 fm.end()
2147 2163
2148 2164
2149 2165 @command(
2150 2166 b'perf::stream-consume',
2151 2167 formatteropts,
2152 2168 )
2153 2169 def perf_stream_clone_consume(ui, repo, filename, **opts):
2154 2170 """benchmark the full application of a stream clone
2155 2171
2156 2172 This includes the creation of the repository.
2157 2173 """
2158 2174 # try except to appease check code
2159 2175 msg = b"mercurial too old, missing necessary module: %s"
2160 2176 try:
2161 2177 from mercurial import bundle2
2162 2178 except ImportError as exc:
2163 2179 msg %= _bytestr(exc)
2164 2180 raise error.Abort(msg)
2165 2181 try:
2166 2182 from mercurial import exchange
2167 2183 except ImportError as exc:
2168 2184 msg %= _bytestr(exc)
2169 2185 raise error.Abort(msg)
2170 2186 try:
2171 2187 from mercurial import hg
2172 2188 except ImportError as exc:
2173 2189 msg %= _bytestr(exc)
2174 2190 raise error.Abort(msg)
2175 2191 try:
2176 2192 from mercurial import localrepo
2177 2193 except ImportError as exc:
2178 2194 msg %= _bytestr(exc)
2179 2195 raise error.Abort(msg)
2180 2196
2181 2197 opts = _byteskwargs(opts)
2182 2198 timer, fm = gettimer(ui, opts)
2183 2199
2184 2200 # deletion of the generator may trigger some cleanup that we do not want to
2185 2201 # measure
2186 2202 if not (os.path.isfile(filename) and os.access(filename, os.R_OK)):
2187 2203 raise error.Abort("not a readable file: %s" % filename)
2188 2204
2189 2205 run_variables = [None, None]
2190 2206
2191 2207 # we create the new repository next to the other one for two reasons:
2192 2208 # - this way we use the same file system, which is relevant for the benchmark
2193 2209 # - if /tmp/ is small, the operation could overfill it.
2194 2210 source_repo_dir = os.path.dirname(repo.root)
2195 2211
2196 2212 @contextlib.contextmanager
2197 2213 def context():
2198 2214 with open(filename, mode='rb') as bundle:
2199 2215 with tempfile.TemporaryDirectory(
2200 2216 prefix=b'hg-perf-stream-consume-',
2201 2217 dir=source_repo_dir,
2202 2218 ) as tmp_dir:
2203 2219 tmp_dir = fsencode(tmp_dir)
2204 2220 run_variables[0] = bundle
2205 2221 run_variables[1] = tmp_dir
2206 2222 yield
2207 2223 run_variables[0] = None
2208 2224 run_variables[1] = None
2209 2225
2210 2226 def runone():
2211 2227 bundle = run_variables[0]
2212 2228 tmp_dir = run_variables[1]
2213 2229
2214 2230 # we actually want to copy all the config to ensure the repo config is
2215 2231 # taken into account during the benchmark
2216 2232 new_ui = repo.ui.__class__(repo.ui)
2217 2233 # only pass ui when no srcrepo
2218 2234 localrepo.createrepository(
2219 2235 new_ui, tmp_dir, requirements=repo.requirements
2220 2236 )
2221 2237 target = hg.repository(new_ui, tmp_dir)
2222 2238 gen = exchange.readbundle(target.ui, bundle, bundle.name)
2223 2239 # stream v1
2224 2240 if util.safehasattr(gen, 'apply'):
2225 2241 gen.apply(target)
2226 2242 else:
2227 2243 with target.transaction(b"perf::stream-consume") as tr:
2228 2244 bundle2.applybundle(
2229 2245 target,
2230 2246 gen,
2231 2247 tr,
2232 2248 source=b'unbundle',
2233 2249 url=filename,
2234 2250 )
2235 2251
2236 2252 timer(runone, context=context, title=b"consume")
2237 2253 fm.end()
2238 2254
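# Illustrative invocation of the command above; `stream.hg` stands in for a
# previously generated stream bundle file:
#
#   $ hg perf::stream-consume stream.hg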
2239 2255
2240 2256 @command(b'perf::parents|perfparents', formatteropts)
2241 2257 def perfparents(ui, repo, **opts):
2242 2258 """benchmark the time necessary to fetch one changeset's parents.
2243 2259
2244 2260 The fetch is done using the `node identifier`, traversing all object layers
2245 2261 from the repository object. The first N revisions will be used for this
2246 2262 benchmark. N is controlled by the ``perf.parentscount`` config option
2247 2263 (default: 1000).
2248 2264 """
2249 2265 opts = _byteskwargs(opts)
2250 2266 timer, fm = gettimer(ui, opts)
2251 2267 # control the number of commits perfparents iterates over
2252 2268 # experimental config: perf.parentscount
2253 2269 count = getint(ui, b"perf", b"parentscount", 1000)
2254 2270 if len(repo.changelog) < count:
2255 2271 raise error.Abort(b"repo needs %d commits for this test" % count)
2256 2272 repo = repo.unfiltered()
2257 2273 nl = [repo.changelog.node(i) for i in _xrange(count)]
2258 2274
2259 2275 def d():
2260 2276 for n in nl:
2261 2277 repo.changelog.parents(n)
2262 2278
2263 2279 timer(d)
2264 2280 fm.end()
2265 2281
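# Illustrative invocation of the command above, lowering the revision count
# through the config option mentioned in its docstring:
#
#   $ hg perf::parents --config perf.parentscount=250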
2266 2282
2267 2283 @command(b'perf::ctxfiles|perfctxfiles', formatteropts)
2268 2284 def perfctxfiles(ui, repo, x, **opts):
2269 2285 opts = _byteskwargs(opts)
2270 2286 x = int(x)
2271 2287 timer, fm = gettimer(ui, opts)
2272 2288
2273 2289 def d():
2274 2290 len(repo[x].files())
2275 2291
2276 2292 timer(d)
2277 2293 fm.end()
2278 2294
2279 2295
2280 2296 @command(b'perf::rawfiles|perfrawfiles', formatteropts)
2281 2297 def perfrawfiles(ui, repo, x, **opts):
2282 2298 opts = _byteskwargs(opts)
2283 2299 x = int(x)
2284 2300 timer, fm = gettimer(ui, opts)
2285 2301 cl = repo.changelog
2286 2302
2287 2303 def d():
2288 2304 len(cl.read(x)[3])
2289 2305
2290 2306 timer(d)
2291 2307 fm.end()
2292 2308
2293 2309
2294 2310 @command(b'perf::lookup|perflookup', formatteropts)
2295 2311 def perflookup(ui, repo, rev, **opts):
2296 2312 opts = _byteskwargs(opts)
2297 2313 timer, fm = gettimer(ui, opts)
2298 2314 timer(lambda: len(repo.lookup(rev)))
2299 2315 fm.end()
2300 2316
2301 2317
2302 2318 @command(
2303 2319 b'perf::linelogedits|perflinelogedits',
2304 2320 [
2305 2321 (b'n', b'edits', 10000, b'number of edits'),
2306 2322 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
2307 2323 ],
2308 2324 norepo=True,
2309 2325 )
2310 2326 def perflinelogedits(ui, **opts):
2311 2327 from mercurial import linelog
2312 2328
2313 2329 opts = _byteskwargs(opts)
2314 2330
2315 2331 edits = opts[b'edits']
2316 2332 maxhunklines = opts[b'max_hunk_lines']
2317 2333
2318 2334 maxb1 = 100000
2319 2335 random.seed(0)
2320 2336 randint = random.randint
2321 2337 currentlines = 0
2322 2338 arglist = []
2323 2339 for rev in _xrange(edits):
2324 2340 a1 = randint(0, currentlines)
2325 2341 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
2326 2342 b1 = randint(0, maxb1)
2327 2343 b2 = randint(b1, b1 + maxhunklines)
2328 2344 currentlines += (b2 - b1) - (a2 - a1)
2329 2345 arglist.append((rev, a1, a2, b1, b2))
2330 2346
2331 2347 def d():
2332 2348 ll = linelog.linelog()
2333 2349 for args in arglist:
2334 2350 ll.replacelines(*args)
2335 2351
2336 2352 timer, fm = gettimer(ui, opts)
2337 2353 timer(d)
2338 2354 fm.end()
2339 2355
2340 2356
2341 2357 @command(b'perf::revrange|perfrevrange', formatteropts)
2342 2358 def perfrevrange(ui, repo, *specs, **opts):
2343 2359 opts = _byteskwargs(opts)
2344 2360 timer, fm = gettimer(ui, opts)
2345 2361 revrange = scmutil.revrange
2346 2362 timer(lambda: len(revrange(repo, specs)))
2347 2363 fm.end()
2348 2364
2349 2365
2350 2366 @command(b'perf::nodelookup|perfnodelookup', formatteropts)
2351 2367 def perfnodelookup(ui, repo, rev, **opts):
2352 2368 opts = _byteskwargs(opts)
2353 2369 timer, fm = gettimer(ui, opts)
2354 2370 import mercurial.revlog
2355 2371
2356 2372 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
2357 2373 n = scmutil.revsingle(repo, rev).node()
2358 2374
2359 2375 try:
2360 2376 cl = revlog(getsvfs(repo), radix=b"00changelog")
2361 2377 except TypeError:
2362 2378 cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")
2363 2379
2364 2380 def d():
2365 2381 cl.rev(n)
2366 2382 clearcaches(cl)
2367 2383
2368 2384 timer(d)
2369 2385 fm.end()
2370 2386
2371 2387
2372 2388 @command(
2373 2389 b'perf::log|perflog',
2374 2390 [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
2375 2391 )
2376 2392 def perflog(ui, repo, rev=None, **opts):
2377 2393 opts = _byteskwargs(opts)
2378 2394 if rev is None:
2379 2395 rev = []
2380 2396 timer, fm = gettimer(ui, opts)
2381 2397 ui.pushbuffer()
2382 2398 timer(
2383 2399 lambda: commands.log(
2384 2400 ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
2385 2401 )
2386 2402 )
2387 2403 ui.popbuffer()
2388 2404 fm.end()
2389 2405
2390 2406
2391 2407 @command(b'perf::moonwalk|perfmoonwalk', formatteropts)
2392 2408 def perfmoonwalk(ui, repo, **opts):
2393 2409 """benchmark walking the changelog backwards
2394 2410
2395 2411 This also loads the changelog data for each revision in the changelog.
2396 2412 """
2397 2413 opts = _byteskwargs(opts)
2398 2414 timer, fm = gettimer(ui, opts)
2399 2415
2400 2416 def moonwalk():
2401 2417 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
2402 2418 ctx = repo[i]
2403 2419 ctx.branch() # read changelog data (in addition to the index)
2404 2420
2405 2421 timer(moonwalk)
2406 2422 fm.end()
2407 2423
2408 2424
2409 2425 @command(
2410 2426 b'perf::templating|perftemplating',
2411 2427 [
2412 2428 (b'r', b'rev', [], b'revisions to run the template on'),
2413 2429 ]
2414 2430 + formatteropts,
2415 2431 )
2416 2432 def perftemplating(ui, repo, testedtemplate=None, **opts):
2417 2433 """test the rendering time of a given template"""
2418 2434 if makelogtemplater is None:
2419 2435 raise error.Abort(
2420 2436 b"perftemplating not available with this Mercurial",
2421 2437 hint=b"use 4.3 or later",
2422 2438 )
2423 2439
2424 2440 opts = _byteskwargs(opts)
2425 2441
2426 2442 nullui = ui.copy()
2427 2443 nullui.fout = open(os.devnull, 'wb')
2428 2444 nullui.disablepager()
2429 2445 revs = opts.get(b'rev')
2430 2446 if not revs:
2431 2447 revs = [b'all()']
2432 2448 revs = list(scmutil.revrange(repo, revs))
2433 2449
2434 2450 defaulttemplate = (
2435 2451 b'{date|shortdate} [{rev}:{node|short}]'
2436 2452 b' {author|person}: {desc|firstline}\n'
2437 2453 )
2438 2454 if testedtemplate is None:
2439 2455 testedtemplate = defaulttemplate
2440 2456 displayer = makelogtemplater(nullui, repo, testedtemplate)
2441 2457
2442 2458 def format():
2443 2459 for r in revs:
2444 2460 ctx = repo[r]
2445 2461 displayer.show(ctx)
2446 2462 displayer.flush(ctx)
2447 2463
2448 2464 timer, fm = gettimer(ui, opts)
2449 2465 timer(format)
2450 2466 fm.end()
2451 2467
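# Illustrative invocations of the command above: the default template on a
# bounded revset, then a minimal custom template:
#
#   $ hg perf::templating -r '-100:'
#   $ hg perf::templating -r '-100:' '{rev}\n'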
2452 2468
2453 2469 def _displaystats(ui, opts, entries, data):
2454 2470 # use a second formatter because the data are quite different, not sure
2455 2471 # how it flies with the templater.
2456 2472 fm = ui.formatter(b'perf-stats', opts)
2457 2473 for key, title in entries:
2458 2474 values = data[key]
2459 2475 nbvalues = len(values)
2460 2476 values.sort()
2461 2477 stats = {
2462 2478 'key': key,
2463 2479 'title': title,
2464 2480 'nbitems': len(values),
2465 2481 'min': values[0][0],
2466 2482 '10%': values[(nbvalues * 10) // 100][0],
2467 2483 '25%': values[(nbvalues * 25) // 100][0],
2468 2484 '50%': values[(nbvalues * 50) // 100][0],
2469 2485 '75%': values[(nbvalues * 75) // 100][0],
2470 2486 '80%': values[(nbvalues * 80) // 100][0],
2471 2487 '85%': values[(nbvalues * 85) // 100][0],
2472 2488 '90%': values[(nbvalues * 90) // 100][0],
2473 2489 '95%': values[(nbvalues * 95) // 100][0],
2474 2490 '99%': values[(nbvalues * 99) // 100][0],
2475 2491 'max': values[-1][0],
2476 2492 }
2477 2493 fm.startitem()
2478 2494 fm.data(**stats)
2479 2495 # render the percentile breakdown for the human output
2480 2496 fm.plain('### %s (%d items)\n' % (title, len(values)))
2481 2497 lines = [
2482 2498 'min',
2483 2499 '10%',
2484 2500 '25%',
2485 2501 '50%',
2486 2502 '75%',
2487 2503 '80%',
2488 2504 '85%',
2489 2505 '90%',
2490 2506 '95%',
2491 2507 '99%',
2492 2508 'max',
2493 2509 ]
2494 2510 for l in lines:
2495 2511 fm.plain('%s: %s\n' % (l, stats[l]))
2496 2512 fm.end()
2497 2513
2498 2514
2499 2515 @command(
2500 2516 b'perf::helper-mergecopies|perfhelper-mergecopies',
2501 2517 formatteropts
2502 2518 + [
2503 2519 (b'r', b'revs', [], b'restrict search to these revisions'),
2504 2520 (b'', b'timing', False, b'provides extra data (costly)'),
2505 2521 (b'', b'stats', False, b'provides statistic about the measured data'),
2506 2522 ],
2507 2523 )
2508 2524 def perfhelpermergecopies(ui, repo, revs=[], **opts):
2509 2525 """find statistics about potential parameters for `perfmergecopies`
2510 2526
2511 2527 This command finds (base, p1, p2) triplets relevant for copy tracing
2512 2528 benchmarking in the context of a merge. It reports values for some of the
2513 2529 parameters that impact merge copy tracing time during merge.
2514 2530
2515 2531 If `--timing` is set, rename detection is run and the associated timing
2516 2532 will be reported. The extra details come at the cost of slower command
2517 2533 execution.
2518 2534
2519 2535 Since rename detection is only run once, other factors might easily
2520 2536 affect the precision of the timing. However it should give a good
2521 2537 approximation of which revision triplets are very costly.
2522 2538 """
2523 2539 opts = _byteskwargs(opts)
2524 2540 fm = ui.formatter(b'perf', opts)
2525 2541 dotiming = opts[b'timing']
2526 2542 dostats = opts[b'stats']
2527 2543
2528 2544 output_template = [
2529 2545 ("base", "%(base)12s"),
2530 2546 ("p1", "%(p1.node)12s"),
2531 2547 ("p2", "%(p2.node)12s"),
2532 2548 ("p1.nb-revs", "%(p1.nbrevs)12d"),
2533 2549 ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
2534 2550 ("p1.renames", "%(p1.renamedfiles)12d"),
2535 2551 ("p1.time", "%(p1.time)12.3f"),
2536 2552 ("p2.nb-revs", "%(p2.nbrevs)12d"),
2537 2553 ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
2538 2554 ("p2.renames", "%(p2.renamedfiles)12d"),
2539 2555 ("p2.time", "%(p2.time)12.3f"),
2540 2556 ("renames", "%(nbrenamedfiles)12d"),
2541 2557 ("total.time", "%(time)12.3f"),
2542 2558 ]
2543 2559 if not dotiming:
2544 2560 output_template = [
2545 2561 i
2546 2562 for i in output_template
2547 2563 if not ('time' in i[0] or 'renames' in i[0])
2548 2564 ]
2549 2565 header_names = [h for (h, v) in output_template]
2550 2566 output = ' '.join([v for (h, v) in output_template]) + '\n'
2551 2567 header = ' '.join(['%12s'] * len(header_names)) + '\n'
2552 2568 fm.plain(header % tuple(header_names))
2553 2569
2554 2570 if not revs:
2555 2571 revs = ['all()']
2556 2572 revs = scmutil.revrange(repo, revs)
2557 2573
2558 2574 if dostats:
2559 2575 alldata = {
2560 2576 'nbrevs': [],
2561 2577 'nbmissingfiles': [],
2562 2578 }
2563 2579 if dotiming:
2564 2580 alldata['parentnbrenames'] = []
2565 2581 alldata['totalnbrenames'] = []
2566 2582 alldata['parenttime'] = []
2567 2583 alldata['totaltime'] = []
2568 2584
2569 2585 roi = repo.revs('merge() and %ld', revs)
2570 2586 for r in roi:
2571 2587 ctx = repo[r]
2572 2588 p1 = ctx.p1()
2573 2589 p2 = ctx.p2()
2574 2590 bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
2575 2591 for b in bases:
2576 2592 b = repo[b]
2577 2593 p1missing = copies._computeforwardmissing(b, p1)
2578 2594 p2missing = copies._computeforwardmissing(b, p2)
2579 2595 data = {
2580 2596 b'base': b.hex(),
2581 2597 b'p1.node': p1.hex(),
2582 2598 b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
2583 2599 b'p1.nbmissingfiles': len(p1missing),
2584 2600 b'p2.node': p2.hex(),
2585 2601 b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
2586 2602 b'p2.nbmissingfiles': len(p2missing),
2587 2603 }
2588 2604 if dostats:
2589 2605 if p1missing:
2590 2606 alldata['nbrevs'].append(
2591 2607 (data['p1.nbrevs'], b.hex(), p1.hex())
2592 2608 )
2593 2609 alldata['nbmissingfiles'].append(
2594 2610 (data['p1.nbmissingfiles'], b.hex(), p1.hex())
2595 2611 )
2596 2612 if p2missing:
2597 2613 alldata['nbrevs'].append(
2598 2614 (data['p2.nbrevs'], b.hex(), p2.hex())
2599 2615 )
2600 2616 alldata['nbmissingfiles'].append(
2601 2617 (data['p2.nbmissingfiles'], b.hex(), p2.hex())
2602 2618 )
2603 2619 if dotiming:
2604 2620 begin = util.timer()
2605 2621 mergedata = copies.mergecopies(repo, p1, p2, b)
2606 2622 end = util.timer()
2607 2623 # not very stable timing since we did only one run
2608 2624 data['time'] = end - begin
2609 2625 # mergedata contains five dicts: "copy", "movewithdir",
2610 2626 # "diverge", "renamedelete" and "dirmove".
2611 2627 # The first 4 are about renamed files, so let's count those.
2612 2628 renames = len(mergedata[0])
2613 2629 renames += len(mergedata[1])
2614 2630 renames += len(mergedata[2])
2615 2631 renames += len(mergedata[3])
2616 2632 data['nbrenamedfiles'] = renames
2617 2633 begin = util.timer()
2618 2634 p1renames = copies.pathcopies(b, p1)
2619 2635 end = util.timer()
2620 2636 data['p1.time'] = end - begin
2621 2637 begin = util.timer()
2622 2638 p2renames = copies.pathcopies(b, p2)
2623 2639 end = util.timer()
2624 2640 data['p2.time'] = end - begin
2625 2641 data['p1.renamedfiles'] = len(p1renames)
2626 2642 data['p2.renamedfiles'] = len(p2renames)
2627 2643
2628 2644 if dostats:
2629 2645 if p1missing:
2630 2646 alldata['parentnbrenames'].append(
2631 2647 (data['p1.renamedfiles'], b.hex(), p1.hex())
2632 2648 )
2633 2649 alldata['parenttime'].append(
2634 2650 (data['p1.time'], b.hex(), p1.hex())
2635 2651 )
2636 2652 if p2missing:
2637 2653 alldata['parentnbrenames'].append(
2638 2654 (data['p2.renamedfiles'], b.hex(), p2.hex())
2639 2655 )
2640 2656 alldata['parenttime'].append(
2641 2657 (data['p2.time'], b.hex(), p2.hex())
2642 2658 )
2643 2659 if p1missing or p2missing:
2644 2660 alldata['totalnbrenames'].append(
2645 2661 (
2646 2662 data['nbrenamedfiles'],
2647 2663 b.hex(),
2648 2664 p1.hex(),
2649 2665 p2.hex(),
2650 2666 )
2651 2667 )
2652 2668 alldata['totaltime'].append(
2653 2669 (data['time'], b.hex(), p1.hex(), p2.hex())
2654 2670 )
2655 2671 fm.startitem()
2656 2672 fm.data(**data)
2657 2673 # make node pretty for the human output
2658 2674 out = data.copy()
2659 2675 out['base'] = fm.hexfunc(b.node())
2660 2676 out['p1.node'] = fm.hexfunc(p1.node())
2661 2677 out['p2.node'] = fm.hexfunc(p2.node())
2662 2678 fm.plain(output % out)
2663 2679
2664 2680 fm.end()
2665 2681 if dostats:
2666 2682 # use a second formatter because the data are quite different, not sure
2667 2683 # how it flies with the templater.
2668 2684 entries = [
2669 2685 ('nbrevs', 'number of revisions covered'),
2670 2686 ('nbmissingfiles', 'number of missing files at head'),
2671 2687 ]
2672 2688 if dotiming:
2673 2689 entries.append(
2674 2690 ('parentnbrenames', 'renames from one parent to base')
2675 2691 )
2676 2692 entries.append(('totalnbrenames', 'total number of renames'))
2677 2693 entries.append(('parenttime', 'time for one parent'))
2678 2694 entries.append(('totaltime', 'time for both parents'))
2679 2695 _displaystats(ui, opts, entries, alldata)
2680 2696
2681 2697
2682 2698 @command(
2683 2699 b'perf::helper-pathcopies|perfhelper-pathcopies',
2684 2700 formatteropts
2685 2701 + [
2686 2702 (b'r', b'revs', [], b'restrict search to these revisions'),
2687 2703 (b'', b'timing', False, b'provides extra data (costly)'),
2688 2704 (b'', b'stats', False, b'provides statistic about the measured data'),
2689 2705 ],
2690 2706 )
2691 2707 def perfhelperpathcopies(ui, repo, revs=[], **opts):
2692 2708 """find statistic about potential parameters for the `perftracecopies`
2693 2709
2694 2710 This command find source-destination pair relevant for copytracing testing.
2695 2711 It report value for some of the parameters that impact copy tracing time.
2696 2712
2697 2713 If `--timing` is set, rename detection is run and the associated timing
2698 2714 will be reported. The extra details comes at the cost of a slower command
2699 2715 execution.
2700 2716
2701 2717 Since the rename detection is only run once, other factors might easily
2702 2718 affect the precision of the timing. However it should give a good
2703 2719 approximation of which revision pairs are very costly.
2704 2720 """
2705 2721 opts = _byteskwargs(opts)
2706 2722 fm = ui.formatter(b'perf', opts)
2707 2723 dotiming = opts[b'timing']
2708 2724 dostats = opts[b'stats']
2709 2725
2710 2726 if dotiming:
2711 2727 header = '%12s %12s %12s %12s %12s %12s\n'
2712 2728 output = (
2713 2729 "%(source)12s %(destination)12s "
2714 2730 "%(nbrevs)12d %(nbmissingfiles)12d "
2715 2731 "%(nbrenamedfiles)12d %(time)18.5f\n"
2716 2732 )
2717 2733 header_names = (
2718 2734 "source",
2719 2735 "destination",
2720 2736 "nb-revs",
2721 2737 "nb-files",
2722 2738 "nb-renames",
2723 2739 "time",
2724 2740 )
2725 2741 fm.plain(header % header_names)
2726 2742 else:
2727 2743 header = '%12s %12s %12s %12s\n'
2728 2744 output = (
2729 2745 "%(source)12s %(destination)12s "
2730 2746 "%(nbrevs)12d %(nbmissingfiles)12d\n"
2731 2747 )
2732 2748 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
2733 2749
2734 2750 if not revs:
2735 2751 revs = ['all()']
2736 2752 revs = scmutil.revrange(repo, revs)
2737 2753
2738 2754 if dostats:
2739 2755 alldata = {
2740 2756 'nbrevs': [],
2741 2757 'nbmissingfiles': [],
2742 2758 }
2743 2759 if dotiming:
2744 2760 alldata['nbrenames'] = []
2745 2761 alldata['time'] = []
2746 2762
2747 2763 roi = repo.revs('merge() and %ld', revs)
2748 2764 for r in roi:
2749 2765 ctx = repo[r]
2750 2766 p1 = ctx.p1().rev()
2751 2767 p2 = ctx.p2().rev()
2752 2768 bases = repo.changelog._commonancestorsheads(p1, p2)
2753 2769 for p in (p1, p2):
2754 2770 for b in bases:
2755 2771 base = repo[b]
2756 2772 parent = repo[p]
2757 2773 missing = copies._computeforwardmissing(base, parent)
2758 2774 if not missing:
2759 2775 continue
2760 2776 data = {
2761 2777 b'source': base.hex(),
2762 2778 b'destination': parent.hex(),
2763 2779 b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
2764 2780 b'nbmissingfiles': len(missing),
2765 2781 }
2766 2782 if dostats:
2767 2783 alldata['nbrevs'].append(
2768 2784 (
2769 2785 data['nbrevs'],
2770 2786 base.hex(),
2771 2787 parent.hex(),
2772 2788 )
2773 2789 )
2774 2790 alldata['nbmissingfiles'].append(
2775 2791 (
2776 2792 data['nbmissingfiles'],
2777 2793 base.hex(),
2778 2794 parent.hex(),
2779 2795 )
2780 2796 )
2781 2797 if dotiming:
2782 2798 begin = util.timer()
2783 2799 renames = copies.pathcopies(base, parent)
2784 2800 end = util.timer()
2785 2801 # not very stable timing since we did only one run
2786 2802 data['time'] = end - begin
2787 2803 data['nbrenamedfiles'] = len(renames)
2788 2804 if dostats:
2789 2805 alldata['time'].append(
2790 2806 (
2791 2807 data['time'],
2792 2808 base.hex(),
2793 2809 parent.hex(),
2794 2810 )
2795 2811 )
2796 2812 alldata['nbrenames'].append(
2797 2813 (
2798 2814 data['nbrenamedfiles'],
2799 2815 base.hex(),
2800 2816 parent.hex(),
2801 2817 )
2802 2818 )
2803 2819 fm.startitem()
2804 2820 fm.data(**data)
2805 2821 out = data.copy()
2806 2822 out['source'] = fm.hexfunc(base.node())
2807 2823 out['destination'] = fm.hexfunc(parent.node())
2808 2824 fm.plain(output % out)
2809 2825
2810 2826 fm.end()
2811 2827 if dostats:
2812 2828 entries = [
2813 2829 ('nbrevs', 'number of revisions covered'),
2814 2830 ('nbmissingfiles', 'number of missing files at head'),
2815 2831 ]
2816 2832 if dotiming:
2817 2833 entries.append(('nbrenames', 'renamed files'))
2818 2834 entries.append(('time', 'time'))
2819 2835 _displaystats(ui, opts, entries, alldata)
2820 2836
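# Illustrative invocation of the command above; --timing and --stats add the
# costly extra columns and the percentile summary:
#
#   $ hg perf::helper-pathcopies --revs '-1000:' --timing --stats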
2821 2837
2822 2838 @command(b'perf::cca|perfcca', formatteropts)
2823 2839 def perfcca(ui, repo, **opts):
2824 2840 opts = _byteskwargs(opts)
2825 2841 timer, fm = gettimer(ui, opts)
2826 2842 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
2827 2843 fm.end()
2828 2844
2829 2845
2830 2846 @command(b'perf::fncacheload|perffncacheload', formatteropts)
2831 2847 def perffncacheload(ui, repo, **opts):
2832 2848 opts = _byteskwargs(opts)
2833 2849 timer, fm = gettimer(ui, opts)
2834 2850 s = repo.store
2835 2851
2836 2852 def d():
2837 2853 s.fncache._load()
2838 2854
2839 2855 timer(d)
2840 2856 fm.end()
2841 2857
2842 2858
2843 2859 @command(b'perf::fncachewrite|perffncachewrite', formatteropts)
2844 2860 def perffncachewrite(ui, repo, **opts):
2845 2861 opts = _byteskwargs(opts)
2846 2862 timer, fm = gettimer(ui, opts)
2847 2863 s = repo.store
2848 2864 lock = repo.lock()
2849 2865 s.fncache._load()
2850 2866 tr = repo.transaction(b'perffncachewrite')
2851 2867 tr.addbackup(b'fncache')
2852 2868
2853 2869 def d():
2854 2870 s.fncache._dirty = True
2855 2871 s.fncache.write(tr)
2856 2872
2857 2873 timer(d)
2858 2874 tr.close()
2859 2875 lock.release()
2860 2876 fm.end()
2861 2877
2862 2878
2863 2879 @command(b'perf::fncacheencode|perffncacheencode', formatteropts)
2864 2880 def perffncacheencode(ui, repo, **opts):
2865 2881 opts = _byteskwargs(opts)
2866 2882 timer, fm = gettimer(ui, opts)
2867 2883 s = repo.store
2868 2884 s.fncache._load()
2869 2885
2870 2886 def d():
2871 2887 for p in s.fncache.entries:
2872 2888 s.encode(p)
2873 2889
2874 2890 timer(d)
2875 2891 fm.end()
2876 2892
2877 2893
2878 2894 def _bdiffworker(q, blocks, xdiff, ready, done):
2879 2895 while not done.is_set():
2880 2896 pair = q.get()
2881 2897 while pair is not None:
2882 2898 if xdiff:
2883 2899 mdiff.bdiff.xdiffblocks(*pair)
2884 2900 elif blocks:
2885 2901 mdiff.bdiff.blocks(*pair)
2886 2902 else:
2887 2903 mdiff.textdiff(*pair)
2888 2904 q.task_done()
2889 2905 pair = q.get()
2890 2906 q.task_done() # for the None one
2891 2907 with ready:
2892 2908 ready.wait()
2893 2909
2894 2910
2895 2911 def _manifestrevision(repo, mnode):
2896 2912 ml = repo.manifestlog
2897 2913
2898 2914 if util.safehasattr(ml, b'getstorage'):
2899 2915 store = ml.getstorage(b'')
2900 2916 else:
2901 2917 store = ml._revlog
2902 2918
2903 2919 return store.revision(mnode)
2904 2920
2905 2921
2906 2922 @command(
2907 2923 b'perf::bdiff|perfbdiff',
2908 2924 revlogopts
2909 2925 + formatteropts
2910 2926 + [
2911 2927 (
2912 2928 b'',
2913 2929 b'count',
2914 2930 1,
2915 2931 b'number of revisions to test (when using --startrev)',
2916 2932 ),
2917 2933 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
2918 2934 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
2919 2935 (b'', b'blocks', False, b'test computing diffs into blocks'),
2920 2936 (b'', b'xdiff', False, b'use xdiff algorithm'),
2921 2937 ],
2922 2938 b'-c|-m|FILE REV',
2923 2939 )
2924 2940 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
2925 2941 """benchmark a bdiff between revisions
2926 2942
2927 2943 By default, benchmark a bdiff between its delta parent and itself.
2928 2944
2929 2945 With ``--count``, benchmark bdiffs between delta parents and self for N
2930 2946 revisions starting at the specified revision.
2931 2947
2932 2948 With ``--alldata``, assume the requested revision is a changeset and
2933 2949 measure bdiffs for all changes related to that changeset (manifest
2934 2950 and filelogs).
2935 2951 """
2936 2952 opts = _byteskwargs(opts)
2937 2953
2938 2954 if opts[b'xdiff'] and not opts[b'blocks']:
2939 2955 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
2940 2956
2941 2957 if opts[b'alldata']:
2942 2958 opts[b'changelog'] = True
2943 2959
2944 2960 if opts.get(b'changelog') or opts.get(b'manifest'):
2945 2961 file_, rev = None, file_
2946 2962 elif rev is None:
2947 2963 raise error.CommandError(b'perfbdiff', b'invalid arguments')
2948 2964
2949 2965 blocks = opts[b'blocks']
2950 2966 xdiff = opts[b'xdiff']
2951 2967 textpairs = []
2952 2968
2953 2969 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
2954 2970
2955 2971 startrev = r.rev(r.lookup(rev))
2956 2972 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2957 2973 if opts[b'alldata']:
2958 2974 # Load revisions associated with changeset.
2959 2975 ctx = repo[rev]
2960 2976 mtext = _manifestrevision(repo, ctx.manifestnode())
2961 2977 for pctx in ctx.parents():
2962 2978 pman = _manifestrevision(repo, pctx.manifestnode())
2963 2979 textpairs.append((pman, mtext))
2964 2980
2965 2981 # Load filelog revisions by iterating manifest delta.
2966 2982 man = ctx.manifest()
2967 2983 pman = ctx.p1().manifest()
2968 2984 for filename, change in pman.diff(man).items():
2969 2985 fctx = repo.file(filename)
2970 2986 f1 = fctx.revision(change[0][0] or -1)
2971 2987 f2 = fctx.revision(change[1][0] or -1)
2972 2988 textpairs.append((f1, f2))
2973 2989 else:
2974 2990 dp = r.deltaparent(rev)
2975 2991 textpairs.append((r.revision(dp), r.revision(rev)))
2976 2992
2977 2993 withthreads = threads > 0
2978 2994 if not withthreads:
2979 2995
2980 2996 def d():
2981 2997 for pair in textpairs:
2982 2998 if xdiff:
2983 2999 mdiff.bdiff.xdiffblocks(*pair)
2984 3000 elif blocks:
2985 3001 mdiff.bdiff.blocks(*pair)
2986 3002 else:
2987 3003 mdiff.textdiff(*pair)
2988 3004
2989 3005 else:
2990 3006 q = queue()
2991 3007 for i in _xrange(threads):
2992 3008 q.put(None)
2993 3009 ready = threading.Condition()
2994 3010 done = threading.Event()
2995 3011 for i in _xrange(threads):
2996 3012 threading.Thread(
2997 3013 target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
2998 3014 ).start()
2999 3015 q.join()
3000 3016
3001 3017 def d():
3002 3018 for pair in textpairs:
3003 3019 q.put(pair)
3004 3020 for i in _xrange(threads):
3005 3021 q.put(None)
3006 3022 with ready:
3007 3023 ready.notify_all()
3008 3024 q.join()
3009 3025
3010 3026 timer, fm = gettimer(ui, opts)
3011 3027 timer(d)
3012 3028 fm.end()
3013 3029
3014 3030 if withthreads:
3015 3031 done.set()
3016 3032 for i in _xrange(threads):
3017 3033 q.put(None)
3018 3034 with ready:
3019 3035 ready.notify_all()
3020 3036
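# Illustrative invocations of the command above, matching its '-c|-m|FILE REV'
# synopsis; the second assumes the repository has at least 1000 revisions:
#
#   $ hg perf::bdiff -m tip
#   $ hg perf::bdiff --alldata --count 10 1000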
3021 3037
3022 3038 @command(
3023 3039 b'perf::unbundle',
3024 3040 [
3025 3041 (b'', b'as-push', None, b'pretend the bundle comes from a push'),
3026 3042 ]
3027 3043 + formatteropts,
3028 3044 b'BUNDLE_FILE',
3029 3045 )
3030 3046 def perf_unbundle(ui, repo, fname, **opts):
3031 3047 """benchmark application of a bundle in a repository.
3032 3048
3033 3049 This does not include the final transaction processing.
3034 3050 
3035 3051 The --as-push option makes the unbundle operation appear as if it comes
3036 3052 from a client push. It changes some aspects of the processing and the
3037 3053 associated performance profile.
3038 3054 """
3039 3055
3040 3056 from mercurial import exchange
3041 3057 from mercurial import bundle2
3042 3058 from mercurial import transaction
3043 3059
3044 3060 opts = _byteskwargs(opts)
3045 3061
3046 3062 ### some compatibility hotfix
3047 3063 #
3048 3064 # the data attribute is dropped in 63edc384d3b7, a changeset introducing a
3049 3065 # critical regression that breaks transaction rollback for files that are
3050 3066 # de-inlined.
3051 3067 method = transaction.transaction._addentry
3052 3068 pre_63edc384d3b7 = "data" in getargspec(method).args
3053 3069 # the `detailed_exit_code` attribute is introduced in 33c0c25d0b0f
3054 3070 # a changeset that is a close descendant of 18415fc918a1, the changeset
3055 3071 # that concludes the fix run for the bug introduced in 63edc384d3b7.
3056 3072 args = getargspec(error.Abort.__init__).args
3057 3073 post_18415fc918a1 = "detailed_exit_code" in args
3058 3074
3059 3075 unbundle_source = b'perf::unbundle'
3060 3076 if opts[b'as_push']:
3061 3077 unbundle_source = b'push'
3062 3078
3063 3079 old_max_inline = None
3064 3080 try:
3065 3081 if not (pre_63edc384d3b7 or post_18415fc918a1):
3066 3082 # disable inlining
3067 3083 old_max_inline = mercurial.revlog._maxinline
3068 3084 # large enough to never happen
3069 3085 mercurial.revlog._maxinline = 2 ** 50
3070 3086
3071 3087 with repo.lock():
3072 3088 bundle = [None, None]
3073 3089 orig_quiet = repo.ui.quiet
3074 3090 try:
3075 3091 repo.ui.quiet = True
3076 3092 with open(fname, mode="rb") as f:
3077 3093
3078 3094 def noop_report(*args, **kwargs):
3079 3095 pass
3080 3096
3081 3097 def setup():
3082 3098 gen, tr = bundle
3083 3099 if tr is not None:
3084 3100 tr.abort()
3085 3101 bundle[:] = [None, None]
3086 3102 f.seek(0)
3087 3103 bundle[0] = exchange.readbundle(ui, f, fname)
3088 3104 bundle[1] = repo.transaction(b'perf::unbundle')
3089 3105 # silence the transaction
3090 3106 bundle[1]._report = noop_report
3091 3107
3092 3108 def apply():
3093 3109 gen, tr = bundle
3094 3110 bundle2.applybundle(
3095 3111 repo,
3096 3112 gen,
3097 3113 tr,
3098 3114 source=unbundle_source,
3099 3115 url=fname,
3100 3116 )
3101 3117
3102 3118 timer, fm = gettimer(ui, opts)
3103 3119 timer(apply, setup=setup)
3104 3120 fm.end()
3105 3121 finally:
3106 3122 repo.ui.quiet = orig_quiet
3107 3123 gen, tr = bundle
3108 3124 if tr is not None:
3109 3125 tr.abort()
3110 3126 finally:
3111 3127 if old_max_inline is not None:
3112 3128 mercurial.revlog._maxinline = old_max_inline
3113 3129
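# Illustrative invocations of the command above; `changesets.hg` stands in for
# any bundle file, e.g. one produced by `hg bundle`:
#
#   $ hg perf::unbundle changesets.hg
#   $ hg perf::unbundle --as-push changesets.hg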
3114 3130
3115 3131 @command(
3116 3132 b'perf::unidiff|perfunidiff',
3117 3133 revlogopts
3118 3134 + formatteropts
3119 3135 + [
3120 3136 (
3121 3137 b'',
3122 3138 b'count',
3123 3139 1,
3124 3140 b'number of revisions to test (when using --startrev)',
3125 3141 ),
3126 3142 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
3127 3143 ],
3128 3144 b'-c|-m|FILE REV',
3129 3145 )
3130 3146 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
3131 3147 """benchmark a unified diff between revisions
3132 3148
3133 3149 This doesn't include any copy tracing - it's just a unified diff
3134 3150 of the texts.
3135 3151
3136 3152 By default, benchmark a diff between its delta parent and itself.
3137 3153
3138 3154 With ``--count``, benchmark diffs between delta parents and self for N
3139 3155 revisions starting at the specified revision.
3140 3156
3141 3157 With ``--alldata``, assume the requested revision is a changeset and
3142 3158 measure diffs for all changes related to that changeset (manifest
3143 3159 and filelogs).
3144 3160 """
3145 3161 opts = _byteskwargs(opts)
3146 3162 if opts[b'alldata']:
3147 3163 opts[b'changelog'] = True
3148 3164
3149 3165 if opts.get(b'changelog') or opts.get(b'manifest'):
3150 3166 file_, rev = None, file_
3151 3167 elif rev is None:
3152 3168 raise error.CommandError(b'perfunidiff', b'invalid arguments')
3153 3169
3154 3170 textpairs = []
3155 3171
3156 3172 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
3157 3173
3158 3174 startrev = r.rev(r.lookup(rev))
3159 3175 for rev in range(startrev, min(startrev + count, len(r) - 1)):
3160 3176 if opts[b'alldata']:
3161 3177 # Load revisions associated with changeset.
3162 3178 ctx = repo[rev]
3163 3179 mtext = _manifestrevision(repo, ctx.manifestnode())
3164 3180 for pctx in ctx.parents():
3165 3181 pman = _manifestrevision(repo, pctx.manifestnode())
3166 3182 textpairs.append((pman, mtext))
3167 3183
3168 3184 # Load filelog revisions by iterating manifest delta.
3169 3185 man = ctx.manifest()
3170 3186 pman = ctx.p1().manifest()
3171 3187 for filename, change in pman.diff(man).items():
3172 3188 fctx = repo.file(filename)
3173 3189 f1 = fctx.revision(change[0][0] or -1)
3174 3190 f2 = fctx.revision(change[1][0] or -1)
3175 3191 textpairs.append((f1, f2))
3176 3192 else:
3177 3193 dp = r.deltaparent(rev)
3178 3194 textpairs.append((r.revision(dp), r.revision(rev)))
3179 3195
3180 3196 def d():
3181 3197 for left, right in textpairs:
3182 3198 # The date strings don't matter, so we pass empty strings.
3183 3199 headerlines, hunks = mdiff.unidiff(
3184 3200 left, b'', right, b'', b'left', b'right', binary=False
3185 3201 )
3186 3202 # consume iterators in roughly the way patch.py does
3187 3203 b'\n'.join(headerlines)
3188 3204 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
3189 3205
3190 3206 timer, fm = gettimer(ui, opts)
3191 3207 timer(d)
3192 3208 fm.end()
3193 3209
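# Illustrative invocation of the command above, mirroring perf::bdiff:
#
#   $ hg perf::unidiff -m tip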
3194 3210
3195 3211 @command(b'perf::diffwd|perfdiffwd', formatteropts)
3196 3212 def perfdiffwd(ui, repo, **opts):
3197 3213 """Profile diff of working directory changes"""
3198 3214 opts = _byteskwargs(opts)
3199 3215 timer, fm = gettimer(ui, opts)
3200 3216 options = {
3201 3217 'w': 'ignore_all_space',
3202 3218 'b': 'ignore_space_change',
3203 3219 'B': 'ignore_blank_lines',
3204 3220 }
3205 3221
3206 3222 for diffopt in ('', 'w', 'b', 'B', 'wB'):
3207 3223 opts = {options[c]: b'1' for c in diffopt}
3208 3224
3209 3225 def d():
3210 3226 ui.pushbuffer()
3211 3227 commands.diff(ui, repo, **opts)
3212 3228 ui.popbuffer()
3213 3229
3214 3230 diffopt = diffopt.encode('ascii')
3215 3231 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
3216 3232 timer(d, title=title)
3217 3233 fm.end()
3218 3234
3219 3235
3220 3236 @command(
3221 3237 b'perf::revlogindex|perfrevlogindex',
3222 3238 revlogopts + formatteropts,
3223 3239 b'-c|-m|FILE',
3224 3240 )
3225 3241 def perfrevlogindex(ui, repo, file_=None, **opts):
3226 3242 """Benchmark operations against a revlog index.
3227 3243
3228 3244 This tests constructing a revlog instance, reading index data,
3229 3245 parsing index data, and performing various operations related to
3230 3246 index data.
3231 3247 """
3232 3248
3233 3249 opts = _byteskwargs(opts)
3234 3250
3235 3251 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
3236 3252
3237 3253 opener = getattr(rl, 'opener') # trick linter
3238 3254 # compat with hg <= 5.8
3239 3255 radix = getattr(rl, 'radix', None)
3240 3256 indexfile = getattr(rl, '_indexfile', None)
3241 3257 if indexfile is None:
3242 3258 # compatibility with <= hg-5.8
3243 3259 indexfile = getattr(rl, 'indexfile')
3244 3260 data = opener.read(indexfile)
3245 3261
3246 3262 header = struct.unpack(b'>I', data[0:4])[0]
3247 3263 version = header & 0xFFFF
3248 3264 if version == 1:
3249 3265 inline = header & (1 << 16)
3250 3266 else:
3251 3267 raise error.Abort(b'unsupported revlog version: %d' % version)
3252 3268
3253 3269 parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
3254 3270 if parse_index_v1 is None:
3255 3271 parse_index_v1 = mercurial.revlog.revlogio().parseindex
3256 3272
3257 3273 rllen = len(rl)
3258 3274
3259 3275 node0 = rl.node(0)
3260 3276 node25 = rl.node(rllen // 4)
3261 3277 node50 = rl.node(rllen // 2)
3262 3278 node75 = rl.node(rllen // 4 * 3)
3263 3279 node100 = rl.node(rllen - 1)
3264 3280
3265 3281 allrevs = range(rllen)
3266 3282 allrevsrev = list(reversed(allrevs))
3267 3283 allnodes = [rl.node(rev) for rev in range(rllen)]
3268 3284 allnodesrev = list(reversed(allnodes))
3269 3285
3270 3286 def constructor():
3271 3287 if radix is not None:
3272 3288 revlog(opener, radix=radix)
3273 3289 else:
3274 3290 # hg <= 5.8
3275 3291 revlog(opener, indexfile=indexfile)
3276 3292
3277 3293 def read():
3278 3294 with opener(indexfile) as fh:
3279 3295 fh.read()
3280 3296
3281 3297 def parseindex():
3282 3298 parse_index_v1(data, inline)
3283 3299
3284 3300 def getentry(revornode):
3285 3301 index = parse_index_v1(data, inline)[0]
3286 3302 index[revornode]
3287 3303
3288 3304 def getentries(revs, count=1):
3289 3305 index = parse_index_v1(data, inline)[0]
3290 3306
3291 3307 for i in range(count):
3292 3308 for rev in revs:
3293 3309 index[rev]
3294 3310
3295 3311 def resolvenode(node):
3296 3312 index = parse_index_v1(data, inline)[0]
3297 3313 rev = getattr(index, 'rev', None)
3298 3314 if rev is None:
3299 3315 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
3300 3316 # This only works for the C code.
3301 3317 if nodemap is None:
3302 3318 return
3303 3319 rev = nodemap.__getitem__
3304 3320
3305 3321 try:
3306 3322 rev(node)
3307 3323 except error.RevlogError:
3308 3324 pass
3309 3325
3310 3326 def resolvenodes(nodes, count=1):
3311 3327 index = parse_index_v1(data, inline)[0]
3312 3328 rev = getattr(index, 'rev', None)
3313 3329 if rev is None:
3314 3330 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
3315 3331 # This only works for the C code.
3316 3332 if nodemap is None:
3317 3333 return
3318 3334 rev = nodemap.__getitem__
3319 3335
3320 3336 for i in range(count):
3321 3337 for node in nodes:
3322 3338 try:
3323 3339 rev(node)
3324 3340 except error.RevlogError:
3325 3341 pass
3326 3342
3327 3343 benches = [
3328 3344 (constructor, b'revlog constructor'),
3329 3345 (read, b'read'),
3330 3346 (parseindex, b'create index object'),
3331 3347 (lambda: getentry(0), b'retrieve index entry for rev 0'),
3332 3348 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
3333 3349 (lambda: resolvenode(node0), b'look up node at rev 0'),
3334 3350 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
3335 3351 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
3336 3352 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
3337 3353 (lambda: resolvenode(node100), b'look up node at tip'),
3338 3354 # 2x variation is to measure caching impact.
3339 3355 (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
3340 3356 (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
3341 3357 (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
3342 3358 (
3343 3359 lambda: resolvenodes(allnodesrev, 2),
3344 3360 b'look up all nodes 2x (reverse)',
3345 3361 ),
3346 3362 (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
3347 3363 (
3348 3364 lambda: getentries(allrevs, 2),
3349 3365 b'retrieve all index entries 2x (forward)',
3350 3366 ),
3351 3367 (
3352 3368 lambda: getentries(allrevsrev),
3353 3369 b'retrieve all index entries (reverse)',
3354 3370 ),
3355 3371 (
3356 3372 lambda: getentries(allrevsrev, 2),
3357 3373 b'retrieve all index entries 2x (reverse)',
3358 3374 ),
3359 3375 ]
3360 3376
3361 3377 for fn, title in benches:
3362 3378 timer, fm = gettimer(ui, opts)
3363 3379 timer(fn, title=title)
3364 3380 fm.end()
3365 3381
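# Illustrative invocation of the command above, running every index
# micro-benchmark against the changelog:
#
#   $ hg perf::revlogindex -c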
3366 3382
3367 3383 @command(
3368 3384 b'perf::revlogrevisions|perfrevlogrevisions',
3369 3385 revlogopts
3370 3386 + formatteropts
3371 3387 + [
3372 3388 (b'd', b'dist', 100, b'distance between the revisions'),
3373 3389 (b's', b'startrev', 0, b'revision to start reading at'),
3374 3390 (b'', b'reverse', False, b'read in reverse'),
3375 3391 ],
3376 3392 b'-c|-m|FILE',
3377 3393 )
3378 3394 def perfrevlogrevisions(
3379 3395 ui, repo, file_=None, startrev=0, reverse=False, **opts
3380 3396 ):
3381 3397 """Benchmark reading a series of revisions from a revlog.
3382 3398
3383 3399 By default, we read every ``-d/--dist`` revision from 0 to tip of
3384 3400 the specified revlog.
3385 3401
3386 3402 The start revision can be defined via ``-s/--startrev``.
3387 3403 """
3388 3404 opts = _byteskwargs(opts)
3389 3405
3390 3406 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
3391 3407 rllen = getlen(ui)(rl)
3392 3408
3393 3409 if startrev < 0:
3394 3410 startrev = rllen + startrev
3395 3411
3396 3412 def d():
3397 3413 rl.clearcaches()
3398 3414
3399 3415 beginrev = startrev
3400 3416 endrev = rllen
3401 3417 dist = opts[b'dist']
3402 3418
3403 3419 if reverse:
3404 3420 beginrev, endrev = endrev - 1, beginrev - 1
3405 3421 dist = -1 * dist
3406 3422
3407 3423 for x in _xrange(beginrev, endrev, dist):
3408 3424 # Old revisions don't support passing int.
3409 3425 n = rl.node(x)
3410 3426 rl.revision(n)
3411 3427
3412 3428 timer, fm = gettimer(ui, opts)
3413 3429 timer(d)
3414 3430 fm.end()
3415 3431
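# Illustrative invocation of the command above: read every 10th manifest
# revision, newest first:
#
#   $ hg perf::revlogrevisions -m --dist 10 --reverse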
3416 3432
3417 3433 @command(
3418 3434 b'perf::revlogwrite|perfrevlogwrite',
3419 3435 revlogopts
3420 3436 + formatteropts
3421 3437 + [
3422 3438 (b's', b'startrev', 1000, b'revision to start writing at'),
3423 3439 (b'', b'stoprev', -1, b'last revision to write'),
3424 3440 (b'', b'count', 3, b'number of passes to perform'),
3425 3441 (b'', b'details', False, b'print timing for every revisions tested'),
3426 3442 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
3427 3443 (b'', b'lazydeltabase', True, b'try the provided delta first'),
3428 3444 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
3429 3445 ],
3430 3446 b'-c|-m|FILE',
3431 3447 )
3432 3448 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
3433 3449 """Benchmark writing a series of revisions to a revlog.
3434 3450
3435 3451 Possible source values are:
3436 3452 * `full`: add from a full text (default).
3437 3453 * `parent-1`: add from a delta to the first parent
3438 3454 * `parent-2`: add from a delta to the second parent if it exists
3439 3455 (use a delta from the first parent otherwise)
3440 3456 * `parent-smallest`: add from the smallest delta (either p1 or p2)
3441 3457 * `storage`: add from the existing precomputed deltas
3442 3458
3443 3459 Note: This performance command measures performance in a custom way. As a
3444 3460 result, some of the global configuration of the 'perf' command does not
3445 3461 apply to it:
3446 3462
3447 3463 * ``pre-run``: disabled
3448 3464
3449 3465 * ``profile-benchmark``: disabled
3450 3466
3451 3467 * ``run-limits``: disabled, use --count instead
3452 3468 """
3453 3469 opts = _byteskwargs(opts)
3454 3470
3455 3471 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
3456 3472 rllen = getlen(ui)(rl)
3457 3473 if startrev < 0:
3458 3474 startrev = rllen + startrev
3459 3475 if stoprev < 0:
3460 3476 stoprev = rllen + stoprev
3461 3477
3462 3478 lazydeltabase = opts['lazydeltabase']
3463 3479 source = opts['source']
3464 3480 clearcaches = opts['clear_caches']
3465 3481 validsource = (
3466 3482 b'full',
3467 3483 b'parent-1',
3468 3484 b'parent-2',
3469 3485 b'parent-smallest',
3470 3486 b'storage',
3471 3487 )
3472 3488 if source not in validsource:
3473 3489 raise error.Abort('invalid source type: %s' % source)
3474 3490
3475 3491 ### actually gather results
3476 3492 count = opts['count']
3477 3493 if count <= 0:
3478 3494 raise error.Abort('invalid run count: %d' % count)
3479 3495 allresults = []
3480 3496 for c in range(count):
3481 3497 timing = _timeonewrite(
3482 3498 ui,
3483 3499 rl,
3484 3500 source,
3485 3501 startrev,
3486 3502 stoprev,
3487 3503 c + 1,
3488 3504 lazydeltabase=lazydeltabase,
3489 3505 clearcaches=clearcaches,
3490 3506 )
3491 3507 allresults.append(timing)
3492 3508
3493 3509 ### consolidate the results in a single list
3494 3510 results = []
3495 3511 for idx, (rev, t) in enumerate(allresults[0]):
3496 3512 ts = [t]
3497 3513 for other in allresults[1:]:
3498 3514 orev, ot = other[idx]
3499 3515 assert orev == rev
3500 3516 ts.append(ot)
3501 3517 results.append((rev, ts))
3502 3518 resultcount = len(results)
3503 3519
3504 3520 ### Compute and display relevant statistics
3505 3521
3506 3522 # get a formatter
3507 3523 fm = ui.formatter(b'perf', opts)
3508 3524 displayall = ui.configbool(b"perf", b"all-timing", True)
3509 3525
3510 3526 # print individual details if requested
3511 3527 if opts['details']:
3512 3528 for idx, item in enumerate(results, 1):
3513 3529 rev, data = item
3514 3530 title = 'revision #%d of %d, rev %d' % (idx, resultcount, rev)
3515 3531 formatone(fm, data, title=title, displayall=displayall)
3516 3532
3517 3533 # sorts results by median time
3518 3534 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
3519 3535 # list of (name, index) to display
3520 3536 relevants = [
3521 3537 ("min", 0),
3522 3538 ("10%", resultcount * 10 // 100),
3523 3539 ("25%", resultcount * 25 // 100),
3524 3540 ("50%", resultcount * 70 // 100),
3525 3541 ("75%", resultcount * 75 // 100),
3526 3542 ("90%", resultcount * 90 // 100),
3527 3543 ("95%", resultcount * 95 // 100),
3528 3544 ("99%", resultcount * 99 // 100),
3529 3545 ("99.9%", resultcount * 999 // 1000),
3530 3546 ("99.99%", resultcount * 9999 // 10000),
3531 3547 ("99.999%", resultcount * 99999 // 100000),
3532 3548 ("max", -1),
3533 3549 ]
3534 3550 if not ui.quiet:
3535 3551 for name, idx in relevants:
3536 3552 data = results[idx]
3537 3553 title = '%s of %d, rev %d' % (name, resultcount, data[0])
3538 3554 formatone(fm, data[1], title=title, displayall=displayall)
3539 3555
3540 3556 # XXX summing that many floats will not be very precise, we ignore this fact
3541 3557 # for now
3542 3558 totaltime = []
3543 3559 for item in allresults:
3544 3560 totaltime.append(
3545 3561 (
3546 3562 sum(x[1][0] for x in item),
3547 3563 sum(x[1][1] for x in item),
3548 3564 sum(x[1][2] for x in item),
3549 3565 )
3550 3566 )
3551 3567 formatone(
3552 3568 fm,
3553 3569 totaltime,
3554 3570 title="total time (%d revs)" % resultcount,
3555 3571 displayall=displayall,
3556 3572 )
3557 3573 fm.end()
3558 3574
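Example: a sketch, based on the options listed above, that re-adds the last 1000 manifest revisions from their smallest parent deltas over five passes:

  $ hg perf::revlogwrite -m --startrev -1000 --source parent-smallest --count 5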
3559 3575
3560 3576 class _faketr:
3561 3577 def add(s, x, y, z=None):
3562 3578 return None
3563 3579
3564 3580
3565 3581 def _timeonewrite(
3566 3582 ui,
3567 3583 orig,
3568 3584 source,
3569 3585 startrev,
3570 3586 stoprev,
3571 3587 runidx=None,
3572 3588 lazydeltabase=True,
3573 3589 clearcaches=True,
3574 3590 ):
3575 3591 timings = []
3576 3592 tr = _faketr()
3577 3593 with _temprevlog(ui, orig, startrev) as dest:
3578 3594 if hasattr(dest, "delta_config"):
3579 3595 dest.delta_config.lazy_delta_base = lazydeltabase
3580 3596 else:
3581 3597 dest._lazydeltabase = lazydeltabase
3582 3598 revs = list(orig.revs(startrev, stoprev))
3583 3599 total = len(revs)
3584 3600 topic = 'adding'
3585 3601 if runidx is not None:
3586 3602 topic += ' (run #%d)' % runidx
3587 3603 # Support both old and new progress API
3588 3604 if util.safehasattr(ui, 'makeprogress'):
3589 3605 progress = ui.makeprogress(topic, unit='revs', total=total)
3590 3606
3591 3607 def updateprogress(pos):
3592 3608 progress.update(pos)
3593 3609
3594 3610 def completeprogress():
3595 3611 progress.complete()
3596 3612
3597 3613 else:
3598 3614
3599 3615 def updateprogress(pos):
3600 3616 ui.progress(topic, pos, unit='revs', total=total)
3601 3617
3602 3618 def completeprogress():
3603 3619 ui.progress(topic, None, unit='revs', total=total)
3604 3620
3605 3621 for idx, rev in enumerate(revs):
3606 3622 updateprogress(idx)
3607 3623 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
3608 3624 if clearcaches:
3609 3625 dest.index.clearcaches()
3610 3626 dest.clearcaches()
3611 3627 with timeone() as r:
3612 3628 dest.addrawrevision(*addargs, **addkwargs)
3613 3629 timings.append((rev, r[0]))
3614 3630 updateprogress(total)
3615 3631 completeprogress()
3616 3632 return timings
3617 3633
3618 3634
3619 3635 def _getrevisionseed(orig, rev, tr, source):
3620 3636 from mercurial.node import nullid
3621 3637
3622 3638 linkrev = orig.linkrev(rev)
3623 3639 node = orig.node(rev)
3624 3640 p1, p2 = orig.parents(node)
3625 3641 flags = orig.flags(rev)
3626 3642 cachedelta = None
3627 3643 text = None
3628 3644
3629 3645 if source == b'full':
3630 3646 text = orig.revision(rev)
3631 3647 elif source == b'parent-1':
3632 3648 baserev = orig.rev(p1)
3633 3649 cachedelta = (baserev, orig.revdiff(p1, rev))
3634 3650 elif source == b'parent-2':
3635 3651 parent = p2
3636 3652 if p2 == nullid:
3637 3653 parent = p1
3638 3654 baserev = orig.rev(parent)
3639 3655 cachedelta = (baserev, orig.revdiff(parent, rev))
3640 3656 elif source == b'parent-smallest':
3641 3657 p1diff = orig.revdiff(p1, rev)
3642 3658 parent = p1
3643 3659 diff = p1diff
3644 3660 if p2 != nullid:
3645 3661 p2diff = orig.revdiff(p2, rev)
3646 3662 if len(p1diff) > len(p2diff):
3647 3663 parent = p2
3648 3664 diff = p2diff
3649 3665 baserev = orig.rev(parent)
3650 3666 cachedelta = (baserev, diff)
3651 3667 elif source == b'storage':
3652 3668 baserev = orig.deltaparent(rev)
3653 3669 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
3654 3670
3655 3671 return (
3656 3672 (text, tr, linkrev, p1, p2),
3657 3673 {'node': node, 'flags': flags, 'cachedelta': cachedelta},
3658 3674 )
3659 3675
3660 3676
3661 3677 @contextlib.contextmanager
3662 3678 def _temprevlog(ui, orig, truncaterev):
3663 3679 from mercurial import vfs as vfsmod
3664 3680
3665 3681 if orig._inline:
3666 3682 raise error.Abort('not supporting inline revlog (yet)')
3667 3683 revlogkwargs = {}
3668 3684 k = 'upperboundcomp'
3669 3685 if util.safehasattr(orig, k):
3670 3686 revlogkwargs[k] = getattr(orig, k)
3671 3687
3672 3688 indexfile = getattr(orig, '_indexfile', None)
3673 3689 if indexfile is None:
3674 3690 # compatibility with <= hg-5.8
3675 3691 indexfile = getattr(orig, 'indexfile')
3676 3692 origindexpath = orig.opener.join(indexfile)
3677 3693
3678 3694 datafile = getattr(orig, '_datafile', getattr(orig, 'datafile'))
3679 3695 origdatapath = orig.opener.join(datafile)
3680 3696 radix = b'revlog'
3681 3697 indexname = b'revlog.i'
3682 3698 dataname = b'revlog.d'
3683 3699
3684 3700 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
3685 3701 try:
3686 3702 # copy the data file in a temporary directory
3687 3703 ui.debug('copying data in %s\n' % tmpdir)
3688 3704 destindexpath = os.path.join(tmpdir, 'revlog.i')
3689 3705 destdatapath = os.path.join(tmpdir, 'revlog.d')
3690 3706 shutil.copyfile(origindexpath, destindexpath)
3691 3707 shutil.copyfile(origdatapath, destdatapath)
3692 3708
3693 3709 # remove the data we want to add again
3694 3710 ui.debug('truncating data to be rewritten\n')
3695 3711 with open(destindexpath, 'ab') as index:
3696 3712 index.seek(0)
3697 3713 index.truncate(truncaterev * orig._io.size)
3698 3714 with open(destdatapath, 'ab') as data:
3699 3715 data.seek(0)
3700 3716 data.truncate(orig.start(truncaterev))
3701 3717
3702 3718 # instantiate a new revlog from the temporary copy
3703 3719 ui.debug('instantiating revlog from the truncated copy\n')
3704 3720 vfs = vfsmod.vfs(tmpdir)
3705 3721 vfs.options = getattr(orig.opener, 'options', None)
3706 3722
3707 3723 try:
3708 3724 dest = revlog(vfs, radix=radix, **revlogkwargs)
3709 3725 except TypeError:
3710 3726 dest = revlog(
3711 3727 vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
3712 3728 )
3713 3729 if dest._inline:
3714 3730 raise error.Abort('not supporting inline revlog (yet)')
3715 3731 # make sure internals are initialized
3716 3732 dest.revision(len(dest) - 1)
3717 3733 yield dest
3718 3734 del dest, vfs
3719 3735 finally:
3720 3736 shutil.rmtree(tmpdir, True)
3721 3737
3722 3738
3723 3739 @command(
3724 3740 b'perf::revlogchunks|perfrevlogchunks',
3725 3741 revlogopts
3726 3742 + formatteropts
3727 3743 + [
3728 3744 (b'e', b'engines', b'', b'compression engines to use'),
3729 3745 (b's', b'startrev', 0, b'revision to start at'),
3730 3746 ],
3731 3747 b'-c|-m|FILE',
3732 3748 )
3733 3749 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
3734 3750 """Benchmark operations on revlog chunks.
3735 3751
3736 3752 Logically, each revlog is a collection of fulltext revisions. However,
3737 3753 stored within each revlog are "chunks" of possibly compressed data. This
3738 3754 data needs to be read and decompressed or compressed and written.
3739 3755
3740 3756 This command measures the time it takes to read+decompress and recompress
3741 3757 chunks in a revlog. It effectively isolates I/O and compression performance.
3742 3758 For measurements of higher-level operations like resolving revisions,
3743 3759 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
3744 3760 """
3745 3761 opts = _byteskwargs(opts)
3746 3762
3747 3763 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
3748 3764
3749 3765 # - _chunkraw was renamed to _getsegmentforrevs
3750 3766 # - _getsegmentforrevs was moved on the inner object
3751 3767 try:
3752 3768 segmentforrevs = rl._inner.get_segment_for_revs
3753 3769 except AttributeError:
3754 3770 try:
3755 3771 segmentforrevs = rl._getsegmentforrevs
3756 3772 except AttributeError:
3757 3773 segmentforrevs = rl._chunkraw
3758 3774
3759 3775 # Verify engines argument.
3760 3776 if engines:
3761 3777 engines = {e.strip() for e in engines.split(b',')}
3762 3778 for engine in engines:
3763 3779 try:
3764 3780 util.compressionengines[engine]
3765 3781 except KeyError:
3766 3782 raise error.Abort(b'unknown compression engine: %s' % engine)
3767 3783 else:
3768 3784 engines = []
3769 3785 for e in util.compengines:
3770 3786 engine = util.compengines[e]
3771 3787 try:
3772 3788 if engine.available():
3773 3789 engine.revlogcompressor().compress(b'dummy')
3774 3790 engines.append(e)
3775 3791 except NotImplementedError:
3776 3792 pass
3777 3793
3778 3794 revs = list(rl.revs(startrev, len(rl) - 1))
3779 3795
3780 3796 @contextlib.contextmanager
3781 3797 def reading(rl):
3782 3798 if getattr(rl, 'reading', None) is not None:
3783 3799 with rl.reading():
3784 3800 yield None
3785 3801 elif rl._inline:
3786 3802 indexfile = getattr(rl, '_indexfile', None)
3787 3803 if indexfile is None:
3788 3804 # compatibility with <= hg-5.8
3789 3805 indexfile = getattr(rl, 'indexfile')
3790 3806 yield getsvfs(repo)(indexfile)
3791 3807 else:
3792 3808 datafile = getattr(rl, '_datafile', getattr(rl, 'datafile'))
3793 3809 yield getsvfs(repo)(datafile)
3794 3810
3795 3811 if getattr(rl, 'reading', None) is not None:
3796 3812
3797 3813 @contextlib.contextmanager
3798 3814 def lazy_reading(rl):
3799 3815 with rl.reading():
3800 3816 yield
3801 3817
3802 3818 else:
3803 3819
3804 3820 @contextlib.contextmanager
3805 3821 def lazy_reading(rl):
3806 3822 yield
3807 3823
3808 3824 def doread():
3809 3825 rl.clearcaches()
3810 3826 for rev in revs:
3811 3827 with lazy_reading(rl):
3812 3828 segmentforrevs(rev, rev)
3813 3829
3814 3830 def doreadcachedfh():
3815 3831 rl.clearcaches()
3816 3832 with reading(rl) as fh:
3817 3833 if fh is not None:
3818 3834 for rev in revs:
3819 3835 segmentforrevs(rev, rev, df=fh)
3820 3836 else:
3821 3837 for rev in revs:
3822 3838 segmentforrevs(rev, rev)
3823 3839
3824 3840 def doreadbatch():
3825 3841 rl.clearcaches()
3826 3842 with lazy_reading(rl):
3827 3843 segmentforrevs(revs[0], revs[-1])
3828 3844
3829 3845 def doreadbatchcachedfh():
3830 3846 rl.clearcaches()
3831 3847 with reading(rl) as fh:
3832 3848 if fh is not None:
3833 3849 segmentforrevs(revs[0], revs[-1], df=fh)
3834 3850 else:
3835 3851 segmentforrevs(revs[0], revs[-1])
3836 3852
3837 3853 def dochunk():
3838 3854 rl.clearcaches()
3839 3855 # chunk used to be available directly on the revlog
3840 3856 _chunk = getattr(rl, '_inner', rl)._chunk
3841 3857 with reading(rl) as fh:
3842 3858 if fh is not None:
3843 3859 for rev in revs:
3844 3860 _chunk(rev, df=fh)
3845 3861 else:
3846 3862 for rev in revs:
3847 3863 _chunk(rev)
3848 3864
3849 3865 chunks = [None]
3850 3866
3851 3867 def dochunkbatch():
3852 3868 rl.clearcaches()
3853 3869 _chunks = getattr(rl, '_inner', rl)._chunks
3854 3870 with reading(rl) as fh:
3855 3871 if fh is not None:
3856 3872 # Save chunks as a side-effect.
3857 3873 chunks[0] = _chunks(revs, df=fh)
3858 3874 else:
3859 3875 # Save chunks as a side-effect.
3860 3876 chunks[0] = _chunks(revs)
3861 3877
3862 3878 def docompress(compressor):
3863 3879 rl.clearcaches()
3864 3880
3865 3881 compressor_holder = getattr(rl, '_inner', rl)
3866 3882
3867 3883 try:
3868 3884 # Swap in the requested compression engine.
3869 3885 oldcompressor = compressor_holder._compressor
3870 3886 compressor_holder._compressor = compressor
3871 3887 for chunk in chunks[0]:
3872 3888 rl.compress(chunk)
3873 3889 finally:
3874 3890 compressor_holder._compressor = oldcompressor
3875 3891
3876 3892 benches = [
3877 3893 (lambda: doread(), b'read'),
3878 3894 (lambda: doreadcachedfh(), b'read w/ reused fd'),
3879 3895 (lambda: doreadbatch(), b'read batch'),
3880 3896 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
3881 3897 (lambda: dochunk(), b'chunk'),
3882 3898 (lambda: dochunkbatch(), b'chunk batch'),
3883 3899 ]
3884 3900
3885 3901 for engine in sorted(engines):
3886 3902 compressor = util.compengines[engine].revlogcompressor()
3887 3903 benches.append(
3888 3904 (
3889 3905 functools.partial(docompress, compressor),
3890 3906 b'compress w/ %s' % engine,
3891 3907 )
3892 3908 )
3893 3909
3894 3910 for fn, title in benches:
3895 3911 timer, fm = gettimer(ui, opts)
3896 3912 timer(fn, title=title)
3897 3913 fm.end()
3898 3914
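Example: a sketch comparing chunk recompression with two engines on the changelog (engine availability depends on the Mercurial build; unknown engines abort as shown above):

  $ hg perf::revlogchunks -c --engines 'zlib,zstd'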
3899 3915
3900 3916 @command(
3901 3917 b'perf::revlogrevision|perfrevlogrevision',
3902 3918 revlogopts
3903 3919 + formatteropts
3904 3920 + [(b'', b'cache', False, b'use caches instead of clearing')],
3905 3921 b'-c|-m|FILE REV',
3906 3922 )
3907 3923 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
3908 3924 """Benchmark obtaining a revlog revision.
3909 3925
3910 3926 Obtaining a revlog revision consists of roughly the following steps:
3911 3927
3912 3928 1. Compute the delta chain
3913 3929 2. Slice the delta chain if applicable
3914 3930 3. Obtain the raw chunks for that delta chain
3915 3931 4. Decompress each raw chunk
3916 3932 5. Apply binary patches to obtain fulltext
3917 3933 6. Verify hash of fulltext
3918 3934
3919 3935 This command measures the time spent in each of these phases.
3920 3936 """
3921 3937 opts = _byteskwargs(opts)
3922 3938
3923 3939 if opts.get(b'changelog') or opts.get(b'manifest'):
3924 3940 file_, rev = None, file_
3925 3941 elif rev is None:
3926 3942 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
3927 3943
3928 3944 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
3929 3945
3930 3946 # _chunkraw was renamed to _getsegmentforrevs.
3931 3947 try:
3932 3948 segmentforrevs = r._inner.get_segment_for_revs
3933 3949 except AttributeError:
3934 3950 try:
3935 3951 segmentforrevs = r._getsegmentforrevs
3936 3952 except AttributeError:
3937 3953 segmentforrevs = r._chunkraw
3938 3954
3939 3955 node = r.lookup(rev)
3940 3956 rev = r.rev(node)
3941 3957
3942 3958 if getattr(r, 'reading', None) is not None:
3943 3959
3944 3960 @contextlib.contextmanager
3945 3961 def lazy_reading(r):
3946 3962 with r.reading():
3947 3963 yield
3948 3964
3949 3965 else:
3950 3966
3951 3967 @contextlib.contextmanager
3952 3968 def lazy_reading(r):
3953 3969 yield
3954 3970
3955 3971 def getrawchunks(data, chain):
3956 3972 start = r.start
3957 3973 length = r.length
3958 3974 inline = r._inline
3959 3975 try:
3960 3976 iosize = r.index.entry_size
3961 3977 except AttributeError:
3962 3978 iosize = r._io.size
3963 3979 buffer = util.buffer
3964 3980
3965 3981 chunks = []
3966 3982 ladd = chunks.append
3967 3983 for idx, item in enumerate(chain):
3968 3984 offset = start(item[0])
3969 3985 bits = data[idx]
3970 3986 for rev in item:
3971 3987 chunkstart = start(rev)
3972 3988 if inline:
3973 3989 chunkstart += (rev + 1) * iosize
3974 3990 chunklength = length(rev)
3975 3991 ladd(buffer(bits, chunkstart - offset, chunklength))
3976 3992
3977 3993 return chunks
3978 3994
3979 3995 def dodeltachain(rev):
3980 3996 if not cache:
3981 3997 r.clearcaches()
3982 3998 r._deltachain(rev)
3983 3999
3984 4000 def doread(chain):
3985 4001 if not cache:
3986 4002 r.clearcaches()
3987 4003 for item in slicedchain:
3988 4004 with lazy_reading(r):
3989 4005 segmentforrevs(item[0], item[-1])
3990 4006
3991 4007 def doslice(r, chain, size):
3992 4008 for s in slicechunk(r, chain, targetsize=size):
3993 4009 pass
3994 4010
3995 4011 def dorawchunks(data, chain):
3996 4012 if not cache:
3997 4013 r.clearcaches()
3998 4014 getrawchunks(data, chain)
3999 4015
4000 4016 def dodecompress(chunks):
4001 4017 decomp = r.decompress
4002 4018 for chunk in chunks:
4003 4019 decomp(chunk)
4004 4020
4005 4021 def dopatch(text, bins):
4006 4022 if not cache:
4007 4023 r.clearcaches()
4008 4024 mdiff.patches(text, bins)
4009 4025
4010 4026 def dohash(text):
4011 4027 if not cache:
4012 4028 r.clearcaches()
4013 4029 r.checkhash(text, node, rev=rev)
4014 4030
4015 4031 def dorevision():
4016 4032 if not cache:
4017 4033 r.clearcaches()
4018 4034 r.revision(node)
4019 4035
4020 4036 try:
4021 4037 from mercurial.revlogutils.deltas import slicechunk
4022 4038 except ImportError:
4023 4039 slicechunk = getattr(revlog, '_slicechunk', None)
4024 4040
4025 4041 size = r.length(rev)
4026 4042 chain = r._deltachain(rev)[0]
4027 4043
4028 4044 with_sparse_read = False
4029 4045 if hasattr(r, 'data_config'):
4030 4046 with_sparse_read = r.data_config.with_sparse_read
4031 4047 elif hasattr(r, '_withsparseread'):
4032 4048 with_sparse_read = r._withsparseread
4033 4049 if with_sparse_read:
4034 4050 slicedchain = (chain,)
4035 4051 else:
4036 4052 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
4037 4053 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
4038 4054 rawchunks = getrawchunks(data, slicedchain)
4039 4055 bins = r._inner._chunks(chain)
4040 4056 text = bytes(bins[0])
4041 4057 bins = bins[1:]
4042 4058 text = mdiff.patches(text, bins)
4043 4059
4044 4060 benches = [
4045 4061 (lambda: dorevision(), b'full'),
4046 4062 (lambda: dodeltachain(rev), b'deltachain'),
4047 4063 (lambda: doread(chain), b'read'),
4048 4064 ]
4049 4065
4050 4066 if with_sparse_read:
4051 4067 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
4052 4068 benches.append(slicing)
4053 4069
4054 4070 benches.extend(
4055 4071 [
4056 4072 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
4057 4073 (lambda: dodecompress(rawchunks), b'decompress'),
4058 4074 (lambda: dopatch(text, bins), b'patch'),
4059 4075 (lambda: dohash(text), b'hash'),
4060 4076 ]
4061 4077 )
4062 4078
4063 4079 timer, fm = gettimer(ui, opts)
4064 4080 for fn, title in benches:
4065 4081 timer(fn, title=title)
4066 4082 fm.end()
4067 4083
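Example: hypothetical invocations timing each phase for manifest revision 0, first cold and then with caches kept warm:

  $ hg perf::revlogrevision -m 0
  $ hg perf::revlogrevision --cache -m 0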
4068 4084
4069 4085 @command(
4070 4086 b'perf::revset|perfrevset',
4071 4087 [
4072 4088 (b'C', b'clear', False, b'clear volatile cache between each call.'),
4073 4089 (b'', b'contexts', False, b'obtain changectx for each revision'),
4074 4090 ]
4075 4091 + formatteropts,
4076 4092 b"REVSET",
4077 4093 )
4078 4094 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
4079 4095 """benchmark the execution time of a revset
4080 4096
4081 4097 Use the --clear option if you need to evaluate the impact of building the
4082 4098 volatile revision set caches on revset execution. The volatile caches hold
4083 4099 filtering- and obsolescence-related data."""
4084 4100 opts = _byteskwargs(opts)
4085 4101
4086 4102 timer, fm = gettimer(ui, opts)
4087 4103
4088 4104 def d():
4089 4105 if clear:
4090 4106 repo.invalidatevolatilesets()
4091 4107 if contexts:
4092 4108 for ctx in repo.set(expr):
4093 4109 pass
4094 4110 else:
4095 4111 for r in repo.revs(expr):
4096 4112 pass
4097 4113
4098 4114 timer(d)
4099 4115 fm.end()
4100 4116
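Example (the revset itself is only illustrative), materializing changectx objects and dropping volatile caches between calls:

  $ hg perf::revset --contexts --clear 'draft()'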
4101 4117
4102 4118 @command(
4103 4119 b'perf::volatilesets|perfvolatilesets',
4104 4120 [
4105 4121 (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
4106 4122 ]
4107 4123 + formatteropts,
4108 4124 )
4109 4125 def perfvolatilesets(ui, repo, *names, **opts):
4110 4126 """benchmark the computation of various volatile set
4111 4127
4112 4128 Volatile set computes element related to filtering and obsolescence."""
4113 4129 opts = _byteskwargs(opts)
4114 4130 timer, fm = gettimer(ui, opts)
4115 4131 repo = repo.unfiltered()
4116 4132
4117 4133 def getobs(name):
4118 4134 def d():
4119 4135 repo.invalidatevolatilesets()
4120 4136 if opts[b'clear_obsstore']:
4121 4137 clearfilecache(repo, b'obsstore')
4122 4138 obsolete.getrevs(repo, name)
4123 4139
4124 4140 return d
4125 4141
4126 4142 allobs = sorted(obsolete.cachefuncs)
4127 4143 if names:
4128 4144 allobs = [n for n in allobs if n in names]
4129 4145
4130 4146 for name in allobs:
4131 4147 timer(getobs(name), title=name)
4132 4148
4133 4149 def getfiltered(name):
4134 4150 def d():
4135 4151 repo.invalidatevolatilesets()
4136 4152 if opts[b'clear_obsstore']:
4137 4153 clearfilecache(repo, b'obsstore')
4138 4154 repoview.filterrevs(repo, name)
4139 4155
4140 4156 return d
4141 4157
4142 4158 allfilter = sorted(repoview.filtertable)
4143 4159 if names:
4144 4160 allfilter = [n for n in allfilter if n in names]
4145 4161
4146 4162 for name in allfilter:
4147 4163 timer(getfiltered(name), title=name)
4148 4164 fm.end()
4149 4165
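Example: a sketch restricting the benchmark to particular sets, assuming 'obsolete' and 'visible' are valid names in this version (names are matched against obsolete.cachefuncs and repoview.filtertable):

  $ hg perf::volatilesets --clear-obsstore obsolete visible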
4150 4166
4151 4167 @command(
4152 4168 b'perf::branchmap|perfbranchmap',
4153 4169 [
4154 4170 (b'f', b'full', False, b'Includes build time of subset'),
4155 4171 (
4156 4172 b'',
4157 4173 b'clear-revbranch',
4158 4174 False,
4159 4175 b'purge the revbranch cache between computation',
4160 4176 ),
4161 4177 ]
4162 4178 + formatteropts,
4163 4179 )
4164 4180 def perfbranchmap(ui, repo, *filternames, **opts):
4165 4181 """benchmark the update of a branchmap
4166 4182
4167 4183 This benchmarks the full repo.branchmap() call with read and write disabled
4168 4184 """
4169 4185 opts = _byteskwargs(opts)
4170 4186 full = opts.get(b"full", False)
4171 4187 clear_revbranch = opts.get(b"clear_revbranch", False)
4172 4188 timer, fm = gettimer(ui, opts)
4173 4189
4174 4190 def getbranchmap(filtername):
4175 4191 """generate a benchmark function for the filtername"""
4176 4192 if filtername is None:
4177 4193 view = repo
4178 4194 else:
4179 4195 view = repo.filtered(filtername)
4180 4196 if util.safehasattr(view._branchcaches, '_per_filter'):
4181 4197 filtered = view._branchcaches._per_filter
4182 4198 else:
4183 4199 # older versions
4184 4200 filtered = view._branchcaches
4185 4201
4186 4202 def d():
4187 4203 if clear_revbranch:
4188 4204 repo.revbranchcache()._clear()
4189 4205 if full:
4190 4206 view._branchcaches.clear()
4191 4207 else:
4192 4208 filtered.pop(filtername, None)
4193 4209 view.branchmap()
4194 4210
4195 4211 return d
4196 4212
4197 4213 # add filters from smaller subsets to bigger subsets
4198 4214 possiblefilters = set(repoview.filtertable)
4199 4215 if filternames:
4200 4216 possiblefilters &= set(filternames)
4201 4217 subsettable = getbranchmapsubsettable()
4202 4218 allfilters = []
4203 4219 while possiblefilters:
4204 4220 for name in possiblefilters:
4205 4221 subset = subsettable.get(name)
4206 4222 if subset not in possiblefilters:
4207 4223 break
4208 4224 else:
4209 4225 assert False, b'subset cycle %s!' % possiblefilters
4210 4226 allfilters.append(name)
4211 4227 possiblefilters.remove(name)
4212 4228
4213 4229 # warm the cache
4214 4230 if not full:
4215 4231 for name in allfilters:
4216 4232 repo.filtered(name).branchmap()
4217 4233 if not filternames or b'unfiltered' in filternames:
4218 4234 # add unfiltered
4219 4235 allfilters.append(None)
4220 4236
4221 4237 old_branch_cache_from_file = None
4222 4238 branchcacheread = None
4223 4239 if util.safehasattr(branchmap, 'branch_cache_from_file'):
4224 4240 old_branch_cache_from_file = branchmap.branch_cache_from_file
4225 4241 branchmap.branch_cache_from_file = lambda *args: None
4226 4242 elif util.safehasattr(branchmap.branchcache, 'fromfile'):
4227 4243 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
4228 4244 branchcacheread.set(classmethod(lambda *args: None))
4229 4245 else:
4230 4246 # older versions
4231 4247 branchcacheread = safeattrsetter(branchmap, b'read')
4232 4248 branchcacheread.set(lambda *args: None)
4233 4249 if util.safehasattr(branchmap, '_LocalBranchCache'):
4234 4250 branchcachewrite = safeattrsetter(branchmap._LocalBranchCache, b'write')
4235 4251 branchcachewrite.set(lambda *args: None)
4236 4252 else:
4237 4253 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
4238 4254 branchcachewrite.set(lambda *args: None)
4239 4255 try:
4240 4256 for name in allfilters:
4241 4257 printname = name
4242 4258 if name is None:
4243 4259 printname = b'unfiltered'
4244 4260 timer(getbranchmap(name), title=printname)
4245 4261 finally:
4246 4262 if old_branch_cache_from_file is not None:
4247 4263 branchmap.branch_cache_from_file = old_branch_cache_from_file
4248 4264 if branchcacheread is not None:
4249 4265 branchcacheread.restore()
4250 4266 branchcachewrite.restore()
4251 4267 fm.end()
4252 4268
4253 4269
4254 4270 @command(
4255 4271 b'perf::branchmapupdate|perfbranchmapupdate',
4256 4272 [
4257 4273 (b'', b'base', [], b'subset of revision to start from'),
4258 4274 (b'', b'target', [], b'subset of revision to end with'),
4259 4275 (b'', b'clear-caches', False, b'clear cache between each runs'),
4260 4276 ]
4261 4277 + formatteropts,
4262 4278 )
4263 4279 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
4264 4280 """benchmark branchmap update from for <base> revs to <target> revs
4265 4281
4266 4282 If `--clear-caches` is passed, the following items will be reset before
4267 4283 each update:
4268 4284 * the changelog instance and associated indexes
4269 4285 * the rev-branch-cache instance
4270 4286
4271 4287 Examples:
4272 4288
4273 4289 # update for the one last revision
4274 4290 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
4275 4291
4276 4292 # update for a change coming with a new branch
4277 4293 $ hg perfbranchmapupdate --base 'stable' --target 'default'
4278 4294 """
4279 4295 from mercurial import branchmap
4280 4296 from mercurial import repoview
4281 4297
4282 4298 opts = _byteskwargs(opts)
4283 4299 timer, fm = gettimer(ui, opts)
4284 4300 clearcaches = opts[b'clear_caches']
4285 4301 unfi = repo.unfiltered()
4286 4302 x = [None] # used to pass data between closure
4287 4303
4288 4304 # we use a `list` here to avoid possible side effect from smartset
4289 4305 baserevs = list(scmutil.revrange(repo, base))
4290 4306 targetrevs = list(scmutil.revrange(repo, target))
4291 4307 if not baserevs:
4292 4308 raise error.Abort(b'no revisions selected for --base')
4293 4309 if not targetrevs:
4294 4310 raise error.Abort(b'no revisions selected for --target')
4295 4311
4296 4312 # make sure the target branchmap also contains the one in the base
4297 4313 targetrevs = list(set(baserevs) | set(targetrevs))
4298 4314 targetrevs.sort()
4299 4315
4300 4316 cl = repo.changelog
4301 4317 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
4302 4318 allbaserevs.sort()
4303 4319 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
4304 4320
4305 4321 newrevs = list(alltargetrevs.difference(allbaserevs))
4306 4322 newrevs.sort()
4307 4323
4308 4324 allrevs = frozenset(unfi.changelog.revs())
4309 4325 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
4310 4326 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
4311 4327
4312 4328 def basefilter(repo, visibilityexceptions=None):
4313 4329 return basefilterrevs
4314 4330
4315 4331 def targetfilter(repo, visibilityexceptions=None):
4316 4332 return targetfilterrevs
4317 4333
4318 4334 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
4319 4335 ui.status(msg % (len(allbaserevs), len(newrevs)))
4320 4336 if targetfilterrevs:
4321 4337 msg = b'(%d revisions still filtered)\n'
4322 4338 ui.status(msg % len(targetfilterrevs))
4323 4339
4324 4340 try:
4325 4341 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
4326 4342 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
4327 4343
4328 4344 baserepo = repo.filtered(b'__perf_branchmap_update_base')
4329 4345 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
4330 4346
4331 4347 bcache = repo.branchmap()
4332 4348 copy_method = 'copy'
4333 4349
4334 4350 copy_base_kwargs = copy_target_kwargs = {}
4335 4351 if hasattr(bcache, 'copy'):
4336 4352 if 'repo' in getargspec(bcache.copy).args:
4337 4353 copy_base_kwargs = {"repo": baserepo}
4338 4354 copy_target_kwargs = {"repo": targetrepo}
4339 4355 else:
4340 4356 copy_method = 'inherit_for'
4341 4357 copy_base_kwargs = {"repo": baserepo}
4342 4358 copy_target_kwargs = {"repo": targetrepo}
4343 4359
4344 4360 # try to find an existing branchmap to reuse
4345 4361 subsettable = getbranchmapsubsettable()
4346 4362 candidatefilter = subsettable.get(None)
4347 4363 while candidatefilter is not None:
4348 4364 candidatebm = repo.filtered(candidatefilter).branchmap()
4349 4365 if candidatebm.validfor(baserepo):
4350 4366 filtered = repoview.filterrevs(repo, candidatefilter)
4351 4367 missing = [r for r in allbaserevs if r in filtered]
4352 4368 base = getattr(candidatebm, copy_method)(**copy_base_kwargs)
4353 4369 base.update(baserepo, missing)
4354 4370 break
4355 4371 candidatefilter = subsettable.get(candidatefilter)
4356 4372 else:
4357 4373 # no suitable subset was found
4358 4374 base = branchmap.branchcache()
4359 4375 base.update(baserepo, allbaserevs)
4360 4376
4361 4377 def setup():
4362 4378 x[0] = getattr(base, copy_method)(**copy_target_kwargs)
4363 4379 if clearcaches:
4364 4380 unfi._revbranchcache = None
4365 4381 clearchangelog(repo)
4366 4382
4367 4383 def bench():
4368 4384 x[0].update(targetrepo, newrevs)
4369 4385
4370 4386 timer(bench, setup=setup)
4371 4387 fm.end()
4372 4388 finally:
4373 4389 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
4374 4390 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
4375 4391
4376 4392
4377 4393 @command(
4378 4394 b'perf::branchmapload|perfbranchmapload',
4379 4395 [
4380 4396 (b'f', b'filter', b'', b'Specify repoview filter'),
4381 4397 (b'', b'list', False, b'List branchmap filter caches'),
4382 4398 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
4383 4399 ]
4384 4400 + formatteropts,
4385 4401 )
4386 4402 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
4387 4403 """benchmark reading the branchmap"""
4388 4404 opts = _byteskwargs(opts)
4389 4405 clearrevlogs = opts[b'clear_revlogs']
4390 4406
4391 4407 if list:
4392 4408 for name, kind, st in repo.cachevfs.readdir(stat=True):
4393 4409 if name.startswith(b'branch2'):
4394 4410 filtername = name.partition(b'-')[2] or b'unfiltered'
4395 4411 ui.status(
4396 4412 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
4397 4413 )
4398 4414 return
4399 4415 if not filter:
4400 4416 filter = None
4401 4417 subsettable = getbranchmapsubsettable()
4402 4418 if filter is None:
4403 4419 repo = repo.unfiltered()
4404 4420 else:
4405 4421 repo = repoview.repoview(repo, filter)
4406 4422
4407 4423 repo.branchmap() # make sure we have a relevant, up to date branchmap
4408 4424
4409 4425 fromfile = getattr(branchmap, 'branch_cache_from_file', None)
4410 4426 if fromfile is None:
4411 4427 fromfile = getattr(branchmap.branchcache, 'fromfile', None)
4412 4428 if fromfile is None:
4413 4429 fromfile = branchmap.read
4414 4430
4415 4431 currentfilter = filter
4416 4432 # try once without timer, the filter may not be cached
4417 4433 while fromfile(repo) is None:
4418 4434 currentfilter = subsettable.get(currentfilter)
4419 4435 if currentfilter is None:
4420 4436 raise error.Abort(
4421 4437 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
4422 4438 )
4423 4439 repo = repo.filtered(currentfilter)
4424 4440 timer, fm = gettimer(ui, opts)
4425 4441
4426 4442 def setup():
4427 4443 if clearrevlogs:
4428 4444 clearchangelog(repo)
4429 4445
4430 4446 def bench():
4431 4447 fromfile(repo)
4432 4448
4433 4449 timer(bench, setup=setup)
4434 4450 fm.end()
4435 4451
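Example: hypothetical invocations, assuming a 'served' filter cache exists on disk:

  $ hg perf::branchmapload --list
  $ hg perf::branchmapload --filter served --clear-revlogs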
4436 4452
4437 4453 @command(b'perf::loadmarkers|perfloadmarkers')
4438 4454 def perfloadmarkers(ui, repo):
4439 4455 """benchmark the time to parse the on-disk markers for a repo
4440 4456
4441 4457 Result is the number of markers in the repo."""
4442 4458 timer, fm = gettimer(ui)
4443 4459 svfs = getsvfs(repo)
4444 4460 timer(lambda: len(obsolete.obsstore(repo, svfs)))
4445 4461 fm.end()
4446 4462
4447 4463
4448 4464 @command(
4449 4465 b'perf::lrucachedict|perflrucachedict',
4450 4466 formatteropts
4451 4467 + [
4452 4468 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
4453 4469 (b'', b'mincost', 0, b'smallest cost of items in cache'),
4454 4470 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
4455 4471 (b'', b'size', 4, b'size of cache'),
4456 4472 (b'', b'gets', 10000, b'number of key lookups'),
4457 4473 (b'', b'sets', 10000, b'number of key sets'),
4458 4474 (b'', b'mixed', 10000, b'number of mixed mode operations'),
4459 4475 (
4460 4476 b'',
4461 4477 b'mixedgetfreq',
4462 4478 50,
4463 4479 b'frequency of get vs set ops in mixed mode',
4464 4480 ),
4465 4481 ],
4466 4482 norepo=True,
4467 4483 )
4468 4484 def perflrucache(
4469 4485 ui,
4470 4486 mincost=0,
4471 4487 maxcost=100,
4472 4488 costlimit=0,
4473 4489 size=4,
4474 4490 gets=10000,
4475 4491 sets=10000,
4476 4492 mixed=10000,
4477 4493 mixedgetfreq=50,
4478 4494 **opts
4479 4495 ):
4480 4496 opts = _byteskwargs(opts)
4481 4497
4482 4498 def doinit():
4483 4499 for i in _xrange(10000):
4484 4500 util.lrucachedict(size)
4485 4501
4486 4502 costrange = list(range(mincost, maxcost + 1))
4487 4503
4488 4504 values = []
4489 4505 for i in _xrange(size):
4490 4506 values.append(random.randint(0, _maxint))
4491 4507
4492 4508 # Get mode fills the cache and tests raw lookup performance with no
4493 4509 # eviction.
4494 4510 getseq = []
4495 4511 for i in _xrange(gets):
4496 4512 getseq.append(random.choice(values))
4497 4513
4498 4514 def dogets():
4499 4515 d = util.lrucachedict(size)
4500 4516 for v in values:
4501 4517 d[v] = v
4502 4518 for key in getseq:
4503 4519 value = d[key]
4504 4520 value # silence pyflakes warning
4505 4521
4506 4522 def dogetscost():
4507 4523 d = util.lrucachedict(size, maxcost=costlimit)
4508 4524 for i, v in enumerate(values):
4509 4525 d.insert(v, v, cost=costs[i])
4510 4526 for key in getseq:
4511 4527 try:
4512 4528 value = d[key]
4513 4529 value # silence pyflakes warning
4514 4530 except KeyError:
4515 4531 pass
4516 4532
4517 4533 # Set mode tests insertion speed with cache eviction.
4518 4534 setseq = []
4519 4535 costs = []
4520 4536 for i in _xrange(sets):
4521 4537 setseq.append(random.randint(0, _maxint))
4522 4538 costs.append(random.choice(costrange))
4523 4539
4524 4540 def doinserts():
4525 4541 d = util.lrucachedict(size)
4526 4542 for v in setseq:
4527 4543 d.insert(v, v)
4528 4544
4529 4545 def doinsertscost():
4530 4546 d = util.lrucachedict(size, maxcost=costlimit)
4531 4547 for i, v in enumerate(setseq):
4532 4548 d.insert(v, v, cost=costs[i])
4533 4549
4534 4550 def dosets():
4535 4551 d = util.lrucachedict(size)
4536 4552 for v in setseq:
4537 4553 d[v] = v
4538 4554
4539 4555 # Mixed mode randomly performs gets and sets with eviction.
4540 4556 mixedops = []
4541 4557 for i in _xrange(mixed):
4542 4558 r = random.randint(0, 100)
4543 4559 if r < mixedgetfreq:
4544 4560 op = 0
4545 4561 else:
4546 4562 op = 1
4547 4563
4548 4564 mixedops.append(
4549 4565 (op, random.randint(0, size * 2), random.choice(costrange))
4550 4566 )
4551 4567
4552 4568 def domixed():
4553 4569 d = util.lrucachedict(size)
4554 4570
4555 4571 for op, v, cost in mixedops:
4556 4572 if op == 0:
4557 4573 try:
4558 4574 d[v]
4559 4575 except KeyError:
4560 4576 pass
4561 4577 else:
4562 4578 d[v] = v
4563 4579
4564 4580 def domixedcost():
4565 4581 d = util.lrucachedict(size, maxcost=costlimit)
4566 4582
4567 4583 for op, v, cost in mixedops:
4568 4584 if op == 0:
4569 4585 try:
4570 4586 d[v]
4571 4587 except KeyError:
4572 4588 pass
4573 4589 else:
4574 4590 d.insert(v, v, cost=cost)
4575 4591
4576 4592 benches = [
4577 4593 (doinit, b'init'),
4578 4594 ]
4579 4595
4580 4596 if costlimit:
4581 4597 benches.extend(
4582 4598 [
4583 4599 (dogetscost, b'gets w/ cost limit'),
4584 4600 (doinsertscost, b'inserts w/ cost limit'),
4585 4601 (domixedcost, b'mixed w/ cost limit'),
4586 4602 ]
4587 4603 )
4588 4604 else:
4589 4605 benches.extend(
4590 4606 [
4591 4607 (dogets, b'gets'),
4592 4608 (doinserts, b'inserts'),
4593 4609 (dosets, b'sets'),
4594 4610 (domixed, b'mixed'),
4595 4611 ]
4596 4612 )
4597 4613
4598 4614 for fn, title in benches:
4599 4615 timer, fm = gettimer(ui, opts)
4600 4616 timer(fn, title=title)
4601 4617 fm.end()
4602 4618
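Example: a hypothetical run exercising the cost-limited code paths via the options above:

  $ hg perf::lrucachedict --size 4 --costlimit 100 --mixedgetfreq 90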
4603 4619
4604 4620 @command(
4605 4621 b'perf::write|perfwrite',
4606 4622 formatteropts
4607 4623 + [
4608 4624 (b'', b'write-method', b'write', b'ui write method'),
4609 4625 (b'', b'nlines', 100, b'number of lines'),
4610 4626 (b'', b'nitems', 100, b'number of items (per line)'),
4611 4627 (b'', b'item', b'x', b'item that is written'),
4612 4628 (b'', b'batch-line', None, b'pass whole line to write method at once'),
4613 4629 (b'', b'flush-line', None, b'flush after each line'),
4614 4630 ],
4615 4631 )
4616 4632 def perfwrite(ui, repo, **opts):
4617 4633 """microbenchmark ui.write (and others)"""
4618 4634 opts = _byteskwargs(opts)
4619 4635
4620 4636 write = getattr(ui, _sysstr(opts[b'write_method']))
4621 4637 nlines = int(opts[b'nlines'])
4622 4638 nitems = int(opts[b'nitems'])
4623 4639 item = opts[b'item']
4624 4640 batch_line = opts.get(b'batch_line')
4625 4641 flush_line = opts.get(b'flush_line')
4626 4642
4627 4643 if batch_line:
4628 4644 line = item * nitems + b'\n'
4629 4645
4630 4646 def benchmark():
4631 4647 for i in pycompat.xrange(nlines):
4632 4648 if batch_line:
4633 4649 write(line)
4634 4650 else:
4635 4651 for i in pycompat.xrange(nitems):
4636 4652 write(item)
4637 4653 write(b'\n')
4638 4654 if flush_line:
4639 4655 ui.flush()
4640 4656 ui.flush()
4641 4657
4642 4658 timer, fm = gettimer(ui, opts)
4643 4659 timer(benchmark)
4644 4660 fm.end()
4645 4661
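Example: a sketch comparing batched against per-item writes:

  $ hg perf::write --nlines 1000 --nitems 50 --batch-line
  $ hg perf::write --nlines 1000 --nitems 50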
4646 4662
4647 4663 def uisetup(ui):
4648 4664 if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
4649 4665 commands, b'debugrevlogopts'
4650 4666 ):
4651 4667 # for "historical portability":
4652 4668 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
4653 4669 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
4654 4670 # openrevlog() should cause failure, because it has been
4655 4671 # available since 3.5 (or 49c583ca48c4).
4656 4672 def openrevlog(orig, repo, cmd, file_, opts):
4657 4673 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
4658 4674 raise error.Abort(
4659 4675 b"This version doesn't support --dir option",
4660 4676 hint=b"use 3.5 or later",
4661 4677 )
4662 4678 return orig(repo, cmd, file_, opts)
4663 4679
4664 4680 name = _sysstr(b'openrevlog')
4665 4681 extensions.wrapfunction(cmdutil, name, openrevlog)
4666 4682
4667 4683
4668 4684 @command(
4669 4685 b'perf::progress|perfprogress',
4670 4686 formatteropts
4671 4687 + [
4672 4688 (b'', b'topic', b'topic', b'topic for progress messages'),
4673 4689 (b'c', b'total', 1000000, b'total value we are progressing to'),
4674 4690 ],
4675 4691 norepo=True,
4676 4692 )
4677 4693 def perfprogress(ui, topic=None, total=None, **opts):
4678 4694 """printing of progress bars"""
4679 4695 opts = _byteskwargs(opts)
4680 4696
4681 4697 timer, fm = gettimer(ui, opts)
4682 4698
4683 4699 def doprogress():
4684 4700 with ui.makeprogress(topic, total=total) as progress:
4685 4701 for i in _xrange(total):
4686 4702 progress.increment()
4687 4703
4688 4704 timer(doprogress)
4689 4705 fm.end()
@@ -1,484 +1,487
1 1 #require test-repo
2 2
3 3 Set vars:
4 4
5 5 $ . "$TESTDIR/helpers-testrepo.sh"
6 6 $ CONTRIBDIR="$TESTDIR/../contrib"
7 7
8 8 Prepare repo:
9 9
10 10 $ hg init
11 11
12 12 $ echo this is file a > a
13 13 $ hg add a
14 14 $ hg commit -m first
15 15
16 16 $ echo adding to file a >> a
17 17 $ hg commit -m second
18 18
19 19 $ echo adding more to file a >> a
20 20 $ hg commit -m third
21 21
22 22 $ hg up -r 0
23 23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 24 $ echo merge-this >> a
25 25 $ hg commit -m merge-able
26 26 created new head
27 27
28 28 $ hg up -r 2
29 29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
30 30
31 31 perfstatus
32 32
33 33 $ cat >> $HGRCPATH << EOF
34 34 > [extensions]
35 35 > perf=$CONTRIBDIR/perf.py
36 36 > [perf]
37 37 > presleep=0
38 38 > stub=on
39 39 > parentscount=1
40 40 > EOF
41 41 $ hg help -e perf
42 42 perf extension - helper extension to measure performance
43 43
44 44 Configurations
45 45 ==============
46 46
47 47 "perf"
48 48 ------
49 49
50 50 "all-timing"
51 51 When set, additional statistics will be reported for each benchmark: best,
52 52 worst, median average. If not set only the best timing is reported
53 53 (default: off).
54 54
55 55 "presleep"
56 56 number of second to wait before any group of runs (default: 1)
57 57
58 58 "pre-run"
59 59 number of run to perform before starting measurement.
60 60
61 61 "profile-benchmark"
62 Enable profiling for the benchmarked section. (The first iteration is
63 benchmarked)
62 Enable profiling for the benchmarked section. (by default, the first
63 iteration is benchmarked)
64
65 "profiled-runs"
66 list of iteration to profile (starting from 0)
64 67
65 68 "run-limits"
66 69 Control the number of runs each benchmark will perform. The option value
67 70 should be a list of '<time>-<numberofrun>' pairs. After each run the
68 71 conditions are considered in order with the following logic:
69 72
70 73 If benchmark has been running for <time> seconds, and we have performed
71 74 <numberofrun> iterations, stop the benchmark,
72 75
73 76 The default value is: '3.0-100, 10.0-3'
74 77
75 78 "stub"
76 79 When set, benchmarks will only be run once, useful for testing (default:
77 80 off)
78 81
79 82 list of commands:
80 83
81 84 perf::addremove
82 85 (no help text available)
83 86 perf::ancestors
84 87 (no help text available)
85 88 perf::ancestorset
86 89 (no help text available)
87 90 perf::annotate
88 91 (no help text available)
89 92 perf::bdiff benchmark a bdiff between revisions
90 93 perf::bookmarks
91 94 benchmark parsing bookmarks from disk to memory
92 95 perf::branchmap
93 96 benchmark the update of a branchmap
94 97 perf::branchmapload
95 98 benchmark reading the branchmap
96 99 perf::branchmapupdate
97 100 benchmark branchmap update from <base> revs to <target>
98 101 revs
99 102 perf::bundle benchmark the creation of a bundle from a repository
100 103 perf::bundleread
101 104 Benchmark reading of bundle files.
102 105 perf::cca (no help text available)
103 106 perf::changegroupchangelog
104 107 Benchmark producing a changelog group for a changegroup.
105 108 perf::changeset
106 109 (no help text available)
107 110 perf::ctxfiles
108 111 (no help text available)
109 112 perf::delta-find
110 113 benchmark the process of finding a valid delta for a revlog
111 114 revision
112 115 perf::diffwd Profile diff of working directory changes
113 116 perf::dirfoldmap
114 117 benchmark a 'dirstate._map.dirfoldmap.get()' request
115 118 perf::dirs (no help text available)
116 119 perf::dirstate
117 120 benchmark the time of various dirstate operations
118 121 perf::dirstatedirs
119 122 benchmark a 'dirstate.hasdir' call from an empty 'dirs' cache
120 123 perf::dirstatefoldmap
121 124 benchmark a 'dirstate._map.filefoldmap.get()' request
122 125 perf::dirstatewrite
123 126 benchmark the time it takes to write a dirstate on disk
124 127 perf::discovery
125 128 benchmark discovery between local repo and the peer at given
126 129 path
127 130 perf::fncacheencode
128 131 (no help text available)
129 132 perf::fncacheload
130 133 (no help text available)
131 134 perf::fncachewrite
132 135 (no help text available)
133 136 perf::heads benchmark the computation of a changelog heads
134 137 perf::helper-mergecopies
135 138 find statistics about potential parameters for
136 139 'perfmergecopies'
137 140 perf::helper-pathcopies
138 141 find statistics about potential parameters for the
139 142 'perftracecopies'
140 143 perf::ignore benchmark operation related to computing ignore
141 144 perf::index benchmark index creation time followed by a lookup
142 145 perf::linelogedits
143 146 (no help text available)
144 147 perf::loadmarkers
145 148 benchmark the time to parse the on-disk markers for a repo
146 149 perf::log (no help text available)
147 150 perf::lookup (no help text available)
148 151 perf::lrucachedict
149 152 (no help text available)
150 153 perf::manifest
151 154 benchmark the time to read a manifest from disk and return a
152 155 usable
153 156 perf::mergecalculate
154 157 (no help text available)
155 158 perf::mergecopies
156 159 measure runtime of 'copies.mergecopies'
157 160 perf::moonwalk
158 161 benchmark walking the changelog backwards
159 162 perf::nodelookup
160 163 (no help text available)
161 164 perf::nodemap
162 165 benchmark the time necessary to look up revision from a cold
163 166 nodemap
164 167 perf::parents
165 168 benchmark the time necessary to fetch one changeset's parents.
166 169 perf::pathcopies
167 170 benchmark the copy tracing logic
168 171 perf::phases benchmark phasesets computation
169 172 perf::phasesremote
170 173 benchmark time needed to analyse phases of the remote server
171 174 perf::progress
172 175 printing of progress bars
173 176 perf::rawfiles
174 177 (no help text available)
175 178 perf::revlogchunks
176 179 Benchmark operations on revlog chunks.
177 180 perf::revlogindex
178 181 Benchmark operations against a revlog index.
179 182 perf::revlogrevision
180 183 Benchmark obtaining a revlog revision.
181 184 perf::revlogrevisions
182 185 Benchmark reading a series of revisions from a revlog.
183 186 perf::revlogwrite
184 187 Benchmark writing a series of revisions to a revlog.
185 188 perf::revrange
186 189 (no help text available)
187 190 perf::revset benchmark the execution time of a revset
188 191 perf::startup
189 192 (no help text available)
190 193 perf::status benchmark the performance of a single status call
191 194 perf::stream-consume
192 195 benchmark the full application of a stream clone
193 196 perf::stream-generate
194 197 benchmark the full generation of a stream clone
195 198 perf::stream-locked-section
196 199 benchmark the initial, repo-locked, section of a stream-clone
197 200 perf::tags Benchmark tags retrieval in various situation
198 201 perf::templating
199 202 test the rendering time of a given template
200 203 perf::unbundle
201 204 benchmark application of a bundle in a repository.
202 205 perf::unidiff
203 206 benchmark a unified diff between revisions
204 207 perf::volatilesets
205 208 benchmark the computation of various volatile sets
206 209 perf::walk (no help text available)
207 210 perf::write microbenchmark ui.write (and others)
208 211
209 212 (use 'hg help -v perf' to show built-in aliases and global options)
210 213
211 214 $ hg help perfaddremove
212 215 hg perf::addremove
213 216
214 217 aliases: perfaddremove
215 218
216 219 (no help text available)
217 220
218 221 options:
219 222
220 223 -T --template TEMPLATE display with template
221 224
222 225 (some details hidden, use --verbose to show complete help)
223 226
224 227 $ hg perfaddremove
225 228 $ hg perfancestors
226 229 $ hg perfancestorset 2
227 230 $ hg perfannotate a
228 231 $ hg perfbdiff -c 1
229 232 $ hg perfbdiff --alldata 1
230 233 $ hg perfunidiff -c 1
231 234 $ hg perfunidiff --alldata 1
232 235 $ hg perfbookmarks
233 236 $ hg perfbranchmap
234 237 $ hg perfbranchmapload
235 238 $ hg perfbranchmapupdate --base "not tip" --target "tip"
236 239 benchmark of branchmap with 3 revisions with 1 new ones
237 240 $ hg perfcca
238 241 $ hg perfchangegroupchangelog
239 242 $ hg perfchangegroupchangelog --cgversion 01
240 243 $ hg perfchangeset 2
241 244 $ hg perfctxfiles 2
242 245 $ hg perfdiffwd
243 246 $ hg perfdirfoldmap
244 247 $ hg perfdirs
245 248 $ hg perfdirstate
246 249 $ hg perfdirstate --contains
247 250 $ hg perfdirstate --iteration
248 251 $ hg perfdirstatedirs
249 252 $ hg perfdirstatefoldmap
250 253 $ hg perfdirstatewrite
251 254 #if repofncache
252 255 $ hg perffncacheencode
253 256 $ hg perffncacheload
254 257 $ hg debugrebuildfncache
255 258 fncache already up to date
256 259 $ hg perffncachewrite
257 260 $ hg debugrebuildfncache
258 261 fncache already up to date
259 262 #endif
260 263 $ hg perfheads
261 264 $ hg perfignore
262 265 $ hg perfindex
263 266 $ hg perflinelogedits -n 1
264 267 $ hg perfloadmarkers
265 268 $ hg perflog
266 269 $ hg perflookup 2
267 270 $ hg perflrucache
268 271 $ hg perfmanifest 2
269 272 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
270 273 $ hg perfmanifest -m 44fe2c8352bb
271 274 abort: manifest revision must be integer or full node
272 275 [255]
273 276 $ hg perfmergecalculate -r 3
274 277 $ hg perfmoonwalk
275 278 $ hg perfnodelookup 2
276 279 $ hg perfpathcopies 1 2
277 280 $ hg perfprogress --total 1000
278 281 $ hg perfrawfiles 2
279 282 $ hg perfrevlogindex -c
280 283 #if reporevlogstore
281 284 $ hg perfrevlogrevisions .hg/store/data/a.i
282 285 #endif
283 286 $ hg perfrevlogrevision -m 0
284 287 $ hg perfrevlogchunks -c
285 288 $ hg perfrevrange
286 289 $ hg perfrevset 'all()'
287 290 $ hg perfstartup
288 291 $ hg perfstatus
289 292 $ hg perfstatus --dirstate
290 293 $ hg perftags
291 294 $ hg perftemplating
292 295 $ hg perfvolatilesets
293 296 $ hg perfwalk
294 297 $ hg perfparents
295 298 $ hg perfdiscovery -q .
296 299 $ hg perf::phases
297 300
298 301 Test run control
299 302 ----------------
300 303
301 304 Simple single entry
302 305
303 306 $ hg perfparents --config perf.stub=no --config perf.run-limits='0.000000001-15'
304 307 ! wall * comb * user * sys * (best of 15) (glob)
305 308 ! wall * comb * user * sys * (max of 15) (glob)
306 309 ! wall * comb * user * sys * (avg of 15) (glob)
307 310 ! wall * comb * user * sys * (median of 15) (glob)
308 311
309 312 Multiple entries
310 313
311 314 $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-50'
312 315 ! wall * comb * user * sys * (best of 50) (glob)
313 316 ! wall * comb * user * sys * (max of 50) (glob)
314 317 ! wall * comb * user * sys * (avg of 50) (glob)
315 318 ! wall * comb * user * sys * (median of 50) (glob)
316 319
317 320 error cases are ignored
318 321
319 322 $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-50'
320 323 malformatted run limit entry, missing "-": 500
321 324 ! wall * comb * user * sys * (best of 50) (glob)
322 325 ! wall * comb * user * sys * (max of 50) (glob)
323 326 ! wall * comb * user * sys * (avg of 50) (glob)
324 327 ! wall * comb * user * sys * (median of 50) (glob)
325 328 $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-120, 0.000000001-50'
326 329 malformatted run limit entry, could not convert string to float: 'aaa': aaa-120
327 330 ! wall * comb * user * sys * (best of 50) (glob)
328 331 ! wall * comb * user * sys * (max of 50) (glob)
329 332 ! wall * comb * user * sys * (avg of 50) (glob)
330 333 ! wall * comb * user * sys * (median of 50) (glob)
331 334 $ hg perfparents --config perf.stub=no --config perf.run-limits='120-aaaaaa, 0.000000001-50'
332 335 malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 120-aaaaaa
333 336 ! wall * comb * user * sys * (best of 50) (glob)
334 337 ! wall * comb * user * sys * (max of 50) (glob)
335 338 ! wall * comb * user * sys * (avg of 50) (glob)
336 339 ! wall * comb * user * sys * (median of 50) (glob)
337 340
338 341 test actual output
339 342 ------------------
340 343
341 344 normal output:
342 345
343 346 $ hg perfheads --config perf.stub=no
344 347 ! wall * comb * user * sys * (best of *) (glob)
345 348 ! wall * comb * user * sys * (max of *) (glob)
346 349 ! wall * comb * user * sys * (avg of *) (glob)
347 350 ! wall * comb * user * sys * (median of *) (glob)
348 351
349 352 detailed output:
350 353
351 354 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
352 355 ! wall * comb * user * sys * (best of *) (glob)
353 356 ! wall * comb * user * sys * (max of *) (glob)
354 357 ! wall * comb * user * sys * (avg of *) (glob)
355 358 ! wall * comb * user * sys * (median of *) (glob)
356 359
357 360 test json output
358 361 ----------------
359 362
360 363 normal output:
361 364
362 365 $ hg perfheads --template json --config perf.stub=no
363 366 [
364 367 {
365 368 "avg.comb": *, (glob)
366 369 "avg.count": *, (glob)
367 370 "avg.sys": *, (glob)
368 371 "avg.user": *, (glob)
369 372 "avg.wall": *, (glob)
370 373 "comb": *, (glob)
371 374 "count": *, (glob)
372 375 "max.comb": *, (glob)
373 376 "max.count": *, (glob)
374 377 "max.sys": *, (glob)
375 378 "max.user": *, (glob)
376 379 "max.wall": *, (glob)
377 380 "median.comb": *, (glob)
378 381 "median.count": *, (glob)
379 382 "median.sys": *, (glob)
380 383 "median.user": *, (glob)
381 384 "median.wall": *, (glob)
382 385 "sys": *, (glob)
383 386 "user": *, (glob)
384 387 "wall": * (glob)
385 388 }
386 389 ]
387 390
388 391 detailed output:
389 392
390 393 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
391 394 [
392 395 {
393 396 "avg.comb": *, (glob)
394 397 "avg.count": *, (glob)
395 398 "avg.sys": *, (glob)
396 399 "avg.user": *, (glob)
397 400 "avg.wall": *, (glob)
398 401 "comb": *, (glob)
399 402 "count": *, (glob)
400 403 "max.comb": *, (glob)
401 404 "max.count": *, (glob)
402 405 "max.sys": *, (glob)
403 406 "max.user": *, (glob)
404 407 "max.wall": *, (glob)
405 408 "median.comb": *, (glob)
406 409 "median.count": *, (glob)
407 410 "median.sys": *, (glob)
408 411 "median.user": *, (glob)
409 412 "median.wall": *, (glob)
410 413 "sys": *, (glob)
411 414 "user": *, (glob)
412 415 "wall": * (glob)
413 416 }
414 417 ]
415 418
416 419 Test pre-run feature
417 420 --------------------
418 421
419 422 (perf discovery has some spurious output)
420 423
421 424 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=0
422 425 ! wall * comb * user * sys * (best of 1) (glob)
423 426 ! wall * comb * user * sys * (max of 1) (glob)
424 427 ! wall * comb * user * sys * (avg of 1) (glob)
425 428 ! wall * comb * user * sys * (median of 1) (glob)
426 429 searching for changes
427 430 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=1
428 431 ! wall * comb * user * sys * (best of 1) (glob)
429 432 ! wall * comb * user * sys * (max of 1) (glob)
430 433 ! wall * comb * user * sys * (avg of 1) (glob)
431 434 ! wall * comb * user * sys * (median of 1) (glob)
432 435 searching for changes
433 436 searching for changes
434 437 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=3
435 438 ! wall * comb * user * sys * (best of 1) (glob)
436 439 ! wall * comb * user * sys * (max of 1) (glob)
437 440 ! wall * comb * user * sys * (avg of 1) (glob)
438 441 ! wall * comb * user * sys * (median of 1) (glob)
439 442 searching for changes
440 443 searching for changes
441 444 searching for changes
442 445 searching for changes
443 446 $ hg perf::bundle 'last(all(), 5)'
444 447 $ hg bundle --exact --rev 'last(all(), 5)' last-5.hg
445 448 4 changesets found
446 449 $ hg perf::unbundle last-5.hg
447 450
448 451
449 452 test profile-benchmark option
450 453 ------------------------------
451 454
452 455 Function to check that statprof ran
453 456 $ statprofran () {
454 457 > grep -E 'Sample count:|No samples recorded' > /dev/null
455 458 > }
456 459 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.profile-benchmark=yes 2>&1 | statprofran
457 460
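With the new perf.profiled-runs setting, iterations other than the first can be profiled as well; a hypothetical invocation (the exact list syntax depends on the config parser) could look like:

  $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-3' --config perf.profile-benchmark=yes --config perf.profiled-runs='0 2' 2>&1 | statprofran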
458 461 Check perf.py for historical portability
459 462 ----------------------------------------
460 463
461 464 $ cd "$TESTDIR/.."
462 465
463 466 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
464 467 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
465 468 > "$TESTDIR"/check-perf-code.py contrib/perf.py
466 469 contrib/perf.py:\d+: (re)
467 470 > from mercurial import (
468 471 import newer module separately in try clause for early Mercurial
469 472 contrib/perf.py:\d+: (re)
470 473 > from mercurial import (
471 474 import newer module separately in try clause for early Mercurial
472 475 contrib/perf.py:\d+: (re)
473 476 > origindexpath = orig.opener.join(indexfile)
474 477 use getvfs()/getsvfs() for early Mercurial
475 478 contrib/perf.py:\d+: (re)
476 479 > origdatapath = orig.opener.join(datafile)
477 480 use getvfs()/getsvfs() for early Mercurial
478 481 contrib/perf.py:\d+: (re)
479 482 > vfs = vfsmod.vfs(tmpdir)
480 483 use getvfs()/getsvfs() for early Mercurial
481 484 contrib/perf.py:\d+: (re)
482 485 > vfs.options = getattr(orig.opener, 'options', None)
483 486 use getvfs()/getsvfs() for early Mercurial
484 487 [1]