##// END OF EJS Templates
perf-unbundle: do a quick and dirty fix to make it run on more commits
marmoute -
r50457:27bff608 stable
parent child Browse files
Show More
@@ -1,4205 +1,4230
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance
3 3
4 4 Configurations
5 5 ==============
6 6
7 7 ``perf``
8 8 --------
9 9
10 10 ``all-timing``
11 11 When set, additional statistics will be reported for each benchmark: best,
12 12 worst, median average. If not set only the best timing is reported
13 13 (default: off).
14 14
15 15 ``presleep``
16 16 number of second to wait before any group of runs (default: 1)
17 17
18 18 ``pre-run``
19 19 number of run to perform before starting measurement.
20 20
21 21 ``profile-benchmark``
22 22 Enable profiling for the benchmarked section.
23 23 (The first iteration is benchmarked)
24 24
25 25 ``run-limits``
26 26 Control the number of runs each benchmark will perform. The option value
27 27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 28 conditions are considered in order with the following logic:
29 29
30 30 If benchmark has been running for <time> seconds, and we have performed
31 31 <numberofrun> iterations, stop the benchmark,
32 32
33 33 The default value is: `3.0-100, 10.0-3`
34 34
35 35 ``stub``
36 36 When set, benchmarks will only be run once, useful for testing
37 37 (default: off)
38 38 '''
39 39
40 40 # "historical portability" policy of perf.py:
41 41 #
42 42 # We have to do:
43 43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 46 # - make historical perf command work correctly with as wide Mercurial
47 47 # version as possible
48 48 #
49 49 # We have to do, if possible with reasonable cost:
50 50 # - make recent perf command for historical feature work correctly
51 51 # with early Mercurial
52 52 #
53 53 # We don't have to do:
54 54 # - make perf command for recent feature work correctly with early
55 55 # Mercurial
56 56
57 57 import contextlib
58 58 import functools
59 59 import gc
60 60 import os
61 61 import random
62 62 import shutil
63 63 import struct
64 64 import sys
65 65 import tempfile
66 66 import threading
67 67 import time
68 68
69 69 import mercurial.revlog
70 70 from mercurial import (
71 71 changegroup,
72 72 cmdutil,
73 73 commands,
74 74 copies,
75 75 error,
76 76 extensions,
77 77 hg,
78 78 mdiff,
79 79 merge,
80 80 util,
81 81 )
82 82
83 83 # for "historical portability":
84 84 # try to import modules separately (in dict order), and ignore
85 85 # failure, because these aren't available with early Mercurial
86 86 try:
87 87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 88 except ImportError:
89 89 pass
90 90 try:
91 91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 92 except ImportError:
93 93 pass
94 94 try:
95 95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96 96
97 97 dir(registrar) # forcibly load it
98 98 except ImportError:
99 99 registrar = None
100 100 try:
101 101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 102 except ImportError:
103 103 pass
104 104 try:
105 105 from mercurial.utils import repoviewutil # since 5.0
106 106 except ImportError:
107 107 repoviewutil = None
108 108 try:
109 109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 110 except ImportError:
111 111 pass
112 112 try:
113 113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 114 except ImportError:
115 115 pass
116 116
117 117 try:
118 118 from mercurial import profiling
119 119 except ImportError:
120 120 profiling = None
121 121
# for "historical portability":
# recent Mercurial revlog constructors take an explicit revlog "kind"
# argument; older ones do not.  Expose a uniform revlog() helper that
# works either way.
try:
    from mercurial.revlogutils import constants as revlog_constants

    perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')

    def revlog(opener, *args, **kwargs):
        """Construct a revlog, supplying the perf-specific kind marker."""
        return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)


except (ImportError, AttributeError):
    # pre-"kind" Mercurial: fall back to the old constructor signature
    perf_rl_kind = None

    def revlog(opener, *args, **kwargs):
        """Construct a revlog with the legacy (kind-less) signature."""
        return mercurial.revlog.revlog(opener, *args, **kwargs)
136 136
137 137
def identity(a):
    """Return *a* unchanged; no-op stand-in for missing pycompat helpers."""
    return a
140 140
141 141
# for "historical portability":
# bind the pycompat helpers when present; otherwise fall back to py2-only
# equivalents (this branch can only run on Python 2 era Mercurial).
try:
    from mercurial import pycompat

    getargspec = pycompat.getargspec  # added to module after 4.5
    _byteskwargs = pycompat.byteskwargs  # since 4.1 (or fbc3f73dc802)
    _sysstr = pycompat.sysstr  # since 4.0 (or 2219f4f82ede)
    _bytestr = pycompat.bytestr  # since 4.2 (or b70407bd84d5)
    _xrange = pycompat.xrange  # since 4.8 (or 7eba8f83129b)
    fsencode = pycompat.fsencode  # since 3.9 (or f4a5e0e86a7e)
    if pycompat.ispy3:
        _maxint = sys.maxsize  # per py3 docs for replacing maxint
    else:
        _maxint = sys.maxint
except (NameError, ImportError, AttributeError):
    import inspect

    getargspec = inspect.getargspec
    _byteskwargs = identity
    _bytestr = str
    fsencode = identity  # no py3 support
    _maxint = sys.maxint  # no py3 support
    _sysstr = lambda x: x  # no py3 support
    _xrange = xrange

# locate a usable Queue class across hg/python versions
try:
    # 4.7+
    queue = pycompat.queue.Queue
except (NameError, AttributeError, ImportError):
    # <4.7.
    try:
        queue = pycompat.queue
    except (NameError, AttributeError, ImportError):
        import Queue as queue
175 175
# for "historical portability":
# the log templater factory moved from cmdutil to logcmdutil; resolve it
# from whichever module provides it, or None when neither does.
try:
    from mercurial import logcmdutil

    makelogtemplater = logcmdutil.maketemplater
except (AttributeError, ImportError):
    try:
        makelogtemplater = cmdutil.makelogtemplater
    except (AttributeError, ImportError):
        makelogtemplater = None
185 185
# for "historical portability":
# define util.safehasattr forcibly, because util.safehasattr has been
# available since 1.9.3 (or 94b200a11cf7)
_undefined = object()  # sentinel distinguishing "missing" from a None attribute


def safehasattr(thing, attr):
    """Return True if *thing* has attribute *attr* (given as bytes)."""
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined


setattr(util, 'safehasattr', safehasattr)
197 197
# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    util.timer = time.perf_counter
# NOTE(review): os.name is a str; comparing it to b'nt' can never be true
# on Python 3, so this branch only matters on Python 2 — confirm intended
elif os.name == b'nt':
    util.timer = time.clock
else:
    util.timer = time.time
207 207
# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
# lookup order: cmdutil, then commands, then the empty/local fallback
formatteropts = getattr(
    cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
)

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(
    cmdutil,
    "debugrevlogopts",
    getattr(
        commands,
        "debugrevlogopts",
        [
            (b'c', b'changelog', False, b'open changelog'),
            (b'm', b'manifest', False, b'open manifest'),
            (b'', b'dir', False, b'open directory manifest'),
        ],
    ),
)
235 235
cmdtable = {}

# for "historical portability":
# define parsealiases locally, because cmdutil.parsealiases has been
# available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Return the alias list of a b'name|alias1|alias2' command spec."""
    return cmd.split(b"|")
243 243
244 244
# pick the most capable @command decorator available in this Mercurial,
# wrapping or re-implementing it as needed for older versions
if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if 'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command

        def command(name, options=(), synopsis=None, norepo=False):
            """Wrap the pre-3.1 decorator, emulating norepo= support."""
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)


else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        """Minimal @command replacement registering into cmdtable."""

        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func

        return decorator
276 276
277 277
# register the perf.* config items so devel warnings about unregistered
# config are avoided; silently skip on Mercurial too old to have the
# registrar machinery.  The TypeError branch re-registers without the
# experimental= keyword, which hg 5.2's configitem() did not accept.
try:
    import mercurial.registrar
    import mercurial.configitems

    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'pre-run',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
except (ImportError, AttributeError):
    pass
except TypeError:
    # compatibility fix for a11fd395e83f
    # hg version: 5.2
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'pre-run',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
    )
364 364
365 365
def getlen(ui):
    """Return len(), or a constant-1 stub when perf.stub is set."""
    stubbed = ui.configbool(b"perf", b"stub", False)
    return (lambda seq: 1) if stubbed else len
370 370
371 371
class noop:
    """Context manager that does nothing on enter or exit."""

    def __enter__(self):
        return None

    def __exit__(self, *exc_info):
        # returning None (falsy) propagates any exception
        return None


NOOPCTX = noop()
383 383
384 384
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter:
            """Minimized composition of baseformatter and plainformatter"""

            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short

            def __nonzero__(self):
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    # each entry is b'<seconds>-<minimum-run-count>'; malformed entries
    # are warned about and skipped rather than aborting the benchmark
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
            continue
        try:
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    # profile only when explicitly requested and profiling is importable
    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
    )
    return t, fm
507 507
508 508
def stub_timer(fm, func, setup=None, title=None):
    """Run *func* exactly once (after optional *setup*); used by perf.stub.

    *fm* and *title* are accepted for signature compatibility with _timer
    but are unused.
    """
    if setup is not None:
        setup()
    func()
513 513
514 514
@contextlib.contextmanager
def timeone():
    """Yield a list that receives one (wall, user, sys) timing tuple.

    The tuple describes the time spent inside the ``with`` body.
    """
    measurement = []
    os_before = os.times()
    wall_before = util.timer()
    yield measurement
    wall_after = util.timer()
    os_after = os.times()
    measurement.append(
        (
            wall_after - wall_before,
            os_after[0] - os_before[0],
            os_after[1] - os_before[1],
        )
    )
525 525
526 526
# list of stop condition (elapsed time, minimal run count)
# i.e. stop after 3 seconds once 100 runs are done, or after 10 seconds
# once 3 runs are done, whichever matches first
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)
532 532
533 533
def _timer(
    fm,
    func,
    setup=None,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
):
    """Repeatedly time *func* until a (time, count) limit is met, then report.

    *prerun* unmeasured warm-up iterations run first; only the first
    measured iteration runs under *profiler*.  The result of the last run
    is passed to formatone() for display.
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    for i in range(prerun):
        if setup is not None:
            setup()
        func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with profiler:
            with timeone() as item:
                r = func()
        # only the first measured iteration is profiled
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    formatone(fm, results, title=title, result=r, displayall=displayall)
573 573
574 574
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's timings to formatter *fm*.

    Always reports the best run; with *displayall*, also reports the
    worst, average and median runs.  *timings* is sorted in place.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def _show(role, entry):
        # only non-best rows carry a "role." prefix on their field names
        prefix = b'%s.' % role if role != b'best' else b''
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    _show(b'best', timings[0])
    if displayall:
        _show(b'max', timings[-1])
        _show(b'avg', tuple(sum(col) / count for col in zip(*timings)))
        _show(b'median', timings[len(timings) // 2])
608 608
609 609
610 610 # utilities for historical portability
611 611
612 612
def getint(ui, section, name, default):
    """Read an integer config value portably.

    Returns *default* when the value is unset; raises ConfigError when it
    is set but not an integer.
    """
    # for "historical portability":
    # ui.configint has been available since 1.9 (or fa2b596db182)
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError(
            b"%s.%s is not an integer ('%s')" % (section, name, raw)
        )
625 625
626 626
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort(
            (
                b"missing attribute %s of %s might break assumption"
                b" of performance measurement"
            )
            % (name, obj)
        )

    # 'name' is bytes; _sysstr converts it for getattr/setattr
    origvalue = getattr(obj, _sysstr(name))

    class attrutil:
        """Handle to set a new value or restore the original one."""

        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
663 663
664 664
665 665 # utilities to examine each internal API changes
666 666
667 667
def getbranchmapsubsettable():
    """Return the branchmap 'subsettable' mapping wherever this hg keeps it."""
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    # - repoviewutil since 5.0
    for mod in (branchmap, repoview, repoviewutil):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )
686 686
687 687
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store"""
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf)
    store_vfs = getattr(repo, 'svfs', None)
    if store_vfs:
        return store_vfs
    # older Mercurial exposes the store opener as 'sopener'
    return getattr(repo, 'sopener')
697 697
698 698
def getvfs(repo):
    """Return appropriate object to access files under .hg"""
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf)
    working_vfs = getattr(repo, 'vfs', None)
    if working_vfs:
        return working_vfs
    # older Mercurial exposes it as 'opener'
    return getattr(repo, 'opener')
708 708
709 709
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API"""
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if '_tagscache' in vars(repo):
                del repo.__dict__['_tagscache']

        return clearcache

    # older layouts cache tags directly on repo attributes; clearing is
    # just resetting the attribute to None
    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")
738 738
739 739
740 740 # utilities to clear cache
741 741
742 742
def clearfilecache(obj, attrname):
    """Drop a @filecache'd property so the next access recomputes it."""
    getunfiltered = getattr(obj, 'unfiltered', None)
    if getunfiltered is not None:
        # operate on the unfiltered repo, where the cache actually lives
        obj = getunfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
750 750
751 751
def clearchangelog(repo):
    """Invalidate the cached changelog on *repo* (filtered and unfiltered)."""
    if repo.unfiltered() is not repo:
        # filtered view: also drop its private changelog cache slots
        object.__setattr__(repo, '_clcache', None)
        object.__setattr__(repo, '_clcachekey', None)
    clearfilecache(repo.unfiltered(), 'changelog')
757 757
758 758
759 759 # perf commands
760 760
761 761
@command(b'perf::walk|perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    # benchmark walking the dirstate with the given patterns
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    matcher = scmutil.match(repo[None], pats, {})

    def walk_once():
        walked = repo.dirstate.walk(
            matcher, subrepos=[], unknown=True, ignored=False
        )
        return len(list(walked))

    timer(walk_once)
    fm.end()
775 775
776 776
@command(b'perf::annotate|perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    # benchmark annotating file *f* at the working directory parent
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fctx = repo[b'.'][f]
    timer(lambda: len(fctx.annotate(True)))
    fm.end()
784 784
785 785
@command(
    b'perf::status|perfstatus',
    [
        (b'u', b'unknown', False, b'ask status to look for unknown files'),
        (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
    ]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call

    The repository data are preserved between each call.

    By default, only the status of the tracked file are requested. If
    `--unknown` is passed, the "unknown" files are also tracked.
    """
    opts = _byteskwargs(opts)
    # m = match.always(repo.root, repo.getcwd())
    # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    # False))))
    timer, fm = gettimer(ui, opts)
    if not opts[b'dirstate']:
        # time the high-level repo.status() call
        timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    else:
        # time the low-level dirstate.status() call directly
        dirstate = repo.dirstate
        matcher = scmutil.matchall(repo)
        unknown = opts[b'unknown']

        def status_dirstate():
            st = dirstate.status(
                matcher, subrepos=[], ignored=False, clean=False,
                unknown=unknown
            )
            sum(map(bool, st))

        timer(status_dirstate)
    fm.end()
822 822
823 823
@command(b'perf::addremove|perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    # benchmark a dry-run addremove over the whole working copy
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # capture the quiet flag BEFORE entering the try block: if it were
    # assigned inside and an earlier statement raised, the finally clause
    # would fail with NameError instead of restoring the original value
    oldquiet = repo.ui.quiet
    try:
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        if 'uipathfn' in getargspec(scmutil.addremove).args:
            # since 5.0, addremove takes an extra uipathfn argument
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
841 841
842 842
def clearcaches(cl):
    """Clear a revlog/changelog's lookup caches across hg versions."""
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # <= hg-5.2
        from mercurial.node import nullid, nullrev

        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
853 853
854 854
@command(b'perf::heads|perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def reset_caches():
        clearcaches(cl)

    def compute_heads():
        len(cl.headrevs())

    timer(compute_heads, setup=reset_caches)
    fm.end()
870 870
871 871
@command(
    b'perf::tags|perftags',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perftags(ui, repo, **opts):
    # benchmark repo.tags(), optionally clearing revlog caches before each run
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']

    def reset_caches():
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()

    def count_tags():
        return len(repo.tags())

    timer(count_tags, setup=reset_caches)
    fm.end()
896 896
897 897
@command(b'perf::ancestors|perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    # benchmark a full ancestors() walk from every head
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()

    def walk_ancestors():
        for _rev in repo.changelog.ancestors(heads):
            pass

    timer(walk_ancestors)
    fm.end()
910 910
911 911
@command(b'perf::ancestorset|perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    # benchmark membership tests of REVSET members against ancestors(heads)
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()

    def check_membership():
        ancestors = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in ancestors

    timer(check_membership)
    fm.end()
926 926
927 927
@command(
    b'perf::delta-find',
    revlogopts + formatteropts,
    b'-c|-m|FILE REV',
)
def perf_delta_find(ui, repo, arg_1, arg_2=None, **opts):
    """benchmark the process of finding a valid delta for a revlog revision

    When a revlog receives a new revision (e.g. from a commit, or from an
    incoming bundle), it searches for a suitable delta-base to produce a delta.
    This perf command measures how much time we spend in this process. It
    operates on an already stored revision.

    See `hg help debug-delta-find` for another related command.
    """
    from mercurial import revlogutils
    import mercurial.revlogutils.deltas as deltautil

    opts = _byteskwargs(opts)
    # with one positional argument it is the revision; with two, the
    # first is the file and the second the revision
    if arg_2 is None:
        file_ = None
        rev = arg_1
    else:
        file_ = arg_1
        rev = arg_2

    repo = repo.unfiltered()

    timer, fm = gettimer(ui, opts)

    rev = int(rev)

    revlog = cmdutil.openrevlog(repo, b'perf::delta-find', file_, opts)

    deltacomputer = deltautil.deltacomputer(revlog)

    # rebuild the revisioninfo the delta search would receive for this
    # revision; cachedelta stays None so the full search is exercised
    node = revlog.node(rev)
    p1r, p2r = revlog.parentrevs(rev)
    p1 = revlog.node(p1r)
    p2 = revlog.node(p2r)
    full_text = revlog.revision(rev)
    textlen = len(full_text)
    cachedelta = None
    flags = revlog.flags(rev)

    revinfo = revlogutils.revisioninfo(
        node,
        p1,
        p2,
        [full_text],  # btext
        textlen,
        cachedelta,
        flags,
    )

    # Note: we should probably purge the potential caches (like the full
    # manifest cache) between runs.
    def find_one():
        with revlog._datafp() as fh:
            deltacomputer.finddeltainfo(revinfo, fh, target_rev=rev)

    timer(find_one)
    fm.end()
991 991
992 992
@command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path"""
    repos = [repo, None]
    # NOTE(review): unlike the sibling commands, opts is not run through
    # _byteskwargs here — confirm formatter options still work as intended
    timer, fm = gettimer(ui, opts)

    try:
        from mercurial.utils.urlutil import get_unique_pull_path

        path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
    except ImportError:
        path = ui.expandpath(path)

    def connect_peer():
        repos[1] = hg.peer(ui, opts, path)

    def run_discovery():
        setdiscovery.findcommonheads(ui, *repos)

    timer(run_discovery, setup=connect_peer)
    fm.end()
1014 1014
1015 1015
@command(
    b'perf::bookmarks|perfbookmarks',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']

    def reset_caches():
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')

    def load_bookmarks():
        repo._bookmarks

    timer(load_bookmarks, setup=reset_caches)
    fm.end()
1040 1040
1041 1041
@command(
    b'perf::bundle',
    [
        (
            b'r',
            b'rev',
            [],
            b'changesets to bundle',
            b'REV',
        ),
        (
            b't',
            b'type',
            b'none',
            b'bundlespec to use (see `hg help bundlespec`)',
            b'TYPE',
        ),
    ]
    + formatteropts,
    b'REVS',
)
def perfbundle(ui, repo, *revs, **opts):
    """benchmark the creation of a bundle from a repository

    For now, this only supports "none" compression.
    """
    # bundlecaches only exists in recent Mercurial versions; fall back to the
    # older location of parsebundlespec for historical portability.
    try:
        from mercurial import bundlecaches

        parsebundlespec = bundlecaches.parsebundlespec
    except ImportError:
        from mercurial import exchange

        parsebundlespec = exchange.parsebundlespec

    from mercurial import discovery
    from mercurial import bundle2

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    cl = repo.changelog
    revs = list(revs)
    revs.extend(opts.get(b'rev', ()))
    revs = scmutil.revrange(repo, revs)
    if not revs:
        # fixed message typo: was b"not revision specified"
        raise error.Abort(b"no revision specified")
    # make it a consistent set (ie: without topological gaps)
    old_len = len(revs)
    revs = list(repo.revs(b"%ld::%ld", revs, revs))
    if old_len != len(revs):
        new_count = len(revs) - old_len
        msg = b"add %d new revisions to make it a consistent set\n"
        ui.write_err(msg % new_count)

    # compute the outgoing set the bundle will contain
    targets = [cl.node(r) for r in repo.revs(b"heads(::%ld)", revs)]
    bases = [cl.node(r) for r in repo.revs(b"heads(::%ld - %ld)", revs, revs)]
    outgoing = discovery.outgoing(repo, bases, targets)

    bundle_spec = opts.get(b'type')

    bundle_spec = parsebundlespec(repo, bundle_spec, strict=False)

    # map the bundle format version to a changegroup version
    cgversion = bundle_spec.params.get(b"cg.version")
    if cgversion is None:
        if bundle_spec.version == b'v1':
            cgversion = b'01'
        if bundle_spec.version == b'v2':
            cgversion = b'02'
    if cgversion not in changegroup.supportedoutgoingversions(repo):
        err = b"repository does not support bundle version %s"
        raise error.Abort(err % cgversion)

    if cgversion == b'01':  # bundle1
        bversion = b'HG10' + bundle_spec.wirecompression
        bcompression = None
    elif cgversion in (b'02', b'03'):
        bversion = b'HG20'
        bcompression = bundle_spec.wirecompression
    else:
        err = b'perf::bundle: unexpected changegroup version %s'
        raise error.ProgrammingError(err % cgversion)

    if bcompression is None:
        bcompression = b'UN'

    if bcompression != b'UN':
        err = b'perf::bundle: compression currently unsupported: %s'
        raise error.ProgrammingError(err % bcompression)

    def do_bundle():
        # write to os.devnull: we only want to measure bundle generation
        bundle2.writenewbundle(
            ui,
            repo,
            b'perf::bundle',
            os.devnull,
            bversion,
            outgoing,
            bundle_spec.params,
        )

    timer(do_bundle)
    fm.end()
1145 1145
1146 1146
@command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    # helper factories: each returns a zero-argument callable suitable for
    # timer(); the bundle is re-opened on every run so caching cannot skew
    # the measurement.
    def makebench(fn):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # baseline: raw file reads without any bundle parsing
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    # raw-read baselines always apply, whatever the bundle type
    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # peek at the bundle once to pick the type-specific benchmarks
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend(
                [
                    (makebench(deltaiter), b'cg1 deltaiter()'),
                    (makebench(iterchunks), b'cg1 getchunks()'),
                    (makereadnbytes(8192), b'cg1 read(8k)'),
                    (makereadnbytes(16384), b'cg1 read(16k)'),
                    (makereadnbytes(32768), b'cg1 read(32k)'),
                    (makereadnbytes(131072), b'cg1 read(128k)'),
                ]
            )
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend(
                [
                    (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                    (makebench(iterparts), b'bundle2 iterparts()'),
                    (
                        makebench(iterpartsseekable),
                        b'bundle2 iterparts() seekable',
                    ),
                    (makebench(seek), b'bundle2 part seek()'),
                    (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                    (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                    (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                    (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
                ]
            )
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1271 1271
1272 1272
@command(
    b'perf::changegroupchangelog|perfchangegroupchangelog',
    formatteropts
    + [
        (b'', b'cgversion', b'02', b'changegroup version'),
        (b'r', b'rev', b'', b'revisions to add to changegroup'),
    ],
)
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        # consume the generator fully so the whole changelog group is built
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()
1308 1308
1309 1309
@command(b'perf::dirs|perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark the computation of the dirstate's directories cache

    The `_dirs` cache is dropped inside each run, so every iteration
    rebuilds it from scratch.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # warm the dirstate itself so only the `_dirs` computation is timed
    b'a' in dirstate

    def d():
        dirstate.hasdir(b'a')
        # drop the cache; older dirstate maps may not expose the attribute
        try:
            del dirstate._map._dirs
        except AttributeError:
            pass

    timer(d)
    fm.end()
1326 1326
1327 1327
@command(
    b'perf::dirstate|perfdirstate',
    [
        (
            b'',
            b'iteration',
            None,
            b'benchmark a full iteration for the dirstate',
        ),
        (
            b'',
            b'contains',
            None,
            b'benchmark a large amount of `nf in dirstate` calls',
        ),
    ]
    + formatteropts,
)
def perfdirstate(ui, repo, **opts):
    """benchmark the time of various dirstate operations

    By default benchmark the time necessary to load a dirstate from scratch.
    The dirstate is loaded to the point where a "contains" request can be
    answered.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # warm the dirstate once before picking a benchmark mode
    b"a" in repo.dirstate

    if opts[b'iteration'] and opts[b'contains']:
        msg = b'only specify one of --iteration or --contains'
        raise error.Abort(msg)

    if opts[b'iteration']:
        # measure iterating over every tracked file
        setup = None
        dirstate = repo.dirstate

        def d():
            for f in dirstate:
                pass

    elif opts[b'contains']:
        # measure membership tests, half hits and half guaranteed misses
        setup = None
        dirstate = repo.dirstate
        allfiles = list(dirstate)
        # also add file path that will be "missing" from the dirstate
        allfiles.extend([f[::-1] for f in allfiles])

        def d():
            for f in allfiles:
                f in dirstate

    else:
        # default: measure a cold load up to the first membership test
        def setup():
            repo.dirstate.invalidate()

        def d():
            b"a" in repo.dirstate

    timer(d, setup=setup)
    fm.end()
1390 1390
1391 1391
@command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark a 'dirstate.hasdir' call from an empty `dirs` cache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # warm the dirstate itself so only the `_dirs` rebuild is timed
    repo.dirstate.hasdir(b"a")

    def setup():
        # drop the cache; older dirstate maps may not expose the attribute
        try:
            del repo.dirstate._map._dirs
        except AttributeError:
            pass

    def d():
        repo.dirstate.hasdir(b"a")

    timer(d, setup=setup)
    fm.end()
1410 1410
1411 1411
@command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.filefoldmap.get()` request

    The dirstate filefoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # warm everything except the foldmap itself
    dirstate._map.filefoldmap.get(b'a')

    def setup():
        del dirstate._map.filefoldmap

    def d():
        dirstate._map.filefoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()
1431 1431
1432 1432
@command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.dirfoldmap.get()` request

    The dirstate dirfoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # warm everything except the caches we deliberately drop in setup
    dirstate._map.dirfoldmap.get(b'a')

    def setup():
        del dirstate._map.dirfoldmap
        # also drop the `_dirs` cache the foldmap is derived from; older
        # dirstate maps may not expose the attribute
        try:
            del dirstate._map._dirs
        except AttributeError:
            pass

    def d():
        dirstate._map.dirfoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()
1456 1456
1457 1457
@command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark the time it takes to write a dirstate on disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # make sure the dirstate is loaded before timing the write
    b"a" in ds

    def setup():
        # force the dirstate to consider itself modified so write() acts
        ds._dirty = True

    def d():
        ds.write(repo.currenttransaction())

    timer(d, setup=setup)
    fm.end()
1474 1474
1475 1475
def _getmergerevs(repo, opts):
    """parse command argument to return rev involved in merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        fromrev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[fromrev]
    else:
        # no explicit base: use the common ancestor of the two sides
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
1497 1497
1498 1498
@command(
    b'perf::mergecalculate|perfmergecalculate',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecalculate(ui, repo, **opts):
    """benchmark `merge.calculateupdates` between two revisions"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(
            repo,
            wctx,
            rctx,
            [ancestor],
            branchmerge=False,
            force=False,
            acceptremote=True,
            followcopies=True,
        )

    timer(d)
    fm.end()
1530 1530
1531 1531
@command(
    b'perf::mergecopies|perfmergecopies',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        copies.mergecopies(repo, wctx, rctx, ancestor)

    timer(d)
    fm.end()
1554 1554
1555 1555
@command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # resolve both revisions once, outside the timed section
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)

    def d():
        copies.pathcopies(ctx1, ctx2)

    timer(d)
    fm.end()
1569 1569
1570 1570
@command(
    b'perf::phases|perfphases',
    [
        (b'', b'full', False, b'include file reading time too'),
    ],
    b"",
)
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')

    def d():
        phases = _phases
        if full:
            # also measure re-reading the phaseroots file from disk
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)

    timer(d)
    fm.end()
1595 1595
1596 1596
@command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import bin
    from mercurial import (
        exchange,
        hg,
        phases,
    )

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            b'default repository not configured!',
            hint=b"see 'hg help config.paths'",
        )
    dest = path.pushloc or path.loc
    ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(
            b'listkeys', {b'namespace': b'phases'}
        ).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.statusnoi18n(b'publishing: yes\n')
    else:
        ui.statusnoi18n(b'publishing: no\n')

    # has_node only exists on recent indexes; fall back to the old nodemap
    has_node = getattr(repo.changelog.index, 'has_node', None)
    if has_node is None:
        has_node = repo.changelog.nodemap.__contains__
    # count the remote phase roots that are both known locally and non-public
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if has_node(node) and int(phase):
            nonpublishroots += 1
    ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
    ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)

    def d():
        phases.remotephasessummary(repo, remotesubset, remotephases)

    timer(d)
    fm.end()
1655 1655
1656 1656
@command(
    b'perf::manifest|perfmanifest',
    [
        (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
        (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ]
    + formatteropts,
    b'REV|NODE',
)
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # REV names a changeset; use its manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        # REV names a manifest revision directly: either a full hex node
        # or an integer revision number
        if len(rev) == 40:
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                # getstorage only exists on recent manifestlogs
                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(
                    b'manifest revision must be integer or full node'
                )

    def d():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()

    timer(d)
    fm.end()
1700 1700
1701 1701
@command(b'perf::changeset|perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading and parsing one changeset entry from the changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    n = scmutil.revsingle(repo, rev).node()

    def d():
        repo.changelog.read(n)
        # repo.changelog._cache = None

    timer(d)
    fm.end()
1714 1714
1715 1715
@command(b'perf::ignore|perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def setupone():
        # drop the cached ignore matcher so each run rebuilds it
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def runone():
        # accessing the property triggers parsing of the hgignore files
        dirstate._ignore

    timer(runone, setup=setupone, title=b"load")
    fm.end()
1732 1732
1733 1733
@command(
    b'perf::index|perfindex',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'no-lookup', None, b'do not revision lookup post creation'),
    ]
    + formatteropts,
)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:

    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        # bug fix: _byteskwargs converted all keys to bytes, so the former
        # str-keyed opts['rev'] access raised KeyError on Python 3
        if opts[b'rev']:
            raise error.Abort(b'--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func

    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)

    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)

    timer(d, setup=setup)
    fm.end()
1796 1796
1797 1797
@command(
    b'perf::nodemap|perfnodemap',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ]
    + formatteropts,
)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]

    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        cl = makecl(unfi)
        # recent indexes expose get_rev; fall back to the legacy nodemap
        if util.safehasattr(cl.index, 'get_rev'):
            nodeget[0] = cl.index.get_rev
        else:
            nodeget[0] = cl.nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:

        def setup():
            setnodeget()

    else:
        setnodeget()
        d()  # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
1868 1868
1869 1869
@command(b'perf::startup|perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark the startup time of a bare `hg version` invocation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def d():
        # re-run the same executable that launched us, with config lookup
        # neutralized, discarding its output
        if os.name != 'nt':
            os.system(
                b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
            )
        else:
            # Windows has no inline env-var syntax; set it in our environment
            os.environ['HGRCPATH'] = r' '
            os.system("%s version -q > NUL" % sys.argv[0])

    timer(d)
    fm.end()
1886 1886
1887 1887
@command(b'perf::parents|perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nl = [repo.changelog.node(i) for i in _xrange(count)]

    def d():
        for n in nl:
            repo.changelog.parents(n)

    timer(d)
    fm.end()
1913 1913
1914 1914
@command(b'perf::ctxfiles|perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark computing the file list of a single changeset"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)

    def run():
        len(repo[rev].files())

    timer(run)
    fm.end()
1926 1926
1927 1927
@command(b'perf::rawfiles|perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the raw file list of a changeset from the changelog"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    changelog = repo.changelog

    def run():
        # entry [3] of changelog.read() is the list of touched files
        len(changelog.read(rev)[3])

    timer(run)
    fm.end()
1940 1940
1941 1941
@command(b'perf::lookup|perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision specifier to a node"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def run():
        return len(repo.lookup(rev))

    timer(run)
    fm.end()
1948 1948
1949 1949
@command(
    b'perf::linelogedits|perflinelogedits',
    [
        (b'n', b'edits', 10000, b'number of edits'),
        (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
    ],
    norepo=True,
)
def perflinelogedits(ui, **opts):
    """benchmark applying a stream of random edits to a linelog"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    # build a reproducible random sequence of replacelines() arguments
    # before timing starts (fixed seed keeps runs comparable)
    maxb1 = 100000
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        # replace lines [a1, a2) with new content occupying [b1, b2)
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1987 1987
1988 1988
@command(b'perf::revrange|perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revset specifications"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # bind the helper once so attribute lookup stays out of the timed call
    resolve = scmutil.revrange

    def run():
        return len(resolve(repo, specs))

    timer(run)
    fm.end()
1996 1996
1997 1997
@command(b'perf::nodelookup|perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark a cold node-to-rev lookup on a standalone changelog revlog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog

    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()

    # recent revlogs take a `radix`; older ones take `indexfile`
    try:
        cl = revlog(getsvfs(repo), radix=b"00changelog")
    except TypeError:
        cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")

    def d():
        cl.rev(n)
        # drop caches so every iteration performs a cold lookup
        clearcaches(cl)

    timer(d)
    fm.end()
2018 2018
2019 2019
@command(
    b'perf::log|perflog',
    [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
)
def perflog(ui, repo, rev=None, **opts):
    """benchmark the `hg log` command, optionally following renames"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    # capture the log output so terminal printing does not skew the timing
    ui.pushbuffer()
    timer(
        lambda: commands.log(
            ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
        )
    )
    ui.popbuffer()
    fm.end()
2037 2037
2038 2038
@command(b'perf::moonwalk|perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def moonwalk():
        # walk from tip down to rev 0; branch() forces the changelog
        # entry itself to be read, not just the index
        for rev in repo.changelog.revs(start=len(repo) - 1, stop=-1):
            repo[rev].branch()

    timer(moonwalk)
    fm.end()
2055 2055
2056 2056
@command(
    b'perf::templating|perftemplating',
    [
        (b'r', b'rev', [], b'revisions to run the template on'),
    ]
    + formatteropts,
)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    # makelogtemplater only exists on hg >= 4.3; refuse politely otherwise
    if makelogtemplater is None:
        raise error.Abort(
            b"perftemplating not available with this Mercurial",
            hint=b"use 4.3 or later",
        )

    opts = _byteskwargs(opts)

    # render into a throwaway ui writing to /dev/null so terminal/pager
    # costs do not pollute the measurement
    nullui = ui.copy()
    nullui.fout = open(os.devnull, 'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (
        b'{date|shortdate} [{rev}:{node|short}]'
        b' {author|person}: {desc|firstline}\n'
    )
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)

    def format():
        # one full render of every requested revision per timed run
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()
2099 2099
2100 2100
def _displaystats(ui, opts, entries, data):
    """render distribution statistics collected by the perfhelper-* commands

    ``entries`` is a list of ``(key, title)`` pairs; ``data`` maps each key
    to a list of ``(value, ...context...)`` tuples.  For every key the
    sorted distribution (min, several percentiles, max) is emitted both as
    formatter data and as human-readable text.
    """
    # use a second formatter because the data are quite different, not sure
    # how it flies with the templater.
    fm = ui.formatter(b'perf-stats', opts)
    for key, title in entries:
        values = data[key]
        # fix: percentile indices must be derived from the number of
        # collected samples, not from the number of stat categories
        # (previously ``len(data)``), which made every percentile wrong
        nbvalues = len(values)
        values.sort()
        stats = {
            'key': key,
            'title': title,
            'nbitems': len(values),
            'min': values[0][0],
            '10%': values[(nbvalues * 10) // 100][0],
            '25%': values[(nbvalues * 25) // 100][0],
            '50%': values[(nbvalues * 50) // 100][0],
            '75%': values[(nbvalues * 75) // 100][0],
            '80%': values[(nbvalues * 80) // 100][0],
            '85%': values[(nbvalues * 85) // 100][0],
            '90%': values[(nbvalues * 90) // 100][0],
            '95%': values[(nbvalues * 95) // 100][0],
            '99%': values[(nbvalues * 99) // 100][0],
            'max': values[-1][0],
        }
        fm.startitem()
        fm.data(**stats)
        # make node pretty for the human output
        fm.plain('### %s (%d items)\n' % (title, len(values)))
        lines = [
            'min',
            '10%',
            '25%',
            '50%',
            '75%',
            '80%',
            '85%',
            '90%',
            '95%',
            '99%',
            'max',
        ]
        for l in lines:
            fm.plain('%s: %s\n' % (l, stats[l]))
    fm.end()
2145 2145
2146 2146
@command(
    b'perf::helper-mergecopies|perfhelper-mergecopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelpermergecopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perfmergecopies`

    This command find (base, p1, p2) triplet relevant for copytracing
    benchmarking in the context of a merge. It reports values for some of the
    parameters that impact merge copy tracing time during merge.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision triplets are very costly.
    """
    # NOTE: the mutable default for ``revs`` is safe here because the list
    # is never mutated, only rebound below.
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # column layout: (header, %-format) pairs; timing columns are stripped
    # out below when --timing is not requested
    output_template = [
        ("base", "%(base)12s"),
        ("p1", "%(p1.node)12s"),
        ("p2", "%(p2.node)12s"),
        ("p1.nb-revs", "%(p1.nbrevs)12d"),
        ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
        ("p1.renames", "%(p1.renamedfiles)12d"),
        ("p1.time", "%(p1.time)12.3f"),
        ("p2.nb-revs", "%(p2.nbrevs)12d"),
        ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
        ("p2.renames", "%(p2.renamedfiles)12d"),
        ("p2.time", "%(p2.time)12.3f"),
        ("renames", "%(nbrenamedfiles)12d"),
        ("total.time", "%(time)12.3f"),
    ]
    if not dotiming:
        output_template = [
            i
            for i in output_template
            if not ('time' in i[0] or 'renames' in i[0])
        ]
    header_names = [h for (h, v) in output_template]
    output = ' '.join([v for (h, v) in output_template]) + '\n'
    header = ' '.join(['%12s'] * len(header_names)) + '\n'
    fm.plain(header % tuple(header_names))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        # accumulators consumed by _displaystats at the end
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['parentnbrenames'] = []
            alldata['totalnbrenames'] = []
            alldata['parenttime'] = []
            alldata['totaltime'] = []

    # only merge commits are interesting for (base, p1, p2) triplets
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1()
        p2 = ctx.p2()
        bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
        for b in bases:
            b = repo[b]
            p1missing = copies._computeforwardmissing(b, p1)
            p2missing = copies._computeforwardmissing(b, p2)
            data = {
                b'base': b.hex(),
                b'p1.node': p1.hex(),
                b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
                b'p1.nbmissingfiles': len(p1missing),
                b'p2.node': p2.hex(),
                b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
                b'p2.nbmissingfiles': len(p2missing),
            }
            if dostats:
                if p1missing:
                    alldata['nbrevs'].append(
                        (data['p1.nbrevs'], b.hex(), p1.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p1.nbmissingfiles'], b.hex(), p1.hex())
                    )
                if p2missing:
                    alldata['nbrevs'].append(
                        (data['p2.nbrevs'], b.hex(), p2.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p2.nbmissingfiles'], b.hex(), p2.hex())
                    )
            if dotiming:
                begin = util.timer()
                mergedata = copies.mergecopies(repo, p1, p2, b)
                end = util.timer()
                # not very stable timing since we did only one run
                data['time'] = end - begin
                # mergedata contains five dicts: "copy", "movewithdir",
                # "diverge", "renamedelete" and "dirmove".
                # The first 4 are about renamed file so lets count that.
                renames = len(mergedata[0])
                renames += len(mergedata[1])
                renames += len(mergedata[2])
                renames += len(mergedata[3])
                data['nbrenamedfiles'] = renames
                begin = util.timer()
                p1renames = copies.pathcopies(b, p1)
                end = util.timer()
                data['p1.time'] = end - begin
                begin = util.timer()
                p2renames = copies.pathcopies(b, p2)
                end = util.timer()
                data['p2.time'] = end - begin
                data['p1.renamedfiles'] = len(p1renames)
                data['p2.renamedfiles'] = len(p2renames)

                if dostats:
                    if p1missing:
                        alldata['parentnbrenames'].append(
                            (data['p1.renamedfiles'], b.hex(), p1.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p1.time'], b.hex(), p1.hex())
                        )
                    if p2missing:
                        alldata['parentnbrenames'].append(
                            (data['p2.renamedfiles'], b.hex(), p2.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p2.time'], b.hex(), p2.hex())
                        )
                    if p1missing or p2missing:
                        alldata['totalnbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                b.hex(),
                                p1.hex(),
                                p2.hex(),
                            )
                        )
                        alldata['totaltime'].append(
                            (data['time'], b.hex(), p1.hex(), p2.hex())
                        )
            fm.startitem()
            fm.data(**data)
            # make node pretty for the human output
            out = data.copy()
            out['base'] = fm.hexfunc(b.node())
            out['p1.node'] = fm.hexfunc(p1.node())
            out['p2.node'] = fm.hexfunc(p2.node())
            fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(
                ('parentnbrenames', 'rename from one parent to base')
            )
            entries.append(('totalnbrenames', 'total number of renames'))
            entries.append(('parenttime', 'time for one parent'))
            entries.append(('totaltime', 'time for both parents'))
        _displaystats(ui, opts, entries, alldata)
2328 2328
2329 2329
@command(
    b'perf::helper-pathcopies|perfhelper-pathcopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    # NOTE: the mutable default for ``revs`` is safe here because the list
    # is never mutated, only rebound below.
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # the timing variant carries two extra columns (renames, time)
    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d "
            "%(nbrenamedfiles)12d %(time)18.5f\n"
        )
        header_names = (
            "source",
            "destination",
            "nb-revs",
            "nb-files",
            "nb-renames",
            "time",
        )
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d\n"
        )
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        # accumulators consumed by _displaystats at the end
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['nbrenames'] = []
            alldata['time'] = []

    # only merges yield (base, parent) pairs worth measuring
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
                    b'nbmissingfiles': len(missing),
                }
                if dostats:
                    alldata['nbrevs'].append(
                        (
                            data['nbrevs'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                    alldata['nbmissingfiles'].append(
                        (
                            data['nbmissingfiles'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                    if dostats:
                        alldata['time'].append(
                            (
                                data['time'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                        alldata['nbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                fm.startitem()
                fm.data(**data)
                # make node pretty for the human output
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
    if dostats:
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('nbrenames', 'renamed files'))
            entries.append(('time', 'time'))
        _displaystats(ui, opts, entries, alldata)
2468 2468
2469 2469
@command(b'perf::cca|perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark constructing a case-collision auditor"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def d():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(d)
    fm.end()
2476 2476
2477 2477
@command(b'perf::fncacheload|perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache file from disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fncache = repo.store.fncache

    timer(lambda: fncache._load())
    fm.end()
2489 2489
2490 2490
@command(b'perf::fncachewrite|perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    # Benchmark rewriting the fncache file inside a transaction; the
    # transaction backs up the file first so the repository is unharmed.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    s.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    tr.addbackup(b'fncache')

    def d():
        # force a write even though nothing actually changed
        s.fncache._dirty = True
        s.fncache.write(tr)

    timer(d)
    tr.close()
    lock.release()
    fm.end()
2509 2509
2510 2510
@command(b'perf::fncacheencode|perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark encoding every path currently held in the fncache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    store.fncache._load()

    def d():
        for path in store.fncache.entries:
            store.encode(path)

    timer(d)
    fm.end()
2524 2524
2525 2525
def _bdiffworker(q, blocks, xdiff, ready, done):
    # Worker loop for the threaded variant of perfbdiff: pull text pairs off
    # *q* and diff them until a None sentinel is seen, then park on *ready*
    # so the main thread can release every worker at once for the next run.
    # *done* is set by the main thread to terminate the workers.
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done()  # for the None one
        with ready:
            ready.wait()
2541 2541
2542 2542
def _manifestrevision(repo, mnode):
    """Return the raw manifest text for node *mnode*, across hg versions."""
    ml = repo.manifestlog

    # modern hg exposes getstorage(); older versions only have ._revlog
    if util.safehasattr(ml, b'getstorage'):
        return ml.getstorage(b'').revision(mnode)
    return ml._revlog.revision(mnode)
2552 2552
2553 2553
@command(
    b'perf::bdiff|perfbdiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
        (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
        (b'', b'blocks', False, b'test computing diffs into blocks'),
        (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],
    b'-c|-m|FILE REV',
)
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    # when diffing the changelog/manifest the positional arg is the rev
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    # gather all (old, new) text pairs up front so only diffing is timed
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:

        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)

    else:
        # threaded mode: pre-start workers, feed pairs through a queue and
        # use the None sentinels + the *ready* condition to resynchronize
        # all workers between timed runs (see _bdiffworker)
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(
                target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
            ).start()
        q.join()

        def d():
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    # shut the worker threads down cleanly
    if withthreads:
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
2668 2668
2669 2669
@command(
    b'perf::unbundle',
    formatteropts,
    b'BUNDLE_FILE',
)
def perf_unbundle(ui, repo, fname, **opts):
    """benchmark application of a bundle in a repository.

    This does not include the final transaction processing"""

    from mercurial import exchange
    from mercurial import bundle2
    from mercurial import transaction

    opts = _byteskwargs(opts)

    ### some compatibility hotfix
    #
    # the data attribute is dropped in 63edc384d3b7 a changeset introducing a
    # critical regression that break transaction rollback for files that are
    # de-inlined.
    method = transaction.transaction._addentry
    pre_63edc384d3b7 = "data" in getargspec(method).args
    # the `detailed_exit_code` attribute is introduced in 33c0c25d0b0f
    # a changeset that is a close descendant of 18415fc918a1, the changeset
    # that conclude the fix run for the bug introduced in 63edc384d3b7.
    args = getargspec(error.Abort.__init__).args
    post_18415fc918a1 = "detailed_exit_code" in args

    old_max_inline = None
    try:
        if not (pre_63edc384d3b7 or post_18415fc918a1):
            # disable inlining
            old_max_inline = mercurial.revlog._maxinline
            # large enough to never happen
            mercurial.revlog._maxinline = 2 ** 50

        with repo.lock():
            bundle = [None, None]
            orig_quiet = repo.ui.quiet
            try:
                repo.ui.quiet = True
                with open(fname, mode="rb") as f:

                    def noop_report(*args, **kwargs):
                        pass

                    def setup():
                        # abort any transaction left over from the previous
                        # run so every run starts from a pristine state
                        gen, tr = bundle
                        if tr is not None:
                            tr.abort()
                        bundle[:] = [None, None]
                        f.seek(0)
                        bundle[0] = exchange.readbundle(ui, f, fname)
                        bundle[1] = repo.transaction(b'perf::unbundle')
                        # silence the transaction
                        bundle[1]._report = noop_report

                    def apply():
                        gen, tr = bundle
                        bundle2.applybundle(
                            repo,
                            gen,
                            tr,
                            source=b'perf::unbundle',
                            url=fname,
                        )

                    timer, fm = gettimer(ui, opts)
                    timer(apply, setup=setup)
                    fm.end()
            finally:
                # fix: this was ``repo.ui.quiet == orig_quiet`` — a no-op
                # comparison that left the ui permanently quiet afterwards
                repo.ui.quiet = orig_quiet
                gen, tr = bundle
                if tr is not None:
                    tr.abort()
    finally:
        if old_max_inline is not None:
            mercurial.revlog._maxinline = old_max_inline
2724 2749
2725 2750
@command(
    b'perf::unidiff|perfunidiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ],
    b'-c|-m|FILE REV',
)
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    # when diffing the changelog/manifest the positional arg is the rev
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    # gather all (left, right) text pairs up front so only diffing is timed
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False
            )
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
2804 2829
2805 2830
@command(b'perf::diffwd|perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # single-letter flags mapped to the diff option they enable
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    # benchmark each whitespace-handling combination separately
    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        opts = {options[c]: b'1' for c in diffopt}

        def d():
            # NOTE: d() closes over the loop variable ``opts``; this late
            # binding is safe only because timer(d) runs in this iteration
            ui.pushbuffer()
            commands.diff(ui, repo, **opts)
            ui.popbuffer()

        diffopt = diffopt.encode('ascii')
        title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
        timer(d, title=title)
    fm.end()
2829 2854
2830 2855
@command(
    b'perf::revlogindex|perfrevlogindex',
    revlogopts + formatteropts,
    b'-c|-m|FILE',
)
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    # compat with hg <= 5.8
    radix = getattr(rl, 'radix', None)
    indexfile = getattr(rl, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(rl, 'indexfile')
    data = opener.read(indexfile)

    # the low 16 bits of the first word are the revlog version; only v1
    # indexes are supported by this benchmark
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        inline = header & (1 << 16)
    else:
        raise error.Abort(b'unsupported revlog version: %d' % version)

    # modern hg exposes parse_index_v1 directly; fall back to revlogio
    parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
    if parse_index_v1 is None:
        parse_index_v1 = mercurial.revlog.revlogio().parseindex

    rllen = len(rl)

    # sample nodes at fixed points of the revlog for the lookup benchmarks
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        if radix is not None:
            revlog(opener, radix=radix)
        else:
            # hg <= 5.8
            revlog(opener, indexfile=indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        parse_index_v1(data, inline)

    def getentry(revornode):
        index = parse_index_v1(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = parse_index_v1(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        index = parse_index_v1(data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        try:
            rev(node)
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        index = parse_index_v1(data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        for i in range(count):
            for node in nodes:
                try:
                    rev(node)
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
        (
            lambda: resolvenodes(allnodesrev, 2),
            b'look up all nodes 2x (reverse)',
        ),
        (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
        (
            lambda: getentries(allrevs, 2),
            b'retrieve all index entries 2x (forward)',
        ),
        (
            lambda: getentries(allrevsrev),
            b'retrieve all index entries (reverse)',
        ),
        (
            lambda: getentries(allrevsrev, 2),
            b'retrieve all index entries 2x (reverse)',
        ),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2976 3001
2977 3002
@command(
    b'perf::revlogrevisions|perfrevlogrevisions',
    revlogopts
    + formatteropts
    + [
        (b'd', b'dist', 100, b'distance between the revisions'),
        (b's', b'startrev', 0, b'revision to start reading at'),
        (b'', b'reverse', False, b'read in reverse'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogrevisions(
    ui, repo, file_=None, startrev=0, reverse=False, **opts
):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # a negative --startrev counts back from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev

    def d():
        # clear caches so each run pays the full read cost
        rl.clearcaches()

        beginrev = startrev
        endrev = rllen
        dist = opts[b'dist']

        if reverse:
            # walk from tip down to startrev (exclusive) with a negative step
            beginrev, endrev = endrev - 1, beginrev - 1
            dist = -1 * dist

        for x in _xrange(beginrev, endrev, dist):
            # Old revisions don't support passing int.
            n = rl.node(x)
            rl.revision(n)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
3026 3051
3027 3052
@command(
    b'perf::revlogwrite|perfrevlogwrite',
    revlogopts
    + formatteropts
    + [
        (b's', b'startrev', 1000, b'revision to start writing at'),
        (b'', b'stoprev', -1, b'last revision to write'),
        (b'', b'count', 3, b'number of passes to perform'),
        (b'', b'details', False, b'print timing for every revisions tested'),
        (b'', b'source', b'full', b'the kind of data feed in the revlog'),
        (b'', b'lazydeltabase', True, b'try the provided delta first'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
    (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    Note: This performance command measures performance in a custom way. As a
    result some of the global configuration of the 'perf' command does not
    apply to it:

    * ``pre-run``: disabled

    * ``profile-benchmark``: disabled

    * ``run-limits``: disabled use --count instead
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative boundaries count back from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (
        b'full',
        b'parent-1',
        b'parent-2',
        b'parent-smallest',
        b'storage',
    )
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        # fix: the message used to read "invalide run count"
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(
            ui,
            rl,
            source,
            startrev,
            stoprev,
            c + 1,
            lazydeltabase=lazydeltabase,
            clearcaches=clearcaches,
        )
        allresults.append(timing)

    ### consolidate the results in a single list
    # transpose the per-pass lists into one (rev, [t_pass1, t_pass2, ...])
    # entry per revision; all passes must have visited the same revs.
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # fix: the 50th percentile used to be computed with "* 70 // 100"
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append(
            (
                sum(x[1][0] for x in item),
                sum(x[1][1] for x in item),
                sum(x[1][2] for x in item),
            )
        )
    formatone(
        fm,
        totaltime,
        title="total time (%d revs)" % resultcount,
        displayall=displayall,
    )
    fm.end()
3169 3194
3170 3195
3171 3196 class _faketr:
3172 3197 def add(s, x, y, z=None):
3173 3198 return None
3174 3199
3175 3200
def _timeonewrite(
    ui,
    orig,
    source,
    startrev,
    stoprev,
    runidx=None,
    lazydeltabase=True,
    clearcaches=True,
):
    """Replay revisions [startrev, stoprev] of `orig` into a temporary
    revlog, timing each addrawrevision() call.

    `source` selects how the revision data is produced (see
    perfrevlogwrite); `runidx`, when set, is only used to label the
    progress bar.  Returns a list of (rev, timing) tuples.
    """
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)

            def updateprogress(pos):
                progress.update(pos)

            def completeprogress():
                progress.complete()

        else:

            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)

            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            # build the (args, kwargs) for addrawrevision before starting the
            # clock so that data preparation is not part of the measurement
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                dest.index.clearcaches()
                dest.clearcaches()
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
3225 3250
3226 3251
def _getrevisionseed(orig, rev, tr, source):
    """Prepare the arguments for re-adding `rev` of `orig` to another revlog.

    Depending on `source`, either the full text or a (baserev, delta)
    cachedelta is produced.  Returns the (positional, keyword) argument
    pair expected by revlog.addrawrevision().
    """
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        baserev = orig.rev(p1)
        cachedelta = (baserev, orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to p1 when there is no second parent
        parent = p2
        if p2 == nullid:
            parent = p1
        baserev = orig.rev(parent)
        cachedelta = (baserev, orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        # compare both parent deltas and keep the shorter one
        p1diff = orig.revdiff(p1, rev)
        parent = p1
        diff = p1diff
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(p1diff) > len(p2diff):
                parent = p2
                diff = p2diff
        baserev = orig.rev(parent)
        cachedelta = (baserev, diff)
    elif source == b'storage':
        # reuse the delta base already chosen by the original storage
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return (
        (text, tr, linkrev, p1, p2),
        {'node': node, 'flags': flags, 'cachedelta': cachedelta},
    )
3267 3292
3268 3293
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Context manager yielding a copy of revlog `orig` truncated to
    `truncaterev`, backed by a temporary directory.

    The index and data files are copied, then truncated so that revisions
    from `truncaterev` onward can be re-added by the caller.  The temporary
    directory is removed on exit.  Inline revlogs are not supported.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    k = 'upperboundcomp'
    if util.safehasattr(orig, k):
        revlogkwargs[k] = getattr(orig, k)

    indexfile = getattr(orig, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(orig, 'indexfile')
    origindexpath = orig.opener.join(indexfile)

    datafile = getattr(orig, '_datafile', getattr(orig, 'datafile'))
    origdatapath = orig.opener.join(datafile)
    radix = b'revlog'
    indexname = b'revlog.i'
    dataname = b'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            # index entries are fixed-size, so the byte offset of
            # `truncaterev` is a simple multiplication
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        try:
            # modern API takes a radix; older versions want explicit
            # index/data file names (TypeError fallback below)
            dest = revlog(vfs, radix=radix, **revlogkwargs)
        except TypeError:
            dest = revlog(
                vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
            )
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
3329 3354
3330 3355
@command(
    b'perf::revlogchunks|perfrevlogchunks',
    revlogopts
    + formatteropts
    + [
        (b'e', b'engines', b'', b'compression engines to use'),
        (b's', b'startrev', 0, b'revision to start at'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = {e.strip() for e in engines.split(b',')}
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        """Open a raw file handle on the revlog's storage file."""
        if rl._inline:
            indexfile = getattr(rl, '_indexfile', None)
            if indexfile is None:
                # compatibility with <= hg-5.8
                indexfile = getattr(rl, 'indexfile')
            return getsvfs(repo)(indexfile)
        else:
            # fix: look up the modern '_datafile' attribute first, falling
            # back to the pre-5.9 'datafile' name (the old code queried
            # 'datafile' twice, breaking on modern revlogs)
            datafile = getattr(rl, '_datafile', getattr(rl, 'datafile'))
            return getsvfs(repo)(datafile)

    def doread():
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append(
            (
                functools.partial(docompress, compressor),
                b'compress w/ %s' % engine,
            )
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3463 3488
3464 3489
@command(
    b'perf::revlogrevision|perfrevlogrevision',
    revlogopts
    + formatteropts
    + [(b'', b'cache', False, b'use caches instead of clearing')],
    b'-c|-m|FILE REV',
)
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # -c/-m take no FILE argument; the first positional is the REV
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        """Slice the raw segments in `data` into per-rev compressed chunks."""
        start = r.start
        length = r.length
        inline = r._inline
        try:
            iosize = r.index.entry_size
        except AttributeError:
            iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with data, so
                    # skip over the index entries preceding this rev
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    # precompute each intermediate product once so every phase can be
    # benchmarked in isolation below
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend(
        [
            (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
            (lambda: dodecompress(rawchunks), b'decompress'),
            (lambda: dopatch(text, bins), b'patch'),
            (lambda: dohash(text), b'hash'),
        ]
    )

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
3609 3634
3610 3635
@command(
    b'perf::revset|perfrevset',
    [
        (b'C', b'clear', False, b'clear volatile cache between each call.'),
        (b'', b'contexts', False, b'obtain changectx for each revision'),
    ]
    + formatteropts,
    b"REVSET",
)
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clean option if need to evaluate the impact of build volatile
    revisions set cache on the revset execution. Volatile cache hold filtered
    and obsolete related cache."""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def d():
        if clear:
            repo.invalidatevolatilesets()
        # repo.set yields full changectx objects while repo.revs yields bare
        # revision numbers; either way the iterator is exhausted to force
        # complete evaluation of the revset.
        walk = repo.set if contexts else repo.revs
        for _item in walk(expr):
            pass

    timer(d)
    fm.end()
3642 3667
3643 3668
@command(
    b'perf::volatilesets|perfvolatilesets',
    [
        (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
    ]
    + formatteropts,
)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def getobs(name):
        """Build a benchmark callable computing the obsolete set `name`."""
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            obsolete.getrevs(repo, name)

        return d

    allobs = sorted(obsolete.cachefuncs)
    if names:
        # restrict to the sets explicitly requested on the command line
        allobs = [n for n in allobs if n in names]

    for name in allobs:
        timer(getobs(name), title=name)

    def getfiltered(name):
        """Build a benchmark callable computing the filtered revs for `name`."""
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            repoview.filterrevs(repo, name)

        return d

    allfilter = sorted(repoview.filtertable)
    if names:
        allfilter = [n for n in allfilter if n in names]

    for name in allfilter:
        timer(getfiltered(name), title=name)
    fm.end()
3691 3716
3692 3717
@command(
    b'perf::branchmap|perfbranchmap',
    [
        (b'f', b'full', False, b'Includes build time of subset'),
        (
            b'',
            b'clear-revbranch',
            False,
            b'purge the revbranch cache between computation',
        ),
    ]
    + formatteropts,
)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)

    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches

        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # drop every cached branchmap so the subset is rebuilt too
                view._branchcaches.clear()
            else:
                # only drop this filter's cache; subsets stay warm
                filtered.pop(filtername, None)
            view.branchmap()

        return d

    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable on-disk branchmap read/write so only computation is measured
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=printname)
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
3782 3807
3783 3808
@command(
    b'perf::branchmapupdate|perfbranchmapupdate',
    [
        (b'', b'base', [], b'subset of revision to start from'),
        (b'', b'target', [], b'subset of revision to end with'),
        (b'', b'clear-caches', False, b'clear cache between each runs'),
    ]
    + formatteropts,
)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from for <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    $ update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None]  # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    # compute the filtered-out revs for each of the two synthetic repoviews
    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        # temporarily register the two synthetic filters so repoviews can be
        # built for the base and target revision subsets
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset where found
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            # start each run from a fresh copy of the base branchmap
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3892 3917
3893 3918
@command(
    b'perf::branchmapload|perfbranchmapload',
    [
        (b'f', b'filter', b'', b'Specify repoview filter'),
        # fix: help text typo "brachmap" -> "branchmap"
        (b'', b'list', False, b'List branchmap filter caches'),
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ]
    + formatteropts,
)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    # --list only enumerates the on-disk branchmap cache files and exits
    if list:
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                # cache files are named "branch2-<filtername>"; a bare
                # "branch2" belongs to the unfiltered view
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(
                    b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
                )
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap()  # make sure we have a relevant, up to date branchmap

    # locate the cache reader; the API moved over Mercurial versions
    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached; walk up the
    # subset chain until a filter level with a cached branchmap is found
    while fromfile(repo) is None:
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(
                b'No branchmap cached for %s repo' % (filter or b'unfiltered')
            )
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)

    def setup():
        if clearrevlogs:
            clearchangelog(repo)

    def bench():
        fromfile(repo)

    timer(bench, setup=setup)
    fm.end()
3952 3977
3953 3978
@command(b'perf::loadmarkers|perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    store_vfs = getsvfs(repo)

    def count_markers():
        # constructing the obsstore parses every marker from disk
        return len(obsolete.obsstore(repo, store_vfs))

    timer(count_markers)
    fm.end()
3963 3988
3964 3989
@command(
    b'perf::lrucachedict|perflrucachedict',
    formatteropts
    + [
        (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
        (b'', b'mincost', 0, b'smallest cost of items in cache'),
        (b'', b'maxcost', 100, b'maximum cost of items in cache'),
        (b'', b'size', 4, b'size of cache'),
        (b'', b'gets', 10000, b'number of key lookups'),
        (b'', b'sets', 10000, b'number of key sets'),
        (b'', b'mixed', 10000, b'number of mixed mode operations'),
        (
            b'',
            b'mixedgetfreq',
            50,
            b'frequency of get vs set ops in mixed mode',
        ),
    ],
    norepo=True,
)
def perflrucache(
    ui,
    mincost=0,
    maxcost=100,
    costlimit=0,
    size=4,
    gets=10000,
    sets=10000,
    mixed=10000,
    mixedgetfreq=50,
    **opts
):
    """benchmark the util.lrucachedict implementation

    Times construction, pure lookups, pure insertions and a randomized
    get/set mix.  When --costlimit is non-zero, the cost-aware insert()
    API is exercised instead of plain __setitem__/__getitem__.
    """
    opts = _byteskwargs(opts)

    def doinit():
        # raw construction cost of an empty cache
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    # keys used to pre-fill the cache (exactly `size` of them, so the
    # get benchmark never triggers eviction)
    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value  # silence pyflakes warning

    def dogetscost():
        # NOTE: closes over `costs`, which is only populated further
        # down; safe because the closure runs after that assignment
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value  # silence pyflakes warning
            except KeyError:
                # cost-based eviction may have dropped the key
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0  # get
        else:
            op = 1  # set

        # keys span 2*size so roughly half the gets miss and sets evict
        mixedops.append(
            (op, random.randint(0, size * 2), random.choice(costrange))
        )

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    # the cost-aware and plain APIs are benchmarked mutually exclusively
    if costlimit:
        benches.extend(
            [
                (dogetscost, b'gets w/ cost limit'),
                (doinsertscost, b'inserts w/ cost limit'),
                (domixedcost, b'mixed w/ cost limit'),
            ]
        )
    else:
        benches.extend(
            [
                (dogets, b'gets'),
                (doinserts, b'inserts'),
                (dosets, b'sets'),
                (domixed, b'mixed'),
            ]
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
4119 4144
4120 4145
@command(
    b'perf::write|perfwrite',
    formatteropts
    + [
        (b'', b'write-method', b'write', b'ui write method'),
        (b'', b'nlines', 100, b'number of lines'),
        (b'', b'nitems', 100, b'number of items (per line)'),
        (b'', b'item', b'x', b'item that is written'),
        (b'', b'batch-line', None, b'pass whole line to write method at once'),
        (b'', b'flush-line', None, b'flush after each line'),
    ],
)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write (and others)"""
    opts = _byteskwargs(opts)

    # resolve the requested ui output method once, up front
    write_func = getattr(ui, _sysstr(opts[b'write_method']))
    line_count = int(opts[b'nlines'])
    items_per_line = int(opts[b'nitems'])
    token = opts[b'item']
    whole_lines = opts.get(b'batch_line')
    flush_each_line = opts.get(b'flush_line')

    if whole_lines:
        # pre-build the full line so only the write call is timed
        full_line = token * items_per_line + b'\n'

    def benchmark():
        for _ in pycompat.xrange(line_count):
            if whole_lines:
                write_func(full_line)
            else:
                for _ in pycompat.xrange(items_per_line):
                    write_func(token)
                write_func(b'\n')
            if flush_each_line:
                ui.flush()
        ui.flush()

    timer, fm = gettimer(ui, opts)
    timer(benchmark)
    fm.end()
4162 4187
4163 4188
def uisetup(ui):
    """install compatibility wrappers needed on very old Mercurial"""
    has_openrevlog = util.safehasattr(cmdutil, b'openrevlog')
    has_debugrevlogopts = util.safehasattr(commands, b'debugrevlogopts')
    if not has_openrevlog or has_debugrevlogopts:
        return

    # for "historical portability":
    # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
    # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
    # openrevlog() should cause failure, because it has been
    # available since 3.5 (or 49c583ca48c4).
    def openrevlog(orig, repo, cmd, file_, opts):
        if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
            raise error.Abort(
                b"This version doesn't support --dir option",
                hint=b"use 3.5 or later",
            )
        return orig(repo, cmd, file_, opts)

    extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
4182 4207
4183 4208
@command(
    b'perf::progress|perfprogress',
    formatteropts
    + [
        (b'', b'topic', b'topic', b'topic for progress messages'),
        (b'c', b'total', 1000000, b'total value we are progressing to'),
    ],
    norepo=True,
)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def run_progress():
        # drive a progress bar from 0 to `total`, one increment at a time
        with ui.makeprogress(topic, total=total) as progress:
            for _ in _xrange(total):
                progress.increment()

    timer(run_progress)
    fm.end()
General Comments 0
You need to be logged in to leave comments. Login now