##// END OF EJS Templates
perf-discovery: use `get_unique_pull_path`...
marmoute -
r47735:92029a43 default
parent child Browse files
Show More
@@ -1,3919 +1,3925 b''
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance
3 3
4 4 Configurations
5 5 ==============
6 6
7 7 ``perf``
8 8 --------
9 9
10 10 ``all-timing``
11 11 When set, additional statistics will be reported for each benchmark: best,
12 12 worst, median average. If not set only the best timing is reported
13 13 (default: off).
14 14
15 15 ``presleep``
16 16 number of second to wait before any group of runs (default: 1)
17 17
18 18 ``pre-run``
19 19 number of run to perform before starting measurement.
20 20
21 21 ``profile-benchmark``
22 22 Enable profiling for the benchmarked section.
23 23 (The first iteration is benchmarked)
24 24
25 25 ``run-limits``
26 26 Control the number of runs each benchmark will perform. The option value
27 27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 28 conditions are considered in order with the following logic:
29 29
30 30 If benchmark has been running for <time> seconds, and we have performed
31 31 <numberofrun> iterations, stop the benchmark,
32 32
33 33 The default value is: `3.0-100, 10.0-3`
34 34
35 35 ``stub``
36 36 When set, benchmarks will only be run once, useful for testing
37 37 (default: off)
38 38 '''
39 39
40 40 # "historical portability" policy of perf.py:
41 41 #
42 42 # We have to do:
43 43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 46 # - make historical perf command work correctly with as wide Mercurial
47 47 # version as possible
48 48 #
49 49 # We have to do, if possible with reasonable cost:
50 50 # - make recent perf command for historical feature work correctly
51 51 # with early Mercurial
52 52 #
53 53 # We don't have to do:
54 54 # - make perf command for recent feature work correctly with early
55 55 # Mercurial
56 56
57 57 from __future__ import absolute_import
58 58 import contextlib
59 59 import functools
60 60 import gc
61 61 import os
62 62 import random
63 63 import shutil
64 64 import struct
65 65 import sys
66 66 import tempfile
67 67 import threading
68 68 import time
69 69 from mercurial import (
70 70 changegroup,
71 71 cmdutil,
72 72 commands,
73 73 copies,
74 74 error,
75 75 extensions,
76 76 hg,
77 77 mdiff,
78 78 merge,
79 79 revlog,
80 80 util,
81 81 )
82 82
83 83 # for "historical portability":
84 84 # try to import modules separately (in dict order), and ignore
85 85 # failure, because these aren't available with early Mercurial
86 86 try:
87 87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 88 except ImportError:
89 89 pass
90 90 try:
91 91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 92 except ImportError:
93 93 pass
94 94 try:
95 95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96 96
97 97 dir(registrar) # forcibly load it
98 98 except ImportError:
99 99 registrar = None
100 100 try:
101 101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 102 except ImportError:
103 103 pass
104 104 try:
105 105 from mercurial.utils import repoviewutil # since 5.0
106 106 except ImportError:
107 107 repoviewutil = None
108 108 try:
109 109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 110 except ImportError:
111 111 pass
112 112 try:
113 113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 114 except ImportError:
115 115 pass
116 116
117 117 try:
118 118 from mercurial import profiling
119 119 except ImportError:
120 120 profiling = None
121 121
122 122
def identity(a):
    """Return *a* unchanged; no-op fallback used when pycompat helpers
    (byteskwargs, fsencode, ...) are unavailable on old Mercurial."""
    return a
125 125
126 126
127 127 try:
128 128 from mercurial import pycompat
129 129
130 130 getargspec = pycompat.getargspec # added to module after 4.5
131 131 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
132 132 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
133 133 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
134 134 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
135 135 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
136 136 if pycompat.ispy3:
137 137 _maxint = sys.maxsize # per py3 docs for replacing maxint
138 138 else:
139 139 _maxint = sys.maxint
140 140 except (NameError, ImportError, AttributeError):
141 141 import inspect
142 142
143 143 getargspec = inspect.getargspec
144 144 _byteskwargs = identity
145 145 _bytestr = str
146 146 fsencode = identity # no py3 support
147 147 _maxint = sys.maxint # no py3 support
148 148 _sysstr = lambda x: x # no py3 support
149 149 _xrange = xrange
150 150
151 151 try:
152 152 # 4.7+
153 153 queue = pycompat.queue.Queue
154 154 except (NameError, AttributeError, ImportError):
155 155 # <4.7.
156 156 try:
157 157 queue = pycompat.queue
158 158 except (NameError, AttributeError, ImportError):
159 159 import Queue as queue
160 160
161 161 try:
162 162 from mercurial import logcmdutil
163 163
164 164 makelogtemplater = logcmdutil.maketemplater
165 165 except (AttributeError, ImportError):
166 166 try:
167 167 makelogtemplater = cmdutil.makelogtemplater
168 168 except (AttributeError, ImportError):
169 169 makelogtemplater = None
170 170
171 171 # for "historical portability":
172 172 # define util.safehasattr forcibly, because util.safehasattr has been
173 173 # available since 1.9.3 (or 94b200a11cf7)
174 174 _undefined = object()
175 175
176 176
def safehasattr(thing, attr):
    """Return True if *thing* has attribute *attr*.

    *attr* may be bytes; it is converted with _sysstr() so call sites can
    pass b'...' names uniformly across Python 2 and 3.
    """
    found = getattr(thing, _sysstr(attr), _undefined)
    return found is not _undefined
179 179
180 180
181 181 setattr(util, 'safehasattr', safehasattr)
182 182
183 183 # for "historical portability":
184 184 # define util.timer forcibly, because util.timer has been available
185 185 # since ae5d60bb70c9
186 186 if safehasattr(time, 'perf_counter'):
187 187 util.timer = time.perf_counter
188 188 elif os.name == b'nt':
189 189 util.timer = time.clock
190 190 else:
191 191 util.timer = time.time
192 192
193 193 # for "historical portability":
194 194 # use locally defined empty option list, if formatteropts isn't
195 195 # available, because commands.formatteropts has been available since
196 196 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
197 197 # available since 2.2 (or ae5f92e154d3)
198 198 formatteropts = getattr(
199 199 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
200 200 )
201 201
202 202 # for "historical portability":
203 203 # use locally defined option list, if debugrevlogopts isn't available,
204 204 # because commands.debugrevlogopts has been available since 3.7 (or
205 205 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
206 206 # since 1.9 (or a79fea6b3e77).
207 207 revlogopts = getattr(
208 208 cmdutil,
209 209 "debugrevlogopts",
210 210 getattr(
211 211 commands,
212 212 "debugrevlogopts",
213 213 [
214 214 (b'c', b'changelog', False, b'open changelog'),
215 215 (b'm', b'manifest', False, b'open manifest'),
216 216 (b'', b'dir', False, b'open directory manifest'),
217 217 ],
218 218 ),
219 219 )
220 220
221 221 cmdtable = {}
222 222
223 223 # for "historical portability":
224 224 # define parsealiases locally, because cmdutil.parsealiases has been
225 225 # available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a command spec like b'name|alias1|alias2' into its names."""
    return [alias for alias in cmd.split(b"|")]
228 228
229 229
230 230 if safehasattr(registrar, 'command'):
231 231 command = registrar.command(cmdtable)
232 232 elif safehasattr(cmdutil, 'command'):
233 233 command = cmdutil.command(cmdtable)
234 234 if 'norepo' not in getargspec(command).args:
235 235 # for "historical portability":
236 236 # wrap original cmdutil.command, because "norepo" option has
237 237 # been available since 3.1 (or 75a96326cecb)
238 238 _command = command
239 239
240 240 def command(name, options=(), synopsis=None, norepo=False):
241 241 if norepo:
242 242 commands.norepo += b' %s' % b' '.join(parsealiases(name))
243 243 return _command(name, list(options), synopsis)
244 244
245 245
246 246 else:
247 247 # for "historical portability":
248 248 # define "@command" annotation locally, because cmdutil.command
249 249 # has been available since 1.9 (or 2daa5179e73f)
250 250 def command(name, options=(), synopsis=None, norepo=False):
251 251 def decorator(func):
252 252 if synopsis:
253 253 cmdtable[name] = func, list(options), synopsis
254 254 else:
255 255 cmdtable[name] = func, list(options)
256 256 if norepo:
257 257 commands.norepo += b' %s' % b' '.join(parsealiases(name))
258 258 return func
259 259
260 260 return decorator
261 261
262 262
263 263 try:
264 264 import mercurial.registrar
265 265 import mercurial.configitems
266 266
267 267 configtable = {}
268 268 configitem = mercurial.registrar.configitem(configtable)
269 269 configitem(
270 270 b'perf',
271 271 b'presleep',
272 272 default=mercurial.configitems.dynamicdefault,
273 273 experimental=True,
274 274 )
275 275 configitem(
276 276 b'perf',
277 277 b'stub',
278 278 default=mercurial.configitems.dynamicdefault,
279 279 experimental=True,
280 280 )
281 281 configitem(
282 282 b'perf',
283 283 b'parentscount',
284 284 default=mercurial.configitems.dynamicdefault,
285 285 experimental=True,
286 286 )
287 287 configitem(
288 288 b'perf',
289 289 b'all-timing',
290 290 default=mercurial.configitems.dynamicdefault,
291 291 experimental=True,
292 292 )
293 293 configitem(
294 294 b'perf',
295 295 b'pre-run',
296 296 default=mercurial.configitems.dynamicdefault,
297 297 )
298 298 configitem(
299 299 b'perf',
300 300 b'profile-benchmark',
301 301 default=mercurial.configitems.dynamicdefault,
302 302 )
303 303 configitem(
304 304 b'perf',
305 305 b'run-limits',
306 306 default=mercurial.configitems.dynamicdefault,
307 307 experimental=True,
308 308 )
309 309 except (ImportError, AttributeError):
310 310 pass
311 311 except TypeError:
312 312 # compatibility fix for a11fd395e83f
313 313 # hg version: 5.2
314 314 configitem(
315 315 b'perf',
316 316 b'presleep',
317 317 default=mercurial.configitems.dynamicdefault,
318 318 )
319 319 configitem(
320 320 b'perf',
321 321 b'stub',
322 322 default=mercurial.configitems.dynamicdefault,
323 323 )
324 324 configitem(
325 325 b'perf',
326 326 b'parentscount',
327 327 default=mercurial.configitems.dynamicdefault,
328 328 )
329 329 configitem(
330 330 b'perf',
331 331 b'all-timing',
332 332 default=mercurial.configitems.dynamicdefault,
333 333 )
334 334 configitem(
335 335 b'perf',
336 336 b'pre-run',
337 337 default=mercurial.configitems.dynamicdefault,
338 338 )
339 339 configitem(
340 340 b'perf',
341 341 b'profile-benchmark',
342 342 default=mercurial.configitems.dynamicdefault,
343 343 )
344 344 configitem(
345 345 b'perf',
346 346 b'run-limits',
347 347 default=mercurial.configitems.dynamicdefault,
348 348 )
349 349
350 350
def getlen(ui):
    """Return len(), or a constant-one function when perf.stub is set.

    With perf.stub active, benchmarks iterate once, so pretending every
    collection has length 1 keeps them fast for testing.
    """
    if not ui.configbool(b"perf", b"stub", False):
        return len

    def stublen(collection):
        return 1

    return stublen
355 355
356 356
class noop(object):
    """Context manager that does nothing; stands in for a disabled profiler."""

    def __enter__(self):
        return None

    def __exit__(self, *exc_info):
        # returning None (falsy) lets any exception propagate
        return None


# shared do-nothing instance; safe to reuse because it holds no state
NOOPCTX = noop()
368 368
369 369
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter"""

            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short

            def __nonzero__(self):
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    # each entry is "<seconds>-<mincount>"; both halves must parse or the
    # entry is skipped with a warning
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
            continue
        try:
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    # optional profiler wrapping the first measured iteration only
    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
    )
    return t, fm
492 492
493 493
def stub_timer(fm, func, setup=None, title=None):
    """Run *setup* (when provided) and *func* exactly once, unmeasured.

    Substituted for _timer when perf.stub is set; *fm* and *title* are
    accepted only for signature compatibility.
    """
    has_setup = setup is not None
    if has_setup:
        setup()
    func()
498 498
499 499
@contextlib.contextmanager
def timeone():
    """Yield a list that receives one (wall, user, sys) triple on exit.

    The wall clock comes from util.timer; user/sys CPU deltas come from
    os.times().
    """
    samples = []
    os_before = os.times()
    wall_before = util.timer()
    yield samples
    wall_after = util.timer()
    os_after = os.times()
    samples.append(
        (
            wall_after - wall_before,
            os_after[0] - os_before[0],
            os_after[1] - os_before[1],
        )
    )
510 510
511 511
512 512 # list of stop condition (elapsed time, minimal run count)
513 513 DEFAULTLIMITS = (
514 514 (3.0, 100),
515 515 (10.0, 3),
516 516 )
517 517
518 518
def _timer(
    fm,
    func,
    setup=None,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
):
    """Repeatedly run *func* (after *setup*) until a limit is hit, then
    report timings through formatter *fm* via formatone().

    limits is a sequence of (elapsed_seconds, min_run_count) pairs; the
    loop stops at the first pair whose both conditions are satisfied.
    Only the first measured iteration runs under *profiler*.
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    # warm-up iterations, not measured
    for i in range(prerun):
        if setup is not None:
            setup()
        func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with profiler:
            with timeone() as item:
                r = func()
        # only profile the first measured iteration
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    formatone(fm, results, title=title, result=r, displayall=displayall)
558 558
559 559
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Emit one benchmark's results through formatter *fm*.

    *timings* is a list of (wall, user, sys) triples. Only the best run
    is shown unless *displayall* is true, in which case max, average and
    median are reported as well. Note: sorts *timings* in place.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def display(role, entry):
        # non-"best" rows get a role-prefixed field name (e.g. max.wall)
        prefix = b'' if role == b'best' else b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    display(b'best', timings[0])
    if displayall:
        display(b'max', timings[-1])
        averaged = tuple(sum(column) / count for column in zip(*timings))
        display(b'avg', averaged)
        display(b'median', timings[len(timings) // 2])
593 593
594 594
595 595 # utilities for historical portability
596 596
597 597
def getint(ui, section, name, default):
    """Read config ``section.name`` as an int, or *default* when unset.

    Implemented with ui.config + int() because ui.configint only exists
    since Mercurial 1.9 (or fa2b596db182).

    Raises error.ConfigError when the value is present but not numeric.
    """
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError(
            b"%s.%s is not an integer ('%s')" % (section, name, raw)
        )
610 610
611 611
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort(
            (
                b"missing attribute %s of %s might break assumption"
                b" of performance measurement"
            )
            % (name, obj)
        )

    # capture the current value now so restore() works later
    origvalue = getattr(obj, _sysstr(name))

    class attrutil(object):
        # setter/restorer closure over obj/name/origvalue
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
648 648
649 649
650 650 # utilities to examine each internal API changes
651 651
652 652
def getbranchmapsubsettable():
    """Return the 'subsettable' mapping wherever this Mercurial keeps it."""
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    # - repoviewutil since 5.0
    for mod in (branchmap, repoview, repoviewutil):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )
671 671
672 672
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store.

    Falls back to the pre-2.3 attribute name ``sopener`` when ``svfs``
    is absent (repo.svfs exists since 2.3 / 7034365089bf).
    """
    store_vfs = getattr(repo, 'svfs', None)
    if not store_vfs:
        return getattr(repo, 'sopener')
    return store_vfs
682 682
683 683
def getvfs(repo):
    """Return appropriate object to access files under .hg.

    Falls back to the pre-2.3 attribute name ``opener`` when ``vfs`` is
    absent (repo.vfs exists since 2.3 / 7034365089bf).
    """
    repo_vfs = getattr(repo, 'vfs', None)
    if not repo_vfs:
        return getattr(repo, 'opener')
    return repo_vfs
693 693
694 694
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API"""
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if '_tagscache' in vars(repo):
                del repo.__dict__['_tagscache']

        return clearcache

    # older APIs: a plain attribute that can simply be reset to None
    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")
723 723
724 724
725 725 # utilities to clear cache
726 726
727 727
def clearfilecache(obj, attrname):
    """Drop a @filecache'd property from *obj* so it is recomputed."""
    # operate on the unfiltered repo when obj is a repoview
    getunfiltered = getattr(obj, 'unfiltered', None)
    if getunfiltered is not None:
        obj = getunfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
735 735
736 736
def clearchangelog(repo):
    """Invalidate the changelog cache, including repoview cache keys."""
    if repo.unfiltered() is not repo:
        # bypass filteredpropertycache's setattr interception
        object.__setattr__(repo, '_clcachekey', None)
        object.__setattr__(repo, '_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')
742 742
743 743
744 744 # perf commands
745 745
746 746
@command(b'perf::walk|perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    # benchmark a full dirstate walk: tracked + unknown files, ignored excluded
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    timer(
        lambda: len(
            list(
                repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
            )
        )
    )
    fm.end()
760 760
761 761
@command(b'perf::annotate|perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    # benchmark annotating file *f* at the working directory's first parent
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fctx = repo[b'.'][f]
    timer(lambda: len(fctx.annotate(True)))
    fm.end()
769 769
770 770
@command(
    b'perf::status|perfstatus',
    [
        (b'u', b'unknown', False, b'ask status to look for unknown files'),
        (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
    ]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call

    The repository data are preserved between each call.

    By default, only the status of the tracked file are requested. If
    `--unknown` is passed, the "unknown" files are also tracked.
    """
    opts = _byteskwargs(opts)
    # m = match.always(repo.root, repo.getcwd())
    # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    #                                                False))))
    timer, fm = gettimer(ui, opts)
    if opts[b'dirstate']:
        # --dirstate: time the low-level dirstate.status call directly
        dirstate = repo.dirstate
        m = scmutil.matchall(repo)
        unknown = opts[b'unknown']

        def status_dirstate():
            s = dirstate.status(
                m, subrepos=[], ignored=False, clean=False, unknown=unknown
            )
            # consume the result so lazy parts are actually computed
            sum(map(bool, s))

        timer(status_dirstate)
    else:
        timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
807 807
808 808
@command(b'perf::addremove|perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    # benchmark a dry-run addremove over the whole working copy
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # Capture the quiet flag *before* entering the try block: when this
    # assignment lived inside it, any failure occurring before it ran made
    # the finally clause raise NameError, masking the original error.
    oldquiet = repo.ui.quiet
    try:
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        if 'uipathfn' in getargspec(scmutil.addremove).args:
            # the uipathfn argument was added to scmutil.addremove later on
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
826 826
827 827
def clearcaches(cl):
    """Drop a changelog/revlog's internal lookup caches."""
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # <= hg-5.2
        from mercurial.node import nullid, nullrev

        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
838 838
839 839
@command(b'perf::heads|perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def reset():
        # drop lookup caches so every run recomputes from scratch
        clearcaches(cl)

    def run():
        len(cl.headrevs())

    timer(run, setup=reset)
    fm.end()
855 855
856 856
@command(
    b'perf::tags|perftags',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perftags(ui, repo, **opts):
    # benchmark reading/computing the repository's tags
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']

    def s():
        # optionally drop revlog caches so I/O cost is included
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()

    def t():
        return len(repo.tags())

    timer(t, setup=s)
    fm.end()
881 881
882 882
@command(b'perf::ancestors|perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    # benchmark walking all ancestors of every head
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()

    def run():
        # exhaust the iterator; the traversal itself is what we measure
        for _ancestor in repo.changelog.ancestors(heads):
            pass

    timer(run)
    fm.end()
895 895
896 896
@command(b'perf::ancestorset|perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    # benchmark membership tests of REVSET's revs against the ancestor set
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()

    def run():
        membership = repo.changelog.ancestors(heads)
        for rev in revs:
            # the membership test is the operation under measurement
            rev in membership

    timer(run)
    fm.end()
911 911
912 912
@command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path"""
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)

    # The scraped diff left the removed pre-image statement
    # (`path = ui.expandpath(path)`) in place above this hunk, which would
    # resolve the path twice; only the try/except below must remain.
    try:
        from mercurial.utils.urlutil import get_unique_pull_path

        # modern API (hg >= 5.8): resolve PATH through the path registry
        path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
    except ImportError:
        # older Mercurial: urlutil does not exist yet
        path = ui.expandpath(path)

    def s():
        # a fresh peer per run so discovery starts from a clean slate
        repos[1] = hg.peer(ui, opts, path)

    def d():
        setdiscovery.findcommonheads(ui, *repos)

    timer(d, setup=s)
    fm.end()
928 934
929 935
@command(
    b'perf::bookmarks|perfbookmarks',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']

    def s():
        # optionally drop revlog caches so I/O cost is included
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')

    def d():
        # property access triggers the bookmark file parse
        repo._bookmarks

    timer(d, setup=s)
    fm.end()
954 960
955 961
@command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    # each maker below returns a closure that re-opens the bundle file, so
    # every timed run includes the open+readbundle cost

    def makebench(fn):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # baseline: raw file reads without any bundle decoding
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    # raw-read baselines are always benchmarked
    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # sniff the bundle type once to pick the applicable benchmarks
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend(
                [
                    (makebench(deltaiter), b'cg1 deltaiter()'),
                    (makebench(iterchunks), b'cg1 getchunks()'),
                    (makereadnbytes(8192), b'cg1 read(8k)'),
                    (makereadnbytes(16384), b'cg1 read(16k)'),
                    (makereadnbytes(32768), b'cg1 read(32k)'),
                    (makereadnbytes(131072), b'cg1 read(128k)'),
                ]
            )
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend(
                [
                    (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                    (makebench(iterparts), b'bundle2 iterparts()'),
                    (
                        makebench(iterpartsseekable),
                        b'bundle2 iterparts() seekable',
                    ),
                    (makebench(seek), b'bundle2 part seek()'),
                    (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                    (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                    (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                    (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
                ]
            )
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1080 1086
1081 1087
@command(
    b'perf::changegroupchangelog|perfchangegroupchangelog',
    formatteropts
    + [
        (b'', b'cgversion', b'02', b'changegroup version'),
        (b'r', b'rev', b'', b'revisions to add to changegroup'),
    ],
)
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    # resolve the requested revset (default: every revision) to nodes
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        # generate and exhaust the changelog chunk stream; chunks are
        # discarded, we only pay (and measure) the generation cost
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()
1117 1123
1118 1124
@command(b'perf::dirs|perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark the computation of the dirstate `_dirs` directory map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # touch the dirstate once so loading it is not part of the timing
    b'a' in ds

    def measure():
        ds.hasdir(b'a')
        # drop the cache so the next run rebuilds it from scratch
        del ds._map._dirs

    timer(measure)
    fm.end()
1132 1138
1133 1139
@command(
    b'perf::dirstate|perfdirstate',
    [
        (
            b'',
            b'iteration',
            None,
            b'benchmark a full iteration for the dirstate',
        ),
        (
            b'',
            b'contains',
            None,
            b'benchmark a large amount of `nf in dirstate` calls',
        ),
    ]
    + formatteropts,
)
def perfdirstate(ui, repo, **opts):
    """benchmark the time of various dirstate operations

    By default benchmark the time necessary to load a dirstate from scratch.
    The dirstate is loaded to the point where a "contains" request can be
    answered.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # warm the dirstate up-front so timing starts from a loaded state
    b"a" in repo.dirstate

    if opts[b'iteration'] and opts[b'contains']:
        msg = b'only specify one of --iteration or --contains'
        raise error.Abort(msg)

    if opts[b'iteration']:
        # benchmark a full iteration over every tracked file
        setup = None
        dirstate = repo.dirstate

        def d():
            for f in dirstate:
                pass

    elif opts[b'contains']:
        # benchmark membership tests, both hits and guaranteed misses
        setup = None
        dirstate = repo.dirstate
        allfiles = list(dirstate)
        # also add file path that will be "missing" from the dirstate
        allfiles.extend([f[::-1] for f in allfiles])

        def d():
            for f in allfiles:
                f in dirstate

    else:
        # default: benchmark a cold load of the dirstate

        def setup():
            repo.dirstate.invalidate()

        def d():
            b"a" in repo.dirstate

    timer(d, setup=setup)
    fm.end()
1196 1202
1197 1203
@command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark a 'dirstate.hasdir' call from an empty `dirs` cache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # prime the dirstate so only the `_dirs` rebuild is measured
    repo.dirstate.hasdir(b"a")

    def setup():
        # drop the cached directory map before each run
        del repo.dirstate._map._dirs

    def d():
        repo.dirstate.hasdir(b"a")

    timer(d, setup=setup)
    fm.end()
1213 1219
1214 1220
@command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.filefoldmap.get()` request

    The dirstate filefoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # warm the dirstate and the foldmap once before timing
    dirstate._map.filefoldmap.get(b'a')

    def setup():
        # drop the cached case-folding map so each run recomputes it
        del dirstate._map.filefoldmap

    def d():
        dirstate._map.filefoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()
1234 1240
1235 1241
@command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.dirfoldmap.get()` request

    The dirstate dirfoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # warm the dirstate and the dirfoldmap once before timing
    dirstate._map.dirfoldmap.get(b'a')

    def setup():
        # drop both the fold map and the underlying directory map, since
        # rebuilding the former depends on the latter
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs

    def d():
        dirstate._map.dirfoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()
1256 1262
1257 1263
@command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark the time it takes to write a dirstate on disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # make sure the dirstate is loaded before timing the writes
    b"a" in ds

    def setup():
        # mark the dirstate dirty, otherwise write() would be a no-op
        ds._dirty = True

    def d():
        ds.write(repo.currenttransaction())

    timer(d, setup=setup)
    fm.end()
1274 1280
1275 1281
def _getmergerevs(repo, opts):
    """parse command arguments to return the revs involved in a merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        fromrev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[fromrev]
    else:
        # default to the common ancestor of the two merged revisions
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
1297 1303
1298 1304
@command(
    b'perf::mergecalculate|perfmergecalculate',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecalculate(ui, repo, **opts):
    """benchmark `merge.calculateupdates` for a given merge"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(
            repo,
            wctx,
            rctx,
            [ancestor],
            branchmerge=False,
            force=False,
            acceptremote=True,
            followcopies=True,
        )

    timer(d)
    fm.end()
1330 1336
1331 1337
@command(
    b'perf::mergecopies|perfmergecopies',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        # benchmark the copy-tracing pass performed during a merge
        copies.mergecopies(repo, wctx, rctx, ancestor)

    timer(d)
    fm.end()
1354 1360
1355 1361
@command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    firstctx = scmutil.revsingle(repo, rev1, rev1)
    secondctx = scmutil.revsingle(repo, rev2, rev2)

    def tracecopies():
        copies.pathcopies(firstctx, secondctx)

    timer(tracecopies)
    fm.end()
1369 1375
1370 1376
@command(
    b'perf::phases|perfphases',
    [
        (b'', b'full', False, b'include file reading time too'),
    ],
    b"",
)
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')

    def d():
        phases = _phases
        if full:
            # also drop the cached object so on-disk reading is timed too
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)

    timer(d)
    fm.end()
1395 1401
1396 1402
@command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import bin
    from mercurial import (
        exchange,
        hg,
        phases,
    )

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            b'default repository not configured!',
            hint=b"see 'hg help config.paths'",
        )
    dest = path.pushloc or path.loc
    ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    # fetch the raw phase roots advertised by the remote
    with other.commandexecutor() as e:
        remotephases = e.callcommand(
            b'listkeys', {b'namespace': b'phases'}
        ).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.statusnoi18n(b'publishing: yes\n')
    else:
        ui.statusnoi18n(b'publishing: no\n')

    # resolve a nodemap-membership callable across hg versions
    has_node = getattr(repo.changelog.index, 'has_node', None)
    if has_node is None:
        has_node = repo.changelog.nodemap.__contains__
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if has_node(node) and int(phase):
            nonpublishroots += 1
    ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
    ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)

    def d():
        phases.remotephasessummary(repo, remotesubset, remotephases)

    timer(d)
    fm.end()
1455 1461
1456 1462
@command(
    b'perf::manifest|perfmanifest',
    [
        (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
        (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ]
    + formatteropts,
    b'REV|NODE',
)
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # `rev` names a changeset: resolve its manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # a full hexadecimal manifest node
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                # getstorage is the modern API; fall back for older hg
                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(
                    b'manifest revision must be integer or full node'
                )

    def d():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()

    timer(d)
    fm.end()
1500 1506
1501 1507
@command(b'perf::changeset|perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading one changeset entry from the changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    node = scmutil.revsingle(repo, rev).node()

    def readchangeset():
        repo.changelog.read(node)
        # repo.changelog._cache = None

    timer(readchangeset)
    fm.end()
1514 1520
1515 1521
@command(b'perf::ignore|perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate

    def resetignore():
        # drop the dirstate and its cached ignore matcher so the next
        # access rebuilds everything from the ignore files
        ds.invalidate()
        clearfilecache(ds, b'_ignore')

    def loadignore():
        ds._ignore

    timer(loadignore, setup=resetignore, title=b"load")
    fm.end()
1532 1538
1533 1539
@command(
    b'perf::index|perfindex',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'no-lookup', None, b'do not revision lookup post creation'),
    ]
    + formatteropts,
)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:

    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        # all option keys were converted to bytes by _byteskwargs above, so
        # the lookup must use a bytes key (opts['rev'] raised KeyError);
        # the Abort message is bytes for consistency with the other aborts
        if opts[b'rev']:
            raise error.Abort(b'--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func

    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)

    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)

    timer(d, setup=setup)
    fm.end()
1596 1602
1597 1603
@command(
    b'perf::nodemap|perfnodemap',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ]
    + formatteropts,
)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]

    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        cl = makecl(unfi)
        # modern indexes expose get_rev; fall back to the legacy nodemap
        if util.safehasattr(cl.index, 'get_rev'):
            nodeget[0] = cl.index.get_rev
        else:
            nodeget[0] = cl.nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:

        def setup():
            setnodeget()

    else:
        setnodeget()
        d()  # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
1668 1674
1669 1675
@command(b'perf::startup|perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark the time to start and tear down a `hg version` process"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def d():
        if os.name != 'nt':
            # clear HGRCPATH so user configuration does not skew the run
            os.system(
                b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
            )
        else:
            # on Windows, neutralize HGRCPATH through the environment instead
            os.environ['HGRCPATH'] = r' '
            os.system("%s version -q > NUL" % sys.argv[0])

    timer(d)
    fm.end()
1686 1692
1687 1693
@command(b'perf::parents|perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    # use the unfiltered repo so view filtering does not affect timing
    repo = repo.unfiltered()
    nl = [repo.changelog.node(i) for i in _xrange(count)]

    def d():
        for n in nl:
            repo.changelog.parents(n)

    timer(d)
    fm.end()
1713 1719
1714 1720
@command(b'perf::ctxfiles|perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark computing the file list of a changectx"""
    opts = _byteskwargs(opts)
    targetrev = int(x)
    timer, fm = gettimer(ui, opts)

    def countfiles():
        len(repo[targetrev].files())

    timer(countfiles)
    fm.end()
1726 1732
1727 1733
@command(b'perf::rawfiles|perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the file list straight from the changelog"""
    opts = _byteskwargs(opts)
    targetrev = int(x)
    timer, fm = gettimer(ui, opts)
    changelog = repo.changelog

    def countrawfiles():
        # index 3 of changelog.read() holds the list of touched files
        len(changelog.read(targetrev)[3])

    timer(countrawfiles)
    fm.end()
1740 1746
1741 1747
@command(b'perf::lookup|perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision identifier to a node"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def dolookup():
        return len(repo.lookup(rev))

    timer(dolookup)
    fm.end()
1748 1754
1749 1755
@command(
    b'perf::linelogedits|perflinelogedits',
    [
        (b'n', b'edits', 10000, b'number of edits'),
        (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
    ],
    norepo=True,
)
def perflinelogedits(ui, **opts):
    """benchmark a sequence of random replacelines calls on a fresh linelog"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed so every run benchmarks the exact same edit sequence
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        # replace random hunk [a1, a2) with new content lines [b1, b2)
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1787 1793
1788 1794
@command(b'perf::revrange|perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revset specifications"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def resolve():
        return len(scmutil.revrange(repo, specs))

    timer(resolve)
    fm.end()
1796 1802
1797 1803
@command(b'perf::nodelookup|perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark looking up a node in a freshly loaded revlog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog

    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")

    def d():
        cl.rev(n)
        # drop the revlog caches so every run performs a cold lookup
        clearcaches(cl)

    timer(d)
    fm.end()
1814 1820
1815 1821
@command(
    b'perf::log|perflog',
    [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
)
def perflog(ui, repo, rev=None, **opts):
    """benchmark the time to run `hg log`"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    # buffer output so terminal printing does not pollute the timing
    ui.pushbuffer()
    timer(
        lambda: commands.log(
            ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
        )
    )
    ui.popbuffer()
    fm.end()
1833 1839
1834 1840
@command(b'perf::moonwalk|perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def walkbackwards():
        lastrev = len(repo) - 1
        for rev in repo.changelog.revs(start=lastrev, stop=-1):
            ctx = repo[rev]
            ctx.branch()  # read changelog data (in addition to the index)

    timer(walkbackwards)
    fm.end()
1851 1857
1852 1858
@command(
    b'perf::templating|perftemplating',
    [
        (b'r', b'rev', [], b'revisions to run the template on'),
    ]
    + formatteropts,
)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort(
            b"perftemplating not available with this Mercurial",
            hint=b"use 4.3 or later",
        )

    opts = _byteskwargs(opts)

    # render into a devnull-backed ui so output costs stay out of the timing
    nullui = ui.copy()
    nullui.fout = open(os.devnull, 'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (
        b'{date|shortdate} [{rev}:{node|short}]'
        b' {author|person}: {desc|firstline}\n'
    )
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)

    def format():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()
1895 1901
1896 1902
1897 1903 def _displaystats(ui, opts, entries, data):
1898 1904 # use a second formatter because the data are quite different, not sure
1899 1905 # how it flies with the templater.
1900 1906 fm = ui.formatter(b'perf-stats', opts)
1901 1907 for key, title in entries:
1902 1908 values = data[key]
1903 1909 nbvalues = len(data)
1904 1910 values.sort()
1905 1911 stats = {
1906 1912 'key': key,
1907 1913 'title': title,
1908 1914 'nbitems': len(values),
1909 1915 'min': values[0][0],
1910 1916 '10%': values[(nbvalues * 10) // 100][0],
1911 1917 '25%': values[(nbvalues * 25) // 100][0],
1912 1918 '50%': values[(nbvalues * 50) // 100][0],
1913 1919 '75%': values[(nbvalues * 75) // 100][0],
1914 1920 '80%': values[(nbvalues * 80) // 100][0],
1915 1921 '85%': values[(nbvalues * 85) // 100][0],
1916 1922 '90%': values[(nbvalues * 90) // 100][0],
1917 1923 '95%': values[(nbvalues * 95) // 100][0],
1918 1924 '99%': values[(nbvalues * 99) // 100][0],
1919 1925 'max': values[-1][0],
1920 1926 }
1921 1927 fm.startitem()
1922 1928 fm.data(**stats)
1923 1929 # make node pretty for the human output
1924 1930 fm.plain('### %s (%d items)\n' % (title, len(values)))
1925 1931 lines = [
1926 1932 'min',
1927 1933 '10%',
1928 1934 '25%',
1929 1935 '50%',
1930 1936 '75%',
1931 1937 '80%',
1932 1938 '85%',
1933 1939 '90%',
1934 1940 '95%',
1935 1941 '99%',
1936 1942 'max',
1937 1943 ]
1938 1944 for l in lines:
1939 1945 fm.plain('%s: %s\n' % (l, stats[l]))
1940 1946 fm.end()
1941 1947
1942 1948
1943 1949 @command(
1944 1950 b'perf::helper-mergecopies|perfhelper-mergecopies',
1945 1951 formatteropts
1946 1952 + [
1947 1953 (b'r', b'revs', [], b'restrict search to these revisions'),
1948 1954 (b'', b'timing', False, b'provides extra data (costly)'),
1949 1955 (b'', b'stats', False, b'provides statistic about the measured data'),
1950 1956 ],
1951 1957 )
1952 1958 def perfhelpermergecopies(ui, repo, revs=[], **opts):
1953 1959 """find statistics about potential parameters for `perfmergecopies`
1954 1960
1955 1961 This command find (base, p1, p2) triplet relevant for copytracing
1956 1962 benchmarking in the context of a merge. It reports values for some of the
1957 1963 parameters that impact merge copy tracing time during merge.
1958 1964
1959 1965 If `--timing` is set, rename detection is run and the associated timing
1960 1966 will be reported. The extra details come at the cost of slower command
1961 1967 execution.
1962 1968
1963 1969 Since rename detection is only run once, other factors might easily
1964 1970 affect the precision of the timing. However it should give a good
1965 1971 approximation of which revision triplets are very costly.
1966 1972 """
1967 1973 opts = _byteskwargs(opts)
1968 1974 fm = ui.formatter(b'perf', opts)
1969 1975 dotiming = opts[b'timing']
1970 1976 dostats = opts[b'stats']
1971 1977
1972 1978 output_template = [
1973 1979 ("base", "%(base)12s"),
1974 1980 ("p1", "%(p1.node)12s"),
1975 1981 ("p2", "%(p2.node)12s"),
1976 1982 ("p1.nb-revs", "%(p1.nbrevs)12d"),
1977 1983 ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
1978 1984 ("p1.renames", "%(p1.renamedfiles)12d"),
1979 1985 ("p1.time", "%(p1.time)12.3f"),
1980 1986 ("p2.nb-revs", "%(p2.nbrevs)12d"),
1981 1987 ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
1982 1988 ("p2.renames", "%(p2.renamedfiles)12d"),
1983 1989 ("p2.time", "%(p2.time)12.3f"),
1984 1990 ("renames", "%(nbrenamedfiles)12d"),
1985 1991 ("total.time", "%(time)12.3f"),
1986 1992 ]
1987 1993 if not dotiming:
1988 1994 output_template = [
1989 1995 i
1990 1996 for i in output_template
1991 1997 if not ('time' in i[0] or 'renames' in i[0])
1992 1998 ]
1993 1999 header_names = [h for (h, v) in output_template]
1994 2000 output = ' '.join([v for (h, v) in output_template]) + '\n'
1995 2001 header = ' '.join(['%12s'] * len(header_names)) + '\n'
1996 2002 fm.plain(header % tuple(header_names))
1997 2003
1998 2004 if not revs:
1999 2005 revs = ['all()']
2000 2006 revs = scmutil.revrange(repo, revs)
2001 2007
2002 2008 if dostats:
2003 2009 alldata = {
2004 2010 'nbrevs': [],
2005 2011 'nbmissingfiles': [],
2006 2012 }
2007 2013 if dotiming:
2008 2014 alldata['parentnbrenames'] = []
2009 2015 alldata['totalnbrenames'] = []
2010 2016 alldata['parenttime'] = []
2011 2017 alldata['totaltime'] = []
2012 2018
2013 2019 roi = repo.revs('merge() and %ld', revs)
2014 2020 for r in roi:
2015 2021 ctx = repo[r]
2016 2022 p1 = ctx.p1()
2017 2023 p2 = ctx.p2()
2018 2024 bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
2019 2025 for b in bases:
2020 2026 b = repo[b]
2021 2027 p1missing = copies._computeforwardmissing(b, p1)
2022 2028 p2missing = copies._computeforwardmissing(b, p2)
2023 2029 data = {
2024 2030 b'base': b.hex(),
2025 2031 b'p1.node': p1.hex(),
2026 2032 b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
2027 2033 b'p1.nbmissingfiles': len(p1missing),
2028 2034 b'p2.node': p2.hex(),
2029 2035 b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
2030 2036 b'p2.nbmissingfiles': len(p2missing),
2031 2037 }
2032 2038 if dostats:
2033 2039 if p1missing:
2034 2040 alldata['nbrevs'].append(
2035 2041 (data['p1.nbrevs'], b.hex(), p1.hex())
2036 2042 )
2037 2043 alldata['nbmissingfiles'].append(
2038 2044 (data['p1.nbmissingfiles'], b.hex(), p1.hex())
2039 2045 )
2040 2046 if p2missing:
2041 2047 alldata['nbrevs'].append(
2042 2048 (data['p2.nbrevs'], b.hex(), p2.hex())
2043 2049 )
2044 2050 alldata['nbmissingfiles'].append(
2045 2051 (data['p2.nbmissingfiles'], b.hex(), p2.hex())
2046 2052 )
2047 2053 if dotiming:
2048 2054 begin = util.timer()
2049 2055 mergedata = copies.mergecopies(repo, p1, p2, b)
2050 2056 end = util.timer()
2051 2057 # not very stable timing since we did only one run
2052 2058 data['time'] = end - begin
2053 2059 # mergedata contains five dicts: "copy", "movewithdir",
2054 2060 # "diverge", "renamedelete" and "dirmove".
2055 2061 # The first 4 are about renamed file so lets count that.
2056 2062 renames = len(mergedata[0])
2057 2063 renames += len(mergedata[1])
2058 2064 renames += len(mergedata[2])
2059 2065 renames += len(mergedata[3])
2060 2066 data['nbrenamedfiles'] = renames
2061 2067 begin = util.timer()
2062 2068 p1renames = copies.pathcopies(b, p1)
2063 2069 end = util.timer()
2064 2070 data['p1.time'] = end - begin
2065 2071 begin = util.timer()
2066 2072 p2renames = copies.pathcopies(b, p2)
2067 2073 end = util.timer()
2068 2074 data['p2.time'] = end - begin
2069 2075 data['p1.renamedfiles'] = len(p1renames)
2070 2076 data['p2.renamedfiles'] = len(p2renames)
2071 2077
2072 2078 if dostats:
2073 2079 if p1missing:
2074 2080 alldata['parentnbrenames'].append(
2075 2081 (data['p1.renamedfiles'], b.hex(), p1.hex())
2076 2082 )
2077 2083 alldata['parenttime'].append(
2078 2084 (data['p1.time'], b.hex(), p1.hex())
2079 2085 )
2080 2086 if p2missing:
2081 2087 alldata['parentnbrenames'].append(
2082 2088 (data['p2.renamedfiles'], b.hex(), p2.hex())
2083 2089 )
2084 2090 alldata['parenttime'].append(
2085 2091 (data['p2.time'], b.hex(), p2.hex())
2086 2092 )
2087 2093 if p1missing or p2missing:
2088 2094 alldata['totalnbrenames'].append(
2089 2095 (
2090 2096 data['nbrenamedfiles'],
2091 2097 b.hex(),
2092 2098 p1.hex(),
2093 2099 p2.hex(),
2094 2100 )
2095 2101 )
2096 2102 alldata['totaltime'].append(
2097 2103 (data['time'], b.hex(), p1.hex(), p2.hex())
2098 2104 )
2099 2105 fm.startitem()
2100 2106 fm.data(**data)
2101 2107 # make node pretty for the human output
2102 2108 out = data.copy()
2103 2109 out['base'] = fm.hexfunc(b.node())
2104 2110 out['p1.node'] = fm.hexfunc(p1.node())
2105 2111 out['p2.node'] = fm.hexfunc(p2.node())
2106 2112 fm.plain(output % out)
2107 2113
2108 2114 fm.end()
2109 2115 if dostats:
2110 2116 # use a second formatter because the data are quite different, not sure
2111 2117 # how it flies with the templater.
2112 2118 entries = [
2113 2119 ('nbrevs', 'number of revision covered'),
2114 2120 ('nbmissingfiles', 'number of missing files at head'),
2115 2121 ]
2116 2122 if dotiming:
2117 2123 entries.append(
2118 2124 ('parentnbrenames', 'rename from one parent to base')
2119 2125 )
2120 2126 entries.append(('totalnbrenames', 'total number of renames'))
2121 2127 entries.append(('parenttime', 'time for one parent'))
2122 2128 entries.append(('totaltime', 'time for both parents'))
2123 2129 _displaystats(ui, opts, entries, alldata)
2124 2130
2125 2131
@command(
    b'perf::helper-pathcopies|perfhelper-pathcopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # Pick header/row templates up front; the timing variant carries two
    # extra columns (rename count and elapsed time).
    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d "
            "%(nbrenamedfiles)12d %(time)18.5f\n"
        )
        header_names = (
            "source",
            "destination",
            "nb-revs",
            "nb-files",
            "nb-renames",
            "time",
        )
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d\n"
        )
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    # accumulators for the optional --stats summary, keyed by metric name
    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['nbrenames'] = []
            alldata['time'] = []

    # only merge commits are interesting: one (base, parent) pair per
    # common-ancestor head and per parent
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    # nothing to trace between these two; skip the pair
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
                    b'nbmissingfiles': len(missing),
                }
                if dostats:
                    alldata['nbrevs'].append(
                        (
                            data['nbrevs'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                    alldata['nbmissingfiles'].append(
                        (
                            data['nbmissingfiles'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                    if dostats:
                        alldata['time'].append(
                            (
                                data['time'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                        alldata['nbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                fm.startitem()
                fm.data(**data)
                # use short hashes for the human-readable plain output
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
    if dostats:
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('nbrenames', 'renamed files'))
            entries.append(('time', 'time'))
        _displaystats(ui, opts, entries, alldata)
2264 2270
2265 2271
@command(b'perf::cca|perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """Benchmark constructing a case collision auditor."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def build():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(build)
    fm.end()
2272 2278
2273 2279
@command(b'perf::fncacheload|perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """Benchmark loading the fncache from the store."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    timer(lambda: store.fncache._load())
    fm.end()
2285 2291
2286 2292
@command(b'perf::fncachewrite|perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """Benchmark rewriting the fncache file inside a transaction."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    lock = repo.lock()
    store.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    tr.addbackup(b'fncache')

    def write():
        # force a real write on every run even though nothing changed
        store.fncache._dirty = True
        store.fncache.write(tr)

    timer(write)
    tr.close()
    lock.release()
    fm.end()
2305 2311
2306 2312
@command(b'perf::fncacheencode|perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """Benchmark encoding every path currently stored in the fncache."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    store.fncache._load()

    def encodeall():
        for path in store.fncache.entries:
            store.encode(path)

    timer(encodeall)
    fm.end()
2320 2326
2321 2327
def _bdiffworker(q, blocks, xdiff, ready, done):
    # Worker loop for perfbdiff --threads.
    #
    # Protocol: pull text pairs from queue ``q`` and diff each one; a
    # ``None`` item marks the end of a batch.  After each batch the worker
    # parks on the ``ready`` condition until the driver wakes all workers
    # for the next run.  The ``done`` event tells workers to exit for good.
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            # pick the diff flavor requested on the command line
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done()  # for the None one
        with ready:
            ready.wait()
2337 2343
2338 2344
def _manifestrevision(repo, mnode):
    """Return the raw manifest revision for node ``mnode``.

    Works across Mercurial versions: modern manifestlogs expose
    ``getstorage()``, older ones expose ``_revlog`` directly.
    """
    ml = repo.manifestlog

    if util.safehasattr(ml, b'getstorage'):
        return ml.getstorage(b'').revision(mnode)
    return ml._revlog.revision(mnode)
2348 2354
2349 2355
@command(
    b'perf::bdiff|perfbdiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
        (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
        (b'', b'blocks', False, b'test computing diffs into blocks'),
        (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],
    b'-c|-m|FILE REV',
)
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    # with -c/-m the positional argument is a revision, not a file
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    # gather all (old, new) text pairs up front so only diffing is timed
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                # ``or -1`` falls back to the null revision when the file
                # is absent on one side of the manifest diff
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:

        def d():
            # single-threaded: diff every pair inline
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)

    else:
        # threaded: start the workers and drain one warm-up batch of
        # ``None`` sentinels before any timing happens
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(
                target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
            ).start()
        q.join()

        def d():
            # feed one batch of pairs plus one sentinel per worker, then
            # wake the workers and wait for the batch to be consumed
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # shut the workers down: set the exit flag, unblock their q.get()
        # with sentinels and wake any worker parked on ``ready``
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
2464 2470
2465 2471
@command(
    b'perf::unidiff|perfunidiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ],
    b'-c|-m|FILE REV',
)
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    # with -c/-m the positional argument is a revision, not a file
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    # gather all (left, right) text pairs up front so only diffing is timed
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                # ``or -1`` falls back to the null revision when the file
                # is absent on one side of the manifest diff
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False
            )
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
2544 2550
2545 2551
@command(b'perf::diffwd|perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # map each single-letter diff flag to the commands.diff keyword it sets
    flag_to_option = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    # time `hg diff` once per whitespace-option combination
    for flags in ('', 'w', 'b', 'B', 'wB'):
        diffargs = {flag_to_option[c]: b'1' for c in flags}

        def run(diffargs=diffargs):
            ui.pushbuffer()
            commands.diff(ui, repo, **diffargs)
            ui.popbuffer()

        if flags:
            title = b'diffopts: -%s' % flags.encode('ascii')
        else:
            title = b'diffopts: none'
        timer(run, title=title)
    fm.end()
2569 2575
2570 2576
@command(
    b'perf::revlogindex|perfrevlogindex',
    revlogopts + formatteropts,
    b'-c|-m|FILE',
)
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    indexfile = rl.indexfile
    data = opener.read(indexfile)

    # the first 4 bytes of a revlog hold flags (high 16 bits) and the
    # format version (low 16 bits)
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        inline = header & (1 << 16)
    else:
        raise error.Abort(b'unsupported revlog version: %d' % version)

    rllen = len(rl)

    # sample nodes at fixed points of the revlog for the lookup benchmarks
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        revlog.revlog(opener, indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = revlogio.parseindex(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        # parse a fresh index each call so parser-level caches don't help;
        # older indexes lack ``rev()`` and only expose a ``nodemap``
        index = revlogio.parseindex(data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(
                revlogio.parseindex(data, inline)[0], 'nodemap', None
            )
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        try:
            rev(node)
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        # same fallback dance as resolvenode(), but over a batch of nodes
        index = revlogio.parseindex(data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(
                revlogio.parseindex(data, inline)[0], 'nodemap', None
            )
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        for i in range(count):
            for node in nodes:
                try:
                    rev(node)
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
        (
            lambda: resolvenodes(allnodesrev, 2),
            b'look up all nodes 2x (reverse)',
        ),
        (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
        (
            lambda: getentries(allrevs, 2),
            b'retrieve all index entries 2x (forward)',
        ),
        (
            lambda: getentries(allrevsrev),
            b'retrieve all index entries (reverse)',
        ),
        (
            lambda: getentries(allrevsrev, 2),
            b'retrieve all index entries 2x (reverse)',
        ),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2708 2714
2709 2715
@command(
    b'perf::revlogrevisions|perfrevlogrevisions',
    revlogopts
    + formatteropts
    + [
        (b'd', b'dist', 100, b'distance between the revisions'),
        (b's', b'startrev', 0, b'revision to start reading at'),
        (b'', b'reverse', False, b'read in reverse'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogrevisions(
    ui, repo, file_=None, startrev=0, reverse=False, **opts
):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # negative start revisions count from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev

    def read():
        rl.clearcaches()

        step = opts[b'dist']
        if reverse:
            first, last = rllen - 1, startrev - 1
            step = -step
        else:
            first, last = startrev, rllen

        for cur in _xrange(first, last, step):
            # Old revisions don't support passing int.
            rl.revision(rl.node(cur))

    timer, fm = gettimer(ui, opts)
    timer(read)
    fm.end()
2758 2764
2759 2765
@command(
    b'perf::revlogwrite|perfrevlogwrite',
    revlogopts
    + formatteropts
    + [
        (b's', b'startrev', 1000, b'revision to start writing at'),
        (b'', b'stoprev', -1, b'last revision to write'),
        (b'', b'count', 3, b'number of passes to perform'),
        (b'', b'details', False, b'print timing for every revisions tested'),
        (b'', b'source', b'full', b'the kind of data feed in the revlog'),
        (b'', b'lazydeltabase', True, b'try the provided delta first'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
                  (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    Note: This performance command measures performance in a custom way. As a
    result some of the global configuration of the 'perf' command does not
    apply to it:

    * ``pre-run``: disabled

    * ``profile-benchmark``: disabled

    * ``run-limits``: disabled use --count instead
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative revision numbers count from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (
        b'full',
        b'parent-1',
        b'parent-2',
        b'parent-smallest',
        b'storage',
    )
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(
            ui,
            rl,
            source,
            startrev,
            stoprev,
            c + 1,
            lazydeltabase=lazydeltabase,
            clearcaches=clearcaches,
        )
        allresults.append(timing)

    ### consolidate the results in a single list
    # allresults holds one [(rev, timing), ...] list per pass; zip them
    # into [(rev, [timing-pass-1, timing-pass-2, ...]), ...]
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display
    # (the 50% entry previously used a stray factor of 70, reporting the
    #  70th percentile under the "50%" label)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append(
            (
                sum(x[1][0] for x in item),
                sum(x[1][1] for x in item),
                sum(x[1][2] for x in item),
            )
        )
    formatone(
        fm,
        totaltime,
        title="total time (%d revs)" % resultcount,
        displayall=displayall,
    )
    fm.end()
2901 2907
2902 2908
2903 2909 class _faketr(object):
2904 2910 def add(s, x, y, z=None):
2905 2911 return None
2906 2912
2907 2913
def _timeonewrite(
    ui,
    orig,
    source,
    startrev,
    stoprev,
    runidx=None,
    lazydeltabase=True,
    clearcaches=True,
):
    """Replay revisions [startrev, stoprev] of ``orig`` into a throwaway
    revlog and time each addrawrevision() call.

    ``source`` selects how each revision is fed in (see perfrevlogwrite).
    Returns a list of (rev, timing) pairs, one per replayed revision.
    """
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)

            def updateprogress(pos):
                progress.update(pos)

            def completeprogress():
                progress.complete()

        else:

            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)

            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                # drop caches so each write is measured cold
                dest.index.clearcaches()
                dest.clearcaches()
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
2957 2963
2958 2964
def _getrevisionseed(orig, rev, tr, source):
    """Build the (args, kwargs) pair for re-adding ``rev`` of ``orig``
    through addrawrevision().

    ``source`` selects the payload: a full text, a delta against one of
    the parents, or the delta already stored in ``orig``.
    """
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        baserev = orig.rev(p1)
        cachedelta = (baserev, orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to p1 when there is no second parent
        parent = p2
        if p2 == nullid:
            parent = p1
        baserev = orig.rev(parent)
        cachedelta = (baserev, orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        # compute both parent deltas and keep the shorter one (p1 on ties)
        p1diff = orig.revdiff(p1, rev)
        parent = p1
        diff = p1diff
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(p1diff) > len(p2diff):
                parent = p2
                diff = p2diff
        baserev = orig.rev(parent)
        cachedelta = (baserev, diff)
    elif source == b'storage':
        # reuse the delta exactly as stored in the source revlog
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return (
        (text, tr, linkrev, p1, p2),
        {'node': node, 'flags': flags, 'cachedelta': cachedelta},
    )
2999 3005
3000 3006
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Yield a temporary writable copy of revlog `orig`, truncated so that
    revisions >= `truncaterev` are removed and can be re-added.

    The copy lives in a throw-away directory removed on exit.  Inline
    revlogs are rejected: index and data share one file there, which the
    truncation below does not handle.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    k = 'upperboundcomp'
    if util.safehasattr(orig, k):
        # newer revlog classes take an `upperboundcomp` argument; forward it
        revlogkwargs[k] = getattr(orig, k)

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            # each index entry is orig._io.size bytes long
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(
            vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
        )
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
3051 3057
3052 3058
@command(
    b'perf::revlogchunks|perfrevlogchunks',
    revlogopts
    + formatteropts
    + [
        (b'e', b'engines', b'', b'compression engines to use'),
        (b's', b'startrev', 0, b'revision to start at'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = {e.strip() for e in engines.split(b',')}
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # default to every available engine that can actually compress
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # open a raw file handle on the revlog payload (index file holds
        # the data when the revlog is inline)
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        # one segment read per revision, fresh file handle each time
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        # one segment read per revision, reusing a single file handle
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        # a single segment read covering the whole revision range
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        # read + decompress each revision's chunk individually
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        # recompress the chunks captured by dochunkbatch() above
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append(
            (
                functools.partial(docompress, compressor),
                b'compress w/ %s' % engine,
            )
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
    fm.end()
3180 3186
3181 3187
@command(
    b'perf::revlogrevision|perfrevlogrevision',
    revlogopts
    + formatteropts
    + [(b'', b'cache', False, b'use caches instead of clearing')],
    b'-c|-m|FILE REV',
)
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # with -c/-m the positional FILE argument is actually the revision
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # slice each revision's compressed chunk out of the read segments
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        # older Mercurial kept the helper on the revlog module
        slicechunk = getattr(revlog, '_slicechunk', None)

    # precompute the inputs each phase needs so the timers measure only
    # that phase's work
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend(
        [
            (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
            (lambda: dodecompress(rawchunks), b'decompress'),
            (lambda: dopatch(text, bins), b'patch'),
            (lambda: dohash(text), b'hash'),
        ]
    )

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
3323 3329
3324 3330
@command(
    b'perf::revset|perfrevset',
    [
        (b'C', b'clear', False, b'clear volatile cache between each call.'),
        (b'', b'contexts', False, b'obtain changectx for each revision'),
    ]
    + formatteropts,
    b"REVSET",
)
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of building
    the volatile revisions set cache on the revset execution. Volatile
    cache holds filtered and obsolete related cache."""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def d():
        if clear:
            repo.invalidatevolatilesets()
        if contexts:
            # materialize a changectx per revision
            for ctx in repo.set(expr):
                pass
        else:
            # iterate raw revision numbers only
            for r in repo.revs(expr):
                pass

    timer(d)
    fm.end()
3356 3362
3357 3363
@command(
    b'perf::volatilesets|perfvolatilesets',
    [
        (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
    ]
    + formatteropts,
)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence.
    When `names` are given, only the matching obsolescence sets and
    repoview filters are benchmarked."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def getobs(name):
        # benchmark closure for one obsolescence set
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            obsolete.getrevs(repo, name)

        return d

    allobs = sorted(obsolete.cachefuncs)
    if names:
        allobs = [n for n in allobs if n in names]

    for name in allobs:
        timer(getobs(name), title=name)

    def getfiltered(name):
        # benchmark closure for one repoview filter
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            repoview.filterrevs(repo, name)

        return d

    allfilter = sorted(repoview.filtertable)
    if names:
        allfilter = [n for n in allfilter if n in names]

    for name in allfilter:
        timer(getfiltered(name), title=name)
    fm.end()
3405 3411
3406 3412
@command(
    b'perf::branchmap|perfbranchmap',
    [
        (b'f', b'full', False, b'Includes build time of subset'),
        (
            b'',
            b'clear-revbranch',
            False,
            b'purge the revbranch cache between computation',
        ),
    ]
    + formatteropts,
)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)

    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches

        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                view._branchcaches.clear()
            else:
                # only drop the cache entry for this filter level
                filtered.pop(filtername, None)
            view.branchmap()

        return d

    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        # pick a filter whose subset is not itself pending, so that
        # subsets are always benchmarked before their supersets
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable on-disk branchmap reads/writes so only the computation is timed
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=printname)
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
3496 3502
3497 3503
@command(
    b'perf::branchmapupdate|perfbranchmapupdate',
    [
        (b'', b'base', [], b'subset of revision to start from'),
        (b'', b'target', [], b'subset of revision to end with'),
        (b'', b'clear-caches', False, b'clear cache between each runs'),
    ]
    + formatteropts,
)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from for <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    # update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None]  # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        # register temporary repoview filters matching the base/target sets
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset where found
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            # start every run from a fresh copy of the base branchmap
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        # always deregister the temporary filters
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3606 3612
3607 3613
@command(
    b'perf::branchmapload|perfbranchmapload',
    [
        (b'f', b'filter', b'', b'Specify repoview filter'),
        # fix typo in user-facing help text: "brachmap" -> "branchmap"
        (b'', b'list', False, b'List branchmap filter caches'),
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ]
    + formatteropts,
)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap

    With --list, only print the cached branchmap files and their sizes.
    Otherwise time `branchcache.fromfile` (or `branchmap.read` on older
    versions) against the closest cached filter level.
    """
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                # cache files are named `branch2-<filter>`; bare `branch2`
                # is the unfiltered one
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(
                    b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
                )
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap()  # make sure we have a relevant, up to date branchmap

    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached
    while fromfile(repo) is None:
        # walk up the filter-subset chain until a cached branchmap is found
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(
                b'No branchmap cached for %s repo' % (filter or b'unfiltered')
            )
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)

    def setup():
        if clearrevlogs:
            clearchangelog(repo)

    def bench():
        fromfile(repo)

    timer(bench, setup=setup)
    fm.end()
3666 3672
3667 3673
@command(b'perf::loadmarkers|perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)
    # instantiating obsstore parses the on-disk markers; len() reports them
    timer(lambda: len(obsolete.obsstore(repo, svfs)))
    fm.end()
3677 3683
3678 3684
@command(
    b'perf::lrucachedict|perflrucachedict',
    formatteropts
    + [
        (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
        (b'', b'mincost', 0, b'smallest cost of items in cache'),
        (b'', b'maxcost', 100, b'maximum cost of items in cache'),
        (b'', b'size', 4, b'size of cache'),
        (b'', b'gets', 10000, b'number of key lookups'),
        (b'', b'sets', 10000, b'number of key sets'),
        (b'', b'mixed', 10000, b'number of mixed mode operations'),
        (
            b'',
            b'mixedgetfreq',
            50,
            b'frequency of get vs set ops in mixed mode',
        ),
    ],
    norepo=True,
)
def perflrucache(
    ui,
    mincost=0,
    maxcost=100,
    costlimit=0,
    size=4,
    gets=10000,
    sets=10000,
    mixed=10000,
    mixedgetfreq=50,
    **opts
):
    """benchmark util.lrucachedict operations

    Measures construction, gets, inserts/sets and a randomized mix of
    both.  When --costlimit is non-zero, the cost-aware variants are
    benchmarked instead.
    """
    opts = _byteskwargs(opts)

    def doinit():
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value  # silence pyflakes warning

    def dogetscost():
        # NOTE: closes over `costs`, which is populated below before this
        # function is ever called
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value  # silence pyflakes warning
            except KeyError:
                # entry may have been evicted by the cost limit
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0
        else:
            op = 1

        mixedops.append(
            (op, random.randint(0, size * 2), random.choice(costrange))
        )

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    if costlimit:
        benches.extend(
            [
                (dogetscost, b'gets w/ cost limit'),
                (doinsertscost, b'inserts w/ cost limit'),
                (domixedcost, b'mixed w/ cost limit'),
            ]
        )
    else:
        benches.extend(
            [
                (dogets, b'gets'),
                (doinserts, b'inserts'),
                (dosets, b'sets'),
                (domixed, b'mixed'),
            ]
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3833 3839
3834 3840
@command(
    b'perf::write|perfwrite',
    formatteropts
    + [
        (b'', b'write-method', b'write', b'ui write method'),
        (b'', b'nlines', 100, b'number of lines'),
        (b'', b'nitems', 100, b'number of items (per line)'),
        (b'', b'item', b'x', b'item that is written'),
        (b'', b'batch-line', None, b'pass whole line to write method at once'),
        (b'', b'flush-line', None, b'flush after each line'),
    ],
)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write (and others)"""
    opts = _byteskwargs(opts)

    # resolve the requested ui method (e.g. write, write_err) by name
    write = getattr(ui, _sysstr(opts[b'write_method']))
    nlines = int(opts[b'nlines'])
    nitems = int(opts[b'nitems'])
    item = opts[b'item']
    batch_line = opts.get(b'batch_line')
    flush_line = opts.get(b'flush_line')

    if batch_line:
        # precompute the full line so the loop measures only the write call
        line = item * nitems + b'\n'

    def benchmark():
        for i in pycompat.xrange(nlines):
            if batch_line:
                write(line)
            else:
                # one write call per item, plus the newline
                for i in pycompat.xrange(nitems):
                    write(item)
                write(b'\n')
            if flush_line:
                ui.flush()
        ui.flush()

    timer, fm = gettimer(ui, opts)
    timer(benchmark)
    fm.end()
3876 3882
3877 3883
def uisetup(ui):
    """Extension setup hook: patch cmdutil.openrevlog on old Mercurial.

    Wraps openrevlog so that passing --dir on versions without directory
    manifest support aborts with a clear message instead of misbehaving.
    """
    if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
        commands, b'debugrevlogopts'
    ):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(
                    b"This version doesn't support --dir option",
                    hint=b"use 3.5 or later",
                )
            return orig(repo, cmd, file_, opts)

        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
3896 3902
3897 3903
@command(
    b'perf::progress|perfprogress',
    formatteropts
    + [
        (b'', b'topic', b'topic', b'topic for progress messages'),
        (b'c', b'total', 1000000, b'total value we are progressing to'),
    ],
    norepo=True,
)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def doprogress():
        # drive one full progress bar from 0 to `total`, one step at a time
        with ui.makeprogress(topic, total=total) as progress:
            for i in _xrange(total):
                progress.increment()

    timer(doprogress)
    fm.end()
General Comments 0
You need to be logged in to leave comments. Login now