##// END OF EJS Templates
perf: document `perfdirstate`
marmoute -
r43391:ce315b1f default
parent child Browse files
Show More
@@ -1,3751 +1,3756
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance
3 3
4 4 Configurations
5 5 ==============
6 6
7 7 ``perf``
8 8 --------
9 9
10 10 ``all-timing``
11 11 When set, additional statistics will be reported for each benchmark: best,
12 12 worst, median average. If not set only the best timing is reported
13 13 (default: off).
14 14
15 15 ``presleep``
16 16 number of second to wait before any group of runs (default: 1)
17 17
18 18 ``pre-run``
19 19 number of run to perform before starting measurement.
20 20
21 21 ``profile-benchmark``
22 22 Enable profiling for the benchmarked section.
23 23 (The first iteration is benchmarked)
24 24
25 25 ``run-limits``
26 26 Control the number of runs each benchmark will perform. The option value
27 27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 28 conditions are considered in order with the following logic:
29 29
30 30 If benchmark has been running for <time> seconds, and we have performed
31 31 <numberofrun> iterations, stop the benchmark,
32 32
33 33 The default value is: `3.0-100, 10.0-3`
34 34
35 35 ``stub``
36 36 When set, benchmarks will only be run once, useful for testing
37 37 (default: off)
38 38 '''
39 39
40 40 # "historical portability" policy of perf.py:
41 41 #
42 42 # We have to do:
43 43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 46 # - make historical perf command work correctly with as wide Mercurial
47 47 # version as possible
48 48 #
49 49 # We have to do, if possible with reasonable cost:
50 50 # - make recent perf command for historical feature work correctly
51 51 # with early Mercurial
52 52 #
53 53 # We don't have to do:
54 54 # - make perf command for recent feature work correctly with early
55 55 # Mercurial
56 56
57 57 from __future__ import absolute_import
58 58 import contextlib
59 59 import functools
60 60 import gc
61 61 import os
62 62 import random
63 63 import shutil
64 64 import struct
65 65 import sys
66 66 import tempfile
67 67 import threading
68 68 import time
69 69 from mercurial import (
70 70 changegroup,
71 71 cmdutil,
72 72 commands,
73 73 copies,
74 74 error,
75 75 extensions,
76 76 hg,
77 77 mdiff,
78 78 merge,
79 79 revlog,
80 80 util,
81 81 )
82 82
83 83 # for "historical portability":
84 84 # try to import modules separately (in dict order), and ignore
85 85 # failure, because these aren't available with early Mercurial
86 86 try:
87 87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 88 except ImportError:
89 89 pass
90 90 try:
91 91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 92 except ImportError:
93 93 pass
94 94 try:
95 95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96 96
97 97 dir(registrar) # forcibly load it
98 98 except ImportError:
99 99 registrar = None
100 100 try:
101 101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 102 except ImportError:
103 103 pass
104 104 try:
105 105 from mercurial.utils import repoviewutil # since 5.0
106 106 except ImportError:
107 107 repoviewutil = None
108 108 try:
109 109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 110 except ImportError:
111 111 pass
112 112 try:
113 113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 114 except ImportError:
115 115 pass
116 116
117 117 try:
118 118 from mercurial import profiling
119 119 except ImportError:
120 120 profiling = None
121 121
122 122
def identity(a):
    """Return *a* unchanged (py2 fallback for pycompat helpers)."""
    return a
125 125
126 126
127 127 try:
128 128 from mercurial import pycompat
129 129
130 130 getargspec = pycompat.getargspec # added to module after 4.5
131 131 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
132 132 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
133 133 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
134 134 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
135 135 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
136 136 if pycompat.ispy3:
137 137 _maxint = sys.maxsize # per py3 docs for replacing maxint
138 138 else:
139 139 _maxint = sys.maxint
140 140 except (NameError, ImportError, AttributeError):
141 141 import inspect
142 142
143 143 getargspec = inspect.getargspec
144 144 _byteskwargs = identity
145 145 _bytestr = str
146 146 fsencode = identity # no py3 support
147 147 _maxint = sys.maxint # no py3 support
148 148 _sysstr = lambda x: x # no py3 support
149 149 _xrange = xrange
150 150
151 151 try:
152 152 # 4.7+
153 153 queue = pycompat.queue.Queue
154 154 except (NameError, AttributeError, ImportError):
155 155 # <4.7.
156 156 try:
157 157 queue = pycompat.queue
158 158 except (NameError, AttributeError, ImportError):
159 159 import Queue as queue
160 160
161 161 try:
162 162 from mercurial import logcmdutil
163 163
164 164 makelogtemplater = logcmdutil.maketemplater
165 165 except (AttributeError, ImportError):
166 166 try:
167 167 makelogtemplater = cmdutil.makelogtemplater
168 168 except (AttributeError, ImportError):
169 169 makelogtemplater = None
170 170
171 171 # for "historical portability":
172 172 # define util.safehasattr forcibly, because util.safehasattr has been
173 173 # available since 1.9.3 (or 94b200a11cf7)
174 174 _undefined = object()
175 175
176 176
def safehasattr(thing, attr):
    """hasattr() replacement taking a bytes attribute name.

    `attr` is converted with _sysstr before lookup; a private sentinel
    distinguishes "attribute missing" from any real value (including None).
    """
    sentinel = _undefined
    return getattr(thing, _sysstr(attr), sentinel) is not sentinel
179 179
180 180
181 181 setattr(util, 'safehasattr', safehasattr)
182 182
183 183 # for "historical portability":
184 184 # define util.timer forcibly, because util.timer has been available
185 185 # since ae5d60bb70c9
186 186 if safehasattr(time, 'perf_counter'):
187 187 util.timer = time.perf_counter
188 188 elif os.name == b'nt':
189 189 util.timer = time.clock
190 190 else:
191 191 util.timer = time.time
192 192
193 193 # for "historical portability":
194 194 # use locally defined empty option list, if formatteropts isn't
195 195 # available, because commands.formatteropts has been available since
196 196 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
197 197 # available since 2.2 (or ae5f92e154d3)
198 198 formatteropts = getattr(
199 199 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
200 200 )
201 201
202 202 # for "historical portability":
203 203 # use locally defined option list, if debugrevlogopts isn't available,
204 204 # because commands.debugrevlogopts has been available since 3.7 (or
205 205 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
206 206 # since 1.9 (or a79fea6b3e77).
207 207 revlogopts = getattr(
208 208 cmdutil,
209 209 "debugrevlogopts",
210 210 getattr(
211 211 commands,
212 212 "debugrevlogopts",
213 213 [
214 214 (b'c', b'changelog', False, b'open changelog'),
215 215 (b'm', b'manifest', False, b'open manifest'),
216 216 (b'', b'dir', False, b'open directory manifest'),
217 217 ],
218 218 ),
219 219 )
220 220
221 221 cmdtable = {}
222 222
223 223 # for "historical portability":
224 224 # define parsealiases locally, because cmdutil.parsealiases has been
225 225 # available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a command spec like b"name|alias1|alias2" into its name list.

    Defined locally because cmdutil.parsealiases has only been available
    since 1.5 (or 6252852b4332).
    """
    return cmd.split(b"|")
228 228
229 229
# for "historical portability": pick whichever command-registration API
# this Mercurial version provides, newest first.
if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command

        def command(name, options=(), synopsis=None, norepo=False):
            # emulate norepo by registering the aliases in commands.norepo
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)


else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func

        return decorator
261 261
262 262
# Register this extension's config items when the running Mercurial
# supports it; very old versions (no registrar/configitems) simply skip
# registration via the ImportError/AttributeError handler below.
try:
    import mercurial.registrar
    import mercurial.configitems

    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
except (ImportError, AttributeError):
    pass
except TypeError:
    # compatibility fix for a11fd395e83f
    # hg version: 5.2
    # (configitem() there rejects the `experimental` keyword, raising
    # TypeError above; re-register everything without it)
    configitem(
        b'perf', b'presleep', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'stub', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'parentscount', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'all-timing', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'run-limits', default=mercurial.configitems.dynamicdefault,
    )
335 335
336 336
def getlen(ui):
    """Return len(), or a constant-1 stub when perf.stub is set.

    The stub keeps benchmarks that report collection sizes cheap and
    deterministic while testing.
    """
    stub = ui.configbool(b"perf", b"stub", False)
    return (lambda seq: 1) if stub else len
341 341
342 342
class noop(object):
    """Context manager that does nothing on enter or exit.

    Exceptions raised inside the with-block propagate unchanged because
    __exit__ returns a falsy value.
    """

    def __enter__(self):
        return None

    def __exit__(self, *args):
        return None
351 351
352 352
353 353 NOOPCTX = noop()
354 354
355 355
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """

            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short

            def __nonzero__(self):
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    # each entry is "<seconds>-<runcount>"; malformed entries are warned
    # about and skipped rather than aborting the benchmark
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
            continue
        try:
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    # experimental config: perf.profile-benchmark
    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    # experimental config: perf.pre-run (unmeasured warm-up iterations)
    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
    )
    return t, fm
479 479
480 480
def stub_timer(fm, func, setup=None, title=None):
    """Run *func* exactly once, without timing it.

    Used in place of _timer when perf.stub is set; `fm` and `title` are
    accepted only for signature compatibility and are ignored.
    """
    if setup:
        setup()
    func()
485 485
486 486
@contextlib.contextmanager
def timeone():
    """Time the with-block; yield a list that receives the result.

    After the block exits, the yielded list holds one tuple
    (wall-clock delta, user-cpu delta, system-cpu delta).
    """
    r = []
    ostart = os.times()
    cstart = util.timer()
    yield r
    cstop = util.timer()
    ostop = os.times()
    a, b = ostart, ostop
    # os.times() tuple: index 0 is user time, index 1 is system time
    r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
497 497
498 498
499 499 # list of stop condition (elapsed time, minimal run count)
500 500 DEFAULTLIMITS = (
501 501 (3.0, 100),
502 502 (10.0, 3),
503 503 )
504 504
505 505
def _timer(
    fm,
    func,
    setup=None,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
):
    """Repeatedly run and time `func`, reporting through formatter `fm`.

    `setup`, if given, runs before every invocation (including the
    `prerun` unmeasured warm-up runs).  `limits` is a sequence of
    (elapsed-seconds, minimum-run-count) stop conditions; iteration
    stops at the first condition where both thresholds are met.  When a
    `profiler` is supplied, only the first measured iteration runs under
    it.
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    # warm-up iterations: executed but never measured
    for i in range(prerun):
        if setup is not None:
            setup()
        func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with profiler:
            with timeone() as item:
                r = func()
        # profile only the first measured iteration
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    # r is the last run's return value, shown as the benchmark "result"
    formatone(fm, results, title=title, result=r, displayall=displayall)
545 545
546 546
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's timing summary to formatter `fm`.

    `timings` is a list of (wall, user, sys) tuples.  The best run is
    always reported; with `displayall`, max/avg/median rows are added.
    """

    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def display(role, entry):
        # non-"best" rows get a "<role>." prefix on their field names
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    # tuples sort by wall time first, so best/median/max are positional
    timings.sort()
    min_val = timings[0]
    display(b'best', min_val)
    if displayall:
        max_val = timings[-1]
        display(b'max', max_val)
        avg = tuple([sum(x) / count for x in zip(*timings)])
        display(b'avg', avg)
        median = timings[len(timings) // 2]
        display(b'median', median)
580 580
581 581
582 582 # utilities for historical portability
583 583
584 584
def getint(ui, section, name, default):
    """Read an integer config value, tolerating very old Mercurial.

    ui.configint has only been available since 1.9 (or fa2b596db182),
    so parse the raw value ourselves.  Raises error.ConfigError when the
    configured value is not an integer.
    """
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError(
            b"%s.%s is not an integer ('%s')" % (section, name, raw)
        )
597 597
598 598
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort(
            (
                b"missing attribute %s of %s might break assumption"
                b" of performance measurement"
            )
            % (name, obj)
        )

    # capture the current value now so restore() can put it back later
    origvalue = getattr(obj, _sysstr(name))

    class attrutil(object):
        # small handle closing over obj/name/origvalue
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
635 635
636 636
637 637 # utilities to examine each internal API changes
638 638
639 639
def getbranchmapsubsettable():
    """Locate the branch-cache `subsettable` mapping across hg versions."""
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    # - repoviewutil since 5.0
    for mod in (branchmap, repoview, repoviewutil):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )
658 658
659 659
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store
    """
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf);
    # older versions expose the same thing as repo.sopener
    svfs = getattr(repo, 'svfs', None)
    return svfs if svfs else getattr(repo, 'sopener')
670 670
671 671
def getvfs(repo):
    """Return appropriate object to access files under .hg
    """
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf);
    # older versions expose the same thing as repo.opener
    vfs = getattr(repo, 'vfs', None)
    return vfs if vfs else getattr(repo, 'opener')
682 682
683 683
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if b'_tagscache' in vars(repo):
                del repo.__dict__[b'_tagscache']

        return clearcache

    # older APIs: a plain attribute that can simply be reset to None
    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")
713 713
714 714
715 715 # utilities to clear cache
716 716
717 717
def clearfilecache(obj, attrname):
    """Drop a @filecache'd property so its next access recomputes it.

    Works on the unfiltered repo when `obj` is a filtered view (caches
    live on the unfiltered object).
    """
    unfi = getattr(obj, 'unfiltered', None)
    if unfi is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
725 725
726 726
def clearchangelog(repo):
    """Drop cached changelog state so the next access reloads it."""
    # filtered repos keep their own changelog cache/key; reset those
    # first, then clear the filecache entry on the unfiltered repo
    if repo is not repo.unfiltered():
        object.__setattr__(repo, r'_clcachekey', None)
        object.__setattr__(repo, r'_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')
732 732
733 733
734 734 # perf commands
735 735
736 736
@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    """benchmark walking the dirstate for files matching PATTERNS."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    # len(list(...)) forces full consumption of the walk generator
    timer(
        lambda: len(
            list(
                repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
            )
        )
    )
    fm.end()
750 750
751 751
@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    """benchmark annotating file F at the working directory's parent."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    # len() forces full evaluation of the annotate result
    timer(lambda: len(fc.annotate(True)))
    fm.end()
759 759
760 760
@command(
    b'perfstatus',
    [(b'u', b'unknown', False, b'ask status to look for unknown files')]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call

    The repository data are preserved between each call.

    By default, only the status of the tracked file are requested. If
    `--unknown` is passed, the "unknown" files are also tracked.
    """
    opts = _byteskwargs(opts)
    # m = match.always(repo.root, repo.getcwd())
    # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    # False))))
    timer, fm = gettimer(ui, opts)
    # summing category lengths forces full evaluation of the status result
    timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
781 781
782 782
@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    """benchmark a dry-run addremove over the whole working copy."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    try:
        oldquiet = repo.ui.quiet
        # silence per-file output during the timed runs
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        # dry-run so repeated runs never mutate the repository
        opts[b'dry_run'] = True
        if b'uipathfn' in getargspec(scmutil.addremove).args:
            # newer addremove signatures take a uipathfn argument
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
800 800
801 801
def clearcaches(cl):
    """Clear a changelog/revlog's lookup caches, across hg versions."""
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # pre-clearcaches() era: reset the node cache by hand
        from mercurial.node import nullid, nullrev

        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
811 811
812 812
@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def s():
        # drop changelog caches so each run recomputes from scratch
        clearcaches(cl)

    def d():
        len(cl.headrevs())

    timer(d, setup=s)
    fm.end()
828 828
829 829
@command(
    b'perftags',
    formatteropts
    + [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),],
)
def perftags(ui, repo, **opts):
    """benchmark computing the repository's tags."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']

    def s():
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        # always drop the tags cache itself between runs
        repocleartagscache()

    def t():
        return len(repo.tags())

    timer(t, setup=s)
    fm.end()
852 852
853 853
@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    """benchmark iterating over all ancestors of the changelog heads."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()

    def d():
        # drain the ancestors iterator; traversal is what we measure
        for a in repo.changelog.ancestors(heads):
            pass

    timer(d)
    fm.end()
866 866
867 867
@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    """benchmark membership tests of REVSET revisions in head ancestors."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()

    def d():
        s = repo.changelog.ancestors(heads)
        # membership test drives the lazy ancestor computation
        for rev in revs:
            rev in s

    timer(d)
    fm.end()
882 882
883 883
@command(b'perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path
    """
    # convert kwargs keys to bytes, like every other perf command does
    # before handing opts to gettimer/the formatter (on Python 3, **opts
    # keys arrive as str otherwise)
    opts = _byteskwargs(opts)
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)
    path = ui.expandpath(path)

    def s():
        # create a fresh peer for every run; discovery state is per-peer
        repos[1] = hg.peer(ui, opts, path)

    def d():
        setdiscovery.findcommonheads(ui, *repos)

    timer(d, setup=s)
    fm.end()
900 900
901 901
@command(
    b'perfbookmarks',
    formatteropts
    + [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']

    def s():
        if clearrevlogs:
            clearchangelog(repo)
        # drop the cached bookmark store so each run re-parses from disk
        clearfilecache(repo, b'_bookmarks')

    def d():
        # attribute access alone triggers the (file-cached) parse
        repo._bookmarks

    timer(d, setup=s)
    fm.end()
924 924
925 925
@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    # benchmark factories: each returns a runner that re-opens the bundle
    # so every timed run starts from a fresh file handle

    def makebench(fn):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # raw file reads, without any bundle decoding, as a baseline
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # sniff the bundle type once to decide which decoder benchmarks apply
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend(
                [
                    (makebench(deltaiter), b'cg1 deltaiter()'),
                    (makebench(iterchunks), b'cg1 getchunks()'),
                    (makereadnbytes(8192), b'cg1 read(8k)'),
                    (makereadnbytes(16384), b'cg1 read(16k)'),
                    (makereadnbytes(32768), b'cg1 read(32k)'),
                    (makereadnbytes(131072), b'cg1 read(128k)'),
                ]
            )
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend(
                [
                    (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                    (makebench(iterparts), b'bundle2 iterparts()'),
                    (
                        makebench(iterpartsseekable),
                        b'bundle2 iterparts() seekable',
                    ),
                    (makebench(seek), b'bundle2 part seek()'),
                    (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                    (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                    (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                    (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
                ]
            )
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1050 1050
1051 1051
@command(
    b'perfchangegroupchangelog',
    formatteropts
    + [
        (b'', b'cgversion', b'02', b'changegroup version'),
        (b'r', b'rev', b'', b'revisions to add to changegroup'),
    ],
)
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def produce():
        # generate and fully consume the changelog chunks, discarding them
        state, chunks = bundler._generatechangelog(cl, nodes)
        for _chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(produce)

    fm.end()
1087 1087
1088 1088
@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark the computation of the dirstate's directory map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate so that its loading is not part of the timing
    b'a' in dirstate

    def run():
        dirstate.hasdir(b'a')
        # drop the cached directory map so each run recomputes it
        del dirstate._map._dirs

    timer(run)
    fm.end()
1102 1102
1103 1103
@command(b'perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    """benchmark the time necessary to load a dirstate from scratch

    The dirstate is loaded to the point where a "contains" request can be
    answered.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # warm the dirstate once so the first timed run is comparable
    b"a" in repo.dirstate

    def d():
        # drop the in-memory state so each run reloads from disk
        repo.dirstate.invalidate()
        b"a" in repo.dirstate

    timer(d)
    fm.end()
1116 1121
1117 1122
@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark `hasdir` after destroying the dirstate's directory cache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # make sure the dirstate itself is already loaded
    b"a" in repo.dirstate

    def run():
        repo.dirstate.hasdir(b"a")
        # invalidate the `_dirs` cache so the next call rebuilds it
        del repo.dirstate._map._dirs

    timer(run)
    fm.end()
1130 1135
1131 1136
@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark the computation of the dirstate's file fold map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate before timing
    b'a' in dirstate

    def run():
        dirstate._map.filefoldmap.get(b'a')
        # discard the cached fold map so it is rebuilt on the next run
        del dirstate._map.filefoldmap

    timer(run)
    fm.end()
1145 1150
1146 1151
@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark the computation of the dirstate's directory fold map"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate before timing
    b'a' in dirstate

    def run():
        dirstate._map.dirfoldmap.get(b'a')
        # discard both caches so each run recomputes them from scratch
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs

    timer(run)
    fm.end()
1161 1166
1162 1167
@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark writing the dirstate out to disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # load the dirstate before measuring the write
    b"a" in ds

    def run():
        # mark the dirstate modified so write() actually serializes it
        ds._dirty = True
        ds.write(repo.currenttransaction())

    timer(run)
    fm.end()
1176 1181
1177 1182
def _getmergerevs(repo, opts):
    """parse command arguments to return the revisions involved in a merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        fromrev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[fromrev]
    else:
        # no explicit base: use the common ancestor of the two sides
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
1199 1204
1200 1205
@command(
    b'perfmergecalculate',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecalculate(ui, repo, **opts):
    """benchmark `merge.calculateupdates` between two revisions"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def run():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(
            repo,
            wctx,
            rctx,
            [ancestor],
            branchmerge=False,
            force=False,
            acceptremote=True,
            followcopies=True,
        )

    timer(run)
    fm.end()
1232 1237
1233 1238
@command(
    b'perfmergecopies',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def run():
        copies.mergecopies(repo, wctx, rctx, ancestor)

    timer(run)
    fm.end()
1256 1261
1257 1262
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)

    def run():
        copies.pathcopies(ctx1, ctx2)

    timer(run)
    fm.end()
1271 1276
1272 1277
@command(
    b'perfphases',
    [(b'', b'full', False, b'include file reading time too'),],
    b"",
)
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')

    def run():
        phases = _phases
        if full:
            # drop the cache entirely so reading from disk is included
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)

    timer(run)
    fm.end()
1295 1300
1296 1301
@command(b'perfphasesremote', [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server

    Network access (discovery and phase listkeys) happens once, before
    timing; only the local summary computation is measured.
    """
    from mercurial.node import bin
    from mercurial import (
        exchange,
        hg,
        phases,
    )

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            b'default repository not configured!',
            hint=b"see 'hg help config.paths'",
        )
    dest = path.pushloc or path.loc
    ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(
            b'listkeys', {b'namespace': b'phases'}
        ).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.statusnoi18n(b'publishing: yes\n')
    else:
        ui.statusnoi18n(b'publishing: no\n')

    # count remote phase roots that exist locally and are non-public
    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
    ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)

    def d():
        # timed part: summarizing the (already fetched) remote phase data
        phases.remotephasessummary(repo, remotesubset, remotephases)

    timer(d)
    fm.end()
1353 1358
1354 1359
@command(
    b'perfmanifest',
    [
        (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
        (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ]
    + formatteropts,
    b'REV|NODE',
)
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # `rev` is a changeset revision; resolve its manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # full hexadecimal node of the manifest itself
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                # older Mercurial versions expose the manifest revlog
                # differently; support both layouts
                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(
                    b'manifest revision must be integer or full node'
                )

    def d():
        # clear caches each run so the read always comes from storage
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()

    timer(d)
    fm.end()
1398 1403
1399 1404
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading a single changeset from the changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    node = scmutil.revsingle(repo, rev).node()

    def run():
        repo.changelog.read(node)

    timer(run)
    fm.end()
1412 1417
1413 1418
@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def prepare():
        # throw away both the dirstate content and the cached ignore matcher
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def load():
        dirstate._ignore

    timer(load, setup=prepare, title=b"load")
    fm.end()
1430 1435
1431 1436
@command(
    b'perfindex',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'no-lookup', None, b'do not revision lookup post creation'),
    ]
    + formatteropts,
)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matter. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matter.

    Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, check out the `perfnodemap` command."""
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        if opts['rev']:
            raise error.Abort('--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        # default: a single lookup of the tip node
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func

    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)

    def d():
        # recreating the changelog object rebuilds the index from scratch
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)

    timer(d, setup=setup)
    fm.end()
1493 1498
1494 1499
@command(
    b'perfnodemap',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ]
    + formatteropts,
)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can vary. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focuses on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    clearcaches = opts['clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort('use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]

    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        nodeget[0] = makecl(unfi).nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:

        def setup():
            # rebuild a cold nodemap before every timed run
            setnodeget()

    else:
        setnodeget()
        d()  # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
1561 1566
1562 1567
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark the startup time of a Mercurial process"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def run():
        # spawn `hg version` with a blank configuration, discarding output
        if os.name != r'nt':
            os.system(
                b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
            )
        else:
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])

    timer(run)
    fm.end()
1579 1584
1580 1585
@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nodelist = [repo.changelog.node(i) for i in _xrange(count)]

    def run():
        for node in nodelist:
            repo.changelog.parents(node)

    timer(run)
    fm.end()
1606 1611
1607 1612
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark computing the files touched by a changeset"""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)

    def run():
        len(repo[x].files())

    timer(run)
    fm.end()
1619 1624
1620 1625
@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the raw files list straight from the changelog"""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def run():
        # field 3 of the parsed changelog entry is the files list
        len(cl.read(x)[3])

    timer(run)
    fm.end()
1633 1638
1634 1639
@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision identifier to a node"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: len(repo.lookup(rev)))
    fm.end()
1641 1646
1642 1647
@command(
    b'perflinelogedits',
    [
        (b'n', b'edits', 10000, b'number of edits'),
        (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
    ],
    norepo=True,
)
def perflinelogedits(ui, **opts):
    """benchmark applying a long series of random edits to a linelog"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # seed the RNG so the generated edit sequence is reproducible
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        # pick a random existing range [a1, a2) to replace ...
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        # ... with a random replacement range [b1, b2)
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1680 1685
1681 1686
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revision specs"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revrange = scmutil.revrange
    timer(lambda: len(revrange(repo, specs)))
    fm.end()
1689 1694
1690 1695
@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark looking a node up in a freshly loaded revlog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog

    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    node = scmutil.revsingle(repo, rev).node()
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")

    def run():
        cl.rev(node)
        # wipe the revlog caches so every run starts cold
        clearcaches(cl)

    timer(run)
    fm.end()
1707 1712
1708 1713
@command(
    b'perflog',
    [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
)
def perflog(ui, repo, rev=None, **opts):
    """benchmark running `hg log`, optionally following renames"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    # capture command output so terminal printing does not pollute the timing
    ui.pushbuffer()
    timer(
        lambda: commands.log(
            ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
        )
    )
    ui.popbuffer()
    fm.end()
1726 1731
1727 1732
@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def moonwalk():
        # iterate from tip down to revision 0
        for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            ctx = repo[i]
            ctx.branch()  # read changelog data (in addition to the index)

    timer(moonwalk)
    fm.end()
1744 1749
1745 1750
@command(
    b'perftemplating',
    [(b'r', b'rev', [], b'revisions to run the template on'),] + formatteropts,
)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort(
            b"perftemplating not available with this Mercurial",
            hint=b"use 4.3 or later",
        )

    opts = _byteskwargs(opts)

    # render into a throwaway ui so output costs do not skew the timing
    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (
        b'{date|shortdate} [{rev}:{node|short}]'
        b' {author|person}: {desc|firstline}\n'
    )
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)

    def render():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(render)
    fm.end()
1785 1790
1786 1791
def _displaystats(ui, opts, entries, data):
    """render percentile statistics gathered by the perfhelper-* commands

    `entries` is a list of `(key, title)` pairs and `data` maps each key to
    the list of collected `(value, ...)` tuples to summarize.
    """
    # use a second formatter because the data are quite different, not sure
    # how it flies with the templater.
    fm = ui.formatter(b'perf-stats', opts)
    for key, title in entries:
        values = data[key]
        if not values:
            # nothing collected for this entry; skip instead of crashing on
            # the empty-list indexing below
            continue
        # percentile indices must be derived from the number of collected
        # values, not from the number of keys in `data`
        nbvalues = len(values)
        values.sort()
        stats = {
            'key': key,
            'title': title,
            'nbitems': len(values),
            'min': values[0][0],
            '10%': values[(nbvalues * 10) // 100][0],
            '25%': values[(nbvalues * 25) // 100][0],
            '50%': values[(nbvalues * 50) // 100][0],
            '75%': values[(nbvalues * 75) // 100][0],
            '80%': values[(nbvalues * 80) // 100][0],
            '85%': values[(nbvalues * 85) // 100][0],
            '90%': values[(nbvalues * 90) // 100][0],
            '95%': values[(nbvalues * 95) // 100][0],
            '99%': values[(nbvalues * 99) // 100][0],
            'max': values[-1][0],
        }
        fm.startitem()
        fm.data(**stats)
        # make node pretty for the human output
        fm.plain('### %s (%d items)\n' % (title, len(values)))
        lines = [
            'min',
            '10%',
            '25%',
            '50%',
            '75%',
            '80%',
            '85%',
            '90%',
            '95%',
            '99%',
            'max',
        ]
        for l in lines:
            fm.plain('%s: %s\n' % (l, stats[l]))
    fm.end()
1832 1837
1833 1838
@command(
    b'perfhelper-mergecopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelpermergecopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perfmergecopies`

    This command find (base, p1, p2) triplet relevant for copytracing
    benchmarking in the context of a merge. It reports values for some of the
    parameters that impact merge copy tracing time during merge.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision triplets are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    output_template = [
        ("base", "%(base)12s"),
        ("p1", "%(p1.node)12s"),
        ("p2", "%(p2.node)12s"),
        ("p1.nb-revs", "%(p1.nbrevs)12d"),
        ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
        ("p1.renames", "%(p1.renamedfiles)12d"),
        ("p1.time", "%(p1.time)12.3f"),
        ("p2.nb-revs", "%(p2.nbrevs)12d"),
        ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
        ("p2.renames", "%(p2.renamedfiles)12d"),
        ("p2.time", "%(p2.time)12.3f"),
        ("renames", "%(nbrenamedfiles)12d"),
        ("total.time", "%(time)12.3f"),
    ]
    if not dotiming:
        # without --timing the rename/time columns carry no data
        output_template = [
            i
            for i in output_template
            if not ('time' in i[0] or 'renames' in i[0])
        ]
    header_names = [h for (h, v) in output_template]
    output = ' '.join([v for (h, v) in output_template]) + '\n'
    header = ' '.join(['%12s'] * len(header_names)) + '\n'
    fm.plain(header % tuple(header_names))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['parentnbrenames'] = []
            alldata['totalnbrenames'] = []
            alldata['parenttime'] = []
            alldata['totaltime'] = []

    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1()
        p2 = ctx.p2()
        bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
        for b in bases:
            b = repo[b]
            p1missing = copies._computeforwardmissing(b, p1)
            p2missing = copies._computeforwardmissing(b, p2)
            data = {
                b'base': b.hex(),
                b'p1.node': p1.hex(),
                b'p1.nbrevs': len(repo.revs('%d::%d', b.rev(), p1.rev())),
                b'p1.nbmissingfiles': len(p1missing),
                b'p2.node': p2.hex(),
                b'p2.nbrevs': len(repo.revs('%d::%d', b.rev(), p2.rev())),
                b'p2.nbmissingfiles': len(p2missing),
            }
            if dostats:
                if p1missing:
                    alldata['nbrevs'].append(
                        (data['p1.nbrevs'], b.hex(), p1.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p1.nbmissingfiles'], b.hex(), p1.hex())
                    )
                if p2missing:
                    alldata['nbrevs'].append(
                        (data['p2.nbrevs'], b.hex(), p2.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p2.nbmissingfiles'], b.hex(), p2.hex())
                    )
            if dotiming:
                begin = util.timer()
                mergedata = copies.mergecopies(repo, p1, p2, b)
                end = util.timer()
                # not very stable timing since we did only one run
                data['time'] = end - begin
                # mergedata contains five dicts: "copy", "movewithdir",
                # "diverge", "renamedelete" and "dirmove".
                # The first 4 are about renamed file so lets count that.
                renames = len(mergedata[0])
                renames += len(mergedata[1])
                renames += len(mergedata[2])
                renames += len(mergedata[3])
                data['nbrenamedfiles'] = renames
                begin = util.timer()
                p1renames = copies.pathcopies(b, p1)
                end = util.timer()
                data['p1.time'] = end - begin
                begin = util.timer()
                p2renames = copies.pathcopies(b, p2)
                # read the clock *before* computing the elapsed time;
                # previously `end` was taken after the subtraction, so
                # p2.time reused the stale p1 timestamp
                end = util.timer()
                data['p2.time'] = end - begin
                data['p1.renamedfiles'] = len(p1renames)
                data['p2.renamedfiles'] = len(p2renames)

                if dostats:
                    if p1missing:
                        alldata['parentnbrenames'].append(
                            (data['p1.renamedfiles'], b.hex(), p1.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p1.time'], b.hex(), p1.hex())
                        )
                    if p2missing:
                        alldata['parentnbrenames'].append(
                            (data['p2.renamedfiles'], b.hex(), p2.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p2.time'], b.hex(), p2.hex())
                        )
                    if p1missing or p2missing:
                        alldata['totalnbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                b.hex(),
                                p1.hex(),
                                p2.hex(),
                            )
                        )
                        alldata['totaltime'].append(
                            (data['time'], b.hex(), p1.hex(), p2.hex())
                        )
            fm.startitem()
            fm.data(**data)
            # make node pretty for the human output
            out = data.copy()
            out['base'] = fm.hexfunc(b.node())
            out['p1.node'] = fm.hexfunc(p1.node())
            out['p2.node'] = fm.hexfunc(p2.node())
            fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(
                ('parentnbrenames', 'rename from one parent to base')
            )
            entries.append(('totalnbrenames', 'total number of renames'))
            entries.append(('parenttime', 'time for one parent'))
            entries.append(('totaltime', 'time for both parents'))
        _displaystats(ui, opts, entries, alldata)
2015 2020
2016 2021
@command(
    b'perfhelper-pathcopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    # NOTE(review): the mutable default ``revs=[]`` is harmless here because
    # it is only reassigned, never mutated — but ``revs=None`` would be safer.
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # Two output layouts: the --timing one adds rename-count and time columns.
    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d "
            "%(nbrenamedfiles)12d %(time)18.5f\n"
        )
        header_names = (
            "source",
            "destination",
            "nb-revs",
            "nb-files",
            "nb-renames",
            "time",
        )
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d\n"
        )
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        # accumulators for the post-run statistics display
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['nbrenames'] = []
            alldata['time'] = []

    # only merge commits are interesting for copy-tracing benchmarks
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        # measure each (common-ancestor base, parent) pair independently
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('%d::%d', b, p)),
                    b'nbmissingfiles': len(missing),
                }
                if dostats:
                    alldata['nbrevs'].append(
                        (data['nbrevs'], base.hex(), parent.hex(),)
                    )
                    alldata['nbmissingfiles'].append(
                        (data['nbmissingfiles'], base.hex(), parent.hex(),)
                    )
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                    if dostats:
                        alldata['time'].append(
                            (data['time'], base.hex(), parent.hex(),)
                        )
                        alldata['nbrenames'].append(
                            (data['nbrenamedfiles'], base.hex(), parent.hex(),)
                        )
                fm.startitem()
                fm.data(**data)
                out = data.copy()
                # make nodes pretty for the human output
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        fm = ui.formatter(b'perf', opts)
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('nbrenames', 'renamed files'))
            entries.append(('time', 'time'))
        _displaystats(ui, opts, entries, alldata)
2142 2147
2143 2148
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    # Benchmark building a case-collision auditor over the dirstate.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def buildauditor():
        # abort=False, matching the original call
        scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(buildauditor)
    fm.end()
2150 2155
2151 2156
@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    # Benchmark loading the fncache from disk.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    timer(lambda: store.fncache._load())
    fm.end()
2163 2168
2164 2169
@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    # Benchmark writing the fncache inside a (backed-up) transaction.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    lock = repo.lock()
    store.fncache._load()
    tr = repo.transaction(b'perffncachewrite')
    tr.addbackup(b'fncache')

    def writeonce():
        # force the dirty flag so write() does not short-circuit
        store.fncache._dirty = True
        store.fncache.write(tr)

    timer(writeonce)
    tr.close()
    lock.release()
    fm.end()
2183 2188
2184 2189
@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    # Benchmark path-encoding every entry currently in the fncache.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    store.fncache._load()

    def encodeall():
        for path in store.fncache.entries:
            store.encode(path)

    timer(encodeall)
    fm.end()
2198 2203
2199 2204
def _bdiffworker(q, blocks, xdiff, ready, done):
    # Worker loop for the threaded mode of ``perfbdiff``: pull text pairs
    # from ``q`` and diff them until a ``None`` sentinel arrives, then park
    # on the ``ready`` condition until the main thread wakes all workers for
    # the next timing pass (or sets ``done`` to request shutdown).
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            # use the same diff flavor the single-threaded path would use
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done()  # for the None one
        with ready:
            ready.wait()
2215 2220
2216 2221
def _manifestrevision(repo, mnode):
    """Return the raw manifest text for manifest node ``mnode``."""
    manifestlog = repo.manifestlog

    # modern Mercurial exposes getstorage(); older versions keep a _revlog
    if not util.safehasattr(manifestlog, b'getstorage'):
        storage = manifestlog._revlog
    else:
        storage = manifestlog.getstorage(b'')

    return storage.revision(mnode)
2226 2231
2227 2232
@command(
    b'perfbdiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
        (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
        (b'', b'blocks', False, b'test computing diffs into blocks'),
        (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],
    b'-c|-m|FILE REV',
)
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # -c/-m invocation: the first positional argument is actually the rev
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    # collect the (old, new) text pairs up front so the timed function only
    # measures diffing, not revision retrieval
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:

        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)

    else:
        # prime the queue with one sentinel per worker so the workers park on
        # ``ready`` (see _bdiffworker) before the first timed pass starts
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(
                target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
            ).start()
        q.join()

        def d():
            # feed all pairs plus one sentinel per worker, wake the workers,
            # then wait for the queue to drain
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # shut the workers down: set done, unblock their q.get(), and wake
        # any worker still waiting on the condition
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
2342 2347
2343 2348
@command(
    b'perfunidiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ],
    b'-c|-m|FILE REV',
)
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # -c/-m invocation: the first positional argument is actually the rev
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    # collect the (left, right) text pairs up front so the timed function
    # only measures diffing, not revision retrieval
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False
            )
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
2422 2427
2423 2428
@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # map each single-letter diff flag to its diff option name
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        diffkwargs = {options[flag]: b'1' for flag in diffopt}

        def d(kwargs=diffkwargs):
            # buffer the output so rendering does not pollute the timing
            ui.pushbuffer()
            commands.diff(ui, repo, **kwargs)
            ui.popbuffer()

        label = diffopt.encode('ascii')
        title = b'diffopts: %s' % (label and (b'-' + label) or b'none')
        timer(d, title=title)
    fm.end()
2447 2452
2448 2453
@command(b'perfrevlogindex', revlogopts + formatteropts, b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    indexfile = rl.indexfile
    data = opener.read(indexfile)

    # first 4 bytes: flags (high 16 bits) + version (low 16 bits)
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        inline = header & (1 << 16)
    else:
        raise error.Abort(b'unsupported revlog version: %d' % version)

    rllen = len(rl)

    # probe nodes at fixed fractions of the revlog length
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        revlog.revlog(opener, indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = revlogio.parseindex(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        nodemap = revlogio.parseindex(data, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        nodemap = revlogio.parseindex(data, inline)[1]
        if nodemap is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    # (callable, human-readable title) pairs, each timed independently
    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
        (
            lambda: resolvenodes(allnodesrev, 2),
            b'look up all nodes 2x (reverse)',
        ),
        (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
        (
            lambda: getentries(allrevs, 2),
            b'retrieve all index entries 2x (forward)',
        ),
        (
            lambda: getentries(allrevsrev),
            b'retrieve all index entries (reverse)',
        ),
        (
            lambda: getentries(allrevsrev, 2),
            b'retrieve all index entries 2x (reverse)',
        ),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2569 2574
2570 2575
@command(
    b'perfrevlogrevisions',
    revlogopts
    + formatteropts
    + [
        (b'd', b'dist', 100, b'distance between the revisions'),
        (b's', b'startrev', 0, b'revision to start reading at'),
        (b'', b'reverse', False, b'read in reverse'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogrevisions(
    ui, repo, file_=None, startrev=0, reverse=False, **opts
):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # a negative start revision counts back from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev

    def d():
        rl.clearcaches()

        beginrev = startrev
        endrev = rllen
        dist = opts[b'dist']

        if reverse:
            # walk from tip-1 down to (and including) the start revision
            beginrev, endrev = endrev - 1, beginrev - 1
            dist = -1 * dist

        for x in _xrange(beginrev, endrev, dist):
            # Old revisions don't support passing int.
            n = rl.node(x)
            rl.revision(n)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
2619 2624
2620 2625
@command(
    b'perfrevlogwrite',
    revlogopts
    + formatteropts
    + [
        (b's', b'startrev', 1000, b'revision to start writing at'),
        (b'', b'stoprev', -1, b'last revision to write'),
        (b'', b'count', 3, b'number of passes to perform'),
        (b'', b'details', False, b'print timing for every revisions tested'),
        (b'', b'source', b'full', b'the kind of data feed in the revlog'),
        (b'', b'lazydeltabase', True, b'try the provided delta first'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
    (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    Note: This performance command measures performance in a custom way. As a
    result some of the global configuration of the 'perf' command does not
    apply to it:

    * ``pre-run``: disabled

    * ``profile-benchmark``: disabled

    * ``run-limits``: disabled use --count instead
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative revisions count back from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (
        b'full',
        b'parent-1',
        b'parent-2',
        b'parent-smallest',
        b'storage',
    )
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        # fixed message typo: was "invalide run count"
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(
            ui,
            rl,
            source,
            startrev,
            stoprev,
            c + 1,
            lazydeltabase=lazydeltabase,
            clearcaches=clearcaches,
        )
        allresults.append(timing)

    ### consolidate the results in a single list
    # results[i] = (rev, [timing from each pass])
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # bug fix: the median row previously used ``resultcount * 70 // 100``
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append(
            (
                sum(x[1][0] for x in item),
                sum(x[1][1] for x in item),
                sum(x[1][2] for x in item),
            )
        )
    formatone(
        fm,
        totaltime,
        title="total time (%d revs)" % resultcount,
        displayall=displayall,
    )
    fm.end()
2762 2767
2763 2768
2764 2769 class _faketr(object):
2765 2770 def add(s, x, y, z=None):
2766 2771 return None
2767 2772
2768 2773
def _timeonewrite(
    ui,
    orig,
    source,
    startrev,
    stoprev,
    runidx=None,
    lazydeltabase=True,
    clearcaches=True,
):
    # Replay revisions [startrev, stoprev] of revlog ``orig`` into a
    # truncated temporary copy, timing each addrawrevision() call.
    # Returns a list of (rev, timing) tuples.
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)

            def updateprogress(pos):
                progress.update(pos)

            def completeprogress():
                progress.complete()

        else:

            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)

            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                # clear both index and revlog caches so each write is cold
                dest.index.clearcaches()
                dest.clearcaches()
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
2818 2823
2819 2824
def _getrevisionseed(orig, rev, tr, source):
    # Build the (args, kwargs) for addrawrevision() reproducing revision
    # ``rev`` of ``orig``, using the fulltext/delta strategy named by
    # ``source`` (full, parent-1, parent-2, parent-smallest or storage).
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        baserev = orig.rev(p1)
        cachedelta = (baserev, orig.revdiff(p1, rev))
    elif source == b'parent-2':
        parent = p2
        if p2 == nullid:
            # no second parent: fall back to the first one
            parent = p1
        baserev = orig.rev(parent)
        cachedelta = (baserev, orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        # compare delta sizes against both parents and keep the smaller
        p1diff = orig.revdiff(p1, rev)
        parent = p1
        diff = p1diff
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(p1diff) > len(p2diff):
                parent = p2
                diff = p2diff
        baserev = orig.rev(parent)
        cachedelta = (baserev, diff)
    elif source == b'storage':
        # reuse whatever delta base the original revlog stored
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return (
        (text, tr, linkrev, p1, p2),
        {'node': node, 'flags': flags, 'cachedelta': cachedelta},
    )
2860 2865
2861 2866
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    # Context manager yielding a writable copy of revlog ``orig`` truncated
    # just before ``truncaterev``, stored in a temporary directory that is
    # removed on exit.
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    k = 'upperboundcomp'
    # forward the optional compression bound when this version supports it
    if util.safehasattr(orig, k):
        revlogkwargs[k] = getattr(orig, k)

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(
            vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
        )
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
2912 2917
2913 2918
@command(
    b'perfrevlogchunks',
    revlogopts
    + formatteropts
    + [
        (b'e', b'engines', b'', b'compression engines to use'),
        (b's', b'startrev', 0, b'revision to start at'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = set(e.strip() for e in engines.split(b','))
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # No explicit selection: benchmark every engine that reports itself
        # available and can actually compress (probed with a dummy payload).
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # File handle on the file holding revision data: the index file for
        # inline revlogs, the separate data file otherwise.
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        # Read each revision's segment individually, no reused descriptor.
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        # Same as doread(), but reusing a single file descriptor.
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        # Read the whole revision range in one segment request.
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        # Batch read with a reused file descriptor.
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        # Read and decompress each chunk individually.
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        # Recompress the chunks captured by dochunkbatch() with the given
        # engine, restoring the revlog's original compressor afterwards.
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    # NOTE: the compression benchmarks depend on dochunkbatch() running
    # first (above) to populate chunks[0].
    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append(
            (
                functools.partial(docompress, compressor),
                b'compress w/ %s' % engine,
            )
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3041 3046
3042 3047
@command(
    b'perfrevlogrevision',
    revlogopts
    + formatteropts
    + [(b'', b'cache', False, b'use caches instead of clearing')],
    b'-c|-m|FILE REV',
)
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    # With -c/-m there is no file argument: the first positional is the rev.
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # Slice the on-disk segments in `data` into the per-revision raw
        # (still compressed) chunks described by the sliced delta chain.
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        # NOTE(review): the `chain` argument is unused; this reads the
        # precomputed `slicedchain` from the enclosing scope.
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        # older versions kept the helper on the revlog module
        slicechunk = getattr(revlog, '_slicechunk', None)

    # Precompute every intermediate result once, so that each benchmark
    # below measures a single phase in isolation.
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    # Slicing is only meaningful when sparse-read is enabled on the revlog.
    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend(
        [
            (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
            (lambda: dodecompress(rawchunks), b'decompress'),
            (lambda: dopatch(text, bins), b'patch'),
            (lambda: dohash(text), b'hash'),
        ]
    )

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
3184 3189
3185 3190
@command(
    b'perfrevset',
    [
        (b'C', b'clear', False, b'clear volatile cache between each call.'),
        (b'', b'contexts', False, b'obtain changectx for each revision'),
    ]
    + formatteropts,
    b"REVSET",
)
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if need to evaluate the impact of build volatile
    revisions set cache on the revset execution. Volatile cache hold filtered
    and obsolete related cache."""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def d():
        # Optionally drop the volatile sets so their rebuild cost is
        # included in the measurement.
        if clear:
            repo.invalidatevolatilesets()
        # Either materialize full change contexts or just revision numbers,
        # consuming the iterator either way.
        results = repo.set(expr) if contexts else repo.revs(expr)
        for _ignored in results:
            pass

    timer(d)
    fm.end()
3217 3222
3218 3223
@command(
    b'perfvolatilesets',
    [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),]
    + formatteropts,
)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def makebench(compute, setname):
        # Build a benchmark closure invalidating the volatile sets (and,
        # optionally, the obsstore file cache) before each computation.
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            compute(repo, setname)

        return d

    # obsolescence-related sets first, in sorted order
    for setname in sorted(obsolete.cachefuncs):
        if names and setname not in names:
            continue
        timer(makebench(obsolete.getrevs, setname), title=setname)

    # then the repoview filter sets, in sorted order
    for setname in sorted(repoview.filtertable):
        if names and setname not in names:
            continue
        timer(makebench(repoview.filterrevs, setname), title=setname)
    fm.end()
3264 3269
3265 3270
@command(
    b'perfbranchmap',
    [
        (b'f', b'full', False, b'Includes build time of subset'),
        (
            b'',
            b'clear-revbranch',
            False,
            b'purge the revbranch cache between computation',
        ),
    ]
    + formatteropts,
)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)

    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches

        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # drop every cached branchmap so the subset build time is
                # included in the measurement
                view._branchcaches.clear()
            else:
                # drop only this filter's entry; subsets stay warm
                filtered.pop(filtername, None)
            view.branchmap()

        return d

    # add filter in smaller subset to bigger subset
    # (i.e. order filters so each one's subset has already been processed)
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            # every remaining filter depends on another remaining one
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # Disable on-disk branchmap reading and writing so only the in-memory
    # update is timed; restored in the finally block below.
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=str(printname))
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
3355 3360
3356 3361
@command(
    b'perfbranchmapupdate',
    [
        (b'', b'base', [], b'subset of revision to start from'),
        (b'', b'target', [], b'subset of revision to end with'),
        (b'', b'clear-caches', False, b'clear cache between each runs'),
    ]
    + formatteropts,
)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from for <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    # update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None]  # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    # revisions present in the target but not in the base: the update to time
    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    # build filter functions hiding everything outside each subset, so the
    # base/target states can be exposed as temporary repoview filters
    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        # temporarily register the two filters; removed in the finally below
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset where found
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            # start each run from a fresh copy of the base branchmap
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3465 3470
3466 3471
@command(
    b'perfbranchmapload',
    [
        (b'f', b'filter', b'', b'Specify repoview filter'),
        # fix typo in user-visible help text: "brachmap" -> "branchmap"
        (b'', b'list', False, b'List branchmap filter caches'),
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ]
    + formatteropts,
)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # --list: just enumerate the on-disk branchmap cache files and exit
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(
                    b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
                )
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap()  # make sure we have a relevant, up to date branchmap

    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached;
    # walk up the subset chain until a cached branchmap is found
    while fromfile(repo) is None:
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(
                b'No branchmap cached for %s repo' % (filter or b'unfiltered')
            )
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)

    def setup():
        if clearrevlogs:
            clearchangelog(repo)

    def bench():
        fromfile(repo)

    timer(bench, setup=setup)
    fm.end()
3525 3530
3526 3531
@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)

    def countmarkers():
        # instantiating the obsstore parses the on-disk markers
        return len(obsolete.obsstore(svfs))

    timer(countmarkers)
    fm.end()
3536 3541
3537 3542
@command(
    b'perflrucachedict',
    formatteropts
    + [
        (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
        (b'', b'mincost', 0, b'smallest cost of items in cache'),
        (b'', b'maxcost', 100, b'maximum cost of items in cache'),
        (b'', b'size', 4, b'size of cache'),
        (b'', b'gets', 10000, b'number of key lookups'),
        (b'', b'sets', 10000, b'number of key sets'),
        (b'', b'mixed', 10000, b'number of mixed mode operations'),
        (
            b'',
            b'mixedgetfreq',
            50,
            b'frequency of get vs set ops in mixed mode',
        ),
    ],
    norepo=True,
)
def perflrucache(
    ui,
    mincost=0,
    maxcost=100,
    costlimit=0,
    size=4,
    gets=10000,
    sets=10000,
    mixed=10000,
    mixedgetfreq=50,
    **opts
):
    # Benchmark util.lrucachedict: construction, gets, inserts and a mixed
    # workload, with or without per-item cost accounting (--costlimit).
    opts = _byteskwargs(opts)

    def doinit():
        # measure raw construction cost
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value  # silence pyflakes warning

    def dogetscost():
        # NOTE: reads `costs`, which is populated below (before this
        # closure is ever called).
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value  # silence pyflakes warning
            except KeyError:
                # entries may have been evicted by the cost limit
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        # like doinserts(), but through __setitem__ instead of insert()
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0
        else:
            op = 1

        mixedops.append(
            (op, random.randint(0, size * 2), random.choice(costrange))
        )

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    # with a cost limit the cost-aware variants are benchmarked instead
    if costlimit:
        benches.extend(
            [
                (dogetscost, b'gets w/ cost limit'),
                (doinsertscost, b'inserts w/ cost limit'),
                (domixedcost, b'mixed w/ cost limit'),
            ]
        )
    else:
        benches.extend(
            [
                (dogets, b'gets'),
                (doinserts, b'inserts'),
                (dosets, b'sets'),
                (domixed, b'mixed'),
            ]
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3692 3697
3693 3698
@command(b'perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def write():
        # emit the same message 100000 times through the i18n-free writer
        remaining = 100000
        while remaining:
            ui.writenoi18n(b'Testing write performance\n')
            remaining -= 1

    timer(write)
    fm.end()
3708 3713
3709 3714
def uisetup(ui):
    # Extension setup hook. On Mercurial versions that have
    # cmdutil.openrevlog() but predate commands.debugrevlogopts, wrap
    # openrevlog() so that the unsupported --dir option fails loudly
    # instead of being silently ignored.
    if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
        commands, b'debugrevlogopts'
    ):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(
                    b"This version doesn't support --dir option",
                    hint=b"use 3.5 or later",
                )
            return orig(repo, cmd, file_, opts)

        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
3728 3733
3729 3734
@command(
    b'perfprogress',
    formatteropts
    + [
        (b'', b'topic', b'topic', b'topic for progress messages'),
        (b'c', b'total', 1000000, b'total value we are progressing to'),
    ],
    norepo=True,
)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def doprogress():
        # tick a progress bar `total` times from start to finish
        with ui.makeprogress(topic, total=total) as progress:
            remaining = total
            while remaining > 0:
                progress.increment()
                remaining -= 1

    timer(doprogress)
    fm.end()
@@ -1,396 +1,396
1 1 #require test-repo
2 2
3 3 Set vars:
4 4
5 5 $ . "$TESTDIR/helpers-testrepo.sh"
6 6 $ CONTRIBDIR="$TESTDIR/../contrib"
7 7
8 8 Prepare repo:
9 9
10 10 $ hg init
11 11
12 12 $ echo this is file a > a
13 13 $ hg add a
14 14 $ hg commit -m first
15 15
16 16 $ echo adding to file a >> a
17 17 $ hg commit -m second
18 18
19 19 $ echo adding more to file a >> a
20 20 $ hg commit -m third
21 21
22 22 $ hg up -r 0
23 23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 24 $ echo merge-this >> a
25 25 $ hg commit -m merge-able
26 26 created new head
27 27
28 28 $ hg up -r 2
29 29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
30 30
31 31 perfstatus
32 32
33 33 $ cat >> $HGRCPATH << EOF
34 34 > [extensions]
35 35 > perf=$CONTRIBDIR/perf.py
36 36 > [perf]
37 37 > presleep=0
38 38 > stub=on
39 39 > parentscount=1
40 40 > EOF
41 41 $ hg help -e perf
42 42 perf extension - helper extension to measure performance
43 43
44 44 Configurations
45 45 ==============
46 46
47 47 "perf"
48 48 ------
49 49
50 50 "all-timing"
51 51 When set, additional statistics will be reported for each benchmark: best,
52 52 worst, median average. If not set only the best timing is reported
53 53 (default: off).
54 54
55 55 "presleep"
56 56 number of second to wait before any group of runs (default: 1)
57 57
58 58 "pre-run"
59 59 number of run to perform before starting measurement.
60 60
61 61 "profile-benchmark"
62 62 Enable profiling for the benchmarked section. (The first iteration is
63 63 benchmarked)
64 64
65 65 "run-limits"
66 66 Control the number of runs each benchmark will perform. The option value
67 67 should be a list of '<time>-<numberofrun>' pairs. After each run the
68 68 conditions are considered in order with the following logic:
69 69
70 70 If benchmark has been running for <time> seconds, and we have performed
71 71 <numberofrun> iterations, stop the benchmark,
72 72
73 73 The default value is: '3.0-100, 10.0-3'
74 74
75 75 "stub"
76 76 When set, benchmarks will only be run once, useful for testing (default:
77 77 off)
78 78
79 79 list of commands:
80 80
81 81 perfaddremove
82 82 (no help text available)
83 83 perfancestors
84 84 (no help text available)
85 85 perfancestorset
86 86 (no help text available)
87 87 perfannotate (no help text available)
88 88 perfbdiff benchmark a bdiff between revisions
89 89 perfbookmarks
90 90 benchmark parsing bookmarks from disk to memory
91 91 perfbranchmap
92 92 benchmark the update of a branchmap
93 93 perfbranchmapload
94 94 benchmark reading the branchmap
95 95 perfbranchmapupdate
96 96 benchmark branchmap update from for <base> revs to <target>
97 97 revs
98 98 perfbundleread
99 99 Benchmark reading of bundle files.
100 100 perfcca (no help text available)
101 101 perfchangegroupchangelog
102 102 Benchmark producing a changelog group for a changegroup.
103 103 perfchangeset
104 104 (no help text available)
105 105 perfctxfiles (no help text available)
106 106 perfdiffwd Profile diff of working directory changes
107 107 perfdirfoldmap
108 108 (no help text available)
109 109 perfdirs (no help text available)
110 perfdirstate (no help text available)
110 perfdirstate benchmap the time necessary to load a dirstate from scratch
111 111 perfdirstatedirs
112 112 (no help text available)
113 113 perfdirstatefoldmap
114 114 (no help text available)
115 115 perfdirstatewrite
116 116 (no help text available)
117 117 perfdiscovery
118 118 benchmark discovery between local repo and the peer at given
119 119 path
120 120 perffncacheencode
121 121 (no help text available)
122 122 perffncacheload
123 123 (no help text available)
124 124 perffncachewrite
125 125 (no help text available)
126 126 perfheads benchmark the computation of a changelog heads
127 127 perfhelper-mergecopies
128 128 find statistics about potential parameters for
129 129 'perfmergecopies'
130 130 perfhelper-pathcopies
131 131 find statistic about potential parameters for the
132 132 'perftracecopies'
133 133 perfignore benchmark operation related to computing ignore
134 134 perfindex benchmark index creation time followed by a lookup
135 135 perflinelogedits
136 136 (no help text available)
137 137 perfloadmarkers
138 138 benchmark the time to parse the on-disk markers for a repo
139 139 perflog (no help text available)
140 140 perflookup (no help text available)
141 141 perflrucachedict
142 142 (no help text available)
143 143 perfmanifest benchmark the time to read a manifest from disk and return a
144 144 usable
145 145 perfmergecalculate
146 146 (no help text available)
147 147 perfmergecopies
148 148 measure runtime of 'copies.mergecopies'
149 149 perfmoonwalk benchmark walking the changelog backwards
150 150 perfnodelookup
151 151 (no help text available)
152 152 perfnodemap benchmark the time necessary to look up revision from a cold
153 153 nodemap
154 154 perfparents benchmark the time necessary to fetch one changeset's parents.
155 155 perfpathcopies
156 156 benchmark the copy tracing logic
157 157 perfphases benchmark phasesets computation
158 158 perfphasesremote
159 159 benchmark time needed to analyse phases of the remote server
160 160 perfprogress printing of progress bars
161 161 perfrawfiles (no help text available)
162 162 perfrevlogchunks
163 163 Benchmark operations on revlog chunks.
164 164 perfrevlogindex
165 165 Benchmark operations against a revlog index.
166 166 perfrevlogrevision
167 167 Benchmark obtaining a revlog revision.
168 168 perfrevlogrevisions
169 169 Benchmark reading a series of revisions from a revlog.
170 170 perfrevlogwrite
171 171 Benchmark writing a series of revisions to a revlog.
172 172 perfrevrange (no help text available)
173 173 perfrevset benchmark the execution time of a revset
174 174 perfstartup (no help text available)
175 175 perfstatus benchmark the performance of a single status call
176 176 perftags (no help text available)
177 177 perftemplating
178 178 test the rendering time of a given template
179 179 perfunidiff benchmark a unified diff between revisions
180 180 perfvolatilesets
181 181 benchmark the computation of various volatile set
182 182 perfwalk (no help text available)
183 183 perfwrite microbenchmark ui.write
184 184
185 185 (use 'hg help -v perf' to show built-in aliases and global options)
186 186 $ hg perfaddremove
187 187 $ hg perfancestors
188 188 $ hg perfancestorset 2
189 189 $ hg perfannotate a
190 190 $ hg perfbdiff -c 1
191 191 $ hg perfbdiff --alldata 1
192 192 $ hg perfunidiff -c 1
193 193 $ hg perfunidiff --alldata 1
194 194 $ hg perfbookmarks
195 195 $ hg perfbranchmap
196 196 $ hg perfbranchmapload
197 197 $ hg perfbranchmapupdate --base "not tip" --target "tip"
198 198 benchmark of branchmap with 3 revisions with 1 new ones
199 199 $ hg perfcca
200 200 $ hg perfchangegroupchangelog
201 201 $ hg perfchangegroupchangelog --cgversion 01
202 202 $ hg perfchangeset 2
203 203 $ hg perfctxfiles 2
204 204 $ hg perfdiffwd
205 205 $ hg perfdirfoldmap
206 206 $ hg perfdirs
207 207 $ hg perfdirstate
208 208 $ hg perfdirstatedirs
209 209 $ hg perfdirstatefoldmap
210 210 $ hg perfdirstatewrite
211 211 #if repofncache
212 212 $ hg perffncacheencode
213 213 $ hg perffncacheload
214 214 $ hg debugrebuildfncache
215 215 fncache already up to date
216 216 $ hg perffncachewrite
217 217 $ hg debugrebuildfncache
218 218 fncache already up to date
219 219 #endif
220 220 $ hg perfheads
221 221 $ hg perfignore
222 222 $ hg perfindex
223 223 $ hg perflinelogedits -n 1
224 224 $ hg perfloadmarkers
225 225 $ hg perflog
226 226 $ hg perflookup 2
227 227 $ hg perflrucache
228 228 $ hg perfmanifest 2
229 229 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
230 230 $ hg perfmanifest -m 44fe2c8352bb
231 231 abort: manifest revision must be integer or full node
232 232 [255]
233 233 $ hg perfmergecalculate -r 3
234 234 $ hg perfmoonwalk
235 235 $ hg perfnodelookup 2
236 236 $ hg perfpathcopies 1 2
237 237 $ hg perfprogress --total 1000
238 238 $ hg perfrawfiles 2
239 239 $ hg perfrevlogindex -c
240 240 #if reporevlogstore
241 241 $ hg perfrevlogrevisions .hg/store/data/a.i
242 242 #endif
243 243 $ hg perfrevlogrevision -m 0
244 244 $ hg perfrevlogchunks -c
245 245 $ hg perfrevrange
246 246 $ hg perfrevset 'all()'
247 247 $ hg perfstartup
248 248 $ hg perfstatus
249 249 $ hg perftags
250 250 $ hg perftemplating
251 251 $ hg perfvolatilesets
252 252 $ hg perfwalk
253 253 $ hg perfparents
254 254 $ hg perfdiscovery -q .
255 255
256 256 Test run control
257 257 ----------------
258 258
259 259 Simple single entry
260 260
261 261 $ hg perfparents --config perf.stub=no --config perf.run-limits='0.000000001-15'
262 262 ! wall * comb * user * sys * (best of 15) (glob)
263 263
264 264 Multiple entries
265 265
266 266 $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-5'
267 267 ! wall * comb * user * sys * (best of 5) (glob)
268 268
269 269 error cases are ignored
270 270
271 271 $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-5'
272 272 malformatted run limit entry, missing "-": 500
273 273 ! wall * comb * user * sys * (best of 5) (glob)
274 274 $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-12, 0.000000001-5'
275 275 malformatted run limit entry, could not convert string to float: aaa: aaa-12 (no-py3 !)
276 276 malformatted run limit entry, could not convert string to float: 'aaa': aaa-12 (py3 !)
277 277 ! wall * comb * user * sys * (best of 5) (glob)
278 278 $ hg perfparents --config perf.stub=no --config perf.run-limits='12-aaaaaa, 0.000000001-5'
279 279 malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 12-aaaaaa
280 280 ! wall * comb * user * sys * (best of 5) (glob)
281 281
282 282 test actual output
283 283 ------------------
284 284
285 285 normal output:
286 286
287 287 $ hg perfheads --config perf.stub=no
288 288 ! wall * comb * user * sys * (best of *) (glob)
289 289
290 290 detailed output:
291 291
292 292 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
293 293 ! wall * comb * user * sys * (best of *) (glob)
294 294 ! wall * comb * user * sys * (max of *) (glob)
295 295 ! wall * comb * user * sys * (avg of *) (glob)
296 296 ! wall * comb * user * sys * (median of *) (glob)
297 297
298 298 test json output
299 299 ----------------
300 300
301 301 normal output:
302 302
303 303 $ hg perfheads --template json --config perf.stub=no
304 304 [
305 305 {
306 306 "comb": *, (glob)
307 307 "count": *, (glob)
308 308 "sys": *, (glob)
309 309 "user": *, (glob)
310 310 "wall": * (glob)
311 311 }
312 312 ]
313 313
314 314 detailed output:
315 315
316 316 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
317 317 [
318 318 {
319 319 "avg.comb": *, (glob)
320 320 "avg.count": *, (glob)
321 321 "avg.sys": *, (glob)
322 322 "avg.user": *, (glob)
323 323 "avg.wall": *, (glob)
324 324 "comb": *, (glob)
325 325 "count": *, (glob)
326 326 "max.comb": *, (glob)
327 327 "max.count": *, (glob)
328 328 "max.sys": *, (glob)
329 329 "max.user": *, (glob)
330 330 "max.wall": *, (glob)
331 331 "median.comb": *, (glob)
332 332 "median.count": *, (glob)
333 333 "median.sys": *, (glob)
334 334 "median.user": *, (glob)
335 335 "median.wall": *, (glob)
336 336 "sys": *, (glob)
337 337 "user": *, (glob)
338 338 "wall": * (glob)
339 339 }
340 340 ]
341 341
342 342 Test pre-run feature
343 343 --------------------
344 344
345 345 (perf discovery has some spurious output)
346 346
347 347 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=0
348 348 ! wall * comb * user * sys * (best of 1) (glob)
349 349 searching for changes
350 350 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=1
351 351 ! wall * comb * user * sys * (best of 1) (glob)
352 352 searching for changes
353 353 searching for changes
354 354 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=3
355 355 ! wall * comb * user * sys * (best of 1) (glob)
356 356 searching for changes
357 357 searching for changes
358 358 searching for changes
359 359 searching for changes
360 360
361 361 test profile-benchmark option
362 362 ------------------------------
363 363
364 364 Function to check that statprof ran
365 365 $ statprofran () {
366 366 > egrep 'Sample count:|No samples recorded' > /dev/null
367 367 > }
368 368 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.profile-benchmark=yes 2>&1 | statprofran
369 369
370 370 Check perf.py for historical portability
371 371 ----------------------------------------
372 372
373 373 $ cd "$TESTDIR/.."
374 374
375 375 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
376 376 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
377 377 > "$TESTDIR"/check-perf-code.py contrib/perf.py
378 378 contrib/perf.py:\d+: (re)
379 379 > from mercurial import (
380 380 import newer module separately in try clause for early Mercurial
381 381 contrib/perf.py:\d+: (re)
382 382 > from mercurial import (
383 383 import newer module separately in try clause for early Mercurial
384 384 contrib/perf.py:\d+: (re)
385 385 > origindexpath = orig.opener.join(orig.indexfile)
386 386 use getvfs()/getsvfs() for early Mercurial
387 387 contrib/perf.py:\d+: (re)
388 388 > origdatapath = orig.opener.join(orig.datafile)
389 389 use getvfs()/getsvfs() for early Mercurial
390 390 contrib/perf.py:\d+: (re)
391 391 > vfs = vfsmod.vfs(tmpdir)
392 392 use getvfs()/getsvfs() for early Mercurial
393 393 contrib/perf.py:\d+: (re)
394 394 > vfs.options = getattr(orig.opener, 'options', None)
395 395 use getvfs()/getsvfs() for early Mercurial
396 396 [1]
General Comments 0
You need to be logged in to leave comments. Login now