##// END OF EJS Templates
perf: add a way to benchmark `dirstate.status`...
marmoute -
r43702:eabc5eec default draft
parent child Browse files
Show More
@@ -1,3821 +1,3837 b''
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance
3 3
4 4 Configurations
5 5 ==============
6 6
7 7 ``perf``
8 8 --------
9 9
10 10 ``all-timing``
11 11 When set, additional statistics will be reported for each benchmark: best,
12 12 worst, median, and average. If not set, only the best timing is reported
13 13 (default: off).
14 14
15 15 ``presleep``
16 16 number of seconds to wait before any group of runs (default: 1)
17 17
18 18 ``pre-run``
19 19 number of runs to perform before starting measurement.
20 20
21 21 ``profile-benchmark``
22 22 Enable profiling for the benchmarked section.
23 23 (The first iteration is benchmarked)
24 24
25 25 ``run-limits``
26 26 Control the number of runs each benchmark will perform. The option value
27 27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 28 conditions are considered in order with the following logic:
29 29
30 30 If the benchmark has been running for <time> seconds, and we have performed
31 31 <numberofrun> iterations, stop the benchmark.
32 32
33 33 The default value is: `3.0-100, 10.0-3`
34 34
35 35 ``stub``
36 36 When set, benchmarks will only be run once, useful for testing
37 37 (default: off)
38 38 '''
39 39
40 40 # "historical portability" policy of perf.py:
41 41 #
42 42 # We have to do:
43 43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 46 # - make historical perf command work correctly with as wide Mercurial
47 47 # version as possible
48 48 #
49 49 # We have to do, if possible with reasonable cost:
50 50 # - make recent perf command for historical feature work correctly
51 51 # with early Mercurial
52 52 #
53 53 # We don't have to do:
54 54 # - make perf command for recent feature work correctly with early
55 55 # Mercurial
56 56
57 57 from __future__ import absolute_import
58 58 import contextlib
59 59 import functools
60 60 import gc
61 61 import os
62 62 import random
63 63 import shutil
64 64 import struct
65 65 import sys
66 66 import tempfile
67 67 import threading
68 68 import time
69 69 from mercurial import (
70 70 changegroup,
71 71 cmdutil,
72 72 commands,
73 73 copies,
74 74 error,
75 75 extensions,
76 76 hg,
77 77 mdiff,
78 78 merge,
79 79 revlog,
80 80 util,
81 81 )
82 82
83 83 # for "historical portability":
84 84 # try to import modules separately (in dict order), and ignore
85 85 # failure, because these aren't available with early Mercurial
86 86 try:
87 87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 88 except ImportError:
89 89 pass
90 90 try:
91 91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 92 except ImportError:
93 93 pass
94 94 try:
95 95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96 96
97 97 dir(registrar) # forcibly load it
98 98 except ImportError:
99 99 registrar = None
100 100 try:
101 101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 102 except ImportError:
103 103 pass
104 104 try:
105 105 from mercurial.utils import repoviewutil # since 5.0
106 106 except ImportError:
107 107 repoviewutil = None
108 108 try:
109 109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 110 except ImportError:
111 111 pass
112 112 try:
113 113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 114 except ImportError:
115 115 pass
116 116
117 117 try:
118 118 from mercurial import profiling
119 119 except ImportError:
120 120 profiling = None
121 121
122 122
123 123 def identity(a):
124 124 return a
125 125
126 126
127 127 try:
128 128 from mercurial import pycompat
129 129
130 130 getargspec = pycompat.getargspec # added to module after 4.5
131 131 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
132 132 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
133 133 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
134 134 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
135 135 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
136 136 if pycompat.ispy3:
137 137 _maxint = sys.maxsize # per py3 docs for replacing maxint
138 138 else:
139 139 _maxint = sys.maxint
140 140 except (NameError, ImportError, AttributeError):
141 141 import inspect
142 142
143 143 getargspec = inspect.getargspec
144 144 _byteskwargs = identity
145 145 _bytestr = str
146 146 fsencode = identity # no py3 support
147 147 _maxint = sys.maxint # no py3 support
148 148 _sysstr = lambda x: x # no py3 support
149 149 _xrange = xrange
150 150
151 151 try:
152 152 # 4.7+
153 153 queue = pycompat.queue.Queue
154 154 except (NameError, AttributeError, ImportError):
155 155 # <4.7.
156 156 try:
157 157 queue = pycompat.queue
158 158 except (NameError, AttributeError, ImportError):
159 159 import Queue as queue
160 160
161 161 try:
162 162 from mercurial import logcmdutil
163 163
164 164 makelogtemplater = logcmdutil.maketemplater
165 165 except (AttributeError, ImportError):
166 166 try:
167 167 makelogtemplater = cmdutil.makelogtemplater
168 168 except (AttributeError, ImportError):
169 169 makelogtemplater = None
170 170
171 171 # for "historical portability":
172 172 # define util.safehasattr forcibly, because util.safehasattr has been
173 173 # available since 1.9.3 (or 94b200a11cf7)
174 174 _undefined = object()
175 175
176 176
177 177 def safehasattr(thing, attr):
178 178 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
179 179
180 180
181 181 setattr(util, 'safehasattr', safehasattr)
182 182
183 183 # for "historical portability":
184 184 # define util.timer forcibly, because util.timer has been available
185 185 # since ae5d60bb70c9
186 186 if safehasattr(time, 'perf_counter'):
187 187 util.timer = time.perf_counter
188 188 elif os.name == b'nt':
189 189 util.timer = time.clock
190 190 else:
191 191 util.timer = time.time
192 192
193 193 # for "historical portability":
194 194 # use locally defined empty option list, if formatteropts isn't
195 195 # available, because commands.formatteropts has been available since
196 196 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
197 197 # available since 2.2 (or ae5f92e154d3)
198 198 formatteropts = getattr(
199 199 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
200 200 )
201 201
202 202 # for "historical portability":
203 203 # use locally defined option list, if debugrevlogopts isn't available,
204 204 # because commands.debugrevlogopts has been available since 3.7 (or
205 205 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
206 206 # since 1.9 (or a79fea6b3e77).
207 207 revlogopts = getattr(
208 208 cmdutil,
209 209 "debugrevlogopts",
210 210 getattr(
211 211 commands,
212 212 "debugrevlogopts",
213 213 [
214 214 (b'c', b'changelog', False, b'open changelog'),
215 215 (b'm', b'manifest', False, b'open manifest'),
216 216 (b'', b'dir', False, b'open directory manifest'),
217 217 ],
218 218 ),
219 219 )
220 220
221 221 cmdtable = {}
222 222
223 223 # for "historical portability":
224 224 # define parsealiases locally, because cmdutil.parsealiases has been
225 225 # available since 1.5 (or 6252852b4332)
226 226 def parsealiases(cmd):
227 227 return cmd.split(b"|")
228 228
229 229
230 230 if safehasattr(registrar, 'command'):
231 231 command = registrar.command(cmdtable)
232 232 elif safehasattr(cmdutil, 'command'):
233 233 command = cmdutil.command(cmdtable)
234 234 if b'norepo' not in getargspec(command).args:
235 235 # for "historical portability":
236 236 # wrap original cmdutil.command, because "norepo" option has
237 237 # been available since 3.1 (or 75a96326cecb)
238 238 _command = command
239 239
240 240 def command(name, options=(), synopsis=None, norepo=False):
241 241 if norepo:
242 242 commands.norepo += b' %s' % b' '.join(parsealiases(name))
243 243 return _command(name, list(options), synopsis)
244 244
245 245
246 246 else:
247 247 # for "historical portability":
248 248 # define "@command" annotation locally, because cmdutil.command
249 249 # has been available since 1.9 (or 2daa5179e73f)
250 250 def command(name, options=(), synopsis=None, norepo=False):
251 251 def decorator(func):
252 252 if synopsis:
253 253 cmdtable[name] = func, list(options), synopsis
254 254 else:
255 255 cmdtable[name] = func, list(options)
256 256 if norepo:
257 257 commands.norepo += b' %s' % b' '.join(parsealiases(name))
258 258 return func
259 259
260 260 return decorator
261 261
262 262
263 263 try:
264 264 import mercurial.registrar
265 265 import mercurial.configitems
266 266
267 267 configtable = {}
268 268 configitem = mercurial.registrar.configitem(configtable)
269 269 configitem(
270 270 b'perf',
271 271 b'presleep',
272 272 default=mercurial.configitems.dynamicdefault,
273 273 experimental=True,
274 274 )
275 275 configitem(
276 276 b'perf',
277 277 b'stub',
278 278 default=mercurial.configitems.dynamicdefault,
279 279 experimental=True,
280 280 )
281 281 configitem(
282 282 b'perf',
283 283 b'parentscount',
284 284 default=mercurial.configitems.dynamicdefault,
285 285 experimental=True,
286 286 )
287 287 configitem(
288 288 b'perf',
289 289 b'all-timing',
290 290 default=mercurial.configitems.dynamicdefault,
291 291 experimental=True,
292 292 )
293 293 configitem(
294 294 b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
295 295 )
296 296 configitem(
297 297 b'perf',
298 298 b'profile-benchmark',
299 299 default=mercurial.configitems.dynamicdefault,
300 300 )
301 301 configitem(
302 302 b'perf',
303 303 b'run-limits',
304 304 default=mercurial.configitems.dynamicdefault,
305 305 experimental=True,
306 306 )
307 307 except (ImportError, AttributeError):
308 308 pass
309 309 except TypeError:
310 310 # compatibility fix for a11fd395e83f
311 311 # hg version: 5.2
312 312 configitem(
313 313 b'perf', b'presleep', default=mercurial.configitems.dynamicdefault,
314 314 )
315 315 configitem(
316 316 b'perf', b'stub', default=mercurial.configitems.dynamicdefault,
317 317 )
318 318 configitem(
319 319 b'perf', b'parentscount', default=mercurial.configitems.dynamicdefault,
320 320 )
321 321 configitem(
322 322 b'perf', b'all-timing', default=mercurial.configitems.dynamicdefault,
323 323 )
324 324 configitem(
325 325 b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
326 326 )
327 327 configitem(
328 328 b'perf',
329 329 b'profile-benchmark',
330 330 default=mercurial.configitems.dynamicdefault,
331 331 )
332 332 configitem(
333 333 b'perf', b'run-limits', default=mercurial.configitems.dynamicdefault,
334 334 )
335 335
336 336
337 337 def getlen(ui):
338 338 if ui.configbool(b"perf", b"stub", False):
339 339 return lambda x: 1
340 340 return len
341 341
342 342
343 343 class noop(object):
344 344 """dummy context manager"""
345 345
346 346 def __enter__(self):
347 347 pass
348 348
349 349 def __exit__(self, *args):
350 350 pass
351 351
352 352
353 353 NOOPCTX = noop()
354 354
355 355
356 356 def gettimer(ui, opts=None):
357 357 """return a timer function and formatter: (timer, formatter)
358 358
359 359 This function exists to gather the creation of formatter in a single
360 360 place instead of duplicating it in all performance commands."""
361 361
362 362 # enforce an idle period before execution to counteract power management
363 363 # experimental config: perf.presleep
364 364 time.sleep(getint(ui, b"perf", b"presleep", 1))
365 365
366 366 if opts is None:
367 367 opts = {}
368 368 # redirect all to stderr unless buffer api is in use
369 369 if not ui._buffers:
370 370 ui = ui.copy()
371 371 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
372 372 if uifout:
373 373 # for "historical portability":
374 374 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
375 375 uifout.set(ui.ferr)
376 376
377 377 # get a formatter
378 378 uiformatter = getattr(ui, 'formatter', None)
379 379 if uiformatter:
380 380 fm = uiformatter(b'perf', opts)
381 381 else:
382 382 # for "historical portability":
383 383 # define formatter locally, because ui.formatter has been
384 384 # available since 2.2 (or ae5f92e154d3)
385 385 from mercurial import node
386 386
387 387 class defaultformatter(object):
388 388 """Minimized composition of baseformatter and plainformatter
389 389 """
390 390
391 391 def __init__(self, ui, topic, opts):
392 392 self._ui = ui
393 393 if ui.debugflag:
394 394 self.hexfunc = node.hex
395 395 else:
396 396 self.hexfunc = node.short
397 397
398 398 def __nonzero__(self):
399 399 return False
400 400
401 401 __bool__ = __nonzero__
402 402
403 403 def startitem(self):
404 404 pass
405 405
406 406 def data(self, **data):
407 407 pass
408 408
409 409 def write(self, fields, deftext, *fielddata, **opts):
410 410 self._ui.write(deftext % fielddata, **opts)
411 411
412 412 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
413 413 if cond:
414 414 self._ui.write(deftext % fielddata, **opts)
415 415
416 416 def plain(self, text, **opts):
417 417 self._ui.write(text, **opts)
418 418
419 419 def end(self):
420 420 pass
421 421
422 422 fm = defaultformatter(ui, b'perf', opts)
423 423
424 424 # stub function, runs code only once instead of in a loop
425 425 # experimental config: perf.stub
426 426 if ui.configbool(b"perf", b"stub", False):
427 427 return functools.partial(stub_timer, fm), fm
428 428
429 429 # experimental config: perf.all-timing
430 430 displayall = ui.configbool(b"perf", b"all-timing", False)
431 431
432 432 # experimental config: perf.run-limits
433 433 limitspec = ui.configlist(b"perf", b"run-limits", [])
434 434 limits = []
435 435 for item in limitspec:
436 436 parts = item.split(b'-', 1)
437 437 if len(parts) < 2:
438 438 ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
439 439 continue
440 440 try:
441 441 time_limit = float(_sysstr(parts[0]))
442 442 except ValueError as e:
443 443 ui.warn(
444 444 (
445 445 b'malformatted run limit entry, %s: %s\n'
446 446 % (_bytestr(e), item)
447 447 )
448 448 )
449 449 continue
450 450 try:
451 451 run_limit = int(_sysstr(parts[1]))
452 452 except ValueError as e:
453 453 ui.warn(
454 454 (
455 455 b'malformatted run limit entry, %s: %s\n'
456 456 % (_bytestr(e), item)
457 457 )
458 458 )
459 459 continue
460 460 limits.append((time_limit, run_limit))
461 461 if not limits:
462 462 limits = DEFAULTLIMITS
463 463
464 464 profiler = None
465 465 if profiling is not None:
466 466 if ui.configbool(b"perf", b"profile-benchmark", False):
467 467 profiler = profiling.profile(ui)
468 468
469 469 prerun = getint(ui, b"perf", b"pre-run", 0)
470 470 t = functools.partial(
471 471 _timer,
472 472 fm,
473 473 displayall=displayall,
474 474 limits=limits,
475 475 prerun=prerun,
476 476 profiler=profiler,
477 477 )
478 478 return t, fm
479 479
480 480
481 481 def stub_timer(fm, func, setup=None, title=None):
482 482 if setup is not None:
483 483 setup()
484 484 func()
485 485
486 486
487 487 @contextlib.contextmanager
488 488 def timeone():
489 489 r = []
490 490 ostart = os.times()
491 491 cstart = util.timer()
492 492 yield r
493 493 cstop = util.timer()
494 494 ostop = os.times()
495 495 a, b = ostart, ostop
496 496 r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
497 497
498 498
499 499 # list of stop condition (elapsed time, minimal run count)
500 500 DEFAULTLIMITS = (
501 501 (3.0, 100),
502 502 (10.0, 3),
503 503 )
504 504
505 505
506 506 def _timer(
507 507 fm,
508 508 func,
509 509 setup=None,
510 510 title=None,
511 511 displayall=False,
512 512 limits=DEFAULTLIMITS,
513 513 prerun=0,
514 514 profiler=None,
515 515 ):
516 516 gc.collect()
517 517 results = []
518 518 begin = util.timer()
519 519 count = 0
520 520 if profiler is None:
521 521 profiler = NOOPCTX
522 522 for i in range(prerun):
523 523 if setup is not None:
524 524 setup()
525 525 func()
526 526 keepgoing = True
527 527 while keepgoing:
528 528 if setup is not None:
529 529 setup()
530 530 with profiler:
531 531 with timeone() as item:
532 532 r = func()
533 533 profiler = NOOPCTX
534 534 count += 1
535 535 results.append(item[0])
536 536 cstop = util.timer()
537 537 # Look for a stop condition.
538 538 elapsed = cstop - begin
539 539 for t, mincount in limits:
540 540 if elapsed >= t and count >= mincount:
541 541 keepgoing = False
542 542 break
543 543
544 544 formatone(fm, results, title=title, result=r, displayall=displayall)
545 545
546 546
547 547 def formatone(fm, timings, title=None, result=None, displayall=False):
548 548
549 549 count = len(timings)
550 550
551 551 fm.startitem()
552 552
553 553 if title:
554 554 fm.write(b'title', b'! %s\n', title)
555 555 if result:
556 556 fm.write(b'result', b'! result: %s\n', result)
557 557
558 558 def display(role, entry):
559 559 prefix = b''
560 560 if role != b'best':
561 561 prefix = b'%s.' % role
562 562 fm.plain(b'!')
563 563 fm.write(prefix + b'wall', b' wall %f', entry[0])
564 564 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
565 565 fm.write(prefix + b'user', b' user %f', entry[1])
566 566 fm.write(prefix + b'sys', b' sys %f', entry[2])
567 567 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
568 568 fm.plain(b'\n')
569 569
570 570 timings.sort()
571 571 min_val = timings[0]
572 572 display(b'best', min_val)
573 573 if displayall:
574 574 max_val = timings[-1]
575 575 display(b'max', max_val)
576 576 avg = tuple([sum(x) / count for x in zip(*timings)])
577 577 display(b'avg', avg)
578 578 median = timings[len(timings) // 2]
579 579 display(b'median', median)
580 580
581 581
582 582 # utilities for historical portability
583 583
584 584
585 585 def getint(ui, section, name, default):
586 586 # for "historical portability":
587 587 # ui.configint has been available since 1.9 (or fa2b596db182)
588 588 v = ui.config(section, name, None)
589 589 if v is None:
590 590 return default
591 591 try:
592 592 return int(v)
593 593 except ValueError:
594 594 raise error.ConfigError(
595 595 b"%s.%s is not an integer ('%s')" % (section, name, v)
596 596 )
597 597
598 598
599 599 def safeattrsetter(obj, name, ignoremissing=False):
600 600 """Ensure that 'obj' has 'name' attribute before subsequent setattr
601 601
602 602 This function is aborted, if 'obj' doesn't have 'name' attribute
603 603 at runtime. This avoids overlooking removal of an attribute, which
604 604 breaks assumption of performance measurement, in the future.
605 605
606 606 This function returns the object to (1) assign a new value, and
607 607 (2) restore an original value to the attribute.
608 608
609 609 If 'ignoremissing' is true, missing 'name' attribute doesn't cause
610 610 abortion, and this function returns None. This is useful to
611 611 examine an attribute, which isn't ensured in all Mercurial
612 612 versions.
613 613 """
614 614 if not util.safehasattr(obj, name):
615 615 if ignoremissing:
616 616 return None
617 617 raise error.Abort(
618 618 (
619 619 b"missing attribute %s of %s might break assumption"
620 620 b" of performance measurement"
621 621 )
622 622 % (name, obj)
623 623 )
624 624
625 625 origvalue = getattr(obj, _sysstr(name))
626 626
627 627 class attrutil(object):
628 628 def set(self, newvalue):
629 629 setattr(obj, _sysstr(name), newvalue)
630 630
631 631 def restore(self):
632 632 setattr(obj, _sysstr(name), origvalue)
633 633
634 634 return attrutil()
635 635
636 636
637 637 # utilities to examine each internal API changes
638 638
639 639
640 640 def getbranchmapsubsettable():
641 641 # for "historical portability":
642 642 # subsettable is defined in:
643 643 # - branchmap since 2.9 (or 175c6fd8cacc)
644 644 # - repoview since 2.5 (or 59a9f18d4587)
645 645 # - repoviewutil since 5.0
646 646 for mod in (branchmap, repoview, repoviewutil):
647 647 subsettable = getattr(mod, 'subsettable', None)
648 648 if subsettable:
649 649 return subsettable
650 650
651 651 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
652 652 # branchmap and repoview modules exist, but subsettable attribute
653 653 # doesn't)
654 654 raise error.Abort(
655 655 b"perfbranchmap not available with this Mercurial",
656 656 hint=b"use 2.5 or later",
657 657 )
658 658
659 659
660 660 def getsvfs(repo):
661 661 """Return appropriate object to access files under .hg/store
662 662 """
663 663 # for "historical portability":
664 664 # repo.svfs has been available since 2.3 (or 7034365089bf)
665 665 svfs = getattr(repo, 'svfs', None)
666 666 if svfs:
667 667 return svfs
668 668 else:
669 669 return getattr(repo, 'sopener')
670 670
671 671
672 672 def getvfs(repo):
673 673 """Return appropriate object to access files under .hg
674 674 """
675 675 # for "historical portability":
676 676 # repo.vfs has been available since 2.3 (or 7034365089bf)
677 677 vfs = getattr(repo, 'vfs', None)
678 678 if vfs:
679 679 return vfs
680 680 else:
681 681 return getattr(repo, 'opener')
682 682
683 683
684 684 def repocleartagscachefunc(repo):
685 685 """Return the function to clear tags cache according to repo internal API
686 686 """
687 687 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
688 688 # in this case, setattr(repo, '_tagscache', None) or so isn't
689 689 # correct way to clear tags cache, because existing code paths
690 690 # expect _tagscache to be a structured object.
691 691 def clearcache():
692 692 # _tagscache has been filteredpropertycache since 2.5 (or
693 693 # 98c867ac1330), and delattr() can't work in such case
694 694 if b'_tagscache' in vars(repo):
695 695 del repo.__dict__[b'_tagscache']
696 696
697 697 return clearcache
698 698
699 699 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
700 700 if repotags: # since 1.4 (or 5614a628d173)
701 701 return lambda: repotags.set(None)
702 702
703 703 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
704 704 if repotagscache: # since 0.6 (or d7df759d0e97)
705 705 return lambda: repotagscache.set(None)
706 706
707 707 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
708 708 # this point, but it isn't so problematic, because:
709 709 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
710 710 # in perftags() causes failure soon
711 711 # - perf.py itself has been available since 1.1 (or eb240755386d)
712 712 raise error.Abort(b"tags API of this hg command is unknown")
713 713
714 714
715 715 # utilities to clear cache
716 716
717 717
718 718 def clearfilecache(obj, attrname):
719 719 unfiltered = getattr(obj, 'unfiltered', None)
720 720 if unfiltered is not None:
721 721 obj = obj.unfiltered()
722 722 if attrname in vars(obj):
723 723 delattr(obj, attrname)
724 724 obj._filecache.pop(attrname, None)
725 725
726 726
727 727 def clearchangelog(repo):
728 728 if repo is not repo.unfiltered():
729 729 object.__setattr__(repo, r'_clcachekey', None)
730 730 object.__setattr__(repo, r'_clcache', None)
731 731 clearfilecache(repo.unfiltered(), 'changelog')
732 732
733 733
734 734 # perf commands
735 735
736 736
737 737 @command(b'perfwalk', formatteropts)
738 738 def perfwalk(ui, repo, *pats, **opts):
739 739 opts = _byteskwargs(opts)
740 740 timer, fm = gettimer(ui, opts)
741 741 m = scmutil.match(repo[None], pats, {})
742 742 timer(
743 743 lambda: len(
744 744 list(
745 745 repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
746 746 )
747 747 )
748 748 )
749 749 fm.end()
750 750
751 751
752 752 @command(b'perfannotate', formatteropts)
753 753 def perfannotate(ui, repo, f, **opts):
754 754 opts = _byteskwargs(opts)
755 755 timer, fm = gettimer(ui, opts)
756 756 fc = repo[b'.'][f]
757 757 timer(lambda: len(fc.annotate(True)))
758 758 fm.end()
759 759
760 760
761 761 @command(
762 762 b'perfstatus',
763 [(b'u', b'unknown', False, b'ask status to look for unknown files')]
763 [
764 (b'u', b'unknown', False, b'ask status to look for unknown files'),
765 (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
766 ]
764 767 + formatteropts,
765 768 )
766 769 def perfstatus(ui, repo, **opts):
767 770 """benchmark the performance of a single status call
768 771
769 772 The repository data are preserved between each call.
770 773
771 774 By default, only the status of the tracked file are requested. If
772 775 `--unknown` is passed, the "unknown" files are also tracked.
773 776 """
774 777 opts = _byteskwargs(opts)
775 778 # m = match.always(repo.root, repo.getcwd())
776 779 # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
777 780 # False))))
778 781 timer, fm = gettimer(ui, opts)
779 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
782 if opts[b'dirstate']:
783 dirstate = repo.dirstate
784 m = scmutil.matchall(repo)
785 unknown = opts[b'unknown']
786
787 def status_dirstate():
788 s = dirstate.status(
789 m, subrepos=[], ignored=False, clean=False, unknown=unknown
790 )
791 sum(map(len, s))
792
793 timer(status_dirstate)
794 else:
795 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
780 796 fm.end()
781 797
782 798
783 799 @command(b'perfaddremove', formatteropts)
784 800 def perfaddremove(ui, repo, **opts):
785 801 opts = _byteskwargs(opts)
786 802 timer, fm = gettimer(ui, opts)
787 803 try:
788 804 oldquiet = repo.ui.quiet
789 805 repo.ui.quiet = True
790 806 matcher = scmutil.match(repo[None])
791 807 opts[b'dry_run'] = True
792 808 if b'uipathfn' in getargspec(scmutil.addremove).args:
793 809 uipathfn = scmutil.getuipathfn(repo)
794 810 timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
795 811 else:
796 812 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
797 813 finally:
798 814 repo.ui.quiet = oldquiet
799 815 fm.end()
800 816
801 817
802 818 def clearcaches(cl):
803 819 # behave somewhat consistently across internal API changes
804 820 if util.safehasattr(cl, b'clearcaches'):
805 821 cl.clearcaches()
806 822 elif util.safehasattr(cl, b'_nodecache'):
807 823 from mercurial.node import nullid, nullrev
808 824
809 825 cl._nodecache = {nullid: nullrev}
810 826 cl._nodepos = None
811 827
812 828
813 829 @command(b'perfheads', formatteropts)
814 830 def perfheads(ui, repo, **opts):
815 831 """benchmark the computation of a changelog heads"""
816 832 opts = _byteskwargs(opts)
817 833 timer, fm = gettimer(ui, opts)
818 834 cl = repo.changelog
819 835
820 836 def s():
821 837 clearcaches(cl)
822 838
823 839 def d():
824 840 len(cl.headrevs())
825 841
826 842 timer(d, setup=s)
827 843 fm.end()
828 844
829 845
830 846 @command(
831 847 b'perftags',
832 848 formatteropts
833 849 + [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),],
834 850 )
835 851 def perftags(ui, repo, **opts):
836 852 opts = _byteskwargs(opts)
837 853 timer, fm = gettimer(ui, opts)
838 854 repocleartagscache = repocleartagscachefunc(repo)
839 855 clearrevlogs = opts[b'clear_revlogs']
840 856
841 857 def s():
842 858 if clearrevlogs:
843 859 clearchangelog(repo)
844 860 clearfilecache(repo.unfiltered(), 'manifest')
845 861 repocleartagscache()
846 862
847 863 def t():
848 864 return len(repo.tags())
849 865
850 866 timer(t, setup=s)
851 867 fm.end()
852 868
853 869
854 870 @command(b'perfancestors', formatteropts)
855 871 def perfancestors(ui, repo, **opts):
856 872 opts = _byteskwargs(opts)
857 873 timer, fm = gettimer(ui, opts)
858 874 heads = repo.changelog.headrevs()
859 875
860 876 def d():
861 877 for a in repo.changelog.ancestors(heads):
862 878 pass
863 879
864 880 timer(d)
865 881 fm.end()
866 882
867 883
868 884 @command(b'perfancestorset', formatteropts)
869 885 def perfancestorset(ui, repo, revset, **opts):
870 886 opts = _byteskwargs(opts)
871 887 timer, fm = gettimer(ui, opts)
872 888 revs = repo.revs(revset)
873 889 heads = repo.changelog.headrevs()
874 890
875 891 def d():
876 892 s = repo.changelog.ancestors(heads)
877 893 for rev in revs:
878 894 rev in s
879 895
880 896 timer(d)
881 897 fm.end()
882 898
883 899
884 900 @command(b'perfdiscovery', formatteropts, b'PATH')
885 901 def perfdiscovery(ui, repo, path, **opts):
886 902 """benchmark discovery between local repo and the peer at given path
887 903 """
888 904 repos = [repo, None]
889 905 timer, fm = gettimer(ui, opts)
890 906 path = ui.expandpath(path)
891 907
892 908 def s():
893 909 repos[1] = hg.peer(ui, opts, path)
894 910
895 911 def d():
896 912 setdiscovery.findcommonheads(ui, *repos)
897 913
898 914 timer(d, setup=s)
899 915 fm.end()
900 916
901 917
902 918 @command(
903 919 b'perfbookmarks',
904 920 formatteropts
905 921 + [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),],
906 922 )
907 923 def perfbookmarks(ui, repo, **opts):
908 924 """benchmark parsing bookmarks from disk to memory"""
909 925 opts = _byteskwargs(opts)
910 926 timer, fm = gettimer(ui, opts)
911 927
912 928 clearrevlogs = opts[b'clear_revlogs']
913 929
914 930 def s():
915 931 if clearrevlogs:
916 932 clearchangelog(repo)
917 933 clearfilecache(repo, b'_bookmarks')
918 934
919 935 def d():
920 936 repo._bookmarks
921 937
922 938 timer(d, setup=s)
923 939 fm.end()
924 940
925 941
926 942 @command(b'perfbundleread', formatteropts, b'BUNDLE')
927 943 def perfbundleread(ui, repo, bundlepath, **opts):
928 944 """Benchmark reading of bundle files.
929 945
930 946 This command is meant to isolate the I/O part of bundle reading as
931 947 much as possible.
932 948 """
933 949 from mercurial import (
934 950 bundle2,
935 951 exchange,
936 952 streamclone,
937 953 )
938 954
939 955 opts = _byteskwargs(opts)
940 956
941 957 def makebench(fn):
942 958 def run():
943 959 with open(bundlepath, b'rb') as fh:
944 960 bundle = exchange.readbundle(ui, fh, bundlepath)
945 961 fn(bundle)
946 962
947 963 return run
948 964
949 965 def makereadnbytes(size):
950 966 def run():
951 967 with open(bundlepath, b'rb') as fh:
952 968 bundle = exchange.readbundle(ui, fh, bundlepath)
953 969 while bundle.read(size):
954 970 pass
955 971
956 972 return run
957 973
958 974 def makestdioread(size):
959 975 def run():
960 976 with open(bundlepath, b'rb') as fh:
961 977 while fh.read(size):
962 978 pass
963 979
964 980 return run
965 981
966 982 # bundle1
967 983
968 984 def deltaiter(bundle):
969 985 for delta in bundle.deltaiter():
970 986 pass
971 987
972 988 def iterchunks(bundle):
973 989 for chunk in bundle.getchunks():
974 990 pass
975 991
976 992 # bundle2
977 993
978 994 def forwardchunks(bundle):
979 995 for chunk in bundle._forwardchunks():
980 996 pass
981 997
982 998 def iterparts(bundle):
983 999 for part in bundle.iterparts():
984 1000 pass
985 1001
986 1002 def iterpartsseekable(bundle):
987 1003 for part in bundle.iterparts(seekable=True):
988 1004 pass
989 1005
990 1006 def seek(bundle):
991 1007 for part in bundle.iterparts(seekable=True):
992 1008 part.seek(0, os.SEEK_END)
993 1009
994 1010 def makepartreadnbytes(size):
995 1011 def run():
996 1012 with open(bundlepath, b'rb') as fh:
997 1013 bundle = exchange.readbundle(ui, fh, bundlepath)
998 1014 for part in bundle.iterparts():
999 1015 while part.read(size):
1000 1016 pass
1001 1017
1002 1018 return run
1003 1019
1004 1020 benches = [
1005 1021 (makestdioread(8192), b'read(8k)'),
1006 1022 (makestdioread(16384), b'read(16k)'),
1007 1023 (makestdioread(32768), b'read(32k)'),
1008 1024 (makestdioread(131072), b'read(128k)'),
1009 1025 ]
1010 1026
1011 1027 with open(bundlepath, b'rb') as fh:
1012 1028 bundle = exchange.readbundle(ui, fh, bundlepath)
1013 1029
1014 1030 if isinstance(bundle, changegroup.cg1unpacker):
1015 1031 benches.extend(
1016 1032 [
1017 1033 (makebench(deltaiter), b'cg1 deltaiter()'),
1018 1034 (makebench(iterchunks), b'cg1 getchunks()'),
1019 1035 (makereadnbytes(8192), b'cg1 read(8k)'),
1020 1036 (makereadnbytes(16384), b'cg1 read(16k)'),
1021 1037 (makereadnbytes(32768), b'cg1 read(32k)'),
1022 1038 (makereadnbytes(131072), b'cg1 read(128k)'),
1023 1039 ]
1024 1040 )
1025 1041 elif isinstance(bundle, bundle2.unbundle20):
1026 1042 benches.extend(
1027 1043 [
1028 1044 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
1029 1045 (makebench(iterparts), b'bundle2 iterparts()'),
1030 1046 (
1031 1047 makebench(iterpartsseekable),
1032 1048 b'bundle2 iterparts() seekable',
1033 1049 ),
1034 1050 (makebench(seek), b'bundle2 part seek()'),
1035 1051 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
1036 1052 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
1037 1053 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
1038 1054 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
1039 1055 ]
1040 1056 )
1041 1057 elif isinstance(bundle, streamclone.streamcloneapplier):
1042 1058 raise error.Abort(b'stream clone bundles not supported')
1043 1059 else:
1044 1060 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
1045 1061
1046 1062 for fn, title in benches:
1047 1063 timer, fm = gettimer(ui, opts)
1048 1064 timer(fn, title=title)
1049 1065 fm.end()
1050 1066
1051 1067
@command(
    b'perfchangegroupchangelog',
    formatteropts
    + [
        (b'', b'cgversion', b'02', b'changegroup version'),
        (b'r', b'rev', b'', b'revisions to add to changegroup'),
    ],
)
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    revs = repo.revs(rev or b'all()')
    nodes = [cl.lookup(r) for r in revs]
    bundler = changegroup.getbundler(cgversion, repo)

    def run():
        # exhaust the chunk generator so the whole changelog is processed
        state, chunks = bundler._generatechangelog(cl, nodes)
        for _chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(run)

    fm.end()
1087 1103
1088 1104
@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark the computation of the dirstate `_dirs` cache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # make sure the dirstate is loaded before the benchmark starts
    b'a' in dirstate

    def run():
        dirstate.hasdir(b'a')
        # drop the cache so the next run recomputes it from scratch
        del dirstate._map._dirs

    timer(run)
    fm.end()
1102 1118
1103 1119
@command(
    b'perfdirstate',
    [
        (
            b'',
            b'iteration',
            None,
            b'benchmark a full iteration for the dirstate',
        ),
        (
            b'',
            b'contains',
            None,
            b'benchmark a large amount of `nf in dirstate` calls',
        ),
    ]
    + formatteropts,
)
def perfdirstate(ui, repo, **opts):
    """benchmark the time of various dirstate operations

    By default benchmark the time necessary to load a dirstate from scratch.
    The dirstate is loaded to the point where a "contains" request can be
    answered.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # make sure the dirstate is loaded before any benchmark starts
    b"a" in repo.dirstate

    if opts[b'iteration'] and opts[b'contains']:
        msg = b'only specify one of --iteration or --contains'
        raise error.Abort(msg)

    if opts[b'iteration']:
        # time a full iteration over all tracked files
        setup = None
        dirstate = repo.dirstate

        def d():
            for f in dirstate:
                pass

    elif opts[b'contains']:
        # time many membership tests, half hits and half misses
        setup = None
        dirstate = repo.dirstate
        allfiles = list(dirstate)
        # also add file path that will be "missing" from the dirstate
        allfiles.extend([f[::-1] for f in allfiles])

        def d():
            for f in allfiles:
                f in dirstate

    else:
        # time a cold load of the dirstate, invalidated before each run

        def setup():
            repo.dirstate.invalidate()

        def d():
            b"a" in repo.dirstate

    timer(d, setup=setup)
    fm.end()
1166 1182
1167 1183
@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark a 'dirstate.hasdir' call from an empty `dirs` cache
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # warm the dirstate itself once, outside the timed section
    repo.dirstate.hasdir(b"a")

    def setup():
        # drop the `dirs` cache so each run rebuilds it
        del repo.dirstate._map._dirs

    def d():
        repo.dirstate.hasdir(b"a")

    timer(d, setup=setup)
    fm.end()
1184 1200
1185 1201
@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.filefoldmap.get()` request

    The dirstate filefoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # warm the dirstate itself once, outside the timed section
    dirstate._map.filefoldmap.get(b'a')

    def setup():
        # drop the filefoldmap cache so each run rebuilds it
        del dirstate._map.filefoldmap

    def d():
        dirstate._map.filefoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()
1205 1221
1206 1222
@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.dirfoldmap.get()` request

    The dirstate dirfoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # warm the dirstate itself once, outside the timed section
    dirstate._map.dirfoldmap.get(b'a')

    def setup():
        # drop both caches: dirfoldmap is derived from the `dirs` set
        del dirstate._map.dirfoldmap
        del dirstate._map._dirs

    def d():
        dirstate._map.dirfoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()
1227 1243
1228 1244
@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark the time it takes to write a dirstate on disk
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # make sure the dirstate is loaded before the benchmark starts
    b"a" in ds

    def setup():
        # force a write even though nothing changed
        ds._dirty = True

    def d():
        ds.write(repo.currenttransaction())

    timer(d, setup=setup)
    fm.end()
1246 1262
1247 1263
def _getmergerevs(repo, opts):
    """parse command arguments to return revs involved in a merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        fromrev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[fromrev]
    else:
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
1269 1285
1270 1286
@command(
    b'perfmergecalculate',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecalculate(ui, repo, **opts):
    """benchmark the computation of the update set for a merge"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def run():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(
            repo,
            wctx,
            rctx,
            [ancestor],
            branchmerge=False,
            force=False,
            acceptremote=True,
            followcopies=True,
        )

    timer(run)
    fm.end()
1302 1318
1303 1319
@command(
    b'perfmergecopies',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def run():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        copies.mergecopies(repo, wctx, rctx, ancestor)

    timer(run)
    fm.end()
1326 1342
1327 1343
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)
    # the two contexts are resolved once; only the copy tracing is timed
    timer(lambda: copies.pathcopies(ctx1, ctx2))
    fm.end()
1341 1357
1342 1358
@command(
    b'perfphases',
    [(b'', b'full', False, b'include file reading time too'),],
    b"",
)
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')

    def run():
        phases = _phases
        if full:
            # drop the filecache entry too, so file reading is included
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)

    timer(run)
    fm.end()
1365 1381
1366 1382
@command(b'perfphasesremote', [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import bin
    from mercurial import (
        exchange,
        hg,
        phases,
    )

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            b'default repository not configured!',
            hint=b"see 'hg help config.paths'",
        )
    dest = path.pushloc or path.loc
    ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # discovery is easier to perform through a push operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(
            b'listkeys', {b'namespace': b'phases'}
        ).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.statusnoi18n(b'publishing: yes\n')
    else:
        ui.statusnoi18n(b'publishing: no\n')

    # count the remote phase roots that are known non-public locally
    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    for hexnode, phase in remotephases.iteritems():
        if hexnode == b'publishing':  # ignore data related to publish option
            continue
        node = bin(hexnode)
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
    ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)

    def run():
        phases.remotephasessummary(repo, remotesubset, remotephases)

    timer(run)
    fm.end()
1423 1439
1424 1440
@command(
    b'perfmanifest',
    [
        (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
        (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ]
    + formatteropts,
    b'REV|NODE',
)
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # `rev` names a changeset; resolve it to its manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # full hex node id
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(
                    b'manifest revision must be integer or full node'
                )

    def run():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()

    timer(run)
    fm.end()
1468 1484
1469 1485
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading one changeset entry from the changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    node = scmutil.revsingle(repo, rev).node()

    def run():
        repo.changelog.read(node)
        # repo.changelog._cache = None

    timer(run)
    fm.end()
1482 1498
1483 1499
@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def setup():
        # drop both the dirstate content and the cached ignore matcher
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def load():
        dirstate._ignore

    timer(load, setup=setup, title=b"load")
    fm.end()
1500 1516
1501 1517
@command(
    b'perfindex',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'no-lookup', None, b'do not revision lookup post creation'),
    ]
    + formatteropts,
)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        # NOTE: after _byteskwargs all option keys are bytes; the previous
        # `opts['rev']` lookup raised KeyError on Python 3
        if opts[b'rev']:
            raise error.Abort('--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func

    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)

    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)

    timer(d, setup=setup)
    fm.end()
1563 1579
1564 1580
@command(
    b'perfnodemap',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ]
    + formatteropts,
)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    # NOTE: after _byteskwargs all option keys are bytes; the previous
    # `opts['clear_caches']` lookup raised KeyError on Python 3
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort('use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]

    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        nodeget[0] = makecl(unfi).nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:
        # rebuild the nodemap before each timed run
        setup = setnodeget
    else:
        setnodeget()
        d()  # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
1631 1647
1632 1648
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark the startup time of `hg version` as a child process"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def run():
        if os.name != r'nt':
            # neutralize the user configuration to get a stable measurement
            os.system(
                b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
            )
        else:
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])

    timer(run)
    fm.end()
1649 1665
1650 1666
@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nodes = [repo.changelog.node(i) for i in _xrange(count)]

    def run():
        for node in nodes:
            repo.changelog.parents(node)

    timer(run)
    fm.end()
1676 1692
1677 1693
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark reading the file list of a single changeset"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    timer(lambda: len(repo[rev].files()))
    fm.end()
1689 1705
1690 1706
@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the raw file list straight from the changelog"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    # entry [3] of a changelog revision is the list of touched files
    timer(lambda: len(cl.read(rev)[3]))
    fm.end()
1703 1719
1704 1720
@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision identifier to a node"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def run():
        return len(repo.lookup(rev))

    timer(run)
    fm.end()
1711 1727
1712 1728
@command(
    b'perflinelogedits',
    [
        (b'n', b'edits', 10000, b'number of edits'),
        (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
    ],
    norepo=True,
)
def perflinelogedits(ui, **opts):
    """benchmark applying a long series of random edits to a linelog"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed: the pseudo-random edit stream is deterministic
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def run():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(run)
    fm.end()
1750 1766
1751 1767
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revset specifications"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def run():
        return len(scmutil.revrange(repo, specs))

    timer(run)
    fm.end()
1759 1775
1760 1776
@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark a node -> revision lookup on a cold changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog

    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")

    def run():
        cl.rev(n)
        # drop the caches so every run starts from a cold revlog
        clearcaches(cl)

    timer(run)
    fm.end()
1777 1793
1778 1794
@command(
    b'perflog',
    [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
)
def perflog(ui, repo, rev=None, **opts):
    """benchmark a full `hg log` run"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    # swallow the command output; only the timing matters here
    ui.pushbuffer()

    def run():
        commands.log(
            ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
        )

    timer(run)
    ui.popbuffer()
    fm.end()
1796 1812
1797 1813
@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def moonwalk():
        for rev in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            ctx = repo[rev]
            ctx.branch()  # read changelog data (in addition to the index)

    timer(moonwalk)
    fm.end()
1814 1830
1815 1831
@command(
    b'perftemplating',
    [(b'r', b'rev', [], b'revisions to run the template on'),] + formatteropts,
)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort(
            b"perftemplating not available with this Mercurial",
            hint=b"use 4.3 or later",
        )

    opts = _byteskwargs(opts)

    # render into a ui whose output goes nowhere so only the template
    # engine itself is timed
    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (
        b'{date|shortdate} [{rev}:{node|short}]'
        b' {author|person}: {desc|firstline}\n'
    )
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)

    def render():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(render)
    fm.end()
1855 1871
1856 1872
def _displaystats(ui, opts, entries, data):
    """display statistics about collections of measured values

    `entries` is a list of (key, title) pairs; `data` maps each key to a
    list of tuples whose first item is the measured value. For each entry
    this prints (and emits through a formatter) min/percentiles/max of the
    values.
    """
    # use a second formatter because the data are quite different, not sure
    # how it flies with the templater.
    fm = ui.formatter(b'perf-stats', opts)
    for key, title in entries:
        values = data[key]
        # percentile indices must be computed from the number of samples;
        # the previous `len(data)` used the number of entry keys instead
        nbvalues = len(values)
        values.sort()
        stats = {
            'key': key,
            'title': title,
            'nbitems': len(values),
            'min': values[0][0],
            '10%': values[(nbvalues * 10) // 100][0],
            '25%': values[(nbvalues * 25) // 100][0],
            '50%': values[(nbvalues * 50) // 100][0],
            '75%': values[(nbvalues * 75) // 100][0],
            '80%': values[(nbvalues * 80) // 100][0],
            '85%': values[(nbvalues * 85) // 100][0],
            '90%': values[(nbvalues * 90) // 100][0],
            '95%': values[(nbvalues * 95) // 100][0],
            '99%': values[(nbvalues * 99) // 100][0],
            'max': values[-1][0],
        }
        fm.startitem()
        fm.data(**stats)
        # make node pretty for the human output
        fm.plain('### %s (%d items)\n' % (title, len(values)))
        lines = [
            'min',
            '10%',
            '25%',
            '50%',
            '75%',
            '80%',
            '85%',
            '90%',
            '95%',
            '99%',
            'max',
        ]
        for l in lines:
            fm.plain('%s: %s\n' % (l, stats[l]))
    fm.end()
1902 1918
1903 1919
1904 1920 @command(
1905 1921 b'perfhelper-mergecopies',
1906 1922 formatteropts
1907 1923 + [
1908 1924 (b'r', b'revs', [], b'restrict search to these revisions'),
1909 1925 (b'', b'timing', False, b'provides extra data (costly)'),
1910 1926 (b'', b'stats', False, b'provides statistic about the measured data'),
1911 1927 ],
1912 1928 )
1913 1929 def perfhelpermergecopies(ui, repo, revs=[], **opts):
1914 1930 """find statistics about potential parameters for `perfmergecopies`
1915 1931
1916 1932 This command find (base, p1, p2) triplet relevant for copytracing
1917 1933 benchmarking in the context of a merge. It reports values for some of the
1918 1934 parameters that impact merge copy tracing time during merge.
1919 1935
1920 1936 If `--timing` is set, rename detection is run and the associated timing
1921 1937 will be reported. The extra details come at the cost of slower command
1922 1938 execution.
1923 1939
1924 1940 Since rename detection is only run once, other factors might easily
1925 1941 affect the precision of the timing. However it should give a good
1926 1942 approximation of which revision triplets are very costly.
1927 1943 """
1928 1944 opts = _byteskwargs(opts)
1929 1945 fm = ui.formatter(b'perf', opts)
1930 1946 dotiming = opts[b'timing']
1931 1947 dostats = opts[b'stats']
1932 1948
1933 1949 output_template = [
1934 1950 ("base", "%(base)12s"),
1935 1951 ("p1", "%(p1.node)12s"),
1936 1952 ("p2", "%(p2.node)12s"),
1937 1953 ("p1.nb-revs", "%(p1.nbrevs)12d"),
1938 1954 ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
1939 1955 ("p1.renames", "%(p1.renamedfiles)12d"),
1940 1956 ("p1.time", "%(p1.time)12.3f"),
1941 1957 ("p2.nb-revs", "%(p2.nbrevs)12d"),
1942 1958 ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
1943 1959 ("p2.renames", "%(p2.renamedfiles)12d"),
1944 1960 ("p2.time", "%(p2.time)12.3f"),
1945 1961 ("renames", "%(nbrenamedfiles)12d"),
1946 1962 ("total.time", "%(time)12.3f"),
1947 1963 ]
1948 1964 if not dotiming:
1949 1965 output_template = [
1950 1966 i
1951 1967 for i in output_template
1952 1968 if not ('time' in i[0] or 'renames' in i[0])
1953 1969 ]
1954 1970 header_names = [h for (h, v) in output_template]
1955 1971 output = ' '.join([v for (h, v) in output_template]) + '\n'
1956 1972 header = ' '.join(['%12s'] * len(header_names)) + '\n'
1957 1973 fm.plain(header % tuple(header_names))
1958 1974
1959 1975 if not revs:
1960 1976 revs = ['all()']
1961 1977 revs = scmutil.revrange(repo, revs)
1962 1978
1963 1979 if dostats:
1964 1980 alldata = {
1965 1981 'nbrevs': [],
1966 1982 'nbmissingfiles': [],
1967 1983 }
1968 1984 if dotiming:
1969 1985 alldata['parentnbrenames'] = []
1970 1986 alldata['totalnbrenames'] = []
1971 1987 alldata['parenttime'] = []
1972 1988 alldata['totaltime'] = []
1973 1989
1974 1990 roi = repo.revs('merge() and %ld', revs)
1975 1991 for r in roi:
1976 1992 ctx = repo[r]
1977 1993 p1 = ctx.p1()
1978 1994 p2 = ctx.p2()
1979 1995 bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
1980 1996 for b in bases:
1981 1997 b = repo[b]
1982 1998 p1missing = copies._computeforwardmissing(b, p1)
1983 1999 p2missing = copies._computeforwardmissing(b, p2)
1984 2000 data = {
1985 2001 b'base': b.hex(),
1986 2002 b'p1.node': p1.hex(),
1987 2003 b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
1988 2004 b'p1.nbmissingfiles': len(p1missing),
1989 2005 b'p2.node': p2.hex(),
1990 2006 b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
1991 2007 b'p2.nbmissingfiles': len(p2missing),
1992 2008 }
1993 2009 if dostats:
1994 2010 if p1missing:
1995 2011 alldata['nbrevs'].append(
1996 2012 (data['p1.nbrevs'], b.hex(), p1.hex())
1997 2013 )
1998 2014 alldata['nbmissingfiles'].append(
1999 2015 (data['p1.nbmissingfiles'], b.hex(), p1.hex())
2000 2016 )
2001 2017 if p2missing:
2002 2018 alldata['nbrevs'].append(
2003 2019 (data['p2.nbrevs'], b.hex(), p2.hex())
2004 2020 )
2005 2021 alldata['nbmissingfiles'].append(
2006 2022 (data['p2.nbmissingfiles'], b.hex(), p2.hex())
2007 2023 )
2008 2024 if dotiming:
2009 2025 begin = util.timer()
2010 2026 mergedata = copies.mergecopies(repo, p1, p2, b)
2011 2027 end = util.timer()
2012 2028 # not very stable timing since we did only one run
2013 2029 data['time'] = end - begin
2014 2030 # mergedata contains five dicts: "copy", "movewithdir",
2015 2031 # "diverge", "renamedelete" and "dirmove".
2016 2032 # The first 4 are about renamed file so lets count that.
2017 2033 renames = len(mergedata[0])
2018 2034 renames += len(mergedata[1])
2019 2035 renames += len(mergedata[2])
2020 2036 renames += len(mergedata[3])
2021 2037 data['nbrenamedfiles'] = renames
2022 2038 begin = util.timer()
2023 2039 p1renames = copies.pathcopies(b, p1)
2024 2040 end = util.timer()
2025 2041 data['p1.time'] = end - begin
2026 2042 begin = util.timer()
2027 2043 p2renames = copies.pathcopies(b, p2)
2028 2044 data['p2.time'] = end - begin
2029 2045 end = util.timer()
2030 2046 data['p1.renamedfiles'] = len(p1renames)
2031 2047 data['p2.renamedfiles'] = len(p2renames)
2032 2048
2033 2049 if dostats:
2034 2050 if p1missing:
2035 2051 alldata['parentnbrenames'].append(
2036 2052 (data['p1.renamedfiles'], b.hex(), p1.hex())
2037 2053 )
2038 2054 alldata['parenttime'].append(
2039 2055 (data['p1.time'], b.hex(), p1.hex())
2040 2056 )
2041 2057 if p2missing:
2042 2058 alldata['parentnbrenames'].append(
2043 2059 (data['p2.renamedfiles'], b.hex(), p2.hex())
2044 2060 )
2045 2061 alldata['parenttime'].append(
2046 2062 (data['p2.time'], b.hex(), p2.hex())
2047 2063 )
2048 2064 if p1missing or p2missing:
2049 2065 alldata['totalnbrenames'].append(
2050 2066 (
2051 2067 data['nbrenamedfiles'],
2052 2068 b.hex(),
2053 2069 p1.hex(),
2054 2070 p2.hex(),
2055 2071 )
2056 2072 )
2057 2073 alldata['totaltime'].append(
2058 2074 (data['time'], b.hex(), p1.hex(), p2.hex())
2059 2075 )
2060 2076 fm.startitem()
2061 2077 fm.data(**data)
2062 2078 # make node pretty for the human output
2063 2079 out = data.copy()
2064 2080 out['base'] = fm.hexfunc(b.node())
2065 2081 out['p1.node'] = fm.hexfunc(p1.node())
2066 2082 out['p2.node'] = fm.hexfunc(p2.node())
2067 2083 fm.plain(output % out)
2068 2084
2069 2085 fm.end()
2070 2086 if dostats:
2071 2087 # use a second formatter because the data are quite different, not sure
2072 2088 # how it flies with the templater.
2073 2089 entries = [
2074 2090 ('nbrevs', 'number of revision covered'),
2075 2091 ('nbmissingfiles', 'number of missing files at head'),
2076 2092 ]
2077 2093 if dotiming:
2078 2094 entries.append(
2079 2095 ('parentnbrenames', 'rename from one parent to base')
2080 2096 )
2081 2097 entries.append(('totalnbrenames', 'total number of renames'))
2082 2098 entries.append(('parenttime', 'time for one parent'))
2083 2099 entries.append(('totaltime', 'time for both parents'))
2084 2100 _displaystats(ui, opts, entries, alldata)
2085 2101
2086 2102
@command(
    b'perfhelper-pathcopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perftracecopies`

    This command finds source-destination pairs relevant for copytracing
    testing. It reports values for some of the parameters that impact copy
    tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # The plain-text table gains two extra columns (renames, time) when
    # --timing is requested; the format strings below must stay in sync
    # with the keys stored in ``data`` further down.
    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d "
            "%(nbrenamedfiles)12d %(time)18.5f\n"
        )
        header_names = (
            "source",
            "destination",
            "nb-revs",
            "nb-files",
            "nb-renames",
            "time",
        )
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d\n"
        )
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    # default to scanning the whole repository
    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        # accumulators for the statistics summary printed at the end
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['nbrenames'] = []
            alldata['time'] = []

    # only merge commits provide interesting (base, parent) pairs
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    # no files to trace between this pair: nothing to report
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
                    b'nbmissingfiles': len(missing),
                }
                if dostats:
                    alldata['nbrevs'].append(
                        (data['nbrevs'], base.hex(), parent.hex(),)
                    )
                    alldata['nbmissingfiles'].append(
                        (data['nbmissingfiles'], base.hex(), parent.hex(),)
                    )
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                    if dostats:
                        alldata['time'].append(
                            (data['time'], base.hex(), parent.hex(),)
                        )
                        alldata['nbrenames'].append(
                            (data['nbrenamedfiles'], base.hex(), parent.hex(),)
                        )
                fm.startitem()
                fm.data(**data)
                # make nodes pretty for the human-readable output
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        fm = ui.formatter(b'perf', opts)
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('nbrenames', 'renamed files'))
            entries.append(('time', 'time'))
        _displaystats(ui, opts, entries, alldata)
2212 2228
2213 2229
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark construction of a case collision auditor"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def d():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(d)
    fm.end()
2220 2236
2221 2237
@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache from the store"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    timer(lambda: store.fncache._load())
    fm.end()
2233 2249
2234 2250
@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark writing the fncache to disk

    The fncache is marked dirty before each timed run so every run performs
    a full serialization. The write goes through a real transaction with a
    backup of the current fncache, so the repository is left untouched.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    # Release the lock and roll back the (never-committed) transaction even
    # if the benchmark raises; the original code leaked both on error.
    try:
        s.fncache._load()
        tr = repo.transaction(b'perffncachewrite')
        try:
            tr.addbackup(b'fncache')

            def d():
                # force a full rewrite on every run
                s.fncache._dirty = True
                s.fncache.write(tr)

            timer(d)
            tr.close()
        finally:
            # no-op after a successful close(); aborts on failure
            tr.release()
    finally:
        lock.release()
    fm.end()
2253 2269
2254 2270
@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark encoding every path currently listed in the fncache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    store.fncache._load()

    def run():
        for path in store.fncache.entries:
            store.encode(path)

    timer(run)
    fm.end()
2268 2284
2269 2285
def _bdiffworker(q, blocks, xdiff, ready, done):
    """worker thread body: diff text pairs pulled from queue ``q``

    A ``None`` item marks the end of a batch; after draining a batch the
    worker parks on the ``ready`` condition until the driver wakes it up,
    and exits once ``done`` is set.
    """
    # The flags are fixed for the lifetime of the worker, so pick the diff
    # routine once instead of re-testing them for every pair.
    if xdiff:
        diff = mdiff.bdiff.xdiffblocks
    elif blocks:
        diff = mdiff.bdiff.blocks
    else:
        diff = mdiff.textdiff
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            diff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done()  # acknowledge the terminating None
        with ready:
            ready.wait()
2285 2301
2286 2302
def _manifestrevision(repo, mnode):
    """return the raw manifest text for node ``mnode``

    Works with both modern manifestlog objects (exposing ``getstorage``)
    and older ones that expose ``_revlog`` directly.
    """
    ml = repo.manifestlog
    if util.safehasattr(ml, b'getstorage'):
        return ml.getstorage(b'').revision(mnode)
    return ml._revlog.revision(mnode)
2296 2312
2297 2313
@command(
    b'perfbdiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
        (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
        (b'', b'blocks', False, b'test computing diffs into blocks'),
        (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],
    b'-c|-m|FILE REV',
)
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    # with -c/-m the first positional argument is actually the revision
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    # gather all (old, new) text pairs up front so only the diffing is timed
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:

        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)

    else:
        # spin up the worker pool once; each timed run feeds it one batch
        # terminated by one None sentinel per thread
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(
                target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
            ).start()
        q.join()

        def d():
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # wake the workers one last time so they observe ``done`` and exit
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
2412 2428
2413 2429
@command(
    b'perfunidiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ],
    b'-c|-m|FILE REV',
)
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    # with -c/-m the first positional argument is actually the revision
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    # gather all (old, new) text pairs up front so only the diffing is timed
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False
            )
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
2492 2508
2493 2509
@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # short flag -> commands.diff keyword argument
    flagnames = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    # benchmark each whitespace-handling combination separately
    for flags in ('', 'w', 'b', 'B', 'wB'):
        diffopts = dict((flagnames[flag], b'1') for flag in flags)

        def d():
            ui.pushbuffer()
            commands.diff(ui, repo, **diffopts)
            ui.popbuffer()

        flagbytes = flags.encode('ascii')
        if flagbytes:
            title = b'diffopts: %s' % (b'-' + flagbytes)
        else:
            title = b'diffopts: %s' % b'none'
        timer(d, title=title)
    fm.end()
2517 2533
2518 2534
@command(b'perfrevlogindex', revlogopts + formatteropts, b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    indexfile = rl.indexfile
    data = opener.read(indexfile)

    # the low 16 bits of the header word carry the revlog format version
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        inline = header & (1 << 16)
    else:
        raise error.Abort(b'unsupported revlog version: %d' % version)

    rllen = len(rl)

    # sample nodes at fixed fractions of the revlog for the lookup benches
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        revlog.revlog(opener, indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = revlogio.parseindex(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        nodemap = revlogio.parseindex(data, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        nodemap = revlogio.parseindex(data, inline)[1]
        if nodemap is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    # each entry is (callable, human-readable title); each gets its own timer
    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
        (
            lambda: resolvenodes(allnodesrev, 2),
            b'look up all nodes 2x (reverse)',
        ),
        (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
        (
            lambda: getentries(allrevs, 2),
            b'retrieve all index entries 2x (forward)',
        ),
        (
            lambda: getentries(allrevsrev),
            b'retrieve all index entries (reverse)',
        ),
        (
            lambda: getentries(allrevsrev, 2),
            b'retrieve all index entries 2x (reverse)',
        ),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2639 2655
2640 2656
@command(
    b'perfrevlogrevisions',
    revlogopts
    + formatteropts
    + [
        (b'd', b'dist', 100, b'distance between the revisions'),
        (b's', b'startrev', 0, b'revision to start reading at'),
        (b'', b'reverse', False, b'read in reverse'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogrevisions(
    ui, repo, file_=None, startrev=0, reverse=False, **opts
):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # a negative start revision counts back from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev

    def d():
        rl.clearcaches()

        first = startrev
        last = rllen
        step = opts[b'dist']

        if reverse:
            first, last = last - 1, first - 1
            step = -step

        for x in _xrange(first, last, step):
            # Old revisions don't support passing int.
            rl.revision(rl.node(x))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
2689 2705
2690 2706
@command(
    b'perfrevlogwrite',
    revlogopts
    + formatteropts
    + [
        (b's', b'startrev', 1000, b'revision to start writing at'),
        (b'', b'stoprev', -1, b'last revision to write'),
        (b'', b'count', 3, b'number of passes to perform'),
        (b'', b'details', False, b'print timing for every revisions tested'),
        (b'', b'source', b'full', b'the kind of data feed in the revlog'),
        (b'', b'lazydeltabase', True, b'try the provided delta first'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
                  (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    Note: This performance command measures performance in a custom way. As a
    result some of the global configuration of the 'perf' command does not
    apply to it:

    * ``pre-run``: disabled

    * ``profile-benchmark``: disabled

    * ``run-limits``: disabled use --count instead
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative boundaries count back from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (
        b'full',
        b'parent-1',
        b'parent-2',
        b'parent-smallest',
        b'storage',
    )
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(
            ui,
            rl,
            source,
            startrev,
            stoprev,
            c + 1,
            lazydeltabase=lazydeltabase,
            clearcaches=clearcaches,
        )
        allresults.append(timing)

    ### consolidate the results in a single list
    # each entry becomes (rev, [timing-of-pass-1, timing-of-pass-2, ...])
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display
    # (the 50% entry previously used ``* 70 // 100`` and reported the 70th
    # percentile under the "50%" label; fixed to use 50)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append(
            (
                sum(x[1][0] for x in item),
                sum(x[1][1] for x in item),
                sum(x[1][2] for x in item),
            )
        )
    formatone(
        fm,
        totaltime,
        title="total time (%d revs)" % resultcount,
        displayall=displayall,
    )
    fm.end()
2832 2848
2833 2849
class _faketr(object):
    """transaction stub whose ``add`` silently ignores journal entries"""

    def add(self, x, y, z=None):
        # no-op: benchmark rewrites do not need a real journal
        return None
2837 2853
2838 2854
def _timeonewrite(
    ui,
    orig,
    source,
    startrev,
    stoprev,
    runidx=None,
    lazydeltabase=True,
    clearcaches=True,
):
    # One full benchmark pass for perfrevlogwrite: re-add revisions
    # startrev..stoprev of ``orig`` to a temporary revlog copy, timing each
    # addition individually. Returns a list of (rev, timing) pairs.
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)

            def updateprogress(pos):
                progress.update(pos)

            def completeprogress():
                progress.complete()

        else:

            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)

            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                # measure cold-cache behavior on every addition
                dest.index.clearcaches()
                dest.clearcaches()
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
2888 2904
2889 2905
def _getrevisionseed(orig, rev, tr, source):
    """build the ``(args, kwargs)`` pair used to re-add revision ``rev``

    ``source`` selects what is fed to ``addrawrevision``: a full text
    (``full``), a delta against one of the parents (``parent-1``,
    ``parent-2``, ``parent-smallest``) or the currently stored delta
    (``storage``).
    """
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    text = None
    cachedelta = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        cachedelta = (orig.rev(p1), orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to p1 when there is no second parent
        parent = p1 if p2 == nullid else p2
        cachedelta = (orig.rev(parent), orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        parent = p1
        diff = orig.revdiff(p1, rev)
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(p2diff) < len(diff):
                parent = p2
                diff = p2diff
        cachedelta = (orig.rev(parent), diff)
    elif source == b'storage':
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return (
        (text, tr, linkrev, p1, p2),
        {'node': node, 'flags': flags, 'cachedelta': cachedelta},
    )
2930 2946
2931 2947
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    # Context manager yielding a throwaway copy of revlog ``orig`` truncated
    # to ``truncaterev``, so revisions >= truncaterev can be re-added without
    # touching the real repository. The temporary directory is removed on
    # exit, even on error.
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    k = 'upperboundcomp'
    # forward upperboundcomp when the local revlog implementation supports it
    if util.safehasattr(orig, k):
        revlogkwargs[k] = getattr(orig, k)

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(
            vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
        )
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
2982 2998
2983 2999
@command(
    b'perfrevlogchunks',
    revlogopts
    + formatteropts
    + [
        (b'e', b'engines', b'', b'compression engines to use'),
        (b's', b'startrev', 0, b'revision to start at'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = set(e.strip() for e in engines.split(b','))
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # default to every engine that is available and can actually
        # compress (probed with a dummy payload)
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # raw file handle on whichever file holds the revision data
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        # one segment read per revision, no reused file descriptor
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        # same as doread() but reusing a single file descriptor
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        # a single segment read spanning every revision
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        # read + decompress each revision's chunk individually
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    # dochunkbatch() stashes its result here so docompress() can reuse it
    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        # recompress the chunks captured by dochunkbatch() with `compressor`
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append(
            (
                functools.partial(docompress, compressor),
                b'compress w/ %s' % engine,
            )
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
    fm.end()
3111 3127
3112 3128
@command(
    b'perfrevlogrevision',
    revlogopts
    + formatteropts
    + [(b'', b'cache', False, b'use caches instead of clearing')],
    b'-c|-m|FILE REV',
)
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    # with -c/-m the sole positional argument is actually the revision
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # slice each read segment into the per-revision compressed chunks
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with the data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        # NOTE(review): iterates the precomputed `slicedchain`, not the
        # `chain` argument
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    # slicechunk moved between modules across Mercurial versions
    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    # precompute each intermediate product once so every phase can be
    # benchmarked in isolation
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend(
        [
            (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
            (lambda: dodecompress(rawchunks), b'decompress'),
            (lambda: dopatch(text, bins), b'patch'),
            (lambda: dohash(text), b'hash'),
        ]
    )

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
3254 3270
3255 3271
@command(
    b'perfrevset',
    [
        (b'C', b'clear', False, b'clear volatile cache between each call.'),
        (b'', b'contexts', False, b'obtain changectx for each revision'),
    ]
    + formatteropts,
    b"REVSET",
)
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if need to evaluate the impact of build volatile
    revisions set cache on the revset execution. Volatile cache hold filtered
    and obsolete related cache."""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def run_once():
        # optionally drop the volatile sets so each run starts cold
        if clear:
            repo.invalidatevolatilesets()
        # exhaust the revset, either as changectx objects or as plain revs
        results = repo.set(expr) if contexts else repo.revs(expr)
        for _ in results:
            pass

    timer(run_once)
    fm.end()
3287 3303
3288 3304
@command(
    b'perfvolatilesets',
    [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),]
    + formatteropts,
)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def getobs(name):
        # build a closure benchmarking one obsolescence set computation
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            obsolete.getrevs(repo, name)

        return d

    allobs = sorted(obsolete.cachefuncs)
    if names:
        # restrict to the sets explicitly requested on the command line
        allobs = [n for n in allobs if n in names]

    for name in allobs:
        timer(getobs(name), title=name)

    def getfiltered(name):
        # build a closure benchmarking one repoview filter computation
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            repoview.filterrevs(repo, name)

        return d

    allfilter = sorted(repoview.filtertable)
    if names:
        allfilter = [n for n in allfilter if n in names]

    for name in allfilter:
        timer(getfiltered(name), title=name)
    fm.end()
3334 3350
3335 3351
@command(
    b'perfbranchmap',
    [
        (b'f', b'full', False, b'Includes build time of subset'),
        (
            b'',
            b'clear-revbranch',
            False,
            b'purge the revbranch cache between computation',
        ),
    ]
    + formatteropts,
)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)

    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches

        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # drop every cached branchmap: rebuild from scratch
                view._branchcaches.clear()
            else:
                # drop only this filter's entry so the nearest subset is reused
                filtered.pop(filtername, None)
            view.branchmap()

        return d

    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable on-disk branchmap reads/writes so only in-memory work is timed
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=str(printname))
    finally:
        # always restore the patched branchmap I/O entry points
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
3425 3441
3426 3442
@command(
    b'perfbranchmapupdate',
    [
        (b'', b'base', [], b'subset of revision to start from'),
        (b'', b'target', [], b'subset of revision to end with'),
        (b'', b'clear-caches', False, b'clear cache between each runs'),
    ]
    + formatteropts,
)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from for <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    # update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None]  # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        # install two throwaway repoview filters exposing exactly the
        # base and target revision sets
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset where found
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            # each run starts from a fresh copy of the base branchmap
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        # remove the temporary filters whatever happened above
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3535 3551
3536 3552
@command(
    b'perfbranchmapload',
    [
        (b'f', b'filter', b'', b'Specify repoview filter'),
        # fix typo in help text: "brachmap" -> "branchmap"
        (b'', b'list', False, b'List branchmap filter caches'),
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ]
    + formatteropts,
)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # just enumerate the on-disk branchmap cache files and their sizes
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(
                    b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
                )
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap()  # make sure we have a relevant, up to date branchmap

    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached; walk up the
    # subset chain until a filter with an on-disk cache is found
    while fromfile(repo) is None:
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(
                b'No branchmap cached for %s repo' % (filter or b'unfiltered')
            )
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)

    def setup():
        if clearrevlogs:
            clearchangelog(repo)

    def bench():
        fromfile(repo)

    timer(bench, setup=setup)
    fm.end()
3595 3611
3596 3612
@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    store_vfs = getsvfs(repo)

    def count_markers():
        # parsing happens in the obsstore constructor; len() reports how
        # many markers were loaded
        return len(obsolete.obsstore(store_vfs))

    timer(count_markers)
    fm.end()
3606 3622
3607 3623
@command(
    b'perflrucachedict',
    formatteropts
    + [
        (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
        (b'', b'mincost', 0, b'smallest cost of items in cache'),
        (b'', b'maxcost', 100, b'maximum cost of items in cache'),
        (b'', b'size', 4, b'size of cache'),
        (b'', b'gets', 10000, b'number of key lookups'),
        (b'', b'sets', 10000, b'number of key sets'),
        (b'', b'mixed', 10000, b'number of mixed mode operations'),
        (
            b'',
            b'mixedgetfreq',
            50,
            b'frequency of get vs set ops in mixed mode',
        ),
    ],
    norepo=True,
)
def perflrucache(
    ui,
    mincost=0,
    maxcost=100,
    costlimit=0,
    size=4,
    gets=10000,
    sets=10000,
    mixed=10000,
    mixedgetfreq=50,
    **opts
):
    """benchmark ``util.lrucachedict`` operations.

    Measures construction, pure-get, pure-insert/set and mixed get/set
    workloads.  When ``costlimit`` is non-zero, the cost-aware variants
    (``insert(..., cost=...)`` with a ``maxcost`` bound) are benchmarked
    instead of the plain ones.
    """
    opts = _byteskwargs(opts)

    def doinit():
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value  # silence pyflakes warning

    def dogetscost():
        # cost-limited variant: entries may be evicted, so misses are expected
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value  # silence pyflakes warning
            except KeyError:
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        # same workload as doinserts() but via __setitem__
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0
        else:
            op = 1

        mixedops.append(
            (op, random.randint(0, size * 2), random.choice(costrange))
        )

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    if costlimit:
        benches.extend(
            [
                (dogetscost, b'gets w/ cost limit'),
                (doinsertscost, b'inserts w/ cost limit'),
                (domixedcost, b'mixed w/ cost limit'),
            ]
        )
    else:
        benches.extend(
            [
                (dogets, b'gets'),
                (doinserts, b'inserts'),
                (dosets, b'sets'),
                (domixed, b'mixed'),
            ]
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3762 3778
3763 3779
@command(b'perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write
    """
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    # bind the method once so the loop measures write cost, not lookup cost
    emit = ui.writenoi18n

    def write():
        for _ in range(100000):
            emit(b'Testing write performance\n')

    timer(write)
    fm.end()
3778 3794
3779 3795
def uisetup(ui):
    """Extension setup hook.

    On very old Mercurial versions (where ``cmdutil.openrevlog`` exists
    but ``commands.debugrevlogopts`` does not), wrap ``openrevlog`` so
    that the unsupported ``--dir`` option aborts with a clear message
    instead of failing obscurely.
    """
    if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
        commands, b'debugrevlogopts'
    ):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(
                    b"This version doesn't support --dir option",
                    hint=b"use 3.5 or later",
                )
            return orig(repo, cmd, file_, opts)

        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
3798 3814
3799 3815
@command(
    b'perfprogress',
    formatteropts
    + [
        (b'', b'topic', b'topic', b'topic for progress messages'),
        (b'c', b'total', 1000000, b'total value we are progressing to'),
    ],
    norepo=True,
)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def tick_progress_bar():
        # drive one progress bar through `total` increments
        with ui.makeprogress(topic, total=total) as progress:
            for _ in _xrange(total):
                progress.increment()

    timer(tick_progress_bar)
    fm.end()
@@ -1,398 +1,399 b''
1 1 #require test-repo
2 2
3 3 Set vars:
4 4
5 5 $ . "$TESTDIR/helpers-testrepo.sh"
6 6 $ CONTRIBDIR="$TESTDIR/../contrib"
7 7
8 8 Prepare repo:
9 9
10 10 $ hg init
11 11
12 12 $ echo this is file a > a
13 13 $ hg add a
14 14 $ hg commit -m first
15 15
16 16 $ echo adding to file a >> a
17 17 $ hg commit -m second
18 18
19 19 $ echo adding more to file a >> a
20 20 $ hg commit -m third
21 21
22 22 $ hg up -r 0
23 23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 24 $ echo merge-this >> a
25 25 $ hg commit -m merge-able
26 26 created new head
27 27
28 28 $ hg up -r 2
29 29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
30 30
31 31 perfstatus
32 32
33 33 $ cat >> $HGRCPATH << EOF
34 34 > [extensions]
35 35 > perf=$CONTRIBDIR/perf.py
36 36 > [perf]
37 37 > presleep=0
38 38 > stub=on
39 39 > parentscount=1
40 40 > EOF
41 41 $ hg help -e perf
42 42 perf extension - helper extension to measure performance
43 43
44 44 Configurations
45 45 ==============
46 46
47 47 "perf"
48 48 ------
49 49
50 50 "all-timing"
51 51 When set, additional statistics will be reported for each benchmark: best,
52 52 worst, median average. If not set only the best timing is reported
53 53 (default: off).
54 54
55 55 "presleep"
56 56 number of second to wait before any group of runs (default: 1)
57 57
58 58 "pre-run"
59 59 number of run to perform before starting measurement.
60 60
61 61 "profile-benchmark"
62 62 Enable profiling for the benchmarked section. (The first iteration is
63 63 benchmarked)
64 64
65 65 "run-limits"
66 66 Control the number of runs each benchmark will perform. The option value
67 67 should be a list of '<time>-<numberofrun>' pairs. After each run the
68 68 conditions are considered in order with the following logic:
69 69
70 70 If benchmark has been running for <time> seconds, and we have performed
71 71 <numberofrun> iterations, stop the benchmark,
72 72
73 73 The default value is: '3.0-100, 10.0-3'
74 74
75 75 "stub"
76 76 When set, benchmarks will only be run once, useful for testing (default:
77 77 off)
78 78
79 79 list of commands:
80 80
81 81 perfaddremove
82 82 (no help text available)
83 83 perfancestors
84 84 (no help text available)
85 85 perfancestorset
86 86 (no help text available)
87 87 perfannotate (no help text available)
88 88 perfbdiff benchmark a bdiff between revisions
89 89 perfbookmarks
90 90 benchmark parsing bookmarks from disk to memory
91 91 perfbranchmap
92 92 benchmark the update of a branchmap
93 93 perfbranchmapload
94 94 benchmark reading the branchmap
95 95 perfbranchmapupdate
96 96 benchmark branchmap update from for <base> revs to <target>
97 97 revs
98 98 perfbundleread
99 99 Benchmark reading of bundle files.
100 100 perfcca (no help text available)
101 101 perfchangegroupchangelog
102 102 Benchmark producing a changelog group for a changegroup.
103 103 perfchangeset
104 104 (no help text available)
105 105 perfctxfiles (no help text available)
106 106 perfdiffwd Profile diff of working directory changes
107 107 perfdirfoldmap
108 108 benchmap a 'dirstate._map.dirfoldmap.get()' request
109 109 perfdirs (no help text available)
110 110 perfdirstate benchmap the time of various distate operations
111 111 perfdirstatedirs
112 112 benchmap a 'dirstate.hasdir' call from an empty 'dirs' cache
113 113 perfdirstatefoldmap
114 114 benchmap a 'dirstate._map.filefoldmap.get()' request
115 115 perfdirstatewrite
116 116 benchmap the time it take to write a dirstate on disk
117 117 perfdiscovery
118 118 benchmark discovery between local repo and the peer at given
119 119 path
120 120 perffncacheencode
121 121 (no help text available)
122 122 perffncacheload
123 123 (no help text available)
124 124 perffncachewrite
125 125 (no help text available)
126 126 perfheads benchmark the computation of a changelog heads
127 127 perfhelper-mergecopies
128 128 find statistics about potential parameters for
129 129 'perfmergecopies'
130 130 perfhelper-pathcopies
131 131 find statistic about potential parameters for the
132 132 'perftracecopies'
133 133 perfignore benchmark operation related to computing ignore
134 134 perfindex benchmark index creation time followed by a lookup
135 135 perflinelogedits
136 136 (no help text available)
137 137 perfloadmarkers
138 138 benchmark the time to parse the on-disk markers for a repo
139 139 perflog (no help text available)
140 140 perflookup (no help text available)
141 141 perflrucachedict
142 142 (no help text available)
143 143 perfmanifest benchmark the time to read a manifest from disk and return a
144 144 usable
145 145 perfmergecalculate
146 146 (no help text available)
147 147 perfmergecopies
148 148 measure runtime of 'copies.mergecopies'
149 149 perfmoonwalk benchmark walking the changelog backwards
150 150 perfnodelookup
151 151 (no help text available)
152 152 perfnodemap benchmark the time necessary to look up revision from a cold
153 153 nodemap
154 154 perfparents benchmark the time necessary to fetch one changeset's parents.
155 155 perfpathcopies
156 156 benchmark the copy tracing logic
157 157 perfphases benchmark phasesets computation
158 158 perfphasesremote
159 159 benchmark time needed to analyse phases of the remote server
160 160 perfprogress printing of progress bars
161 161 perfrawfiles (no help text available)
162 162 perfrevlogchunks
163 163 Benchmark operations on revlog chunks.
164 164 perfrevlogindex
165 165 Benchmark operations against a revlog index.
166 166 perfrevlogrevision
167 167 Benchmark obtaining a revlog revision.
168 168 perfrevlogrevisions
169 169 Benchmark reading a series of revisions from a revlog.
170 170 perfrevlogwrite
171 171 Benchmark writing a series of revisions to a revlog.
172 172 perfrevrange (no help text available)
173 173 perfrevset benchmark the execution time of a revset
174 174 perfstartup (no help text available)
175 175 perfstatus benchmark the performance of a single status call
176 176 perftags (no help text available)
177 177 perftemplating
178 178 test the rendering time of a given template
179 179 perfunidiff benchmark a unified diff between revisions
180 180 perfvolatilesets
181 181 benchmark the computation of various volatile set
182 182 perfwalk (no help text available)
183 183 perfwrite microbenchmark ui.write
184 184
185 185 (use 'hg help -v perf' to show built-in aliases and global options)
186 186 $ hg perfaddremove
187 187 $ hg perfancestors
188 188 $ hg perfancestorset 2
189 189 $ hg perfannotate a
190 190 $ hg perfbdiff -c 1
191 191 $ hg perfbdiff --alldata 1
192 192 $ hg perfunidiff -c 1
193 193 $ hg perfunidiff --alldata 1
194 194 $ hg perfbookmarks
195 195 $ hg perfbranchmap
196 196 $ hg perfbranchmapload
197 197 $ hg perfbranchmapupdate --base "not tip" --target "tip"
198 198 benchmark of branchmap with 3 revisions with 1 new ones
199 199 $ hg perfcca
200 200 $ hg perfchangegroupchangelog
201 201 $ hg perfchangegroupchangelog --cgversion 01
202 202 $ hg perfchangeset 2
203 203 $ hg perfctxfiles 2
204 204 $ hg perfdiffwd
205 205 $ hg perfdirfoldmap
206 206 $ hg perfdirs
207 207 $ hg perfdirstate
208 208 $ hg perfdirstate --contains
209 209 $ hg perfdirstate --iteration
210 210 $ hg perfdirstatedirs
211 211 $ hg perfdirstatefoldmap
212 212 $ hg perfdirstatewrite
213 213 #if repofncache
214 214 $ hg perffncacheencode
215 215 $ hg perffncacheload
216 216 $ hg debugrebuildfncache
217 217 fncache already up to date
218 218 $ hg perffncachewrite
219 219 $ hg debugrebuildfncache
220 220 fncache already up to date
221 221 #endif
222 222 $ hg perfheads
223 223 $ hg perfignore
224 224 $ hg perfindex
225 225 $ hg perflinelogedits -n 1
226 226 $ hg perfloadmarkers
227 227 $ hg perflog
228 228 $ hg perflookup 2
229 229 $ hg perflrucache
230 230 $ hg perfmanifest 2
231 231 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
232 232 $ hg perfmanifest -m 44fe2c8352bb
233 233 abort: manifest revision must be integer or full node
234 234 [255]
235 235 $ hg perfmergecalculate -r 3
236 236 $ hg perfmoonwalk
237 237 $ hg perfnodelookup 2
238 238 $ hg perfpathcopies 1 2
239 239 $ hg perfprogress --total 1000
240 240 $ hg perfrawfiles 2
241 241 $ hg perfrevlogindex -c
242 242 #if reporevlogstore
243 243 $ hg perfrevlogrevisions .hg/store/data/a.i
244 244 #endif
245 245 $ hg perfrevlogrevision -m 0
246 246 $ hg perfrevlogchunks -c
247 247 $ hg perfrevrange
248 248 $ hg perfrevset 'all()'
249 249 $ hg perfstartup
250 250 $ hg perfstatus
251 $ hg perfstatus --dirstate
251 252 $ hg perftags
252 253 $ hg perftemplating
253 254 $ hg perfvolatilesets
254 255 $ hg perfwalk
255 256 $ hg perfparents
256 257 $ hg perfdiscovery -q .
257 258
258 259 Test run control
259 260 ----------------
260 261
261 262 Simple single entry
262 263
263 264 $ hg perfparents --config perf.stub=no --config perf.run-limits='0.000000001-15'
264 265 ! wall * comb * user * sys * (best of 15) (glob)
265 266
266 267 Multiple entries
267 268
268 269 $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-5'
269 270 ! wall * comb * user * sys * (best of 5) (glob)
270 271
271 272 error case are ignored
272 273
273 274 $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-5'
274 275 malformatted run limit entry, missing "-": 500
275 276 ! wall * comb * user * sys * (best of 5) (glob)
276 277 $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-12, 0.000000001-5'
277 278 malformatted run limit entry, could not convert string to float: aaa: aaa-12 (no-py3 !)
278 279 malformatted run limit entry, could not convert string to float: 'aaa': aaa-12 (py3 !)
279 280 ! wall * comb * user * sys * (best of 5) (glob)
280 281 $ hg perfparents --config perf.stub=no --config perf.run-limits='12-aaaaaa, 0.000000001-5'
281 282 malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 12-aaaaaa
282 283 ! wall * comb * user * sys * (best of 5) (glob)
283 284
284 285 test actual output
285 286 ------------------
286 287
287 288 normal output:
288 289
289 290 $ hg perfheads --config perf.stub=no
290 291 ! wall * comb * user * sys * (best of *) (glob)
291 292
292 293 detailed output:
293 294
294 295 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
295 296 ! wall * comb * user * sys * (best of *) (glob)
296 297 ! wall * comb * user * sys * (max of *) (glob)
297 298 ! wall * comb * user * sys * (avg of *) (glob)
298 299 ! wall * comb * user * sys * (median of *) (glob)
299 300
300 301 test json output
301 302 ----------------
302 303
303 304 normal output:
304 305
305 306 $ hg perfheads --template json --config perf.stub=no
306 307 [
307 308 {
308 309 "comb": *, (glob)
309 310 "count": *, (glob)
310 311 "sys": *, (glob)
311 312 "user": *, (glob)
312 313 "wall": * (glob)
313 314 }
314 315 ]
315 316
316 317 detailed output:
317 318
318 319 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
319 320 [
320 321 {
321 322 "avg.comb": *, (glob)
322 323 "avg.count": *, (glob)
323 324 "avg.sys": *, (glob)
324 325 "avg.user": *, (glob)
325 326 "avg.wall": *, (glob)
326 327 "comb": *, (glob)
327 328 "count": *, (glob)
328 329 "max.comb": *, (glob)
329 330 "max.count": *, (glob)
330 331 "max.sys": *, (glob)
331 332 "max.user": *, (glob)
332 333 "max.wall": *, (glob)
333 334 "median.comb": *, (glob)
334 335 "median.count": *, (glob)
335 336 "median.sys": *, (glob)
336 337 "median.user": *, (glob)
337 338 "median.wall": *, (glob)
338 339 "sys": *, (glob)
339 340 "user": *, (glob)
340 341 "wall": * (glob)
341 342 }
342 343 ]
343 344
344 345 Test pre-run feature
345 346 --------------------
346 347
347 348 (perf discovery has some spurious output)
348 349
349 350 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=0
350 351 ! wall * comb * user * sys * (best of 1) (glob)
351 352 searching for changes
352 353 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=1
353 354 ! wall * comb * user * sys * (best of 1) (glob)
354 355 searching for changes
355 356 searching for changes
356 357 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=3
357 358 ! wall * comb * user * sys * (best of 1) (glob)
358 359 searching for changes
359 360 searching for changes
360 361 searching for changes
361 362 searching for changes
362 363
363 364 test profile-benchmark option
364 365 ------------------------------
365 366
366 367 Function to check that statprof ran
367 368 $ statprofran () {
368 369 > egrep 'Sample count:|No samples recorded' > /dev/null
369 370 > }
370 371 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.profile-benchmark=yes 2>&1 | statprofran
371 372
372 373 Check perf.py for historical portability
373 374 ----------------------------------------
374 375
375 376 $ cd "$TESTDIR/.."
376 377
377 378 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
378 379 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
379 380 > "$TESTDIR"/check-perf-code.py contrib/perf.py
380 381 contrib/perf.py:\d+: (re)
381 382 > from mercurial import (
382 383 import newer module separately in try clause for early Mercurial
383 384 contrib/perf.py:\d+: (re)
384 385 > from mercurial import (
385 386 import newer module separately in try clause for early Mercurial
386 387 contrib/perf.py:\d+: (re)
387 388 > origindexpath = orig.opener.join(orig.indexfile)
388 389 use getvfs()/getsvfs() for early Mercurial
389 390 contrib/perf.py:\d+: (re)
390 391 > origdatapath = orig.opener.join(orig.datafile)
391 392 use getvfs()/getsvfs() for early Mercurial
392 393 contrib/perf.py:\d+: (re)
393 394 > vfs = vfsmod.vfs(tmpdir)
394 395 use getvfs()/getsvfs() for early Mercurial
395 396 contrib/perf.py:\d+: (re)
396 397 > vfs.options = getattr(orig.opener, 'options', None)
397 398 use getvfs()/getsvfs() for early Mercurial
398 399 [1]
General Comments 0
You need to be logged in to leave comments. Login now