##// END OF EJS Templates
perf: document `perfdirfoldmap`
marmoute -
r43397:0b32206c default
parent child Browse files
Show More
@@ -1,3768 +1,3772
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance
3 3
4 4 Configurations
5 5 ==============
6 6
7 7 ``perf``
8 8 --------
9 9
10 10 ``all-timing``
11 11 When set, additional statistics will be reported for each benchmark: best,
12 12 worst, median average. If not set, only the best timing is reported
13 13 (default: off).
14 14
15 15 ``presleep``
16 16 number of seconds to wait before any group of runs (default: 1)
17 17
18 18 ``pre-run``
19 19 number of runs to perform before starting measurement.
20 20
21 21 ``profile-benchmark``
22 22 Enable profiling for the benchmarked section.
23 23 (The first iteration is benchmarked)
24 24
25 25 ``run-limits``
26 26 Control the number of runs each benchmark will perform. The option value
27 27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 28 conditions are considered in order with the following logic:
29 29
30 30 If the benchmark has been running for <time> seconds, and we have performed
31 31 <numberofrun> iterations, stop the benchmark.
32 32
33 33 The default value is: `3.0-100, 10.0-3`
34 34
35 35 ``stub``
36 36 When set, benchmarks will only be run once, useful for testing
37 37 (default: off)
38 38 '''
39 39
40 40 # "historical portability" policy of perf.py:
41 41 #
42 42 # We have to do:
43 43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 46 # - make historical perf command work correctly with as wide Mercurial
47 47 # version as possible
48 48 #
49 49 # We have to do, if possible with reasonable cost:
50 50 # - make recent perf command for historical feature work correctly
51 51 # with early Mercurial
52 52 #
53 53 # We don't have to do:
54 54 # - make perf command for recent feature work correctly with early
55 55 # Mercurial
56 56
57 57 from __future__ import absolute_import
58 58 import contextlib
59 59 import functools
60 60 import gc
61 61 import os
62 62 import random
63 63 import shutil
64 64 import struct
65 65 import sys
66 66 import tempfile
67 67 import threading
68 68 import time
69 69 from mercurial import (
70 70 changegroup,
71 71 cmdutil,
72 72 commands,
73 73 copies,
74 74 error,
75 75 extensions,
76 76 hg,
77 77 mdiff,
78 78 merge,
79 79 revlog,
80 80 util,
81 81 )
82 82
83 83 # for "historical portability":
84 84 # try to import modules separately (in dict order), and ignore
85 85 # failure, because these aren't available with early Mercurial
86 86 try:
87 87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 88 except ImportError:
89 89 pass
90 90 try:
91 91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 92 except ImportError:
93 93 pass
94 94 try:
95 95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96 96
97 97 dir(registrar) # forcibly load it
98 98 except ImportError:
99 99 registrar = None
100 100 try:
101 101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 102 except ImportError:
103 103 pass
104 104 try:
105 105 from mercurial.utils import repoviewutil # since 5.0
106 106 except ImportError:
107 107 repoviewutil = None
108 108 try:
109 109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 110 except ImportError:
111 111 pass
112 112 try:
113 113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 114 except ImportError:
115 115 pass
116 116
117 117 try:
118 118 from mercurial import profiling
119 119 except ImportError:
120 120 profiling = None
121 121
122 122
def identity(a):
    """Return *a* unchanged.

    Used below as a fallback for pycompat helpers (``byteskwargs``,
    ``fsencode``) on Mercurial versions that lack them.
    """
    return a
125 125
126 126
try:
    from mercurial import pycompat

    getargspec = pycompat.getargspec  # added to module after 4.5
    _byteskwargs = pycompat.byteskwargs  # since 4.1 (or fbc3f73dc802)
    _sysstr = pycompat.sysstr  # since 4.0 (or 2219f4f82ede)
    _bytestr = pycompat.bytestr  # since 4.2 (or b70407bd84d5)
    _xrange = pycompat.xrange  # since 4.8 (or 7eba8f83129b)
    fsencode = pycompat.fsencode  # since 3.9 (or f4a5e0e86a7e)
    if pycompat.ispy3:
        _maxint = sys.maxsize  # per py3 docs for replacing maxint
    else:
        _maxint = sys.maxint
except (NameError, ImportError, AttributeError):
    # no (or too old) pycompat: the fallbacks below use py2-only names
    # (xrange, sys.maxint), so this branch implies a python2 Mercurial
    import inspect

    getargspec = inspect.getargspec
    _byteskwargs = identity
    _bytestr = str
    fsencode = identity  # no py3 support
    _maxint = sys.maxint  # no py3 support
    _sysstr = lambda x: x  # no py3 support
    _xrange = xrange

try:
    # 4.7+
    queue = pycompat.queue.Queue
except (NameError, AttributeError, ImportError):
    # <4.7.
    try:
        queue = pycompat.queue
    except (NameError, AttributeError, ImportError):
        import Queue as queue

try:
    from mercurial import logcmdutil

    makelogtemplater = logcmdutil.maketemplater
except (AttributeError, ImportError):
    try:
        makelogtemplater = cmdutil.makelogtemplater
    except (AttributeError, ImportError):
        makelogtemplater = None
170 170
171 171 # for "historical portability":
172 172 # define util.safehasattr forcibly, because util.safehasattr has been
173 173 # available since 1.9.3 (or 94b200a11cf7)
_undefined = object()  # sentinel: distinguishes a missing attribute from None


def safehasattr(thing, attr):
    """Return True if *thing* has attribute *attr* (given as bytes)."""
    # attr is bytes throughout this file; getattr() needs a native str
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined


setattr(util, 'safehasattr', safehasattr)
182 182
# for "historical portability":
# define util.timer forcibly, because util.timer has been available
# since ae5d60bb70c9
if safehasattr(time, 'perf_counter'):
    # python >= 3.3: monotonic clock with the best available resolution
    util.timer = time.perf_counter
elif os.name == b'nt':
    util.timer = time.clock
else:
    util.timer = time.time
192 192
# for "historical portability":
# use locally defined empty option list, if formatteropts isn't
# available, because commands.formatteropts has been available since
# 3.2 (or 7a7eed5176a4), even though formatting itself has been
# available since 2.2 (or ae5f92e154d3)
formatteropts = getattr(
    cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
)

# for "historical portability":
# use locally defined option list, if debugrevlogopts isn't available,
# because commands.debugrevlogopts has been available since 3.7 (or
# 5606f7d0d063), even though cmdutil.openrevlog() has been available
# since 1.9 (or a79fea6b3e77).
revlogopts = getattr(
    cmdutil,
    "debugrevlogopts",
    getattr(
        commands,
        "debugrevlogopts",
        [
            (b'c', b'changelog', False, b'open changelog'),
            (b'm', b'manifest', False, b'open manifest'),
            (b'', b'dir', False, b'open directory manifest'),
        ],
    ),
)

# command table, populated by the @command decorator defined below
cmdtable = {}
222 222
223 223 # for "historical portability":
224 224 # define parsealiases locally, because cmdutil.parsealiases has been
225 225 # available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a command spec like ``b"name|alias1|alias2"`` into a list."""
    aliases = cmd.split(b"|")
    return aliases
229 229
# pick (or synthesize) the @command decorator used to register the perf
# commands into cmdtable, depending on what this Mercurial provides
if safehasattr(registrar, 'command'):
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if b'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command

        def command(name, options=(), synopsis=None, norepo=False):
            # emulate norepo support by appending to commands.norepo,
            # the mechanism older Mercurial used for repo-less commands
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)


else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func

        return decorator
261 261
262 262
try:
    import mercurial.registrar
    import mercurial.configitems

    configtable = {}
    configitem = mercurial.registrar.configitem(configtable)
    configitem(
        b'perf',
        b'presleep',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'stub',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'parentscount',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf',
        b'all-timing',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
    configitem(
        b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'run-limits',
        default=mercurial.configitems.dynamicdefault,
        experimental=True,
    )
except (ImportError, AttributeError):
    # Mercurial too old to have the configitem registrar: skip registration
    pass
except TypeError:
    # compatibility fix for a11fd395e83f
    # hg version: 5.2
    # re-register every item without the `experimental` keyword, which
    # this version's configitem() rejects (the TypeError caught above)
    configitem(
        b'perf', b'presleep', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'stub', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'parentscount', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'all-timing', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'pre-run', default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf',
        b'profile-benchmark',
        default=mercurial.configitems.dynamicdefault,
    )
    configitem(
        b'perf', b'run-limits', default=mercurial.configitems.dynamicdefault,
    )
335 335
336 336
def getlen(ui):
    """Return a length function honoring the ``perf.stub`` test config.

    Under perf.stub every benchmark pretends its input has one element,
    so test runs stay fast and deterministic.
    """
    stubbed = ui.configbool(b"perf", b"stub", False)
    return (lambda seq: 1) if stubbed else len
341 341
342 342
class noop(object):
    """Context manager that does nothing.

    Stands in for the profiler context when profiling is disabled.
    """

    def __enter__(self):
        return None

    def __exit__(self, *exc_info):
        return None


NOOPCTX = noop()
354 354
355 355
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter(object):
            """Minimized composition of baseformatter and plainformatter
            """

            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short

            def __nonzero__(self):
                # deliberately falsy — presumably mirrors plainformatter
                # so callers can test the formatter for template support
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    # each entry looks like `<float seconds>-<int runcount>`; malformed
    # entries are warned about and skipped
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
            continue
        try:
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    # experimental config: perf.profile-benchmark
    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
    )
    return t, fm
479 479
480 480
def stub_timer(fm, func, setup=None, title=None):
    """Run *func* exactly once, after the optional *setup* callable.

    Replaces _timer when perf.stub is set; *fm* and *title* are accepted
    only for signature compatibility and are ignored.
    """
    for step in (setup, func):
        if step is not None:
            step()
485 485
486 486
@contextlib.contextmanager
def timeone():
    """Yield a list; on exit, append one (wall, user, sys) timing tuple."""
    measurement = []
    os_start = os.times()
    wall_start = util.timer()
    yield measurement
    wall_stop = util.timer()
    os_stop = os.times()
    measurement.append(
        (
            wall_stop - wall_start,
            os_stop[0] - os_start[0],
            os_stop[1] - os_start[1],
        )
    )
497 497
498 498
# list of stop condition (elapsed time, minimal run count)
# _timer stops at the first pair whose elapsed time AND run count
# have both been reached
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)
504 504
505 505
def _timer(
    fm,
    func,
    setup=None,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
):
    """Repeatedly run *func* until a limit is hit, then report via formatone.

    ``setup`` (if given) runs before every invocation of ``func``;
    ``prerun`` extra untimed warm-up runs happen first.  ``limits`` is a
    sequence of (elapsed-seconds, min-run-count) stop conditions.  Only
    the first timed iteration runs under ``profiler``.
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    for i in range(prerun):
        # warm-up runs: neither timed nor profiled
        if setup is not None:
            setup()
        func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with profiler:
            with timeone() as item:
                r = func()
        # profile only the first iteration; subsequent ones use the no-op
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    formatone(fm, results, title=title, result=r, displayall=displayall)
545 545
546 546
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's timings through formatter *fm*.

    ``timings`` is a list of (wall, user, sys) tuples; it is sorted in
    place.  The best run is always reported; with ``displayall``, max,
    average and median are reported too.  ``result`` is the benchmarked
    function's return value, printed when truthy.
    """

    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def display(role, entry):
        # field names for the b'best' role are unprefixed; other roles
        # (max/avg/median) get a "role." prefix on every field
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        # %%d survives the first % so the count is formatted by fm.write
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    min_val = timings[0]
    display(b'best', min_val)
    if displayall:
        max_val = timings[-1]
        display(b'max', max_val)
        avg = tuple([sum(x) / count for x in zip(*timings)])
        display(b'avg', avg)
        median = timings[len(timings) // 2]
        display(b'median', median)
580 580
581 581
582 582 # utilities for historical portability
583 583
584 584
def getint(ui, section, name, default):
    """Read an integer config value, falling back to *default* when unset.

    Raises error.ConfigError when the value exists but is not an integer.
    """
    # for "historical portability":
    # ui.configint has been available only since 1.9 (or fa2b596db182),
    # so parse the raw value ourselves
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError(
            b"%s.%s is not an integer ('%s')" % (section, name, raw)
        )
597 597
598 598
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort(
            (
                b"missing attribute %s of %s might break assumption"
                b" of performance measurement"
            )
            % (name, obj)
        )

    # capture the current value so restore() can put it back later
    origvalue = getattr(obj, _sysstr(name))

    class attrutil(object):
        # small handle exposing set()/restore() over the captured attribute
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
635 635
636 636
637 637 # utilities to examine each internal API changes
638 638
639 639
def getbranchmapsubsettable():
    """Return the 'subsettable' mapping from whichever module defines it.

    Aborts when no known module provides it (Mercurial before 2.5).
    """
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    # - repoviewutil since 5.0
    for mod in (branchmap, repoview, repoviewutil):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )
658 658
659 659
def getsvfs(repo):
    """Return the vfs-like object for accessing files under .hg/store."""
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf);
    # fall back to the older repo.sopener attribute otherwise
    svfs = getattr(repo, 'svfs', None)
    return svfs or getattr(repo, 'sopener')
670 670
671 671
def getvfs(repo):
    """Return the vfs-like object for accessing files directly under .hg."""
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf);
    # fall back to the older repo.opener attribute otherwise
    vfs = getattr(repo, 'vfs', None)
    return vfs or getattr(repo, 'opener')
682 682
683 683
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API
    """
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if b'_tagscache' in vars(repo):
                del repo.__dict__[b'_tagscache']

        return clearcache

    # for the older APIs below, resetting the attribute to None is enough
    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")
713 713
714 714
715 715 # utilities to clear cache
716 716
717 717
def clearfilecache(obj, attrname):
    """Invalidate a filecache'd property so the next access recomputes it."""
    # operate on the unfiltered repo when obj is a filtered repoview
    unfiltered = getattr(obj, 'unfiltered', None)
    if unfiltered is not None:
        obj = unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
725 725
726 726
def clearchangelog(repo):
    """Drop cached changelog state so it is reloaded on next access."""
    unfi = repo.unfiltered()
    # a filtered repoview keeps its own changelog cache slots; reset
    # them directly (they are set outside normal attribute machinery)
    if repo is not unfi:
        object.__setattr__(repo, '_clcachekey', None)
        object.__setattr__(repo, '_clcache', None)
    clearfilecache(unfi, 'changelog')
732 732
733 733
734 734 # perf commands
735 735
736 736
@command(b'perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    # benchmark a full dirstate walk of the working directory
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    matcher = scmutil.match(repo[None], pats, {})

    def d():
        entries = repo.dirstate.walk(
            matcher, subrepos=[], unknown=True, ignored=False
        )
        return len(list(entries))

    timer(d)
    fm.end()
750 750
751 751
@command(b'perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    # benchmark annotating file F at the working directory parent
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fctx = repo[b'.'][f]

    def d():
        return len(fctx.annotate(True))

    timer(d)
    fm.end()
759 759
760 760
@command(
    b'perfstatus',
    [(b'u', b'unknown', False, b'ask status to look for unknown files')]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call

    The repository data are preserved between each call.

    By default, only the status of the tracked file are requested. If
    `--unknown` is passed, the "unknown" files are also tracked.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def d():
        # total entry count across all status categories
        status = repo.status(unknown=opts[b'unknown'])
        return sum(len(entries) for entries in status)

    timer(d)
    fm.end()
781 781
782 782
@command(b'perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    # benchmark scmutil.addremove over the working directory (dry run)
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # read the old value BEFORE entering the try block: previously the
    # assignment was inside `try`, so a failure reading repo.ui.quiet
    # would have raised NameError on `oldquiet` in the finally clause
    oldquiet = repo.ui.quiet
    try:
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        if b'uipathfn' in getargspec(scmutil.addremove).args:
            # uipathfn parameter was added to scmutil.addremove in 5.0
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
800 800
801 801
def clearcaches(cl):
    """Clear a revlog's (typically the changelog's) lookup caches."""
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        # modern revlogs expose an explicit cache-clearing method
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # older revlogs: reset the node->rev lookup caches by hand
        from mercurial.node import nullid, nullrev

        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
811 811
812 812
@command(b'perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def setup():
        # drop lookup caches so every run recomputes from scratch
        clearcaches(cl)

    def run():
        len(cl.headrevs())

    timer(run, setup=setup)
    fm.end()
828 828
829 829
@command(
    b'perftags',
    formatteropts
    + [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),],
)
def perftags(ui, repo, **opts):
    # benchmark computing the repository's tags
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']

    def setup():
        # optionally force changelog and manifest to be re-read too
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()

    def run():
        return len(repo.tags())

    timer(run, setup=setup)
    fm.end()
852 852
853 853
@command(b'perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    # benchmark iterating over every ancestor of the repository heads
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()

    def d():
        for _rev in repo.changelog.ancestors(heads):
            pass

    timer(d)
    fm.end()
866 866
867 867
@command(b'perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    # benchmark membership tests of REVSET members against a lazy
    # ancestor set of the repository heads
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()

    def d():
        ancestors = repo.changelog.ancestors(heads)
        for rev in revs:
            # the containment check itself is what we measure;
            # its result is deliberately discarded
            rev in ancestors

    timer(d)
    fm.end()
882 882
883 883
@command(b'perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path
    """
    # normalize **opts to bytes keys like every other perf command does;
    # this was missing here, so on py3 the str-keyed opts dict reached
    # gettimer()/the formatter, which look options up with bytes keys
    opts = _byteskwargs(opts)
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)
    path = ui.expandpath(path)

    def s():
        # recreate the peer before every run so connection state from a
        # previous iteration cannot skew the measurement
        repos[1] = hg.peer(ui, opts, path)

    def d():
        setdiscovery.findcommonheads(ui, *repos)

    timer(d, setup=s)
    fm.end()
900 900
901 901
@command(
    b'perfbookmarks',
    formatteropts
    + [(b'', b'clear-revlogs', False, b'refresh changelog and manifest'),],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']

    def setup():
        if clearrevlogs:
            clearchangelog(repo)
        # drop the cached bookmark store so it is re-read from disk
        clearfilecache(repo, b'_bookmarks')

    def run():
        # accessing the property triggers the parse being measured
        repo._bookmarks

    timer(run, setup=setup)
    fm.end()
924 924
925 925
@command(b'perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    def makebench(fn):
        # benchmark running fn() over a freshly opened bundle
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        # benchmark draining the parsed bundle in `size`-byte chunks
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # benchmark raw file reads, bypassing bundle parsing entirely
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        # benchmark reading every part's payload in `size`-byte chunks
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # open the bundle once only to detect its format and pick benchmarks
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend(
                [
                    (makebench(deltaiter), b'cg1 deltaiter()'),
                    (makebench(iterchunks), b'cg1 getchunks()'),
                    (makereadnbytes(8192), b'cg1 read(8k)'),
                    (makereadnbytes(16384), b'cg1 read(16k)'),
                    (makereadnbytes(32768), b'cg1 read(32k)'),
                    (makereadnbytes(131072), b'cg1 read(128k)'),
                ]
            )
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend(
                [
                    (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                    (makebench(iterparts), b'bundle2 iterparts()'),
                    (
                        makebench(iterpartsseekable),
                        b'bundle2 iterparts() seekable',
                    ),
                    (makebench(seek), b'bundle2 part seek()'),
                    (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                    (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                    (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                    (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
                ]
            )
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1050 1050
1051 1051
@command(
    b'perfchangegroupchangelog',
    formatteropts
    + [
        (b'', b'cgversion', b'02', b'changegroup version'),
        (b'r', b'rev', b'', b'revisions to add to changegroup'),
    ],
)
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    # resolve the requested revset (default: all revisions) to raw nodes
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        # generate and fully exhaust the changelog chunk stream
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()
1087 1087
1088 1088
@command(b'perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark a `dirstate.hasdir` call, rebuilding the dirs cache each run"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # prime the dirstate so its initial load is not part of the measurement
    b'a' in ds

    def runone():
        ds.hasdir(b'a')
        # drop the directory cache so the next iteration rebuilds it
        del ds._map._dirs

    timer(runone)
    fm.end()
1102 1102
1103 1103
@command(b'perfdirstate', formatteropts)
def perfdirstate(ui, repo, **opts):
    """benchmap the time necessary to load a dirstate from scratch

    The dirstate is loaded to the point where a "contains" request can be
    answered.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # warm the dirstate up once outside of the timed section
    b"a" in repo.dirstate

    def resetstate():
        # throw the parsed dirstate away so each run reloads it from disk
        repo.dirstate.invalidate()

    def measure():
        b"a" in repo.dirstate

    timer(measure, setup=resetstate)
    fm.end()
1123 1123
1124 1124
@command(b'perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmap a 'dirstate.hasdir' call from an empty `dirs` cache
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # make sure the dirstate (and its dirs cache) is loaded once up front
    repo.dirstate.hasdir(b"a")

    def dropcache():
        # remove the cached directory map; the timed call must rebuild it
        del repo.dirstate._map._dirs

    def measure():
        repo.dirstate.hasdir(b"a")

    timer(measure, setup=dropcache)
    fm.end()
1141 1141
1142 1142
@command(b'perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmap a `dirstate._map.filefoldmap.get()` request

    The dirstate filefoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # prime the dirstate and the filefoldmap before any measurement
    ds._map.filefoldmap.get(b'a')

    def dropfoldmap():
        del ds._map.filefoldmap

    def lookupone():
        ds._map.filefoldmap.get(b'a')

    timer(lookupone, setup=dropfoldmap)
    fm.end()
1162 1162
1163 1163
@command(b'perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmap a `dirstate._map.dirfoldmap.get()` request

    The dirstate dirfoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # make sure the dirstate itself is already loaded
    b'a' in ds

    def lookupone():
        ds._map.dirfoldmap.get(b'a')
        # drop both caches so the next run rebuilds them from scratch
        del ds._map.dirfoldmap
        del ds._map._dirs

    timer(lookupone)
    fm.end()
1178 1182
1179 1183
@command(b'perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark writing the dirstate back to disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # load the dirstate before timing anything
    b"a" in dirstate

    def flush():
        # mark it dirty so a write actually happens despite no change
        dirstate._dirty = True
        dirstate.write(repo.currenttransaction())

    timer(flush)
    fm.end()
1193 1197
1194 1198
def _getmergerevs(repo, opts):
    """parse command arguments to return the revisions involved in a merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        fromrev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[fromrev]
    else:
        # no explicit base: use the common ancestor of the two sides
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
1216 1220
1217 1221
@command(
    b'perfmergecalculate',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecalculate(ui, repo, **opts):
    """benchmark the merge action computation (`merge.calculateupdates`)"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(
            repo,
            wctx,
            rctx,
            [ancestor],
            branchmerge=False,
            force=False,
            acceptremote=True,
            followcopies=True,
        )

    timer(d)
    fm.end()
1249 1253
1250 1254
@command(
    b'perfmergecopies',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def runcopies():
        # run the merge copy-tracing between the two sides and their base
        copies.mergecopies(repo, wctx, rctx, ancestor)

    timer(runcopies)
    fm.end()
1273 1277
1274 1278
@command(b'perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # resolve both revisions outside of the timed section
    srcctx = scmutil.revsingle(repo, rev1, rev1)
    dstctx = scmutil.revsingle(repo, rev2, rev2)

    def trace():
        copies.pathcopies(srcctx, dstctx)

    timer(trace)
    fm.end()
1288 1292
1289 1293
@command(
    b'perfphases',
    [(b'', b'full', False, b'include file reading time too'),],
    b"",
)
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')

    def d():
        phases = _phases
        if full:
            # also drop the filecache entry so the phase roots file is
            # re-read from disk on each run
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)

    timer(d)
    fm.end()
1312 1316
1313 1317
@command(b'perfphasesremote', [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import bin
    from mercurial import (
        exchange,
        hg,
        phases,
    )

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.paths.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            b'default repository not configured!',
            hint=b"see 'hg help config.paths'",
        )
    dest = path.pushloc or path.loc
    ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    # fetch the remote phase roots over the wire protocol
    with other.commandexecutor() as e:
        remotephases = e.callcommand(
            b'listkeys', {b'namespace': b'phases'}
        ).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.statusnoi18n(b'publishing: yes\n')
    else:
        ui.statusnoi18n(b'publishing: no\n')

    nodemap = repo.changelog.nodemap
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        # count remote roots that are known locally and not public
        if node in nodemap and int(phase):
            nonpublishroots += 1
    ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
    ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)

    def d():
        # the timed part: summarizing the remote phases locally
        phases.remotephasessummary(repo, remotesubset, remotephases)

    timer(d)
    fm.end()
1370 1374
1371 1375
@command(
    b'perfmanifest',
    [
        (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
        (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ]
    + formatteropts,
    b'REV|NODE',
)
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # `rev` designates a changeset; benchmark its manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # a full hexadecimal manifest node id was given directly
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                # `getstorage` only exists on recent Mercurial; fall back to
                # the private revlog attribute on older versions
                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(
                    b'manifest revision must be integer or full node'
                )

    def d():
        # drop in-memory (and optionally on-disk) caches, then read
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()

    timer(d)
    fm.end()
1415 1419
1416 1420
@command(b'perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading a single changeset entry from the changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    node = scmutil.revsingle(repo, rev).node()

    def readone():
        repo.changelog.read(node)

    timer(readone)
    fm.end()
1429 1433
1430 1434
@command(b'perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate

    def dropignore():
        # force the `_ignore` matcher to be recomputed from scratch
        ds.invalidate()
        clearfilecache(ds, b'_ignore')

    def loadignore():
        ds._ignore

    timer(loadignore, setup=dropignore, title=b"load")
    fm.end()
1447 1451
1448 1452
@command(
    b'perfindex',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'no-lookup', None, b'do not revision lookup post creation'),
    ]
    + formatteropts,
)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        # _byteskwargs() turned every key into bytes, so the option must be
        # read with a bytes key (a str key raises KeyError on Python 3);
        # the abort message is bytes for the same reason.
        if opts[b'rev']:
            raise error.Abort(b'--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func

    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)

    def d():
        # rebuild the changelog (index creation) then resolve each node
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)

    timer(d, setup=setup)
    fm.end()
1510 1514
1511 1515
@command(
    b'perfnodemap',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ]
    + formatteropts,
)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    # _byteskwargs() turned every key into bytes, so the option must be read
    # with a bytes key (a str key raises KeyError on Python 3); the abort
    # message below is bytes for the same reason.
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]

    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        nodeget[0] = makecl(unfi).nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:

        def setup():
            setnodeget()

    else:
        setnodeget()
        d()  # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
1578 1582
1579 1583
@command(b'perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark the startup time of the hg executable (`hg version -q`)"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def d():
        if os.name != r'nt':
            # empty HGRCPATH skips user/system config parsing during startup
            os.system(
                b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
            )
        else:
            # Windows cmd has no inline env assignment, so set it explicitly
            os.environ[r'HGRCPATH'] = r' '
            os.system(r"%s version -q > NUL" % sys.argv[0])

    timer(d)
    fm.end()
1596 1600
1597 1601
@command(b'perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nodes = [repo.changelog.node(i) for i in _xrange(count)]

    def fetchparents():
        for node in nodes:
            repo.changelog.parents(node)

    timer(fetchparents)
    fm.end()
1623 1627
1624 1628
@command(b'perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark computing the file list of a single changectx"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)

    def countfiles():
        len(repo[rev].files())

    timer(countfiles)
    fm.end()
1636 1640
1637 1641
@command(b'perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the raw file list of a revision from the changelog"""
    opts = _byteskwargs(opts)
    rev = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def countrawfiles():
        # index 3 of a parsed changelog entry is the list of touched files
        len(cl.read(rev)[3])

    timer(countrawfiles)
    fm.end()
1650 1654
1651 1655
@command(b'perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision specifier to a node via `repo.lookup`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def resolve():
        return len(repo.lookup(rev))

    timer(resolve)
    fm.end()
1658 1662
1659 1663
@command(
    b'perflinelogedits',
    [
        (b'n', b'edits', 10000, b'number of edits'),
        (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
    ],
    norepo=True,
)
def perflinelogedits(ui, **opts):
    """benchmark applying a long random series of edits to a linelog"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed so every run benchmarks the same edit sequence
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        # pick a random source range [a1, a2) and replacement range [b1, b2)
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        # replay all precomputed edits against a fresh linelog
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1697 1701
1698 1702
@command(b'perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving one or more revset specifications"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def resolve():
        return len(scmutil.revrange(repo, specs))

    timer(resolve)
    fm.end()
1706 1710
1707 1711
@command(b'perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark a cold node-to-rev lookup on the changelog revlog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog

    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()
    # open the changelog revlog directly so its caches are under our control
    cl = mercurial.revlog.revlog(getsvfs(repo), b"00changelog.i")

    def d():
        cl.rev(n)
        # drop the revlog caches so each run performs a cold lookup
        clearcaches(cl)

    timer(d)
    fm.end()
1724 1728
1725 1729
@command(
    b'perflog',
    [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
)
def perflog(ui, repo, rev=None, **opts):
    """benchmark a full `hg log` invocation (output is swallowed)"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    # buffer the output so terminal printing does not skew the timing
    ui.pushbuffer()

    def runlog():
        commands.log(
            ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
        )

    timer(runlog)
    ui.popbuffer()
    fm.end()
1743 1747
1744 1748
@command(b'perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def walkback():
        for rev in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            # read changelog data (in addition to the index)
            repo[rev].branch()

    timer(walkback)
    fm.end()
1761 1765
1762 1766
@command(
    b'perftemplating',
    [(b'r', b'rev', [], b'revisions to run the template on'),] + formatteropts,
)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort(
            b"perftemplating not available with this Mercurial",
            hint=b"use 4.3 or later",
        )

    opts = _byteskwargs(opts)

    # render into a null ui so terminal output does not affect the timing
    nullui = ui.copy()
    nullui.fout = open(os.devnull, r'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (
        b'{date|shortdate} [{rev}:{node|short}]'
        b' {author|person}: {desc|firstline}\n'
    )
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)

    def format():
        # render the template for every requested revision
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()
1802 1806
1803 1807
def _displaystats(ui, opts, entries, data):
    """display percentile statistics for collected measurements

    `entries` is a list of (key, title) pairs; `data[key]` is a list of
    tuples whose first item is the measured value (remaining items identify
    the measurement, e.g. node hashes).
    """
    # use a second formatter because the data are quite different, not sure
    # how it flies with the templater.
    fm = ui.formatter(b'perf-stats', opts)
    for key, title in entries:
        values = data[key]
        # percentile indices must be based on the number of collected values;
        # the previous `len(data)` counted entry kinds and made every
        # percentile index out as (nearly) the minimum.
        nbvalues = len(values)
        values.sort()
        stats = {
            'key': key,
            'title': title,
            'nbitems': len(values),
            'min': values[0][0],
            '10%': values[(nbvalues * 10) // 100][0],
            '25%': values[(nbvalues * 25) // 100][0],
            '50%': values[(nbvalues * 50) // 100][0],
            '75%': values[(nbvalues * 75) // 100][0],
            '80%': values[(nbvalues * 80) // 100][0],
            '85%': values[(nbvalues * 85) // 100][0],
            '90%': values[(nbvalues * 90) // 100][0],
            '95%': values[(nbvalues * 95) // 100][0],
            '99%': values[(nbvalues * 99) // 100][0],
            'max': values[-1][0],
        }
        fm.startitem()
        fm.data(**stats)
        # make node pretty for the human output
        fm.plain('### %s (%d items)\n' % (title, len(values)))
        lines = [
            'min',
            '10%',
            '25%',
            '50%',
            '75%',
            '80%',
            '85%',
            '90%',
            '95%',
            '99%',
            'max',
        ]
        for l in lines:
            fm.plain('%s: %s\n' % (l, stats[l]))
    fm.end()
1849 1853
1850 1854
@command(
    b'perfhelper-mergecopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelpermergecopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perfmergecopies`

    This command find (base, p1, p2) triplet relevant for copytracing
    benchmarking in the context of a merge. It reports values for some of the
    parameters that impact merge copy tracing time during merge.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision triplets are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # column name and printf-style format for each reported field
    output_template = [
        ("base", "%(base)12s"),
        ("p1", "%(p1.node)12s"),
        ("p2", "%(p2.node)12s"),
        ("p1.nb-revs", "%(p1.nbrevs)12d"),
        ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
        ("p1.renames", "%(p1.renamedfiles)12d"),
        ("p1.time", "%(p1.time)12.3f"),
        ("p2.nb-revs", "%(p2.nbrevs)12d"),
        ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
        ("p2.renames", "%(p2.renamedfiles)12d"),
        ("p2.time", "%(p2.time)12.3f"),
        ("renames", "%(nbrenamedfiles)12d"),
        ("total.time", "%(time)12.3f"),
    ]
    if not dotiming:
        # without --timing the rename/time columns are never filled in
        output_template = [
            i
            for i in output_template
            if not ('time' in i[0] or 'renames' in i[0])
        ]
    header_names = [h for (h, v) in output_template]
    output = ' '.join([v for (h, v) in output_template]) + '\n'
    header = ' '.join(['%12s'] * len(header_names)) + '\n'
    fm.plain(header % tuple(header_names))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['parentnbrenames'] = []
            alldata['totalnbrenames'] = []
            alldata['parenttime'] = []
            alldata['totaltime'] = []

    # only merge commits are interesting (they have two parents)
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1()
        p2 = ctx.p2()
        bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
        for b in bases:
            b = repo[b]
            p1missing = copies._computeforwardmissing(b, p1)
            p2missing = copies._computeforwardmissing(b, p2)
            data = {
                b'base': b.hex(),
                b'p1.node': p1.hex(),
                b'p1.nbrevs': len(repo.revs('%d::%d', b.rev(), p1.rev())),
                b'p1.nbmissingfiles': len(p1missing),
                b'p2.node': p2.hex(),
                b'p2.nbrevs': len(repo.revs('%d::%d', b.rev(), p2.rev())),
                b'p2.nbmissingfiles': len(p2missing),
            }
            if dostats:
                if p1missing:
                    alldata['nbrevs'].append(
                        (data['p1.nbrevs'], b.hex(), p1.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p1.nbmissingfiles'], b.hex(), p1.hex())
                    )
                if p2missing:
                    alldata['nbrevs'].append(
                        (data['p2.nbrevs'], b.hex(), p2.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p2.nbmissingfiles'], b.hex(), p2.hex())
                    )
            if dotiming:
                begin = util.timer()
                mergedata = copies.mergecopies(repo, p1, p2, b)
                end = util.timer()
                # not very stable timing since we did only one run
                data['time'] = end - begin
                # mergedata contains five dicts: "copy", "movewithdir",
                # "diverge", "renamedelete" and "dirmove".
                # The first 4 are about renamed file so lets count that.
                renames = len(mergedata[0])
                renames += len(mergedata[1])
                renames += len(mergedata[2])
                renames += len(mergedata[3])
                data['nbrenamedfiles'] = renames
                begin = util.timer()
                p1renames = copies.pathcopies(b, p1)
                end = util.timer()
                data['p1.time'] = end - begin
                begin = util.timer()
                p2renames = copies.pathcopies(b, p2)
                # grab the end timestamp *before* computing the delta;
                # previously `end` still held the p1 value here, so p2.time
                # was garbage (end-of-p1 minus start-of-p2, often negative).
                end = util.timer()
                data['p2.time'] = end - begin
                data['p1.renamedfiles'] = len(p1renames)
                data['p2.renamedfiles'] = len(p2renames)

                if dostats:
                    if p1missing:
                        alldata['parentnbrenames'].append(
                            (data['p1.renamedfiles'], b.hex(), p1.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p1.time'], b.hex(), p1.hex())
                        )
                    if p2missing:
                        alldata['parentnbrenames'].append(
                            (data['p2.renamedfiles'], b.hex(), p2.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p2.time'], b.hex(), p2.hex())
                        )
                    if p1missing or p2missing:
                        alldata['totalnbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                b.hex(),
                                p1.hex(),
                                p2.hex(),
                            )
                        )
                        alldata['totaltime'].append(
                            (data['time'], b.hex(), p1.hex(), p2.hex())
                        )
            fm.startitem()
            fm.data(**data)
            # make node pretty for the human output
            out = data.copy()
            out['base'] = fm.hexfunc(b.node())
            out['p1.node'] = fm.hexfunc(p1.node())
            out['p2.node'] = fm.hexfunc(p2.node())
            fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(
                ('parentnbrenames', 'rename from one parent to base')
            )
            entries.append(('totalnbrenames', 'total number of renames'))
            entries.append(('parenttime', 'time for one parent'))
            entries.append(('totaltime', 'time for both parents'))
        _displaystats(ui, opts, entries, alldata)
2032 2036
2033 2037
@command(
    b'perfhelper-pathcopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # Pick output format up-front: the timing variant adds two extra columns.
    if dotiming:
        # NOTE(review): the header reserves 12 columns for every field but the
        # time value is printed with %18.5f, so the last column will not line
        # up with its header -- confirm whether this is intentional.
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d "
            "%(nbrenamedfiles)12d %(time)18.5f\n"
        )
        header_names = (
            "source",
            "destination",
            "nb-revs",
            "nb-files",
            "nb-renames",
            "time",
        )
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d\n"
        )
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    # default to scanning the whole repository
    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        # accumulators for the summary table printed after the main output
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['nbrenames'] = []
            alldata['time'] = []

    # only merge revisions are relevant for copy-tracing measurements
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        # measure each (common-ancestor, parent) pair independently
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    # nothing to trace for this pair; skip it entirely
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('%d::%d', b, p)),
                    b'nbmissingfiles': len(missing),
                }
                if dostats:
                    alldata['nbrevs'].append(
                        (data['nbrevs'], base.hex(), parent.hex(),)
                    )
                    alldata['nbmissingfiles'].append(
                        (data['nbmissingfiles'], base.hex(), parent.hex(),)
                    )
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                    if dostats:
                        alldata['time'].append(
                            (data['time'], base.hex(), parent.hex(),)
                        )
                        alldata['nbrenames'].append(
                            (data['nbrenamedfiles'], base.hex(), parent.hex(),)
                        )
                fm.startitem()
                fm.data(**data)
                # make the nodes human-readable for the plain output
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        fm = ui.formatter(b'perf', opts)
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('nbrenames', 'renamed files'))
            entries.append(('time', 'time'))
        _displaystats(ui, opts, entries, alldata)
2159 2163
2160 2164
@command(b'perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    # benchmark constructing a case-collision auditor over the dirstate
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def d():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(d)
    fm.end()
2167 2171
2168 2172
@command(b'perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    # time a (re)load of the fncache file from the store
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store

    timer(lambda: store.fncache._load())
    fm.end()
2180 2184
2181 2185
@command(b'perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    # Benchmark rewriting the fncache file inside a transaction. The
    # transaction backs up the original fncache so the repository is left
    # untouched.
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    # fix: release the lock even if loading/benchmarking raises, so a failed
    # run does not leave the repository locked
    try:
        s.fncache._load()
        tr = repo.transaction(b'perffncachewrite')
        tr.addbackup(b'fncache')

        def d():
            # force the dirty flag so write() does not short-circuit
            s.fncache._dirty = True
            s.fncache.write(tr)

        timer(d)
        tr.close()
    finally:
        lock.release()
    fm.end()
2200 2204
2201 2205
@command(b'perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    # time encoding every path currently listed in the fncache
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    store.fncache._load()

    def encodeall():
        for entry in store.fncache.entries:
            store.encode(entry)

    timer(encodeall)
    fm.end()
2215 2219
2216 2220
def _bdiffworker(q, blocks, xdiff, ready, done):
    # Worker loop for the threaded mode of perfbdiff.
    #
    # Each benchmark pass feeds text pairs into ``q`` followed by one ``None``
    # sentinel per worker. A worker diffs pairs until it sees its sentinel,
    # then parks on the ``ready`` condition until the main thread wakes all
    # workers for the next pass (or sets ``done`` to shut everything down).
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            # pick the same diff flavor the non-threaded path uses
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done()  # for the None one
        with ready:
            ready.wait()
2232 2236
2233 2237
def _manifestrevision(repo, mnode):
    """Return the stored revision text for manifest node ``mnode``.

    Works across Mercurial versions: prefers the modern ``getstorage()``
    accessor and falls back to the legacy ``_revlog`` attribute.
    """
    manifestlog = repo.manifestlog

    if not util.safehasattr(manifestlog, b'getstorage'):
        return manifestlog._revlog.revision(mnode)
    return manifestlog.getstorage(b'').revision(mnode)
2243 2247
2244 2248
@command(
    b'perfbdiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
        (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
        (b'', b'blocks', False, b'test computing diffs into blocks'),
        (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],
    b'-c|-m|FILE REV',
)
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        # --alldata implies walking the changelog
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # with -c/-m the positional argument is the revision, not a file
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    # NOTE(review): the upper bound is min(startrev + count, len(r) - 1),
    # which excludes the tip revision -- confirm whether that is intended.
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            # default: diff each revision against its delta parent
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:
        # single-threaded: diff every pair inline

        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)

    else:
        # threaded: workers consume pairs from a queue; one None sentinel per
        # worker marks the end of a pass (see _bdiffworker)
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(
                target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
            ).start()
        q.join()

        def d():
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # shut the workers down: set done, feed sentinels, wake everyone
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
2359 2363
2360 2364
@command(
    b'perfunidiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ],
    b'-c|-m|FILE REV',
)
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        # --alldata implies walking the changelog
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # with -c/-m the positional argument is the revision, not a file
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            # default: diff each revision against its delta parent
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False
            )
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
2439 2443
2440 2444
@command(b'perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # map of single-letter flags to the diff keyword argument they enable
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        # fix: build the per-variant kwargs in a fresh dict instead of
        # clobbering this function's own ``opts`` parameter
        diffkwargs = {options[c]: b'1' for c in diffopt}

        # bind the kwargs as a default so the closure does not depend on
        # the loop variable's final value
        def d(kwargs=diffkwargs):
            ui.pushbuffer()
            commands.diff(ui, repo, **kwargs)
            ui.popbuffer()

        label = diffopt.encode('ascii')
        title = b'diffopts: %s' % (label and (b'-' + label) or b'none')
        timer(d, title=title)
    fm.end()
2464 2468
2465 2469
@command(b'perfrevlogindex', revlogopts + formatteropts, b'-c|-m|FILE')
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    indexfile = rl.indexfile
    data = opener.read(indexfile)

    # the low 16 bits of the first 4 bytes hold the revlog format version
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        revlogio = revlog.revlogio()
        inline = header & (1 << 16)
    else:
        raise error.Abort(b'unsupported revlog version: %d' % version)

    rllen = len(rl)

    # sample nodes at fixed fractions of the revlog for lookup benchmarks
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        # cost of instantiating a revlog object
        revlog.revlog(opener, indexfile)

    def read():
        # raw I/O cost of reading the index file
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        # cost of parsing the raw index data into an index object
        revlogio.parseindex(data, inline)

    def getentry(revornode):
        index = revlogio.parseindex(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = revlogio.parseindex(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        nodemap = revlogio.parseindex(data, inline)[1]
        # This only works for the C code.
        if nodemap is None:
            return

        try:
            nodemap[node]
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        nodemap = revlogio.parseindex(data, inline)[1]
        if nodemap is None:
            return

        for i in range(count):
            for node in nodes:
                try:
                    nodemap[node]
                except error.RevlogError:
                    pass

    # (callable, human-readable title) pairs, each timed independently
    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
        (
            lambda: resolvenodes(allnodesrev, 2),
            b'look up all nodes 2x (reverse)',
        ),
        (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
        (
            lambda: getentries(allrevs, 2),
            b'retrieve all index entries 2x (forward)',
        ),
        (
            lambda: getentries(allrevsrev),
            b'retrieve all index entries (reverse)',
        ),
        (
            lambda: getentries(allrevsrev, 2),
            b'retrieve all index entries 2x (reverse)',
        ),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
2586 2590
2587 2591
@command(
    b'perfrevlogrevisions',
    revlogopts
    + formatteropts
    + [
        (b'd', b'dist', 100, b'distance between the revisions'),
        (b's', b'startrev', 0, b'revision to start reading at'),
        (b'', b'reverse', False, b'read in reverse'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogrevisions(
    ui, repo, file_=None, startrev=0, reverse=False, **opts
):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    nbrevs = getlen(ui)(rl)

    if startrev < 0:
        # a negative start counts back from the end of the revlog
        startrev += nbrevs

    def d():
        rl.clearcaches()

        step = opts[b'dist']
        if reverse:
            first, stop, step = nbrevs - 1, startrev - 1, -1 * step
        else:
            first, stop = startrev, nbrevs

        for x in _xrange(first, stop, step):
            # Old revisions don't support passing int.
            rl.revision(rl.node(x))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
2636 2640
2637 2641
@command(
    b'perfrevlogwrite',
    revlogopts
    + formatteropts
    + [
        (b's', b'startrev', 1000, b'revision to start writing at'),
        (b'', b'stoprev', -1, b'last revision to write'),
        (b'', b'count', 3, b'number of passes to perform'),
        (b'', b'details', False, b'print timing for every revisions tested'),
        (b'', b'source', b'full', b'the kind of data feed in the revlog'),
        (b'', b'lazydeltabase', True, b'try the provided delta first'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
    (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    Note: This performance command measures performance in a custom way. As a
    result some of the global configuration of the 'perf' command does not
    apply to it:

    * ``pre-run``: disabled

    * ``profile-benchmark``: disabled

    * ``run-limits``: disabled use --count instead
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative revisions count back from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (
        b'full',
        b'parent-1',
        b'parent-2',
        b'parent-smallest',
        b'storage',
    )
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        # fix: typo in error message ("invalide" -> "invalid")
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(
            ui,
            rl,
            source,
            startrev,
            stoprev,
            c + 1,
            lazydeltabase=lazydeltabase,
            clearcaches=clearcaches,
        )
        allresults.append(timing)

    ### consolidate the results in a single list
    # allresults holds one [(rev, timing), ...] list per pass; pivot it into
    # [(rev, [timing-pass-1, timing-pass-2, ...]), ...]
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # fix: the 50th percentile was erroneously computed with * 70
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append(
            (
                sum(x[1][0] for x in item),
                sum(x[1][1] for x in item),
                sum(x[1][2] for x in item),
            )
        )
    formatone(
        fm,
        totaltime,
        title="total time (%d revs)" % resultcount,
        displayall=displayall,
    )
    fm.end()
2779 2783
2780 2784
2781 2785 class _faketr(object):
2782 2786 def add(s, x, y, z=None):
2783 2787 return None
2784 2788
2785 2789
def _timeonewrite(
    ui,
    orig,
    source,
    startrev,
    stoprev,
    runidx=None,
    lazydeltabase=True,
    clearcaches=True,
):
    # Perform one full benchmark pass: re-add revisions [startrev, stoprev]
    # of ``orig`` to a truncated temporary copy (see _temprevlog), timing
    # each addrawrevision() call individually.
    #
    # Returns a list of (rev, timing) tuples, one per re-added revision.
    timings = []
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)

            def updateprogress(pos):
                progress.update(pos)

            def completeprogress():
                progress.complete()

        else:

            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)

            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            # build the (args, kwargs) for addrawrevision from the chosen
            # data source (full text, parent delta, stored delta, ...)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                # drop caches so each addition is measured cold
                dest.index.clearcaches()
                dest.clearcaches()
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
2835 2839
2836 2840
def _getrevisionseed(orig, rev, tr, source):
    # Build the (positional args, keyword args) pair to feed to
    # revlog.addrawrevision() when re-adding ``rev`` from ``orig``.
    #
    # ``source`` selects how the revision content is supplied:
    #   full            - the full revision text, no cached delta
    #   parent-1        - a delta against the first parent
    #   parent-2        - a delta against the second parent (p1 if no p2)
    #   parent-smallest - whichever parent delta is smaller
    #   storage         - the delta base the revlog actually stored
    from mercurial.node import nullid

    linkrev = orig.linkrev(rev)
    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    flags = orig.flags(rev)
    cachedelta = None
    text = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        baserev = orig.rev(p1)
        cachedelta = (baserev, orig.revdiff(p1, rev))
    elif source == b'parent-2':
        parent = p2
        if p2 == nullid:
            # no second parent; fall back to the first one
            parent = p1
        baserev = orig.rev(parent)
        cachedelta = (baserev, orig.revdiff(parent, rev))
    elif source == b'parent-smallest':
        p1diff = orig.revdiff(p1, rev)
        parent = p1
        diff = p1diff
        if p2 != nullid:
            p2diff = orig.revdiff(p2, rev)
            if len(p1diff) > len(p2diff):
                parent = p2
                diff = p2diff
        baserev = orig.rev(parent)
        cachedelta = (baserev, diff)
    elif source == b'storage':
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return (
        (text, tr, linkrev, p1, p2),
        {'node': node, 'flags': flags, 'cachedelta': cachedelta},
    )
2877 2881
2878 2882
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    # Context manager yielding a writable copy of revlog ``orig`` truncated
    # just before ``truncaterev``, stored in a temporary directory that is
    # removed on exit. Used by perfrevlogwrite to re-add revisions without
    # touching the real repository.
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    k = 'upperboundcomp'
    # forward optional constructor arguments that only newer revlogs have
    if util.safehasattr(orig, k):
        revlogkwargs[k] = getattr(orig, k)

    origindexpath = orig.opener.join(orig.indexfile)
    origdatapath = orig.opener.join(orig.datafile)
    indexname = 'revlog.i'
    dataname = 'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            # each index entry is fixed-size, so the cut point is a multiple
            # of the entry size
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        dest = revlog.revlog(
            vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
        )
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
2929 2933
2930 2934
@command(
    b'perfrevlogchunks',
    revlogopts
    + formatteropts
    + [
        (b'e', b'engines', b'', b'compression engines to use'),
        (b's', b'startrev', 0, b'revision to start at'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs; fall back to the old
    # name for "historical portability" with older Mercurial versions.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = set(e.strip() for e in engines.split(b','))
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # No engines requested: default to every engine that is available
        # and actually usable (a dummy compression weeds out engines that
        # load but raise NotImplementedError when exercised).
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # Return a file handle on the file actually holding revision data:
        # the index file for inline revlogs, the data file otherwise.
        if rl._inline:
            return getsvfs(repo)(rl.indexfile)
        else:
            return getsvfs(repo)(rl.datafile)

    def doread():
        # One segment read per revision, no reused file handle.
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        # Same as doread() but reusing a single file descriptor.
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        # Read the whole revision span in one segment call.
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        # Read + decompress, one chunk at a time.
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect (used by docompress below).
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    # NOTE: compression benchmarks rely on chunks[0] having been filled by
    # the 'chunk batch' bench above, so they must run after it.
    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append(
            (
                functools.partial(docompress, compressor),
                b'compress w/ %s' % engine,
            )
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3058 3062
3059 3063
@command(
    b'perfrevlogrevision',
    revlogopts
    + formatteropts
    + [(b'', b'cache', False, b'use caches instead of clearing')],
    b'-c|-m|FILE REV',
)
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    # With -c/-m the positional FILE slot actually carries the revision.
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs ("historical portability").
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # Slice each revision's raw (still compressed) chunk out of the
        # segments in `data`, one segment per slice of the delta chain.
        start = r.start
        length = r.length
        inline = r._inline
        iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # Inline revlogs interleave index entries with data.
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    # slicechunk moved between modules across versions; try the modern
    # location first, then the legacy private helper.
    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        slicechunk = getattr(revlog, '_slicechunk', None)

    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    # Slicing is only meaningful when sparse-read is enabled.
    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend(
        [
            (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
            (lambda: dodecompress(rawchunks), b'decompress'),
            (lambda: dopatch(text, bins), b'patch'),
            (lambda: dohash(text), b'hash'),
        ]
    )

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
3201 3205
3202 3206
@command(
    b'perfrevset',
    [
        (b'C', b'clear', False, b'clear volatile cache between each call.'),
        (b'', b'contexts', False, b'obtain changectx for each revision'),
    ]
    + formatteropts,
    b"REVSET",
)
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if you need to evaluate the impact of building
    the volatile revision set caches on the revset execution. Volatile
    caches hold filtering and obsolescence related data."""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def d():
        if clear:
            repo.invalidatevolatilesets()
        if contexts:
            # Materialize a full changectx per revision.
            for ctx in repo.set(expr):
                pass
        else:
            # Only iterate revision numbers.
            for r in repo.revs(expr):
                pass

    timer(d)
    fm.end()
3234 3238
3235 3239
@command(
    b'perfvolatilesets',
    [(b'', b'clear-obsstore', False, b'drop obsstore between each call.'),]
    + formatteropts,
)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile sets

    Volatile sets compute elements related to filtering and obsolescence.
    Positional arguments, if any, restrict the benchmark to the named sets
    and filters."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def getobs(name):
        # Benchmark computing one obsolescence-related revision set.
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            obsolete.getrevs(repo, name)

        return d

    allobs = sorted(obsolete.cachefuncs)
    if names:
        allobs = [n for n in allobs if n in names]

    for name in allobs:
        timer(getobs(name), title=name)

    def getfiltered(name):
        # Benchmark computing one repoview filter's hidden-rev set.
        def d():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            repoview.filterrevs(repo, name)

        return d

    allfilter = sorted(repoview.filtertable)
    if names:
        allfilter = [n for n in allfilter if n in names]

    for name in allfilter:
        timer(getfiltered(name), title=name)
    fm.end()
3281 3285
3282 3286
@command(
    b'perfbranchmap',
    [
        (b'f', b'full', False, b'Includes build time of subset'),
        (
            b'',
            b'clear-revbranch',
            False,
            b'purge the revbranch cache between computation',
        ),
    ]
    + formatteropts,
)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)

    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions keep a plain mapping on _branchcaches
            filtered = view._branchcaches

        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # Drop every cached branchmap so subset build time counts.
                view._branchcaches.clear()
            else:
                # Only drop this filter's branchmap; subsets stay warm.
                filtered.pop(filtername, None)
            view.branchmap()

        return d

    # Order filters so that each filter's subset is benchmarked before the
    # filter itself (smaller subset to bigger subset).
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered (represented by the None filter name)
        allfilters.append(None)

    # Stub out on-disk branchmap reads and writes so only the in-memory
    # computation is measured; restored in the finally clause below.
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions read via a module-level function
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=str(printname))
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
3372 3376
3373 3377
@command(
    b'perfbranchmapupdate',
    [
        (b'', b'base', [], b'subset of revision to start from'),
        (b'', b'target', [], b'subset of revision to end with'),
        (b'', b'clear-caches', False, b'clear cache between each runs'),
    ]
    + formatteropts,
)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from for <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    # update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None]  # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    # Build two synthetic repoview filters hiding everything outside the
    # base (resp. target) ancestor sets.
    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        # Temporarily register the synthetic filters; always removed in
        # the finally clause below.
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset was found: build the base branchmap from
            # scratch
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3482 3486
3483 3487
@command(
    b'perfbranchmapload',
    [
        (b'f', b'filter', b'', b'Specify repoview filter'),
        # fixed typo: "brachmap" -> "branchmap" in the user-facing help text
        (b'', b'list', False, b'List branchmap filter caches'),
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ]
    + formatteropts,
)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap"""
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # --list: just report which branchmap cache files exist, with sizes.
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(
                    b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
                )
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap()  # make sure we have a relevant, up to date branchmap

    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions read via a module-level function
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached; walk up the
    # subset chain until a filter with an on-disk branchmap is found
    while fromfile(repo) is None:
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(
                b'No branchmap cached for %s repo' % (filter or b'unfiltered')
            )
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)

    def setup():
        if clearrevlogs:
            clearchangelog(repo)

    def bench():
        fromfile(repo)

    timer(bench, setup=setup)
    fm.end()
3542 3546
3543 3547
@command(b'perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)
    # Constructing the obsstore and taking len() forces the on-disk
    # markers to be parsed; the length doubles as the marker count.
    timer(lambda: len(obsolete.obsstore(svfs)))
    fm.end()
3553 3557
3554 3558
@command(
    b'perflrucachedict',
    formatteropts
    + [
        (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
        (b'', b'mincost', 0, b'smallest cost of items in cache'),
        (b'', b'maxcost', 100, b'maximum cost of items in cache'),
        (b'', b'size', 4, b'size of cache'),
        (b'', b'gets', 10000, b'number of key lookups'),
        (b'', b'sets', 10000, b'number of key sets'),
        (b'', b'mixed', 10000, b'number of mixed mode operations'),
        (
            b'',
            b'mixedgetfreq',
            50,
            b'frequency of get vs set ops in mixed mode',
        ),
    ],
    norepo=True,
)
def perflrucache(
    ui,
    mincost=0,
    maxcost=100,
    costlimit=0,
    size=4,
    gets=10000,
    sets=10000,
    mixed=10000,
    mixedgetfreq=50,
    **opts
):
    """Benchmark ``util.lrucachedict``: init, get, insert/set and mixed
    workloads, with and without a total cost limit.

    Note: the command is registered as ``perflrucachedict`` although the
    function is named ``perflrucache``.
    """
    opts = _byteskwargs(opts)

    def doinit():
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value  # silence pyflakes warning

    def dogetscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        # NOTE(review): indexes `costs` (length `sets`) by position in
        # `values` (length `size`); assumes size <= sets — confirm.
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value  # silence pyflakes warning
            except KeyError:
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0  # get
        else:
            op = 1  # set

        mixedops.append(
            (op, random.randint(0, size * 2), random.choice(costrange))
        )

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    # The cost-aware and cost-free variants are mutually exclusive.
    if costlimit:
        benches.extend(
            [
                (dogetscost, b'gets w/ cost limit'),
                (doinsertscost, b'inserts w/ cost limit'),
                (domixedcost, b'mixed w/ cost limit'),
            ]
        )
    else:
        benches.extend(
            [
                (dogets, b'gets'),
                (doinserts, b'inserts'),
                (dosets, b'sets'),
                (domixed, b'mixed'),
            ]
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3709 3713
3710 3714
@command(b'perfwrite', formatteropts)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def bench():
        # Bind the method once so the loop measures write cost rather
        # than attribute lookup.
        emit = ui.writenoi18n
        for _ in range(100000):
            emit(b'Testing write performance\n')

    timer(bench)
    fm.end()
3725 3729
3726 3730
def uisetup(ui):
    """Extension setup hook: install compatibility shims for old Mercurial."""
    if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
        commands, b'debugrevlogopts'
    ):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(
                    b"This version doesn't support --dir option",
                    hint=b"use 3.5 or later",
                )
            return orig(repo, cmd, file_, opts)

        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
3745 3749
3746 3750
@command(
    b'perfprogress',
    formatteropts
    + [
        (b'', b'topic', b'topic', b'topic for progress messages'),
        (b'c', b'total', 1000000, b'total value we are progressing to'),
    ],
    norepo=True,
)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def doprogress():
        # Drive one full progress bar from 0 to `total`, one increment
        # per step.
        with ui.makeprogress(topic, total=total) as progress:
            for i in _xrange(total):
                progress.increment()

    timer(doprogress)
    fm.end()
@@ -1,396 +1,396
1 1 #require test-repo
2 2
3 3 Set vars:
4 4
5 5 $ . "$TESTDIR/helpers-testrepo.sh"
6 6 $ CONTRIBDIR="$TESTDIR/../contrib"
7 7
8 8 Prepare repo:
9 9
10 10 $ hg init
11 11
12 12 $ echo this is file a > a
13 13 $ hg add a
14 14 $ hg commit -m first
15 15
16 16 $ echo adding to file a >> a
17 17 $ hg commit -m second
18 18
19 19 $ echo adding more to file a >> a
20 20 $ hg commit -m third
21 21
22 22 $ hg up -r 0
23 23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 24 $ echo merge-this >> a
25 25 $ hg commit -m merge-able
26 26 created new head
27 27
28 28 $ hg up -r 2
29 29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
30 30
31 31 perfstatus
32 32
33 33 $ cat >> $HGRCPATH << EOF
34 34 > [extensions]
35 35 > perf=$CONTRIBDIR/perf.py
36 36 > [perf]
37 37 > presleep=0
38 38 > stub=on
39 39 > parentscount=1
40 40 > EOF
41 41 $ hg help -e perf
42 42 perf extension - helper extension to measure performance
43 43
44 44 Configurations
45 45 ==============
46 46
47 47 "perf"
48 48 ------
49 49
50 50 "all-timing"
51 51 When set, additional statistics will be reported for each benchmark: best,
52 52 worst, median average. If not set only the best timing is reported
53 53 (default: off).
54 54
55 55 "presleep"
56 56 number of second to wait before any group of runs (default: 1)
57 57
58 58 "pre-run"
59 59 number of run to perform before starting measurement.
60 60
61 61 "profile-benchmark"
62 62 Enable profiling for the benchmarked section. (The first iteration is
63 63 benchmarked)
64 64
65 65 "run-limits"
66 66 Control the number of runs each benchmark will perform. The option value
67 67 should be a list of '<time>-<numberofrun>' pairs. After each run the
68 68 conditions are considered in order with the following logic:
69 69
70 70 If benchmark has been running for <time> seconds, and we have performed
71 71 <numberofrun> iterations, stop the benchmark,
72 72
73 73 The default value is: '3.0-100, 10.0-3'
74 74
75 75 "stub"
76 76 When set, benchmarks will only be run once, useful for testing (default:
77 77 off)
78 78
79 79 list of commands:
80 80
81 81 perfaddremove
82 82 (no help text available)
83 83 perfancestors
84 84 (no help text available)
85 85 perfancestorset
86 86 (no help text available)
87 87 perfannotate (no help text available)
88 88 perfbdiff benchmark a bdiff between revisions
89 89 perfbookmarks
90 90 benchmark parsing bookmarks from disk to memory
91 91 perfbranchmap
92 92 benchmark the update of a branchmap
93 93 perfbranchmapload
94 94 benchmark reading the branchmap
95 95 perfbranchmapupdate
96 96 benchmark branchmap update from for <base> revs to <target>
97 97 revs
98 98 perfbundleread
99 99 Benchmark reading of bundle files.
100 100 perfcca (no help text available)
101 101 perfchangegroupchangelog
102 102 Benchmark producing a changelog group for a changegroup.
103 103 perfchangeset
104 104 (no help text available)
105 105 perfctxfiles (no help text available)
106 106 perfdiffwd Profile diff of working directory changes
107 107 perfdirfoldmap
108 (no help text available)
108 benchmap a 'dirstate._map.dirfoldmap.get()' request
109 109 perfdirs (no help text available)
110 110 perfdirstate benchmap the time necessary to load a dirstate from scratch
111 111 perfdirstatedirs
112 112 benchmap a 'dirstate.hasdir' call from an empty 'dirs' cache
113 113 perfdirstatefoldmap
114 114 benchmap a 'dirstate._map.filefoldmap.get()' request
115 115 perfdirstatewrite
116 116 (no help text available)
117 117 perfdiscovery
118 118 benchmark discovery between local repo and the peer at given
119 119 path
120 120 perffncacheencode
121 121 (no help text available)
122 122 perffncacheload
123 123 (no help text available)
124 124 perffncachewrite
125 125 (no help text available)
126 126 perfheads benchmark the computation of a changelog heads
127 127 perfhelper-mergecopies
128 128 find statistics about potential parameters for
129 129 'perfmergecopies'
130 130 perfhelper-pathcopies
131 131 find statistic about potential parameters for the
132 132 'perftracecopies'
133 133 perfignore benchmark operation related to computing ignore
134 134 perfindex benchmark index creation time followed by a lookup
135 135 perflinelogedits
136 136 (no help text available)
137 137 perfloadmarkers
138 138 benchmark the time to parse the on-disk markers for a repo
139 139 perflog (no help text available)
140 140 perflookup (no help text available)
141 141 perflrucachedict
142 142 (no help text available)
143 143 perfmanifest benchmark the time to read a manifest from disk and return a
144 144 usable
145 145 perfmergecalculate
146 146 (no help text available)
147 147 perfmergecopies
148 148 measure runtime of 'copies.mergecopies'
149 149 perfmoonwalk benchmark walking the changelog backwards
150 150 perfnodelookup
151 151 (no help text available)
152 152 perfnodemap benchmark the time necessary to look up revision from a cold
153 153 nodemap
154 154 perfparents benchmark the time necessary to fetch one changeset's parents.
155 155 perfpathcopies
156 156 benchmark the copy tracing logic
157 157 perfphases benchmark phasesets computation
158 158 perfphasesremote
159 159 benchmark time needed to analyse phases of the remote server
160 160 perfprogress printing of progress bars
161 161 perfrawfiles (no help text available)
162 162 perfrevlogchunks
163 163 Benchmark operations on revlog chunks.
164 164 perfrevlogindex
165 165 Benchmark operations against a revlog index.
166 166 perfrevlogrevision
167 167 Benchmark obtaining a revlog revision.
168 168 perfrevlogrevisions
169 169 Benchmark reading a series of revisions from a revlog.
170 170 perfrevlogwrite
171 171 Benchmark writing a series of revisions to a revlog.
172 172 perfrevrange (no help text available)
173 173 perfrevset benchmark the execution time of a revset
174 174 perfstartup (no help text available)
175 175 perfstatus benchmark the performance of a single status call
176 176 perftags (no help text available)
177 177 perftemplating
178 178 test the rendering time of a given template
179 179 perfunidiff benchmark a unified diff between revisions
180 180 perfvolatilesets
181 181 benchmark the computation of various volatile set
182 182 perfwalk (no help text available)
183 183 perfwrite microbenchmark ui.write
184 184
185 185 (use 'hg help -v perf' to show built-in aliases and global options)
186 186 $ hg perfaddremove
187 187 $ hg perfancestors
188 188 $ hg perfancestorset 2
189 189 $ hg perfannotate a
190 190 $ hg perfbdiff -c 1
191 191 $ hg perfbdiff --alldata 1
192 192 $ hg perfunidiff -c 1
193 193 $ hg perfunidiff --alldata 1
194 194 $ hg perfbookmarks
195 195 $ hg perfbranchmap
196 196 $ hg perfbranchmapload
197 197 $ hg perfbranchmapupdate --base "not tip" --target "tip"
198 198 benchmark of branchmap with 3 revisions with 1 new ones
199 199 $ hg perfcca
200 200 $ hg perfchangegroupchangelog
201 201 $ hg perfchangegroupchangelog --cgversion 01
202 202 $ hg perfchangeset 2
203 203 $ hg perfctxfiles 2
204 204 $ hg perfdiffwd
205 205 $ hg perfdirfoldmap
206 206 $ hg perfdirs
207 207 $ hg perfdirstate
208 208 $ hg perfdirstatedirs
209 209 $ hg perfdirstatefoldmap
210 210 $ hg perfdirstatewrite
211 211 #if repofncache
212 212 $ hg perffncacheencode
213 213 $ hg perffncacheload
214 214 $ hg debugrebuildfncache
215 215 fncache already up to date
216 216 $ hg perffncachewrite
217 217 $ hg debugrebuildfncache
218 218 fncache already up to date
219 219 #endif
220 220 $ hg perfheads
221 221 $ hg perfignore
222 222 $ hg perfindex
223 223 $ hg perflinelogedits -n 1
224 224 $ hg perfloadmarkers
225 225 $ hg perflog
226 226 $ hg perflookup 2
227 227 $ hg perflrucache
228 228 $ hg perfmanifest 2
229 229 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
230 230 $ hg perfmanifest -m 44fe2c8352bb
231 231 abort: manifest revision must be integer or full node
232 232 [255]
233 233 $ hg perfmergecalculate -r 3
234 234 $ hg perfmoonwalk
235 235 $ hg perfnodelookup 2
236 236 $ hg perfpathcopies 1 2
237 237 $ hg perfprogress --total 1000
238 238 $ hg perfrawfiles 2
239 239 $ hg perfrevlogindex -c
240 240 #if reporevlogstore
241 241 $ hg perfrevlogrevisions .hg/store/data/a.i
242 242 #endif
243 243 $ hg perfrevlogrevision -m 0
244 244 $ hg perfrevlogchunks -c
245 245 $ hg perfrevrange
246 246 $ hg perfrevset 'all()'
247 247 $ hg perfstartup
248 248 $ hg perfstatus
249 249 $ hg perftags
250 250 $ hg perftemplating
251 251 $ hg perfvolatilesets
252 252 $ hg perfwalk
253 253 $ hg perfparents
254 254 $ hg perfdiscovery -q .
255 255
256 256 Test run control
257 257 ----------------
258 258
259 259 Simple single entry
260 260
261 261 $ hg perfparents --config perf.stub=no --config perf.run-limits='0.000000001-15'
262 262 ! wall * comb * user * sys * (best of 15) (glob)
263 263
264 264 Multiple entries
265 265
266 266 $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-5'
267 267 ! wall * comb * user * sys * (best of 5) (glob)
268 268
269 269 error cases are ignored
270 270
271 271 $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-5'
272 272 malformatted run limit entry, missing "-": 500
273 273 ! wall * comb * user * sys * (best of 5) (glob)
274 274 $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-12, 0.000000001-5'
275 275 malformatted run limit entry, could not convert string to float: aaa: aaa-12 (no-py3 !)
276 276 malformatted run limit entry, could not convert string to float: 'aaa': aaa-12 (py3 !)
277 277 ! wall * comb * user * sys * (best of 5) (glob)
278 278 $ hg perfparents --config perf.stub=no --config perf.run-limits='12-aaaaaa, 0.000000001-5'
279 279 malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 12-aaaaaa
280 280 ! wall * comb * user * sys * (best of 5) (glob)
281 281
282 282 test actual output
283 283 ------------------
284 284
285 285 normal output:
286 286
287 287 $ hg perfheads --config perf.stub=no
288 288 ! wall * comb * user * sys * (best of *) (glob)
289 289
290 290 detailed output:
291 291
292 292 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
293 293 ! wall * comb * user * sys * (best of *) (glob)
294 294 ! wall * comb * user * sys * (max of *) (glob)
295 295 ! wall * comb * user * sys * (avg of *) (glob)
296 296 ! wall * comb * user * sys * (median of *) (glob)
297 297
298 298 test json output
299 299 ----------------
300 300
301 301 normal output:
302 302
303 303 $ hg perfheads --template json --config perf.stub=no
304 304 [
305 305 {
306 306 "comb": *, (glob)
307 307 "count": *, (glob)
308 308 "sys": *, (glob)
309 309 "user": *, (glob)
310 310 "wall": * (glob)
311 311 }
312 312 ]
313 313
314 314 detailed output:
315 315
316 316 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
317 317 [
318 318 {
319 319 "avg.comb": *, (glob)
320 320 "avg.count": *, (glob)
321 321 "avg.sys": *, (glob)
322 322 "avg.user": *, (glob)
323 323 "avg.wall": *, (glob)
324 324 "comb": *, (glob)
325 325 "count": *, (glob)
326 326 "max.comb": *, (glob)
327 327 "max.count": *, (glob)
328 328 "max.sys": *, (glob)
329 329 "max.user": *, (glob)
330 330 "max.wall": *, (glob)
331 331 "median.comb": *, (glob)
332 332 "median.count": *, (glob)
333 333 "median.sys": *, (glob)
334 334 "median.user": *, (glob)
335 335 "median.wall": *, (glob)
336 336 "sys": *, (glob)
337 337 "user": *, (glob)
338 338 "wall": * (glob)
339 339 }
340 340 ]
341 341
342 342 Test pre-run feature
343 343 --------------------
344 344
345 345 (perf discovery has some spurious output)
346 346
347 347 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=0
348 348 ! wall * comb * user * sys * (best of 1) (glob)
349 349 searching for changes
350 350 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=1
351 351 ! wall * comb * user * sys * (best of 1) (glob)
352 352 searching for changes
353 353 searching for changes
354 354 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=3
355 355 ! wall * comb * user * sys * (best of 1) (glob)
356 356 searching for changes
357 357 searching for changes
358 358 searching for changes
359 359 searching for changes
360 360
361 361 test profile-benchmark option
362 362 ------------------------------
363 363
364 364 Function to check that statprof ran
365 365 $ statprofran () {
366 366 > egrep 'Sample count:|No samples recorded' > /dev/null
367 367 > }
368 368 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.profile-benchmark=yes 2>&1 | statprofran
369 369
370 370 Check perf.py for historical portability
371 371 ----------------------------------------
372 372
373 373 $ cd "$TESTDIR/.."
374 374
375 375 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
376 376 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
377 377 > "$TESTDIR"/check-perf-code.py contrib/perf.py
378 378 contrib/perf.py:\d+: (re)
379 379 > from mercurial import (
380 380 import newer module separately in try clause for early Mercurial
381 381 contrib/perf.py:\d+: (re)
382 382 > from mercurial import (
383 383 import newer module separately in try clause for early Mercurial
384 384 contrib/perf.py:\d+: (re)
385 385 > origindexpath = orig.opener.join(orig.indexfile)
386 386 use getvfs()/getsvfs() for early Mercurial
387 387 contrib/perf.py:\d+: (re)
388 388 > origdatapath = orig.opener.join(orig.datafile)
389 389 use getvfs()/getsvfs() for early Mercurial
390 390 contrib/perf.py:\d+: (re)
391 391 > vfs = vfsmod.vfs(tmpdir)
392 392 use getvfs()/getsvfs() for early Mercurial
393 393 contrib/perf.py:\d+: (re)
394 394 > vfs.options = getattr(orig.opener, 'options', None)
395 395 use getvfs()/getsvfs() for early Mercurial
396 396 [1]
General Comments 0
You need to be logged in to leave comments. Login now