##// END OF EJS Templates
path: pass `path` to `peer` in `hg perf::discovery`...
marmoute -
r50632:4cedae99 default
parent child Browse files
Show More
@@ -1,4234 +1,4239 b''
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance
3 3
4 4 Configurations
5 5 ==============
6 6
7 7 ``perf``
8 8 --------
9 9
10 10 ``all-timing``
11 11 When set, additional statistics will be reported for each benchmark: best,
12 12 worst, median average. If not set only the best timing is reported
13 13 (default: off).
14 14
15 15 ``presleep``
16 16 number of second to wait before any group of runs (default: 1)
17 17
18 18 ``pre-run``
19 19 number of run to perform before starting measurement.
20 20
21 21 ``profile-benchmark``
22 22 Enable profiling for the benchmarked section.
23 23 (The first iteration is benchmarked)
24 24
25 25 ``run-limits``
26 26 Control the number of runs each benchmark will perform. The option value
27 27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 28 conditions are considered in order with the following logic:
29 29
30 30 If benchmark has been running for <time> seconds, and we have performed
31 31 <numberofrun> iterations, stop the benchmark,
32 32
33 33 The default value is: `3.0-100, 10.0-3`
34 34
35 35 ``stub``
36 36 When set, benchmarks will only be run once, useful for testing
37 37 (default: off)
38 38 '''
39 39
40 40 # "historical portability" policy of perf.py:
41 41 #
42 42 # We have to do:
43 43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 46 # - make historical perf command work correctly with as wide Mercurial
47 47 # version as possible
48 48 #
49 49 # We have to do, if possible with reasonable cost:
50 50 # - make recent perf command for historical feature work correctly
51 51 # with early Mercurial
52 52 #
53 53 # We don't have to do:
54 54 # - make perf command for recent feature work correctly with early
55 55 # Mercurial
56 56
57 57 import contextlib
58 58 import functools
59 59 import gc
60 60 import os
61 61 import random
62 62 import shutil
63 63 import struct
64 64 import sys
65 65 import tempfile
66 66 import threading
67 67 import time
68 68
69 69 import mercurial.revlog
70 70 from mercurial import (
71 71 changegroup,
72 72 cmdutil,
73 73 commands,
74 74 copies,
75 75 error,
76 76 extensions,
77 77 hg,
78 78 mdiff,
79 79 merge,
80 80 util,
81 81 )
82 82
83 83 # for "historical portability":
84 84 # try to import modules separately (in dict order), and ignore
85 85 # failure, because these aren't available with early Mercurial
86 86 try:
87 87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 88 except ImportError:
89 89 pass
90 90 try:
91 91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 92 except ImportError:
93 93 pass
94 94 try:
95 95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96 96
97 97 dir(registrar) # forcibly load it
98 98 except ImportError:
99 99 registrar = None
100 100 try:
101 101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 102 except ImportError:
103 103 pass
104 104 try:
105 105 from mercurial.utils import repoviewutil # since 5.0
106 106 except ImportError:
107 107 repoviewutil = None
108 108 try:
109 109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 110 except ImportError:
111 111 pass
112 112 try:
113 113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 114 except ImportError:
115 115 pass
116 116
117 117 try:
118 118 from mercurial import profiling
119 119 except ImportError:
120 120 profiling = None
121 121
try:
    from mercurial.revlogutils import constants as revlog_constants

    # modern hg: opening a revlog requires a (kind, name) tuple
    perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')

    def revlog(opener, *args, **kwargs):
        """Open a revlog, supplying the mandatory "kind" argument."""
        return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)


except (ImportError, AttributeError):
    # older hg: no revlog "kind" concept; forward arguments unchanged
    perf_rl_kind = None

    def revlog(opener, *args, **kwargs):
        """Open a revlog on hg versions that predate revlog kinds."""
        return mercurial.revlog.revlog(opener, *args, **kwargs)
137 137
def identity(a):
    """Return *a* unchanged; no-op stand-in for missing pycompat helpers."""
    return a
140 140
141 141
142 142 try:
143 143 from mercurial import pycompat
144 144
145 145 getargspec = pycompat.getargspec # added to module after 4.5
146 146 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
147 147 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
148 148 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
149 149 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
150 150 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
151 151 if pycompat.ispy3:
152 152 _maxint = sys.maxsize # per py3 docs for replacing maxint
153 153 else:
154 154 _maxint = sys.maxint
155 155 except (NameError, ImportError, AttributeError):
156 156 import inspect
157 157
158 158 getargspec = inspect.getargspec
159 159 _byteskwargs = identity
160 160 _bytestr = str
161 161 fsencode = identity # no py3 support
162 162 _maxint = sys.maxint # no py3 support
163 163 _sysstr = lambda x: x # no py3 support
164 164 _xrange = xrange
165 165
166 166 try:
167 167 # 4.7+
168 168 queue = pycompat.queue.Queue
169 169 except (NameError, AttributeError, ImportError):
170 170 # <4.7.
171 171 try:
172 172 queue = pycompat.queue
173 173 except (NameError, AttributeError, ImportError):
174 174 import Queue as queue
175 175
176 176 try:
177 177 from mercurial import logcmdutil
178 178
179 179 makelogtemplater = logcmdutil.maketemplater
180 180 except (AttributeError, ImportError):
181 181 try:
182 182 makelogtemplater = cmdutil.makelogtemplater
183 183 except (AttributeError, ImportError):
184 184 makelogtemplater = None
185 185
186 186 # for "historical portability":
187 187 # define util.safehasattr forcibly, because util.safehasattr has been
188 188 # available since 1.9.3 (or 94b200a11cf7)
_undefined = object()  # unique sentinel: distinguishes "missing" from None


def safehasattr(thing, attr):
    """hasattr() variant accepting a bytes attribute name (via _sysstr)."""
    return getattr(thing, _sysstr(attr), _undefined) is not _undefined


setattr(util, 'safehasattr', safehasattr)
197 197
198 198 # for "historical portability":
199 199 # define util.timer forcibly, because util.timer has been available
200 200 # since ae5d60bb70c9
201 201 if safehasattr(time, 'perf_counter'):
202 202 util.timer = time.perf_counter
203 203 elif os.name == b'nt':
204 204 util.timer = time.clock
205 205 else:
206 206 util.timer = time.time
207 207
208 208 # for "historical portability":
209 209 # use locally defined empty option list, if formatteropts isn't
210 210 # available, because commands.formatteropts has been available since
211 211 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
212 212 # available since 2.2 (or ae5f92e154d3)
213 213 formatteropts = getattr(
214 214 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
215 215 )
216 216
217 217 # for "historical portability":
218 218 # use locally defined option list, if debugrevlogopts isn't available,
219 219 # because commands.debugrevlogopts has been available since 3.7 (or
220 220 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
221 221 # since 1.9 (or a79fea6b3e77).
222 222 revlogopts = getattr(
223 223 cmdutil,
224 224 "debugrevlogopts",
225 225 getattr(
226 226 commands,
227 227 "debugrevlogopts",
228 228 [
229 229 (b'c', b'changelog', False, b'open changelog'),
230 230 (b'm', b'manifest', False, b'open manifest'),
231 231 (b'', b'dir', False, b'open directory manifest'),
232 232 ],
233 233 ),
234 234 )
235 235
236 236 cmdtable = {}
237 237
238 238 # for "historical portability":
239 239 # define parsealiases locally, because cmdutil.parsealiases has been
240 240 # available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a command spec such as b"name|alias1|alias2" into its parts.

    Local stand-in for cmdutil.parsealiases (only available since 1.5).
    """
    separator = b"|"
    return cmd.split(separator)
243 243
244 244
if safehasattr(registrar, 'command'):
    # preferred modern API: registrar-based command registration
    command = registrar.command(cmdtable)
elif safehasattr(cmdutil, 'command'):
    command = cmdutil.command(cmdtable)
    if 'norepo' not in getargspec(command).args:
        # for "historical portability":
        # wrap original cmdutil.command, because "norepo" option has
        # been available since 3.1 (or 75a96326cecb)
        _command = command

        def command(name, options=(), synopsis=None, norepo=False):
            # emulate norepo support by appending to commands.norepo
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return _command(name, list(options), synopsis)


else:
    # for "historical portability":
    # define "@command" annotation locally, because cmdutil.command
    # has been available since 1.9 (or 2daa5179e73f)
    def command(name, options=(), synopsis=None, norepo=False):
        def decorator(func):
            if synopsis:
                cmdtable[name] = func, list(options), synopsis
            else:
                cmdtable[name] = func, list(options)
            if norepo:
                commands.norepo += b' %s' % b' '.join(parsealiases(name))
            return func

        return decorator
276 276
277 277
278 278 try:
279 279 import mercurial.registrar
280 280 import mercurial.configitems
281 281
282 282 configtable = {}
283 283 configitem = mercurial.registrar.configitem(configtable)
284 284 configitem(
285 285 b'perf',
286 286 b'presleep',
287 287 default=mercurial.configitems.dynamicdefault,
288 288 experimental=True,
289 289 )
290 290 configitem(
291 291 b'perf',
292 292 b'stub',
293 293 default=mercurial.configitems.dynamicdefault,
294 294 experimental=True,
295 295 )
296 296 configitem(
297 297 b'perf',
298 298 b'parentscount',
299 299 default=mercurial.configitems.dynamicdefault,
300 300 experimental=True,
301 301 )
302 302 configitem(
303 303 b'perf',
304 304 b'all-timing',
305 305 default=mercurial.configitems.dynamicdefault,
306 306 experimental=True,
307 307 )
308 308 configitem(
309 309 b'perf',
310 310 b'pre-run',
311 311 default=mercurial.configitems.dynamicdefault,
312 312 )
313 313 configitem(
314 314 b'perf',
315 315 b'profile-benchmark',
316 316 default=mercurial.configitems.dynamicdefault,
317 317 )
318 318 configitem(
319 319 b'perf',
320 320 b'run-limits',
321 321 default=mercurial.configitems.dynamicdefault,
322 322 experimental=True,
323 323 )
324 324 except (ImportError, AttributeError):
325 325 pass
326 326 except TypeError:
327 327 # compatibility fix for a11fd395e83f
328 328 # hg version: 5.2
329 329 configitem(
330 330 b'perf',
331 331 b'presleep',
332 332 default=mercurial.configitems.dynamicdefault,
333 333 )
334 334 configitem(
335 335 b'perf',
336 336 b'stub',
337 337 default=mercurial.configitems.dynamicdefault,
338 338 )
339 339 configitem(
340 340 b'perf',
341 341 b'parentscount',
342 342 default=mercurial.configitems.dynamicdefault,
343 343 )
344 344 configitem(
345 345 b'perf',
346 346 b'all-timing',
347 347 default=mercurial.configitems.dynamicdefault,
348 348 )
349 349 configitem(
350 350 b'perf',
351 351 b'pre-run',
352 352 default=mercurial.configitems.dynamicdefault,
353 353 )
354 354 configitem(
355 355 b'perf',
356 356 b'profile-benchmark',
357 357 default=mercurial.configitems.dynamicdefault,
358 358 )
359 359 configitem(
360 360 b'perf',
361 361 b'run-limits',
362 362 default=mercurial.configitems.dynamicdefault,
363 363 )
364 364
365 365
def getlen(ui):
    """Return a length function; a constant 1 when perf.stub is set.

    In stub mode benchmarks run once, so pretending every collection has
    one element keeps the reported counts consistent.
    """
    if not ui.configbool(b"perf", b"stub", False):
        return len
    return lambda x: 1
370 370
371 371
class noop:
    """Context manager that does nothing on entry or exit."""

    def __enter__(self):
        return None

    def __exit__(self, *args):
        return None


# shared do-nothing instance, used where a profiler context is optional
NOOPCTX = noop()
383 383
384 384
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter:
            """Minimized composition of baseformatter and plainformatter"""

            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short

            def __nonzero__(self):
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    # each entry is "<seconds>-<min-run-count>"; malformed entries are
    # warned about and skipped
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
            continue
        try:
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    t = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
    )
    return t, fm
507 507
508 508
def stub_timer(fm, func, setup=None, title=None):
    """Invoke *func* exactly once, running *setup* first when provided.

    Replacement for _timer in perf.stub mode; *fm* and *title* are
    accepted only for signature compatibility and produce no output.
    """
    has_setup = setup is not None
    if has_setup:
        setup()
    func()
513 513
514 514
@contextlib.contextmanager
def timeone():
    """Time one code block.

    Yields a list; on exit, a single (wall-clock, user-cpu, system-cpu)
    tuple is appended to it.
    """
    r = []
    ostart = os.times()
    cstart = util.timer()
    yield r
    cstop = util.timer()
    ostop = os.times()
    a, b = ostart, ostop
    # os.times(): index 0 is user time, index 1 is system time
    r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
525 525
526 526
# list of stop condition (elapsed time, minimal run count)
DEFAULTLIMITS = (
    (3.0, 100),
    (10.0, 3),
)


def _timer(
    fm,
    func,
    setup=None,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
):
    """Repeatedly run *func* (preceded by *setup* when given) and report timings.

    The measurement loop stops as soon as one (elapsed-seconds,
    min-run-count) pair in *limits* is satisfied.  *prerun* unmeasured
    warm-up iterations run first; only the first measured iteration runs
    under *profiler*.  Results are emitted through formatone().
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    for i in range(prerun):
        # warm-up iterations: executed but never measured
        if setup is not None:
            setup()
        func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with profiler:
            with timeone() as item:
                r = func()
        # only the first measured iteration is profiled
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    formatone(fm, results, title=title, result=r, displayall=displayall)
573 573
574 574
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's timing entry to formatter *fm*.

    *timings* is a list of (wall, user, sys) tuples.  Only the best run
    is shown unless *displayall* is set, in which case max, average and
    median are reported as well.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def display(role, entry):
        # every role except b'best' gets a "<role>." field-name prefix
        prefix = b'' if role == b'best' else b'%s.' % role
        wall, user_t, sys_t = entry[0], entry[1], entry[2]
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', wall)
        fm.write(prefix + b'comb', b' comb %f', user_t + sys_t)
        fm.write(prefix + b'user', b' user %f', user_t)
        fm.write(prefix + b'sys', b' sys %f', sys_t)
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    display(b'best', timings[0])
    if displayall:
        display(b'max', timings[-1])
        averages = tuple(sum(column) / count for column in zip(*timings))
        display(b'avg', averages)
        display(b'median', timings[count // 2])
608 608
609 609
610 610 # utilities for historical portability
611 611
612 612
def getint(ui, section, name, default):
    """Read config *section*.*name* as an int, falling back to *default*.

    ui.configint has only been available since 1.9 (or fa2b596db182),
    hence the manual conversion ("historical portability").  Raises
    error.ConfigError when the configured value is not an integer.
    """
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError(
            b"%s.%s is not an integer ('%s')" % (section, name, raw)
        )
625 625
626 626
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort(
            (
                b"missing attribute %s of %s might break assumption"
                b" of performance measurement"
            )
            % (name, obj)
        )

    # snapshot of the current value, for restore()
    origvalue = getattr(obj, _sysstr(name))

    class attrutil:
        # small handle used to overwrite and later restore the attribute
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
663 663
664 664
665 665 # utilities to examine each internal API changes
666 666
667 667
def getbranchmapsubsettable():
    """Return the 'subsettable' mapping, wherever this hg version keeps it."""
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    # - repoviewutil since 5.0
    for mod in (branchmap, repoview, repoviewutil):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )
686 686
687 687
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store"""
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf);
    # older versions expose the same thing as "sopener".
    svfs = getattr(repo, 'svfs', None)
    return svfs if svfs else getattr(repo, 'sopener')
697 697
698 698
def getvfs(repo):
    """Return appropriate object to access files under .hg"""
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf);
    # older versions expose the same thing as "opener".
    vfs = getattr(repo, 'vfs', None)
    if not vfs:
        return getattr(repo, 'opener')
    return vfs
708 708
709 709
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API"""
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if '_tagscache' in vars(repo):
                del repo.__dict__['_tagscache']

        return clearcache

    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")
738 738
739 739
740 740 # utilities to clear cache
741 741
742 742
def clearfilecache(obj, attrname):
    """Drop the cached value of filecache property *attrname* on *obj*."""
    # operate on the unfiltered object when obj is a repoview
    unfilter = getattr(obj, 'unfiltered', None)
    if unfilter is not None:
        obj = unfilter()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
750 750
751 751
def clearchangelog(repo):
    """Drop cached changelog data so the next access re-reads it."""
    if repo is not repo.unfiltered():
        # repoview keeps its own cached changelog; these attributes are
        # managed via object.__setattr__, so reset them the same way
        object.__setattr__(repo, '_clcachekey', None)
        object.__setattr__(repo, '_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')
757 757
758 758
759 759 # perf commands
760 760
761 761
@command(b'perf::walk|perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    """benchmark walking the dirstate for files matching *pats*."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    timer(
        lambda: len(
            list(
                repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
            )
        )
    )
    fm.end()
775 775
776 776
@command(b'perf::annotate|perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    """benchmark annotating file *f* at the working directory parent."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    timer(lambda: len(fc.annotate(True)))
    fm.end()
784 784
785 785
@command(
    b'perf::status|perfstatus',
    [
        (b'u', b'unknown', False, b'ask status to look for unknown files'),
        (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
    ]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call

    The repository data are preserved between each call.

    By default, only the status of the tracked file are requested. If
    `--unknown` is passed, the "unknown" files are also tracked.
    """
    opts = _byteskwargs(opts)
    # m = match.always(repo.root, repo.getcwd())
    # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    # False))))
    timer, fm = gettimer(ui, opts)
    if opts[b'dirstate']:
        # benchmark the low-level dirstate.status call directly
        dirstate = repo.dirstate
        m = scmutil.matchall(repo)
        unknown = opts[b'unknown']

        def status_dirstate():
            s = dirstate.status(
                m, subrepos=[], ignored=False, clean=False, unknown=unknown
            )
            # consume the result so any lazy work is actually performed
            sum(map(bool, s))

        timer(status_dirstate)
    else:
        timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
822 822
823 823
@command(b'perf::addremove|perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    """benchmark a dry-run addremove over the working copy."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    try:
        oldquiet = repo.ui.quiet
        repo.ui.quiet = True  # silence per-file output during the runs
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True  # never mutate the repository
        if 'uipathfn' in getargspec(scmutil.addremove).args:
            # newer hg passes a ui path function to addremove
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
841 841
842 842
def clearcaches(cl):
    """Clear a changelog/revlog's lookup caches, across hg versions."""
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # <= hg-5.2
        from mercurial.node import nullid, nullrev

        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
853 853
854 854
@command(b'perf::heads|perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def s():
        # setup: drop changelog caches so every run recomputes from scratch
        clearcaches(cl)

    def d():
        len(cl.headrevs())

    timer(d, setup=s)
    fm.end()
870 870
871 871
@command(
    b'perf::tags|perftags',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perftags(ui, repo, **opts):
    """benchmark the computation of the repository tags."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']

    def s():
        # setup: optionally reload revlogs, always drop the tags cache
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()

    def t():
        return len(repo.tags())

    timer(t, setup=s)
    fm.end()
896 896
897 897
@command(b'perf::ancestors|perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    """benchmark iterating over all ancestors of the changelog heads."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()

    def d():
        for a in repo.changelog.ancestors(heads):
            pass

    timer(d)
    fm.end()
910 910
911 911
@command(b'perf::ancestorset|perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    """benchmark membership tests of REVSET revisions against heads' ancestors."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()

    def d():
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in s  # membership test only; result deliberately discarded

    timer(d)
    fm.end()
926 926
927 927
@command(
    b'perf::delta-find',
    revlogopts + formatteropts,
    b'-c|-m|FILE REV',
)
def perf_delta_find(ui, repo, arg_1, arg_2=None, **opts):
    """benchmark the process of finding a valid delta for a revlog revision

    When a revlog receives a new revision (e.g. from a commit, or from an
    incoming bundle), it searches for a suitable delta-base to produce a delta.
    This perf command measures how much time we spend in this process. It
    operates on an already stored revision.

    See `hg help debug-delta-find` for another related command.
    """
    from mercurial import revlogutils
    import mercurial.revlogutils.deltas as deltautil

    opts = _byteskwargs(opts)
    # positional arguments: either "REV" alone, or "FILE REV"
    if arg_2 is None:
        file_ = None
        rev = arg_1
    else:
        file_ = arg_1
        rev = arg_2

    repo = repo.unfiltered()

    timer, fm = gettimer(ui, opts)

    rev = int(rev)

    revlog = cmdutil.openrevlog(repo, b'perf::delta-find', file_, opts)

    deltacomputer = deltautil.deltacomputer(revlog)

    # rebuild the revisioninfo that would be seen when storing this
    # revision anew, so finddeltainfo has realistic input
    node = revlog.node(rev)
    p1r, p2r = revlog.parentrevs(rev)
    p1 = revlog.node(p1r)
    p2 = revlog.node(p2r)
    full_text = revlog.revision(rev)
    textlen = len(full_text)
    cachedelta = None
    flags = revlog.flags(rev)

    revinfo = revlogutils.revisioninfo(
        node,
        p1,
        p2,
        [full_text],  # btext
        textlen,
        cachedelta,
        flags,
    )

    # Note: we should probably purge the potential caches (like the full
    # manifest cache) between runs.
    def find_one():
        with revlog._datafp() as fh:
            deltacomputer.finddeltainfo(revinfo, fh, target_rev=rev)

    timer(find_one)
    fm.end()
991 991
992 992
@command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path"""
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)

    try:
        # hg >= 6.4: resolve PATH to a path object and hand that object
        # to hg.peer(), so per-path configuration is honoured
        from mercurial.utils.urlutil import get_unique_pull_path_obj

        path = get_unique_pull_path_obj(b'perfdiscovery', ui, path)
    except ImportError:
        try:
            # older API: returns a (url, branches) tuple of plain strings
            from mercurial.utils.urlutil import get_unique_pull_path

            path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
        except ImportError:
            # fallback for hg versions predating urlutil helpers
            path = ui.expandpath(path)

    def s():
        # setup: (re)connect to the peer; connection cost is not measured
        repos[1] = hg.peer(ui, opts, path)

    def d():
        setdiscovery.findcommonheads(ui, *repos)

    timer(d, setup=s)
    fm.end()
1014 1019
1015 1020
@command(
    b'perf::bookmarks|perfbookmarks',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']

    def s():
        # setup: drop the cached bookmark store (and optionally revlogs)
        if clearrevlogs:
            clearchangelog(repo)
        clearfilecache(repo, b'_bookmarks')

    def d():
        # attribute access re-reads bookmarks since the cache was cleared
        repo._bookmarks

    timer(d, setup=s)
    fm.end()
1040 1045
1041 1046
@command(
    b'perf::bundle',
    [
        (
            b'r',
            b'rev',
            [],
            b'changesets to bundle',
            b'REV',
        ),
        (
            b't',
            b'type',
            b'none',
            b'bundlespec to use (see `hg help bundlespec`)',
            b'TYPE',
        ),
    ]
    + formatteropts,
    b'REVS',
)
def perfbundle(ui, repo, *revs, **opts):
    """benchmark the creation of a bundle from a repository

    For now, this only supports "none" compression.
    """
    # parsebundlespec moved to bundlecaches; fall back for older Mercurial
    try:
        from mercurial import bundlecaches

        parsebundlespec = bundlecaches.parsebundlespec
    except ImportError:
        from mercurial import exchange

        parsebundlespec = exchange.parsebundlespec

    from mercurial import discovery
    from mercurial import bundle2

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    cl = repo.changelog
    revs = list(revs)
    revs.extend(opts.get(b'rev', ()))
    revs = scmutil.revrange(repo, revs)
    if not revs:
        # fix: error message previously read "not revision specified"
        raise error.Abort(b"no revision specified")
    # make it a consistent set (ie: without topological gaps)
    old_len = len(revs)
    revs = list(repo.revs(b"%ld::%ld", revs, revs))
    if old_len != len(revs):
        new_count = len(revs) - old_len
        msg = b"add %d new revisions to make it a consistent set\n"
        ui.write_err(msg % new_count)

    targets = [cl.node(r) for r in repo.revs(b"heads(::%ld)", revs)]
    bases = [cl.node(r) for r in repo.revs(b"heads(::%ld - %ld)", revs, revs)]
    outgoing = discovery.outgoing(repo, bases, targets)

    bundle_spec = opts.get(b'type')

    bundle_spec = parsebundlespec(repo, bundle_spec, strict=False)

    cgversion = bundle_spec.params.get(b"cg.version")
    if cgversion is None:
        if bundle_spec.version == b'v1':
            cgversion = b'01'
        if bundle_spec.version == b'v2':
            cgversion = b'02'
    if cgversion not in changegroup.supportedoutgoingversions(repo):
        err = b"repository does not support bundle version %s"
        raise error.Abort(err % cgversion)

    if cgversion == b'01':  # bundle1
        bversion = b'HG10' + bundle_spec.wirecompression
        bcompression = None
    elif cgversion in (b'02', b'03'):
        bversion = b'HG20'
        bcompression = bundle_spec.wirecompression
    else:
        err = b'perf::bundle: unexpected changegroup version %s'
        raise error.ProgrammingError(err % cgversion)

    if bcompression is None:
        bcompression = b'UN'

    if bcompression != b'UN':
        err = b'perf::bundle: compression currently unsupported: %s'
        raise error.ProgrammingError(err % bcompression)

    def do_bundle():
        # write to os.devnull: only bundle *generation* is measured
        bundle2.writenewbundle(
            ui,
            repo,
            b'perf::bundle',
            os.devnull,
            bversion,
            outgoing,
            bundle_spec.params,
        )

    timer(do_bundle)
    fm.end()
1145 1150
1146 1151
@command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    # factory helpers: each returns a zero-argument callable suitable for
    # timer(); the file is reopened on every run so OS caches aside, each
    # run starts from the same state
    def makebench(fn):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    def makereadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    def makestdioread(size):
        # raw file read, bypassing bundle parsing entirely (baseline)
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # open once just to sniff the bundle type and pick matching benchmarks
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend(
                [
                    (makebench(deltaiter), b'cg1 deltaiter()'),
                    (makebench(iterchunks), b'cg1 getchunks()'),
                    (makereadnbytes(8192), b'cg1 read(8k)'),
                    (makereadnbytes(16384), b'cg1 read(16k)'),
                    (makereadnbytes(32768), b'cg1 read(32k)'),
                    (makereadnbytes(131072), b'cg1 read(128k)'),
                ]
            )
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend(
                [
                    (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                    (makebench(iterparts), b'bundle2 iterparts()'),
                    (
                        makebench(iterpartsseekable),
                        b'bundle2 iterparts() seekable',
                    ),
                    (makebench(seek), b'bundle2 part seek()'),
                    (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                    (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                    (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                    (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
                ]
            )
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1271 1276
1272 1277
@command(
    b'perf::changegroupchangelog|perfchangegroupchangelog',
    formatteropts
    + [
        (b'', b'cgversion', b'02', b'changegroup version'),
        (b'r', b'rev', b'', b'revisions to add to changegroup'),
    ],
)
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        # drain the chunk generator; generation is the benchmarked work
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()
1308 1313
1309 1314
@command(b'perf::dirs|perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark a `dirstate.hasdir` call, dropping the `_dirs` cache
    after each call so every run recomputes it"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate so its load time is not part of the measurement
    b'a' in dirstate

    def d():
        dirstate.hasdir(b'a')
        # drop the cached dirs structure; older dirstate maps may not
        # expose `_dirs`, hence the AttributeError guard
        try:
            del dirstate._map._dirs
        except AttributeError:
            pass

    timer(d)
    fm.end()
1326 1331
1327 1332
@command(
    b'perf::dirstate|perfdirstate',
    [
        (
            b'',
            b'iteration',
            None,
            b'benchmark a full iteration for the dirstate',
        ),
        (
            b'',
            b'contains',
            None,
            b'benchmark a large amount of `nf in dirstate` calls',
        ),
    ]
    + formatteropts,
)
def perfdirstate(ui, repo, **opts):
    """benchmark the time of various dirstate operations

    By default benchmark the time necessary to load a dirstate from scratch.
    The dirstate is loaded to the point where a "contains" request can be
    answered.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # prime the dirstate before any benchmarking
    b"a" in repo.dirstate

    if opts[b'iteration'] and opts[b'contains']:
        msg = b'only specify one of --iteration or --contains'
        raise error.Abort(msg)

    if opts[b'iteration']:
        # measure a full iteration over the (already loaded) dirstate
        setup = None
        dirstate = repo.dirstate

        def d():
            for f in dirstate:
                pass

    elif opts[b'contains']:
        # measure many membership tests, half hits and half misses
        setup = None
        dirstate = repo.dirstate
        allfiles = list(dirstate)
        # also add file path that will be "missing" from the dirstate
        allfiles.extend([f[::-1] for f in allfiles])

        def d():
            for f in allfiles:
                f in dirstate

    else:
        # default: measure a from-scratch load, invalidating between runs

        def setup():
            repo.dirstate.invalidate()

        def d():
            b"a" in repo.dirstate

    timer(d, setup=setup)
    fm.end()
1390 1395
1391 1396
@command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark a 'dirstate.hasdir' call from an empty `dirs` cache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # prime the dirstate itself; only the dirs-cache rebuild is measured
    repo.dirstate.hasdir(b"a")

    def setup():
        # drop the cached dirs structure so each run rebuilds it; older
        # dirstate maps may not expose `_dirs`
        try:
            del repo.dirstate._map._dirs
        except AttributeError:
            pass

    def d():
        repo.dirstate.hasdir(b"a")

    timer(d, setup=setup)
    fm.end()
1410 1415
1411 1416
@command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.filefoldmap.get()` request

    The dirstate filefoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate; only the foldmap rebuild is measured
    dirstate._map.filefoldmap.get(b'a')

    def setup():
        # drop the propertycache so each run recomputes the foldmap
        del dirstate._map.filefoldmap

    def d():
        dirstate._map.filefoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()
1431 1436
1432 1437
@command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.dirfoldmap.get()` request

    The dirstate dirfoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate; only the cache rebuild is measured
    dirstate._map.dirfoldmap.get(b'a')

    def setup():
        # drop both the dirfoldmap and the underlying dirs cache (the
        # latter may not exist on all dirstate map implementations)
        del dirstate._map.dirfoldmap
        try:
            del dirstate._map._dirs
        except AttributeError:
            pass

    def d():
        dirstate._map.dirfoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()
1456 1461
1457 1462
@command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark the time it takes to write a dirstate on disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # prime the dirstate so loading is not part of the measurement
    b"a" in ds

    def setup():
        # mark the dirstate dirty so write() actually writes
        ds._dirty = True

    def d():
        ds.write(repo.currenttransaction())

    timer(d, setup=setup)
    fm.end()
1474 1479
1475 1480
def _getmergerevs(repo, opts):
    """parse command arguments to return the revisions involved in a merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        fromrev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[fromrev]
    else:
        # no explicit base: use the actual common ancestor
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
1497 1502
1498 1503
@command(
    b'perf::mergecalculate|perfmergecalculate',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecalculate(ui, repo, **opts):
    """benchmark `merge.calculateupdates` between two revisions"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(
            repo,
            wctx,
            rctx,
            [ancestor],
            branchmerge=False,
            force=False,
            acceptremote=True,
            followcopies=True,
        )

    timer(d)
    fm.end()
1530 1535
1531 1536
@command(
    b'perf::mergecopies|perfmergecopies',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        copies.mergecopies(repo, wctx, rctx, ancestor)

    timer(d)
    fm.end()
1554 1559
1555 1560
@command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # resolve both revisions once, outside the timed loop
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)

    def d():
        copies.pathcopies(ctx1, ctx2)

    timer(d)
    fm.end()
1569 1574
1570 1575
@command(
    b'perf::phases|perfphases',
    [
        (b'', b'full', False, b'include file reading time too'),
    ],
    b"",
)
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')

    def d():
        phases = _phases
        if full:
            # --full: also drop the filecache so on-disk reading is timed
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)

    timer(d)
    fm.end()
1595 1600
1596 1601
@command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import bin
    from mercurial import (
        exchange,
        hg,
        phases,
    )

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            b'default repository not configured!',
            hint=b"see 'hg help config.paths'",
        )
    # newer Mercurial returns a path object with push-variant support;
    # older versions expose pushloc/loc attributes directly
    if util.safehasattr(path, 'main_path'):
        path = path.get_push_variant()
        dest = path.loc
    else:
        dest = path.pushloc or path.loc
    ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    with other.commandexecutor() as e:
        remotephases = e.callcommand(
            b'listkeys', {b'namespace': b'phases'}
        ).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.statusnoi18n(b'publishing: yes\n')
    else:
        ui.statusnoi18n(b'publishing: no\n')

    # nodemap API differs across versions; prefer index.has_node
    has_node = getattr(repo.changelog.index, 'has_node', None)
    if has_node is None:
        has_node = repo.changelog.nodemap.__contains__
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if has_node(node) and int(phase):
            nonpublishroots += 1
    ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
    ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)

    def d():
        phases.remotephasessummary(repo, remotesubset, remotephases)

    timer(d)
    fm.end()
1659 1664
1660 1665
@command(
    b'perf::manifest|perfmanifest',
    [
        (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
        (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ]
    + formatteropts,
    b'REV|NODE',
)
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # REV is a changeset revision: resolve its manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # full hex node given directly
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(
                    b'manifest revision must be integer or full node'
                )

    def d():
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()

    timer(d)
    fm.end()
1704 1709
1705 1710
@command(b'perf::changeset|perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading a single changeset entry from the changelog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    n = scmutil.revsingle(repo, rev).node()

    def d():
        repo.changelog.read(n)
        # repo.changelog._cache = None

    timer(d)
    fm.end()
1718 1723
1719 1724
@command(b'perf::ignore|perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def setupone():
        # drop the cached ignore matcher so each run rebuilds it
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def runone():
        # property access triggers (benchmarked) hgignore parsing
        dirstate._ignore

    timer(runone, setup=setupone, title=b"load")
    fm.end()
1736 1741
1737 1742
@command(
    b'perf::index|perfindex',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'no-lookup', None, b'do not revision lookup post creation'),
    ]
    + formatteropts,
)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:

    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        # NOTE(review): str key while other accesses use bytes keys after
        # _byteskwargs — looks inconsistent, confirm intended
        if opts['rev']:
            raise error.Abort('--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func

    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)

    def d():
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)

    timer(d, setup=setup)
    fm.end()
1800 1805
1801 1806
@command(
    b'perf::nodemap|perfnodemap',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ]
    + formatteropts,
)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]

    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        cl = makecl(unfi)
        # index.get_rev is the modern API; fall back to nodemap.get
        if util.safehasattr(cl.index, 'get_rev'):
            nodeget[0] = cl.index.get_rev
        else:
            nodeget[0] = cl.nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:

        def setup():
            setnodeget()

    else:
        setnodeget()
        d()  # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
1872 1877
1873 1878
@command(b'perf::startup|perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark the startup time of `hg version` in a subprocess"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def d():
        # spawn with an empty HGRCPATH so config loading cost is excluded
        if os.name != 'nt':
            os.system(
                b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
            )
        else:
            os.environ['HGRCPATH'] = r' '
            os.system("%s version -q > NUL" % sys.argv[0])

    timer(d)
    fm.end()
1890 1895
1891 1896
@command(b'perf::parents|perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    nl = [repo.changelog.node(i) for i in _xrange(count)]

    def d():
        for n in nl:
            repo.changelog.parents(n)

    timer(d)
    fm.end()
1917 1922
1918 1923
@command(b'perf::ctxfiles|perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark reading the file list of revision X through the context API"""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)

    def d():
        len(repo[x].files())

    timer(d)
    fm.end()
1930 1935
1931 1936
@command(b'perf::rawfiles|perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading the file list of revision X straight from the
    changelog (no context-layer overhead)"""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def d():
        # index 3 of the parsed changelog entry is the files list
        len(cl.read(x)[3])

    timer(d)
    fm.end()
1944 1949
1945 1950
@command(b'perf::lookup|perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving REV to a node via repo.lookup()"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def d():
        # len() forces full evaluation of the returned node
        return len(repo.lookup(rev))

    timer(d)
    fm.end()
1952 1957
1953 1958
@command(
    b'perf::linelogedits|perflinelogedits',
    [
        (b'n', b'edits', 10000, b'number of edits'),
        (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
    ],
    norepo=True,
)
def perflinelogedits(ui, **opts):
    """benchmark applying a fixed pseudo-random sequence of edits to a
    linelog (seeded, so every run replays the same edit list)"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed: the generated edit sequence is deterministic
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1991 1996
1992 1997
@command(b'perf::revrange|perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving revset specs via scmutil.revrange"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    # bind the helper once so only the revset resolution is timed
    revrange = scmutil.revrange

    def d():
        return len(revrange(repo, specs))

    timer(d)
    fm.end()
2000 2005
2001 2006
@command(b'perf::nodelookup|perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark a node-to-rev lookup on a freshly opened changelog,
    clearing revlog caches between runs"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog

    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()

    # revlog constructor signature changed across versions (radix vs
    # indexfile); try the modern form first
    try:
        cl = revlog(getsvfs(repo), radix=b"00changelog")
    except TypeError:
        cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")

    def d():
        cl.rev(n)
        clearcaches(cl)

    timer(d)
    fm.end()
2022 2027
2023 2028
@command(
    b'perf::log|perflog',
    [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
)
def perflog(ui, repo, rev=None, **opts):
    """benchmark running the `log` command (output is discarded)"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)

    def d():
        commands.log(
            ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
        )

    # buffer the ui so the log output itself is not written anywhere
    ui.pushbuffer()
    timer(d)
    ui.popbuffer()
    fm.end()
2041 2046
2042 2047
@command(b'perf::moonwalk|perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def moonwalk():
        for r in repo.changelog.revs(start=len(repo) - 1, stop=-1):
            # reading the branch forces the changelog entry itself to be
            # parsed, not just the index row
            repo[r].branch()

    timer(moonwalk)
    fm.end()
2059 2064
2060 2065
@command(
    b'perf::templating|perftemplating',
    [
        (b'r', b'rev', [], b'revisions to run the template on'),
    ]
    + formatteropts,
)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        # makelogtemplater only exists from Mercurial 4.3 on
        raise error.Abort(
            b"perftemplating not available with this Mercurial",
            hint=b"use 4.3 or later",
        )

    opts = _byteskwargs(opts)

    # render to /dev/null so terminal output does not pollute the timing
    nullui = ui.copy()
    nullui.fout = open(os.devnull, 'wb')
    nullui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (
        b'{date|shortdate} [{rev}:{node|short}]'
        b' {author|person}: {desc|firstline}\n'
    )
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(nullui, repo, testedtemplate)

    def format():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(format)
    fm.end()
2103 2108
2104 2109
def _displaystats(ui, opts, entries, data):
    """render distribution statistics for measured data

    ``entries`` is a list of ``(key, title)`` pairs; ``data`` maps each key
    to a list of measurement tuples whose first element is the measured
    value (the remaining elements identify the measurement and are ignored
    here).  Keys with no measurements are skipped.
    """
    # use a second formatter because the data are quite different, not sure
    # how it flies with the templater.
    fm = ui.formatter(b'perf-stats', opts)
    for key, title in entries:
        values = data[key]
        if not values:
            # nothing measured for this key; the percentile indexing below
            # would raise IndexError on an empty list
            continue
        # fixed: this used to be len(data) (the number of *keys*), which
        # made every percentile index below point near values[0]
        nbvalues = len(values)
        values.sort()
        stats = {
            'key': key,
            'title': title,
            'nbitems': len(values),
            'min': values[0][0],
            '10%': values[(nbvalues * 10) // 100][0],
            '25%': values[(nbvalues * 25) // 100][0],
            '50%': values[(nbvalues * 50) // 100][0],
            '75%': values[(nbvalues * 75) // 100][0],
            '80%': values[(nbvalues * 80) // 100][0],
            '85%': values[(nbvalues * 85) // 100][0],
            '90%': values[(nbvalues * 90) // 100][0],
            '95%': values[(nbvalues * 95) // 100][0],
            '99%': values[(nbvalues * 99) // 100][0],
            'max': values[-1][0],
        }
        fm.startitem()
        fm.data(**stats)
        # make node pretty for the human output
        fm.plain('### %s (%d items)\n' % (title, len(values)))
        lines = [
            'min',
            '10%',
            '25%',
            '50%',
            '75%',
            '80%',
            '85%',
            '90%',
            '95%',
            '99%',
            'max',
        ]
        for l in lines:
            fm.plain('%s: %s\n' % (l, stats[l]))
    fm.end()
2149 2154
2150 2155
@command(
    b'perf::helper-mergecopies|perfhelper-mergecopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelpermergecopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perfmergecopies`

    This command find (base, p1, p2) triplet relevant for copytracing
    benchmarking in the context of a merge. It reports values for some of the
    parameters that impact merge copy tracing time during merge.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision triplets are very costly.
    """
    # NOTE: the mutable default ``revs=[]`` is safe here -- it is never
    # mutated, only rebound below.
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # column name -> %-format pairs for the tabular output
    output_template = [
        ("base", "%(base)12s"),
        ("p1", "%(p1.node)12s"),
        ("p2", "%(p2.node)12s"),
        ("p1.nb-revs", "%(p1.nbrevs)12d"),
        ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
        ("p1.renames", "%(p1.renamedfiles)12d"),
        ("p1.time", "%(p1.time)12.3f"),
        ("p2.nb-revs", "%(p2.nbrevs)12d"),
        ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
        ("p2.renames", "%(p2.renamedfiles)12d"),
        ("p2.time", "%(p2.time)12.3f"),
        ("renames", "%(nbrenamedfiles)12d"),
        ("total.time", "%(time)12.3f"),
    ]
    if not dotiming:
        # timing/rename columns only exist when --timing is requested
        output_template = [
            i
            for i in output_template
            if not ('time' in i[0] or 'renames' in i[0])
        ]
    header_names = [h for (h, v) in output_template]
    output = ' '.join([v for (h, v) in output_template]) + '\n'
    header = ' '.join(['%12s'] * len(header_names)) + '\n'
    fm.plain(header % tuple(header_names))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        # per-metric lists of (value, identifying hexes) for _displaystats
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['parentnbrenames'] = []
            alldata['totalnbrenames'] = []
            alldata['parenttime'] = []
            alldata['totaltime'] = []

    # only merge revisions are relevant for (base, p1, p2) triplets
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1()
        p2 = ctx.p2()
        bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
        for b in bases:
            b = repo[b]
            p1missing = copies._computeforwardmissing(b, p1)
            p2missing = copies._computeforwardmissing(b, p2)
            data = {
                b'base': b.hex(),
                b'p1.node': p1.hex(),
                b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
                b'p1.nbmissingfiles': len(p1missing),
                b'p2.node': p2.hex(),
                b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
                b'p2.nbmissingfiles': len(p2missing),
            }
            if dostats:
                if p1missing:
                    alldata['nbrevs'].append(
                        (data['p1.nbrevs'], b.hex(), p1.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p1.nbmissingfiles'], b.hex(), p1.hex())
                    )
                if p2missing:
                    alldata['nbrevs'].append(
                        (data['p2.nbrevs'], b.hex(), p2.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p2.nbmissingfiles'], b.hex(), p2.hex())
                    )
            if dotiming:
                begin = util.timer()
                mergedata = copies.mergecopies(repo, p1, p2, b)
                end = util.timer()
                # not very stable timing since we did only one run
                data['time'] = end - begin
                # mergedata contains five dicts: "copy", "movewithdir",
                # "diverge", "renamedelete" and "dirmove".
                # The first 4 are about renamed file so lets count that.
                renames = len(mergedata[0])
                renames += len(mergedata[1])
                renames += len(mergedata[2])
                renames += len(mergedata[3])
                data['nbrenamedfiles'] = renames
                begin = util.timer()
                p1renames = copies.pathcopies(b, p1)
                end = util.timer()
                data['p1.time'] = end - begin
                begin = util.timer()
                p2renames = copies.pathcopies(b, p2)
                end = util.timer()
                data['p2.time'] = end - begin
                data['p1.renamedfiles'] = len(p1renames)
                data['p2.renamedfiles'] = len(p2renames)

                if dostats:
                    if p1missing:
                        alldata['parentnbrenames'].append(
                            (data['p1.renamedfiles'], b.hex(), p1.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p1.time'], b.hex(), p1.hex())
                        )
                    if p2missing:
                        alldata['parentnbrenames'].append(
                            (data['p2.renamedfiles'], b.hex(), p2.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p2.time'], b.hex(), p2.hex())
                        )
                    if p1missing or p2missing:
                        alldata['totalnbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                b.hex(),
                                p1.hex(),
                                p2.hex(),
                            )
                        )
                        alldata['totaltime'].append(
                            (data['time'], b.hex(), p1.hex(), p2.hex())
                        )
            fm.startitem()
            fm.data(**data)
            # make node pretty for the human output
            out = data.copy()
            out['base'] = fm.hexfunc(b.node())
            out['p1.node'] = fm.hexfunc(p1.node())
            out['p2.node'] = fm.hexfunc(p2.node())
            fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(
                ('parentnbrenames', 'rename from one parent to base')
            )
            entries.append(('totalnbrenames', 'total number of renames'))
            entries.append(('parenttime', 'time for one parent'))
            entries.append(('totaltime', 'time for both parents'))
        _displaystats(ui, opts, entries, alldata)
2332 2337
2333 2338
@command(
    b'perf::helper-pathcopies|perfhelper-pathcopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    # NOTE: the mutable default ``revs=[]`` is safe here -- it is never
    # mutated, only rebound below.
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d "
            "%(nbrenamedfiles)12d %(time)18.5f\n"
        )
        header_names = (
            "source",
            "destination",
            "nb-revs",
            "nb-files",
            "nb-renames",
            "time",
        )
        fm.plain(header % header_names)
    else:
        # without --timing the rename/time columns are omitted
        header = '%12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d\n"
        )
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        # per-metric lists of (value, source hex, destination hex)
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['nbrenames'] = []
            alldata['time'] = []

    # only merge revisions provide interesting (base, parent) pairs
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    continue
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
                    b'nbmissingfiles': len(missing),
                }
                if dostats:
                    alldata['nbrevs'].append(
                        (
                            data['nbrevs'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                    alldata['nbmissingfiles'].append(
                        (
                            data['nbmissingfiles'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                    if dostats:
                        alldata['time'].append(
                            (
                                data['time'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                        alldata['nbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                fm.startitem()
                fm.data(**data)
                # make node pretty for the human output
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
    if dostats:
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('nbrenames', 'renamed files'))
            entries.append(('time', 'time'))
        _displaystats(ui, opts, entries, alldata)
2472 2477
2473 2478
@command(b'perf::cca|perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark constructing a case-collision auditor for the dirstate"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def d():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(d)
    fm.end()
2480 2485
2481 2486
@command(b'perf::fncacheload|perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache file from the store"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    timer(lambda: store.fncache._load())
    fm.end()
2493 2498
2494 2499
@command(b'perf::fncachewrite|perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark rewriting the fncache file inside a transaction

    The transaction backs up the fncache and is closed (not committed as a
    durable change to history) once timing is done.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    # fixed: release the lock even if loading, the transaction, or the
    # timed runs raise -- previously an exception leaked the repo lock
    try:
        s.fncache._load()
        tr = repo.transaction(b'perffncachewrite')
        tr.addbackup(b'fncache')

        def d():
            # force the dirty flag so write() actually rewrites the file
            # on every run instead of short-circuiting
            s.fncache._dirty = True
            s.fncache.write(tr)

        timer(d)
        tr.close()
    finally:
        lock.release()
    fm.end()
2513 2518
2514 2519
@command(b'perf::fncacheencode|perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark encoding every path currently stored in the fncache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    store.fncache._load()

    def encodeall():
        for path in store.fncache.entries:
            store.encode(path)

    timer(encodeall)
    fm.end()
2528 2533
2529 2534
def _bdiffworker(q, blocks, xdiff, ready, done):
    # Worker thread for perfbdiff --threads: diff every pair pulled from
    # ``q`` until a None sentinel is seen, then park on ``ready`` so the
    # main thread can wake all workers for the next timed run (or shutdown
    # once ``done`` is set).
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done()  # for the None one
        with ready:
            ready.wait()
2545 2550
2546 2551
def _manifestrevision(repo, mnode):
    """return the raw manifest text for manifest node ``mnode``"""
    ml = repo.manifestlog

    # modern Mercurial exposes getstorage(); older versions keep the
    # revlog directly on the manifest log
    if util.safehasattr(ml, b'getstorage'):
        return ml.getstorage(b'').revision(mnode)
    return ml._revlog.revision(mnode)
2556 2561
2557 2562
@command(
    b'perf::bdiff|perfbdiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
        (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
        (b'', b'blocks', False, b'test computing diffs into blocks'),
        (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],
    b'-c|-m|FILE REV',
)
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # with -c/-m the positional argument is the revision, not a file
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    # collect all (old, new) text pairs up front so only diffing is timed
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:

        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)

    else:
        # threaded mode: feed pairs through a queue to _bdiffworker threads;
        # None acts as the per-run end-of-work sentinel
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(
                target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
            ).start()
        q.join()

        def d():
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # signal shutdown and wake every worker parked on the condition
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
2672 2677
2673 2678
@command(
    b'perf::unbundle',
    formatteropts,
    b'BUNDLE_FILE',
)
def perf_unbundle(ui, repo, fname, **opts):
    """benchmark application of a bundle in a repository.

    This does not include the final transaction processing"""

    from mercurial import exchange
    from mercurial import bundle2
    from mercurial import transaction

    opts = _byteskwargs(opts)

    ### some compatibility hotfix
    #
    # the data attribute is dropped in 63edc384d3b7 a changeset introducing a
    # critical regression that break transaction rollback for files that are
    # de-inlined.
    method = transaction.transaction._addentry
    pre_63edc384d3b7 = "data" in getargspec(method).args
    # the `detailed_exit_code` attribute is introduced in 33c0c25d0b0f
    # a changeset that is a close descendant of 18415fc918a1, the changeset
    # that conclude the fix run for the bug introduced in 63edc384d3b7.
    args = getargspec(error.Abort.__init__).args
    post_18415fc918a1 = "detailed_exit_code" in args

    old_max_inline = None
    try:
        if not (pre_63edc384d3b7 or post_18415fc918a1):
            # disable inlining
            old_max_inline = mercurial.revlog._maxinline
            # large enough to never happen
            mercurial.revlog._maxinline = 2 ** 50

        with repo.lock():
            # bundle holds [readbundle generator, open transaction] so the
            # setup/apply closures can share and replace them between runs
            bundle = [None, None]
            orig_quiet = repo.ui.quiet
            try:
                repo.ui.quiet = True
                with open(fname, mode="rb") as f:

                    def noop_report(*args, **kwargs):
                        pass

                    def setup():
                        # abort any transaction left by the previous run and
                        # re-read the bundle from the start of the file
                        gen, tr = bundle
                        if tr is not None:
                            tr.abort()
                        bundle[:] = [None, None]
                        f.seek(0)
                        bundle[0] = exchange.readbundle(ui, f, fname)
                        bundle[1] = repo.transaction(b'perf::unbundle')
                        # silence the transaction
                        bundle[1]._report = noop_report

                    def apply():
                        gen, tr = bundle
                        bundle2.applybundle(
                            repo,
                            gen,
                            tr,
                            source=b'perf::unbundle',
                            url=fname,
                        )

                    timer, fm = gettimer(ui, opts)
                    timer(apply, setup=setup)
                    fm.end()
            finally:
                # fixed: this used to read `repo.ui.quiet == orig_quiet`
                # (a no-op comparison), leaving the ui quiet forever
                repo.ui.quiet = orig_quiet
                gen, tr = bundle
                if tr is not None:
                    tr.abort()
    finally:
        if old_max_inline is not None:
            mercurial.revlog._maxinline = old_max_inline
2753 2758
2754 2759
@command(
    b'perf::unidiff|perfunidiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ],
    b'-c|-m|FILE REV',
)
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    if opts.get(b'changelog') or opts.get(b'manifest'):
        # with -c/-m the positional argument is the revision, not a file
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    # collect all (left, right) text pairs up front so only diffing is timed
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False
            )
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
2833 2838
2834 2839
@command(b'perf::diffwd|perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # short flag -> commands.diff keyword argument
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        # deliberately rebinds ``opts``: gettimer() already consumed the
        # original value above, so it can be reused for the diff kwargs
        opts = {options[c]: b'1' for c in diffopt}

        def d():
            # ``opts`` is resolved at call time, but timer(d) runs within
            # this same iteration, so the binding is the intended one
            ui.pushbuffer()
            commands.diff(ui, repo, **opts)
            ui.popbuffer()

        diffopt = diffopt.encode('ascii')
        title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
        timer(d, title=title)
    fm.end()
2858 2863
2859 2864
@command(
    b'perf::revlogindex|perfrevlogindex',
    revlogopts + formatteropts,
    b'-c|-m|FILE',
)
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    # compat with hg <= 5.8
    radix = getattr(rl, 'radix', None)
    indexfile = getattr(rl, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(rl, 'indexfile')
    data = opener.read(indexfile)

    # the low 16 bits of the first 4 bytes hold the revlog format version
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        inline = header & (1 << 16)
    else:
        raise error.Abort(b'unsupported revlog version: %d' % version)

    parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
    if parse_index_v1 is None:
        # hg <= 5.x exposed the parser through revlogio
        parse_index_v1 = mercurial.revlog.revlogio().parseindex

    rllen = len(rl)

    # sample nodes across the revlog to measure lookup at various depths
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        if radix is not None:
            revlog(opener, radix=radix)
        else:
            # hg <= 5.8
            revlog(opener, indexfile=indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        parse_index_v1(data, inline)

    def getentry(revornode):
        index = parse_index_v1(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = parse_index_v1(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        index = parse_index_v1(data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        try:
            rev(node)
        except error.RevlogError:
            # unknown nodes are expected (e.g. the all-'a' probe below)
            pass

    def resolvenodes(nodes, count=1):
        index = parse_index_v1(data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        for i in range(count):
            for node in nodes:
                try:
                    rev(node)
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
        (
            lambda: resolvenodes(allnodesrev, 2),
            b'look up all nodes 2x (reverse)',
        ),
        (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
        (
            lambda: getentries(allrevs, 2),
            b'retrieve all index entries 2x (forward)',
        ),
        (
            lambda: getentries(allrevsrev),
            b'retrieve all index entries (reverse)',
        ),
        (
            lambda: getentries(allrevsrev, 2),
            b'retrieve all index entries 2x (reverse)',
        ),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3005 3010
3006 3011
@command(
    b'perf::revlogrevisions|perfrevlogrevisions',
    revlogopts
    + formatteropts
    + [
        (b'd', b'dist', 100, b'distance between the revisions'),
        (b's', b'startrev', 0, b'revision to start reading at'),
        (b'', b'reverse', False, b'read in reverse'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogrevisions(
    ui, repo, file_=None, startrev=0, reverse=False, **opts
):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # a negative start revision counts back from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev

    def bench():
        rl.clearcaches()

        step = opts[b'dist']
        if reverse:
            # walk from tip down to just past the start revision
            first, stop, step = rllen - 1, startrev - 1, -step
        else:
            first, stop = startrev, rllen

        for cur in _xrange(first, stop, step):
            # Old revisions don't support passing int.
            rl.revision(rl.node(cur))

    timer, fm = gettimer(ui, opts)
    timer(bench)
    fm.end()
3055 3060
3056 3061
@command(
    b'perf::revlogwrite|perfrevlogwrite',
    revlogopts
    + formatteropts
    + [
        (b's', b'startrev', 1000, b'revision to start writing at'),
        (b'', b'stoprev', -1, b'last revision to write'),
        (b'', b'count', 3, b'number of passes to perform'),
        (b'', b'details', False, b'print timing for every revisions tested'),
        (b'', b'source', b'full', b'the kind of data feed in the revlog'),
        (b'', b'lazydeltabase', True, b'try the provided delta first'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
      (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    Note: This performance command measures performance in a custom way. As a
    result some of the global configuration of the 'perf' command does not
    apply to it:

    * ``pre-run``: disabled

    * ``profile-benchmark``: disabled

    * ``run-limits``: disabled use --count instead
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative revisions count back from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (
        b'full',
        b'parent-1',
        b'parent-2',
        b'parent-smallest',
        b'storage',
    )
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        # fixed user-facing typo: "invalide" -> "invalid"
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(
            ui,
            rl,
            source,
            startrev,
            stoprev,
            c + 1,
            lazydeltabase=lazydeltabase,
            clearcaches=clearcaches,
        )
        allresults.append(timing)

    ### consolidate the results in a single list
    # allresults is a list of runs; transpose it into, for each revision,
    # the list of timings observed across every run.
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # BUG FIX: this was previously `resultcount * 70 // 100`, which
        # reported the 70th percentile under the "50%" label.
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append(
            (
                sum(x[1][0] for x in item),
                sum(x[1][1] for x in item),
                sum(x[1][2] for x in item),
            )
        )
    formatone(
        fm,
        totaltime,
        title="total time (%d revs)" % resultcount,
        displayall=displayall,
    )
    fm.end()
3198 3203
3199 3204
3200 3205 class _faketr:
3201 3206 def add(s, x, y, z=None):
3202 3207 return None
3203 3208
3204 3209
def _timeonewrite(
    ui,
    orig,
    source,
    startrev,
    stoprev,
    runidx=None,
    lazydeltabase=True,
    clearcaches=True,
):
    """Replay revisions [startrev, stoprev] of `orig` into a temporary revlog.

    Returns a list of (rev, timing) pairs, one per revision re-added.
    `source` selects how each revision's content is fed (see
    `_getrevisionseed`). `runidx`, when set, only labels the progress bar
    for this pass.
    """
    timings = []
    # stub transaction: we do not want journaling overhead in the timings
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)

            def updateprogress(pos):
                progress.update(pos)

            def completeprogress():
                progress.complete()

        else:

            # legacy API: progress state is driven through ui.progress()
            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)

            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                dest.index.clearcaches()
                dest.clearcaches()
            # only the addrawrevision call itself is timed
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
3254 3259
3255 3260
def _getrevisionseed(orig, rev, tr, source):
    """Return (args, kwargs) for addrawrevision() to recreate `rev` of `orig`.

    `source` picks how the content is provided: as a full text (`full`), as
    a delta against a parent (`parent-1`, `parent-2`, `parent-smallest`), or
    as the delta already stored in the revlog (`storage`).
    """
    from mercurial.node import nullid

    node = orig.node(rev)
    p1, p2 = orig.parents(node)
    linkrev = orig.linkrev(rev)
    flags = orig.flags(rev)
    text = None
    cachedelta = None

    if source == b'full':
        text = orig.revision(rev)
    elif source == b'parent-1':
        cachedelta = (orig.rev(p1), orig.revdiff(p1, rev))
    elif source == b'parent-2':
        # fall back to the first parent when there is no second one
        base = p1 if p2 == nullid else p2
        cachedelta = (orig.rev(base), orig.revdiff(base, rev))
    elif source == b'parent-smallest':
        candidates = [(orig.revdiff(p1, rev), p1)]
        if p2 != nullid:
            candidates.append((orig.revdiff(p2, rev), p2))
        # min() keeps the first entry on a tie, so p1 wins equal-size deltas
        diff, base = min(candidates, key=lambda c: len(c[0]))
        cachedelta = (orig.rev(base), diff)
    elif source == b'storage':
        baserev = orig.deltaparent(rev)
        cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))

    return (
        (text, tr, linkrev, p1, p2),
        {'node': node, 'flags': flags, 'cachedelta': cachedelta},
    )
3296 3301
3297 3302
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Yield a writable copy of revlog `orig` truncated before `truncaterev`.

    The index and data files are copied into a temporary directory and cut
    back so that revisions >= `truncaterev` can be re-added; the temporary
    directory is removed on exit. Inline revlogs are not supported.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    k = 'upperboundcomp'
    if util.safehasattr(orig, k):
        revlogkwargs[k] = getattr(orig, k)

    indexfile = getattr(orig, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(orig, 'indexfile')
    origindexpath = orig.opener.join(indexfile)

    # '_datafile' on modern revlogs, 'datafile' on older ones
    datafile = getattr(orig, '_datafile', getattr(orig, 'datafile'))
    origdatapath = orig.opener.join(datafile)
    radix = b'revlog'
    indexname = b'revlog.i'
    dataname = b'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            # the index is fixed-size records, so truncaterev entries remain
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        try:
            # modern revlog API uses radix-based file naming
            dest = revlog(vfs, radix=radix, **revlogkwargs)
        except TypeError:
            # hg <= 5.8 wants explicit index/data file names
            dest = revlog(
                vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
            )
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
3358 3363
3359 3364
@command(
    b'perf::revlogchunks|perfrevlogchunks',
    revlogopts
    + formatteropts
    + [
        (b'e', b'engines', b'', b'compression engines to use'),
        (b's', b'startrev', 0, b'revision to start at'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = {e.strip() for e in engines.split(b',')}
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # default to every available engine able to compress
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        # return a raw file handle on the file holding the chunk data
        if rl._inline:
            indexfile = getattr(rl, '_indexfile', None)
            if indexfile is None:
                # compatibility with <= hg-5.8
                indexfile = getattr(rl, 'indexfile')
            return getsvfs(repo)(indexfile)
        else:
            # BUG FIX: the first lookup previously used 'datafile' twice,
            # so the modern '_datafile' attribute was never consulted
            # (compare with _temprevlog, which does this correctly).
            datafile = getattr(rl, '_datafile', getattr(rl, 'datafile'))
            return getsvfs(repo)(datafile)

    def doread():
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        try:
            # Swap in the requested compression engine.
            oldcompressor = rl._compressor
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append(
            (
                functools.partial(docompress, compressor),
                b'compress w/ %s' % engine,
            )
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3492 3497
3493 3498
@command(
    b'perf::revlogrevision|perfrevlogrevision',
    revlogopts
    + formatteropts
    + [(b'', b'cache', False, b'use caches instead of clearing')],
    b'-c|-m|FILE REV',
)
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    # with -c/-m, the sole positional argument is the revision, not a file
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # slice each segment in `data` into the per-revision raw chunks
        # described by the matching delta-chain slice
        start = r.start
        length = r.length
        inline = r._inline
        try:
            iosize = r.index.entry_size
        except AttributeError:
            # hg versions before the index exposed entry_size
            iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    # each do* closure benchmarks one phase; `cache` keeps revlog caches warm
    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        # older hg kept the helper on the revlog module
        slicechunk = getattr(revlog, '_slicechunk', None)

    # precompute the inputs each phase benchmark operates on
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    # slicing only exists when sparse-read is enabled
    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend(
        [
            (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
            (lambda: dodecompress(rawchunks), b'decompress'),
            (lambda: dopatch(text, bins), b'patch'),
            (lambda: dohash(text), b'hash'),
        ]
    )

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
3638 3643
3639 3644
@command(
    b'perf::revset|perfrevset',
    [
        (b'C', b'clear', False, b'clear volatile cache between each call.'),
        (b'', b'contexts', False, b'obtain changectx for each revision'),
    ]
    + formatteropts,
    b"REVSET",
)
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if need to evaluate the impact of build volatile
    revisions set cache on the revset execution. Volatile cache hold filtered
    and obsolete related cache."""
    # (help-text fix: the registered flag is --clear, not --clean)
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def d():
        # optionally drop the volatile (filtering/obsolescence) caches so
        # each run pays the cost of rebuilding them
        if clear:
            repo.invalidatevolatilesets()
        if contexts:
            # materialize a full changectx per revision
            for ctx in repo.set(expr):
                pass
        else:
            # iterate plain revision numbers only
            for r in repo.revs(expr):
                pass

    timer(d)
    fm.end()
3671 3676
3672 3677
@command(
    b'perf::volatilesets|perfvolatilesets',
    [
        (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
    ]
    + formatteropts,
)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def makebench(compute, target):
        # build a benchmark callable recomputing `target` from scratch
        def run():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            compute(target)

        return run

    def selected(candidates):
        # honor an explicit list of set names when one was given
        if names:
            return [n for n in candidates if n in names]
        return list(candidates)

    # obsolescence-related sets first, then repoview filters
    for name in selected(sorted(obsolete.cachefuncs)):
        timer(
            makebench(lambda t: obsolete.getrevs(repo, t), name), title=name
        )

    for name in selected(sorted(repoview.filtertable)):
        timer(
            makebench(lambda t: repoview.filterrevs(repo, t), name),
            title=name,
        )
    fm.end()
3720 3725
3721 3726
@command(
    b'perf::branchmap|perfbranchmap',
    [
        (b'f', b'full', False, b'Includes build time of subset'),
        (
            b'',
            b'clear-revbranch',
            False,
            b'purge the revbranch cache between computation',
        ),
    ]
    + formatteropts,
)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)

    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches

        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # drop every cached branchmap so the subset is rebuilt too
                view._branchcaches.clear()
            else:
                # only drop this filter's entry; subsets stay warm
                filtered.pop(filtername, None)
            view.branchmap()

        return d

    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    while possiblefilters:
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable on-disk branchmap read/write so only in-memory update is timed
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=printname)
    finally:
        # always restore the patched read/write entry points
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
3811 3816
3812 3817
@command(
    b'perf::branchmapupdate|perfbranchmapupdate',
    [
        (b'', b'base', [], b'subset of revision to start from'),
        (b'', b'target', [], b'subset of revision to end with'),
        (b'', b'clear-caches', False, b'clear cache between each runs'),
    ]
    + formatteropts,
)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from for <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    $ update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None]  # used to pass data between closure

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    # the revisions the benchmarked update will have to process
    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    # temporary repoview filters exposing exactly the base/target subsets
    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        # register the temporary filters; removed again in the finally block
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset where found
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            # start every run from a fresh copy of the base branchmap
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3922 3927
@command(
    b'perf::branchmapload|perfbranchmapload',
    [
        (b'f', b'filter', b'', b'Specify repoview filter'),
        # fix: help text previously read "brachmap"
        (b'', b'list', False, b'List branchmap filter caches'),
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ]
    + formatteropts,
)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap

    With ``--list``, only print the on-disk branchmap cache files and their
    sizes, without benchmarking anything.

    ``--filter`` selects the repoview filter whose branchmap is read; an
    empty value means the unfiltered repository.  If the requested filter
    has no cached branchmap, fall back along the subset chain and abort if
    nothing suitable is cached.

    NOTE: the ``filter`` and ``list`` parameter names shadow builtins, but
    they are part of the command interface and are kept for compatibility.
    """
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # just enumerate the cache files ("branch2*" naming scheme) and bail
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(
                    b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
                )
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap()  # make sure we have a relevant, up to date branchmap

    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached
    while fromfile(repo) is None:
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(
                b'No branchmap cached for %s repo' % (filter or b'unfiltered')
            )
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)

    def setup():
        if clearrevlogs:
            clearchangelog(repo)

    def bench():
        fromfile(repo)

    timer(bench, setup=setup)
    fm.end()
3981 3986
3982 3987
@command(b'perf::loadmarkers|perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    store_vfs = getsvfs(repo)

    def count_markers():
        # instantiating the obsstore parses the on-disk markers
        return len(obsolete.obsstore(repo, store_vfs))

    timer(count_markers)
    fm.end()
3992 3997
3993 3998
@command(
    b'perf::lrucachedict|perflrucachedict',
    formatteropts
    + [
        (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
        (b'', b'mincost', 0, b'smallest cost of items in cache'),
        (b'', b'maxcost', 100, b'maximum cost of items in cache'),
        (b'', b'size', 4, b'size of cache'),
        (b'', b'gets', 10000, b'number of key lookups'),
        (b'', b'sets', 10000, b'number of key sets'),
        (b'', b'mixed', 10000, b'number of mixed mode operations'),
        (
            b'',
            b'mixedgetfreq',
            50,
            b'frequency of get vs set ops in mixed mode',
        ),
    ],
    norepo=True,
)
def perflrucache(
    ui,
    mincost=0,
    maxcost=100,
    costlimit=0,
    size=4,
    gets=10000,
    sets=10000,
    mixed=10000,
    mixedgetfreq=50,
    **opts
):
    """benchmark util.lrucachedict in several access patterns

    Exercises construction, pure lookups, pure insertions and a randomized
    mix of both; when ``--costlimit`` is non-zero the cost-aware variants
    of the cache API are benchmarked instead.
    """
    opts = _byteskwargs(opts)

    def doinit():
        for _ in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    # random keys used to pre-fill the cache for the "gets" benchmarks
    values = [random.randint(0, _maxint) for _ in _xrange(size)]

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = [random.choice(values) for _ in _xrange(gets)]

    def dogets():
        cache = util.lrucachedict(size)
        for key in values:
            cache[key] = key
        for key in getseq:
            value = cache[key]
            value  # silence pyflakes warning

    def dogetscost():
        cache = util.lrucachedict(size, maxcost=costlimit)
        for idx, key in enumerate(values):
            cache.insert(key, key, cost=costs[idx])
        for key in getseq:
            try:
                value = cache[key]
                value  # silence pyflakes warning
            except KeyError:
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for _ in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        cache = util.lrucachedict(size)
        for key in setseq:
            cache.insert(key, key)

    def doinsertscost():
        cache = util.lrucachedict(size, maxcost=costlimit)
        for idx, key in enumerate(setseq):
            cache.insert(key, key, cost=costs[idx])

    def dosets():
        cache = util.lrucachedict(size)
        for key in setseq:
            cache[key] = key

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for _ in _xrange(mixed):
        # op 0 == lookup, op 1 == insert, chosen per --mixedgetfreq
        op = 0 if random.randint(0, 100) < mixedgetfreq else 1
        mixedops.append(
            (op, random.randint(0, size * 2), random.choice(costrange))
        )

    def domixed():
        cache = util.lrucachedict(size)

        for op, key, cost in mixedops:
            if op == 0:
                try:
                    cache[key]
                except KeyError:
                    pass
            else:
                cache[key] = key

    def domixedcost():
        cache = util.lrucachedict(size, maxcost=costlimit)

        for op, key, cost in mixedops:
            if op == 0:
                try:
                    cache[key]
                except KeyError:
                    pass
            else:
                cache.insert(key, key, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    if costlimit:
        benches.extend(
            [
                (dogetscost, b'gets w/ cost limit'),
                (doinsertscost, b'inserts w/ cost limit'),
                (domixedcost, b'mixed w/ cost limit'),
            ]
        )
    else:
        benches.extend(
            [
                (dogets, b'gets'),
                (doinserts, b'inserts'),
                (dosets, b'sets'),
                (domixed, b'mixed'),
            ]
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
4148 4153
4149 4154
@command(
    b'perf::write|perfwrite',
    formatteropts
    + [
        (b'', b'write-method', b'write', b'ui write method'),
        (b'', b'nlines', 100, b'number of lines'),
        (b'', b'nitems', 100, b'number of items (per line)'),
        (b'', b'item', b'x', b'item that is written'),
        (b'', b'batch-line', None, b'pass whole line to write method at once'),
        (b'', b'flush-line', None, b'flush after each line'),
    ],
)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write (and others)"""
    opts = _byteskwargs(opts)

    write = getattr(ui, _sysstr(opts[b'write_method']))
    nlines = int(opts[b'nlines'])
    nitems = int(opts[b'nitems'])
    item = opts[b'item']
    batch_line = opts.get(b'batch_line')
    flush_line = opts.get(b'flush_line')

    if batch_line:
        # pre-build the whole line so the timed loop only measures write()
        line = item * nitems + b'\n'

    def benchmark():
        # the batch/non-batch decision is invariant, so hoist it out of
        # the timed loop
        if batch_line:
            for _ in pycompat.xrange(nlines):
                write(line)
                if flush_line:
                    ui.flush()
        else:
            for _ in pycompat.xrange(nlines):
                for _ in pycompat.xrange(nitems):
                    write(item)
                write(b'\n')
                if flush_line:
                    ui.flush()
        ui.flush()

    timer, fm = gettimer(ui, opts)
    timer(benchmark)
    fm.end()
4191 4196
4192 4197
def uisetup(ui):
    """extension setup hook: patch openrevlog() on old Mercurial versions"""
    has_openrevlog = util.safehasattr(cmdutil, b'openrevlog')
    has_debugrevlogopts = util.safehasattr(commands, b'debugrevlogopts')
    if not has_openrevlog or has_debugrevlogopts:
        return

    # for "historical portability":
    # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
    # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
    # openrevlog() should cause failure, because it has been
    # available since 3.5 (or 49c583ca48c4).
    def wrapped_openrevlog(orig, repo, cmd, file_, opts):
        if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
            raise error.Abort(
                b"This version doesn't support --dir option",
                hint=b"use 3.5 or later",
            )
        return orig(repo, cmd, file_, opts)

    extensions.wrapfunction(cmdutil, b'openrevlog', wrapped_openrevlog)
4211 4216
4212 4217
@command(
    b'perf::progress|perfprogress',
    formatteropts
    + [
        (b'', b'topic', b'topic', b'topic for progress messages'),
        (b'c', b'total', 1000000, b'total value we are progressing to'),
    ],
    norepo=True,
)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def doprogress():
        # drive the progress bar through every tick from 0 to total
        with ui.makeprogress(topic, total=total) as progress:
            for _ in _xrange(total):
                progress.increment()

    timer(doprogress)
    fm.end()
General Comments 0
You need to be logged in to leave comments. Login now