##// END OF EJS Templates
locking: grab the wlock before touching the dirstate in `perfdirstatewrite`...
marmoute -
r50903:e859f440 default
parent child Browse files
Show More
@@ -1,4239 +1,4240 b''
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance
3 3
4 4 Configurations
5 5 ==============
6 6
7 7 ``perf``
8 8 --------
9 9
10 10 ``all-timing``
11 11 When set, additional statistics will be reported for each benchmark: best,
12 12 worst, median average. If not set only the best timing is reported
13 13 (default: off).
14 14
15 15 ``presleep``
16 16 number of second to wait before any group of runs (default: 1)
17 17
18 18 ``pre-run``
19 19 number of run to perform before starting measurement.
20 20
21 21 ``profile-benchmark``
22 22 Enable profiling for the benchmarked section.
23 23 (The first iteration is benchmarked)
24 24
25 25 ``run-limits``
26 26 Control the number of runs each benchmark will perform. The option value
27 27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 28 conditions are considered in order with the following logic:
29 29
30 30 If benchmark has been running for <time> seconds, and we have performed
31 31 <numberofrun> iterations, stop the benchmark,
32 32
33 33 The default value is: `3.0-100, 10.0-3`
34 34
35 35 ``stub``
36 36 When set, benchmarks will only be run once, useful for testing
37 37 (default: off)
38 38 '''
39 39
40 40 # "historical portability" policy of perf.py:
41 41 #
42 42 # We have to do:
43 43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 46 # - make historical perf command work correctly with as wide Mercurial
47 47 # version as possible
48 48 #
49 49 # We have to do, if possible with reasonable cost:
50 50 # - make recent perf command for historical feature work correctly
51 51 # with early Mercurial
52 52 #
53 53 # We don't have to do:
54 54 # - make perf command for recent feature work correctly with early
55 55 # Mercurial
56 56
57 57 import contextlib
58 58 import functools
59 59 import gc
60 60 import os
61 61 import random
62 62 import shutil
63 63 import struct
64 64 import sys
65 65 import tempfile
66 66 import threading
67 67 import time
68 68
69 69 import mercurial.revlog
70 70 from mercurial import (
71 71 changegroup,
72 72 cmdutil,
73 73 commands,
74 74 copies,
75 75 error,
76 76 extensions,
77 77 hg,
78 78 mdiff,
79 79 merge,
80 80 util,
81 81 )
82 82
83 83 # for "historical portability":
84 84 # try to import modules separately (in dict order), and ignore
85 85 # failure, because these aren't available with early Mercurial
86 86 try:
87 87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 88 except ImportError:
89 89 pass
90 90 try:
91 91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 92 except ImportError:
93 93 pass
94 94 try:
95 95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96 96
97 97 dir(registrar) # forcibly load it
98 98 except ImportError:
99 99 registrar = None
100 100 try:
101 101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 102 except ImportError:
103 103 pass
104 104 try:
105 105 from mercurial.utils import repoviewutil # since 5.0
106 106 except ImportError:
107 107 repoviewutil = None
108 108 try:
109 109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 110 except ImportError:
111 111 pass
112 112 try:
113 113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 114 except ImportError:
115 115 pass
116 116
117 117 try:
118 118 from mercurial import profiling
119 119 except ImportError:
120 120 profiling = None
121 121
122 122 try:
123 123 from mercurial.revlogutils import constants as revlog_constants
124 124
125 125 perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')
126 126
127 127 def revlog(opener, *args, **kwargs):
128 128 return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)
129 129
130 130
131 131 except (ImportError, AttributeError):
132 132 perf_rl_kind = None
133 133
134 134 def revlog(opener, *args, **kwargs):
135 135 return mercurial.revlog.revlog(opener, *args, **kwargs)
136 136
137 137
def identity(a):
    """Return *a* unchanged; no-op stand-in for missing pycompat helpers."""
    return a
140 140
141 141
142 142 try:
143 143 from mercurial import pycompat
144 144
145 145 getargspec = pycompat.getargspec # added to module after 4.5
146 146 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
147 147 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
148 148 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
149 149 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
150 150 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
151 151 if pycompat.ispy3:
152 152 _maxint = sys.maxsize # per py3 docs for replacing maxint
153 153 else:
154 154 _maxint = sys.maxint
155 155 except (NameError, ImportError, AttributeError):
156 156 import inspect
157 157
158 158 getargspec = inspect.getargspec
159 159 _byteskwargs = identity
160 160 _bytestr = str
161 161 fsencode = identity # no py3 support
162 162 _maxint = sys.maxint # no py3 support
163 163 _sysstr = lambda x: x # no py3 support
164 164 _xrange = xrange
165 165
166 166 try:
167 167 # 4.7+
168 168 queue = pycompat.queue.Queue
169 169 except (NameError, AttributeError, ImportError):
170 170 # <4.7.
171 171 try:
172 172 queue = pycompat.queue
173 173 except (NameError, AttributeError, ImportError):
174 174 import Queue as queue
175 175
176 176 try:
177 177 from mercurial import logcmdutil
178 178
179 179 makelogtemplater = logcmdutil.maketemplater
180 180 except (AttributeError, ImportError):
181 181 try:
182 182 makelogtemplater = cmdutil.makelogtemplater
183 183 except (AttributeError, ImportError):
184 184 makelogtemplater = None
185 185
186 186 # for "historical portability":
187 187 # define util.safehasattr forcibly, because util.safehasattr has been
188 188 # available since 1.9.3 (or 94b200a11cf7)
189 189 _undefined = object()
190 190
191 191
def safehasattr(thing, attr):
    """Return True if *thing* has attribute *attr* (given as bytes).

    Re-implements util.safehasattr for old Mercurial versions; uses a
    sentinel default so a stored None attribute still counts as present.
    """
    missing = _undefined
    return getattr(thing, _sysstr(attr), missing) is not missing
194 194
195 195
196 196 setattr(util, 'safehasattr', safehasattr)
197 197
198 198 # for "historical portability":
199 199 # define util.timer forcibly, because util.timer has been available
200 200 # since ae5d60bb70c9
201 201 if safehasattr(time, 'perf_counter'):
202 202 util.timer = time.perf_counter
203 203 elif os.name == b'nt':
204 204 util.timer = time.clock
205 205 else:
206 206 util.timer = time.time
207 207
208 208 # for "historical portability":
209 209 # use locally defined empty option list, if formatteropts isn't
210 210 # available, because commands.formatteropts has been available since
211 211 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
212 212 # available since 2.2 (or ae5f92e154d3)
213 213 formatteropts = getattr(
214 214 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
215 215 )
216 216
217 217 # for "historical portability":
218 218 # use locally defined option list, if debugrevlogopts isn't available,
219 219 # because commands.debugrevlogopts has been available since 3.7 (or
220 220 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
221 221 # since 1.9 (or a79fea6b3e77).
222 222 revlogopts = getattr(
223 223 cmdutil,
224 224 "debugrevlogopts",
225 225 getattr(
226 226 commands,
227 227 "debugrevlogopts",
228 228 [
229 229 (b'c', b'changelog', False, b'open changelog'),
230 230 (b'm', b'manifest', False, b'open manifest'),
231 231 (b'', b'dir', False, b'open directory manifest'),
232 232 ],
233 233 ),
234 234 )
235 235
236 236 cmdtable = {}
237 237
238
238 239 # for "historical portability":
239 240 # define parsealiases locally, because cmdutil.parsealiases has been
240 241 # available since 1.5 (or 6252852b4332)
def parsealiases(cmd):
    """Split a b"name|alias1|..." command spec into its list of names."""
    names = cmd.split(b"|")
    return names
243 244
244 245
245 246 if safehasattr(registrar, 'command'):
246 247 command = registrar.command(cmdtable)
247 248 elif safehasattr(cmdutil, 'command'):
248 249 command = cmdutil.command(cmdtable)
249 250 if 'norepo' not in getargspec(command).args:
250 251 # for "historical portability":
251 252 # wrap original cmdutil.command, because "norepo" option has
252 253 # been available since 3.1 (or 75a96326cecb)
253 254 _command = command
254 255
255 256 def command(name, options=(), synopsis=None, norepo=False):
256 257 if norepo:
257 258 commands.norepo += b' %s' % b' '.join(parsealiases(name))
258 259 return _command(name, list(options), synopsis)
259 260
260 261
261 262 else:
262 263 # for "historical portability":
263 264 # define "@command" annotation locally, because cmdutil.command
264 265 # has been available since 1.9 (or 2daa5179e73f)
265 266 def command(name, options=(), synopsis=None, norepo=False):
266 267 def decorator(func):
267 268 if synopsis:
268 269 cmdtable[name] = func, list(options), synopsis
269 270 else:
270 271 cmdtable[name] = func, list(options)
271 272 if norepo:
272 273 commands.norepo += b' %s' % b' '.join(parsealiases(name))
273 274 return func
274 275
275 276 return decorator
276 277
277 278
278 279 try:
279 280 import mercurial.registrar
280 281 import mercurial.configitems
281 282
282 283 configtable = {}
283 284 configitem = mercurial.registrar.configitem(configtable)
284 285 configitem(
285 286 b'perf',
286 287 b'presleep',
287 288 default=mercurial.configitems.dynamicdefault,
288 289 experimental=True,
289 290 )
290 291 configitem(
291 292 b'perf',
292 293 b'stub',
293 294 default=mercurial.configitems.dynamicdefault,
294 295 experimental=True,
295 296 )
296 297 configitem(
297 298 b'perf',
298 299 b'parentscount',
299 300 default=mercurial.configitems.dynamicdefault,
300 301 experimental=True,
301 302 )
302 303 configitem(
303 304 b'perf',
304 305 b'all-timing',
305 306 default=mercurial.configitems.dynamicdefault,
306 307 experimental=True,
307 308 )
308 309 configitem(
309 310 b'perf',
310 311 b'pre-run',
311 312 default=mercurial.configitems.dynamicdefault,
312 313 )
313 314 configitem(
314 315 b'perf',
315 316 b'profile-benchmark',
316 317 default=mercurial.configitems.dynamicdefault,
317 318 )
318 319 configitem(
319 320 b'perf',
320 321 b'run-limits',
321 322 default=mercurial.configitems.dynamicdefault,
322 323 experimental=True,
323 324 )
324 325 except (ImportError, AttributeError):
325 326 pass
326 327 except TypeError:
327 328 # compatibility fix for a11fd395e83f
328 329 # hg version: 5.2
329 330 configitem(
330 331 b'perf',
331 332 b'presleep',
332 333 default=mercurial.configitems.dynamicdefault,
333 334 )
334 335 configitem(
335 336 b'perf',
336 337 b'stub',
337 338 default=mercurial.configitems.dynamicdefault,
338 339 )
339 340 configitem(
340 341 b'perf',
341 342 b'parentscount',
342 343 default=mercurial.configitems.dynamicdefault,
343 344 )
344 345 configitem(
345 346 b'perf',
346 347 b'all-timing',
347 348 default=mercurial.configitems.dynamicdefault,
348 349 )
349 350 configitem(
350 351 b'perf',
351 352 b'pre-run',
352 353 default=mercurial.configitems.dynamicdefault,
353 354 )
354 355 configitem(
355 356 b'perf',
356 357 b'profile-benchmark',
357 358 default=mercurial.configitems.dynamicdefault,
358 359 )
359 360 configitem(
360 361 b'perf',
361 362 b'run-limits',
362 363 default=mercurial.configitems.dynamicdefault,
363 364 )
364 365
365 366
def getlen(ui):
    """Return a length function; under perf.stub everything has length 1.

    The stub mode lets tests exercise perf commands without caring about
    the actual size of the measured data.
    """
    stubbed = ui.configbool(b"perf", b"stub", False)
    return (lambda x: 1) if stubbed else len
370 371
371 372
class noop:
    """Context manager that does nothing on entry or exit.

    Used as a stand-in when no profiler is configured, so the timing loop
    can always use a ``with`` statement.
    """

    def __enter__(self):
        return None

    def __exit__(self, *exc_info):
        return None
380 381
381 382
382 383 NOOPCTX = noop()
383 384
384 385
def gettimer(ui, opts=None):
    """return a timer function and formatter: (timer, formatter)

    This function exists to gather the creation of formatter in a single
    place instead of duplicating it in all performance commands."""

    # enforce an idle period before execution to counteract power management
    # experimental config: perf.presleep
    time.sleep(getint(ui, b"perf", b"presleep", 1))

    if opts is None:
        opts = {}
    # redirect all to stderr unless buffer api is in use
    if not ui._buffers:
        ui = ui.copy()
        uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
        if uifout:
            # for "historical portability":
            # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
            uifout.set(ui.ferr)

    # get a formatter
    uiformatter = getattr(ui, 'formatter', None)
    if uiformatter:
        fm = uiformatter(b'perf', opts)
    else:
        # for "historical portability":
        # define formatter locally, because ui.formatter has been
        # available since 2.2 (or ae5f92e154d3)
        from mercurial import node

        class defaultformatter:
            """Minimized composition of baseformatter and plainformatter"""

            def __init__(self, ui, topic, opts):
                self._ui = ui
                if ui.debugflag:
                    self.hexfunc = node.hex
                else:
                    self.hexfunc = node.short

            def __nonzero__(self):
                return False

            __bool__ = __nonzero__

            def startitem(self):
                pass

            def data(self, **data):
                pass

            def write(self, fields, deftext, *fielddata, **opts):
                self._ui.write(deftext % fielddata, **opts)

            def condwrite(self, cond, fields, deftext, *fielddata, **opts):
                if cond:
                    self._ui.write(deftext % fielddata, **opts)

            def plain(self, text, **opts):
                self._ui.write(text, **opts)

            def end(self):
                pass

        fm = defaultformatter(ui, b'perf', opts)

    # stub function, runs code only once instead of in a loop
    # experimental config: perf.stub
    if ui.configbool(b"perf", b"stub", False):
        return functools.partial(stub_timer, fm), fm

    # experimental config: perf.all-timing
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # experimental config: perf.run-limits
    # Each entry is b'<seconds>-<mincount>'; malformed entries are warned
    # about and skipped rather than aborting the benchmark.
    limitspec = ui.configlist(b"perf", b"run-limits", [])
    limits = []
    for item in limitspec:
        parts = item.split(b'-', 1)
        if len(parts) < 2:
            ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
            continue
        try:
            time_limit = float(_sysstr(parts[0]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        try:
            run_limit = int(_sysstr(parts[1]))
        except ValueError as e:
            ui.warn(
                (
                    b'malformatted run limit entry, %s: %s\n'
                    % (_bytestr(e), item)
                )
            )
            continue
        limits.append((time_limit, run_limit))
    if not limits:
        limits = DEFAULTLIMITS

    # optional profiling of the first measured iteration
    # (only when the profiling module imported successfully)
    profiler = None
    if profiling is not None:
        if ui.configbool(b"perf", b"profile-benchmark", False):
            profiler = profiling.profile(ui)

    prerun = getint(ui, b"perf", b"pre-run", 0)
    # bind all collected options into a ready-to-use timer callable
    t = functools.partial(
        _timer,
        fm,
        displayall=displayall,
        limits=limits,
        prerun=prerun,
        profiler=profiler,
    )
    return t, fm
507 508
508 509
def stub_timer(fm, func, setup=None, title=None):
    """Timer replacement for perf.stub mode: run *func* exactly once.

    Runs *setup* first when provided; *fm* and *title* are accepted only
    for signature compatibility with the real timer.
    """
    for step in (setup, func):
        if step is not None:
            step()
513 514
514 515
@contextlib.contextmanager
def timeone():
    """Context manager measuring one run.

    Yields a list that, after the block exits, contains a single
    (wall-clock, user-cpu, system-cpu) tuple for the enclosed code.
    """
    r = []
    ostart = os.times()
    cstart = util.timer()
    yield r
    cstop = util.timer()
    ostop = os.times()
    a, b = ostart, ostop
    # os.times() indices: [0] is user time, [1] is system time
    r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
525 526
526 527
527 528 # list of stop condition (elapsed time, minimal run count)
528 529 DEFAULTLIMITS = (
529 530 (3.0, 100),
530 531 (10.0, 3),
531 532 )
532 533
533 534
def _timer(
    fm,
    func,
    setup=None,
    title=None,
    displayall=False,
    limits=DEFAULTLIMITS,
    prerun=0,
    profiler=None,
):
    """Benchmark *func*, then emit the results through formatter *fm*.

    Runs *func* (preceded by *setup* when given) repeatedly until one of
    the (elapsed-seconds, min-iterations) pairs in *limits* is satisfied.
    *prerun* extra unmeasured warm-up runs happen first.  When *profiler*
    is given, only the first measured iteration is profiled.
    """
    gc.collect()
    results = []
    begin = util.timer()
    count = 0
    if profiler is None:
        profiler = NOOPCTX
    # warm-up runs: executed but never recorded
    for i in range(prerun):
        if setup is not None:
            setup()
        func()
    keepgoing = True
    while keepgoing:
        if setup is not None:
            setup()
        with profiler:
            with timeone() as item:
                r = func()
        # disable profiling after the first measured iteration
        profiler = NOOPCTX
        count += 1
        results.append(item[0])
        cstop = util.timer()
        # Look for a stop condition.
        elapsed = cstop - begin
        for t, mincount in limits:
            if elapsed >= t and count >= mincount:
                keepgoing = False
                break

    # r holds the return value of the last run; shown via --template/result
    formatone(fm, results, title=title, result=r, displayall=displayall)
573 574
574 575
def formatone(fm, timings, title=None, result=None, displayall=False):
    """Write one benchmark's timing summary through formatter *fm*.

    *timings* is a list of (wall, user, sys) tuples; it is sorted in
    place.  Always reports the best run; with *displayall* also reports
    max, average and median.
    """
    count = len(timings)

    fm.startitem()

    if title:
        fm.write(b'title', b'! %s\n', title)
    if result:
        fm.write(b'result', b'! result: %s\n', result)

    def display(role, entry):
        # roles other than b'best' get a "<role>." field-name prefix so
        # template keywords stay unambiguous
        prefix = b''
        if role != b'best':
            prefix = b'%s.' % role
        fm.plain(b'!')
        fm.write(prefix + b'wall', b' wall %f', entry[0])
        fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
        fm.write(prefix + b'user', b' user %f', entry[1])
        fm.write(prefix + b'sys', b' sys %f', entry[2])
        fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
        fm.plain(b'\n')

    timings.sort()
    min_val = timings[0]
    display(b'best', min_val)
    if displayall:
        max_val = timings[-1]
        display(b'max', max_val)
        avg = tuple([sum(x) / count for x in zip(*timings)])
        display(b'avg', avg)
        median = timings[len(timings) // 2]
        display(b'median', median)
608 608
609 609
610 610 # utilities for historical portability
611 611
612 612
def getint(ui, section, name, default):
    """Read an integer config value, falling back to *default* when unset.

    Raises error.ConfigError when the stored value is not an integer.
    """
    # for "historical portability":
    # ui.configint has been available since 1.9 (or fa2b596db182)
    raw = ui.config(section, name, None)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise error.ConfigError(
            b"%s.%s is not an integer ('%s')" % (section, name, raw)
        )
625 625
626 626
def safeattrsetter(obj, name, ignoremissing=False):
    """Ensure that 'obj' has 'name' attribute before subsequent setattr

    This function is aborted, if 'obj' doesn't have 'name' attribute
    at runtime. This avoids overlooking removal of an attribute, which
    breaks assumption of performance measurement, in the future.

    This function returns the object to (1) assign a new value, and
    (2) restore an original value to the attribute.

    If 'ignoremissing' is true, missing 'name' attribute doesn't cause
    abortion, and this function returns None. This is useful to
    examine an attribute, which isn't ensured in all Mercurial
    versions.
    """
    if not util.safehasattr(obj, name):
        if ignoremissing:
            return None
        raise error.Abort(
            (
                b"missing attribute %s of %s might break assumption"
                b" of performance measurement"
            )
            % (name, obj)
        )

    # captured now so restore() can put the original value back later
    origvalue = getattr(obj, _sysstr(name))

    class attrutil:
        # small handle exposing set/restore for the captured attribute
        def set(self, newvalue):
            setattr(obj, _sysstr(name), newvalue)

        def restore(self):
            setattr(obj, _sysstr(name), origvalue)

    return attrutil()
663 663
664 664
665 665 # utilities to examine each internal API changes
666 666
667 667
def getbranchmapsubsettable():
    """Locate the branchmap 'subsettable' mapping across hg versions.

    Checks each module known to have hosted it and returns the first
    match; aborts when none provides it.
    """
    # for "historical portability":
    # subsettable is defined in:
    # - branchmap since 2.9 (or 175c6fd8cacc)
    # - repoview since 2.5 (or 59a9f18d4587)
    # - repoviewutil since 5.0
    for mod in (branchmap, repoview, repoviewutil):
        subsettable = getattr(mod, 'subsettable', None)
        if subsettable:
            return subsettable

    # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
    # branchmap and repoview modules exist, but subsettable attribute
    # doesn't)
    raise error.Abort(
        b"perfbranchmap not available with this Mercurial",
        hint=b"use 2.5 or later",
    )
686 686
687 687
def getsvfs(repo):
    """Return appropriate object to access files under .hg/store"""
    # for "historical portability":
    # repo.svfs has been available since 2.3 (or 7034365089bf)
    store_vfs = getattr(repo, 'svfs', None)
    return store_vfs if store_vfs else getattr(repo, 'sopener')
697 697
698 698
def getvfs(repo):
    """Return appropriate object to access files under .hg"""
    # for "historical portability":
    # repo.vfs has been available since 2.3 (or 7034365089bf)
    repo_vfs = getattr(repo, 'vfs', None)
    return repo_vfs if repo_vfs else getattr(repo, 'opener')
708 708
709 709
def repocleartagscachefunc(repo):
    """Return the function to clear tags cache according to repo internal API"""
    if util.safehasattr(repo, b'_tagscache'):  # since 2.0 (or 9dca7653b525)
        # in this case, setattr(repo, '_tagscache', None) or so isn't
        # correct way to clear tags cache, because existing code paths
        # expect _tagscache to be a structured object.
        def clearcache():
            # _tagscache has been filteredpropertycache since 2.5 (or
            # 98c867ac1330), and delattr() can't work in such case
            if '_tagscache' in vars(repo):
                del repo.__dict__['_tagscache']

        return clearcache

    # older attribute layouts: clearing means resetting the cached value
    repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
    if repotags:  # since 1.4 (or 5614a628d173)
        return lambda: repotags.set(None)

    repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
    if repotagscache:  # since 0.6 (or d7df759d0e97)
        return lambda: repotagscache.set(None)

    # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
    # this point, but it isn't so problematic, because:
    # - repo.tags of such Mercurial isn't "callable", and repo.tags()
    #   in perftags() causes failure soon
    # - perf.py itself has been available since 1.1 (or eb240755386d)
    raise error.Abort(b"tags API of this hg command is unknown")
738 738
739 739
740 740 # utilities to clear cache
741 741
742 742
def clearfilecache(obj, attrname):
    """Drop a @filecache'd property *attrname* from *obj*.

    Operates on the unfiltered repo when *obj* exposes one, removes the
    materialized attribute if present, and forgets the filecache entry.
    """
    unfilter = getattr(obj, 'unfiltered', None)
    if unfilter is not None:
        obj = obj.unfiltered()
    if attrname in vars(obj):
        delattr(obj, attrname)
    obj._filecache.pop(attrname, None)
750 750
751 751
def clearchangelog(repo):
    """Force the changelog to be reloaded from disk on next access."""
    if repo is not repo.unfiltered():
        # filtered repos keep their own cache keys; reset them directly
        object.__setattr__(repo, '_clcachekey', None)
        object.__setattr__(repo, '_clcache', None)
    clearfilecache(repo.unfiltered(), 'changelog')
757 757
758 758
759 759 # perf commands
760 760
761 761
@command(b'perf::walk|perfwalk', formatteropts)
def perfwalk(ui, repo, *pats, **opts):
    # benchmark a full dirstate walk over the working directory
    # (unknown files included, ignored files excluded)
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    m = scmutil.match(repo[None], pats, {})
    timer(
        lambda: len(
            list(
                repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
            )
        )
    )
    fm.end()
775 775
776 776
@command(b'perf::annotate|perfannotate', formatteropts)
def perfannotate(ui, repo, f, **opts):
    # benchmark annotating file *f* at the working directory parent
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    fc = repo[b'.'][f]
    timer(lambda: len(fc.annotate(True)))
    fm.end()
784 784
785 785
@command(
    b'perf::status|perfstatus',
    [
        (b'u', b'unknown', False, b'ask status to look for unknown files'),
        (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
    ]
    + formatteropts,
)
def perfstatus(ui, repo, **opts):
    """benchmark the performance of a single status call

    The repository data are preserved between each call.

    By default, only the status of the tracked file are requested. If
    `--unknown` is passed, the "unknown" files are also tracked.
    """
    opts = _byteskwargs(opts)
    # m = match.always(repo.root, repo.getcwd())
    # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
    # False))))
    timer, fm = gettimer(ui, opts)
    if opts[b'dirstate']:
        # --dirstate: time the raw dirstate.status() call instead of the
        # full repo.status() pipeline
        dirstate = repo.dirstate
        m = scmutil.matchall(repo)
        unknown = opts[b'unknown']

        def status_dirstate():
            s = dirstate.status(
                m, subrepos=[], ignored=False, clean=False, unknown=unknown
            )
            # consume the result so lazy parts are actually computed
            sum(map(bool, s))

        timer(status_dirstate)
    else:
        timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
    fm.end()
822 822
823 823
@command(b'perf::addremove|perfaddremove', formatteropts)
def perfaddremove(ui, repo, **opts):
    # benchmark a dry-run addremove pass over the whole working directory;
    # quiet mode suppresses per-file output that would skew the timing
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    try:
        oldquiet = repo.ui.quiet
        repo.ui.quiet = True
        matcher = scmutil.match(repo[None])
        opts[b'dry_run'] = True
        # scmutil.addremove grew a 'uipathfn' argument in later versions;
        # probe the signature to pick the matching call form
        if 'uipathfn' in getargspec(scmutil.addremove).args:
            uipathfn = scmutil.getuipathfn(repo)
            timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
        else:
            timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
    finally:
        repo.ui.quiet = oldquiet
    fm.end()
841 841
842 842
def clearcaches(cl):
    """Reset a changelog/revlog's internal caches across hg versions."""
    # behave somewhat consistently across internal API changes
    if util.safehasattr(cl, b'clearcaches'):
        cl.clearcaches()
    elif util.safehasattr(cl, b'_nodecache'):
        # <= hg-5.2
        from mercurial.node import nullid, nullrev

        cl._nodecache = {nullid: nullrev}
        cl._nodepos = None
853 853
854 854
@command(b'perf::heads|perfheads', formatteropts)
def perfheads(ui, repo, **opts):
    """benchmark the computation of a changelog heads"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog
    # setup clears the caches so each run recomputes heads from scratch

    def s():
        clearcaches(cl)

    def d():
        len(cl.headrevs())

    timer(d, setup=s)
    fm.end()
870 870
871 871
@command(
    b'perf::tags|perftags',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perftags(ui, repo, **opts):
    # benchmark computing repo.tags(); --clear-revlogs also reloads the
    # changelog and manifest between runs
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repocleartagscache = repocleartagscachefunc(repo)
    clearrevlogs = opts[b'clear_revlogs']

    def s():
        if clearrevlogs:
            clearchangelog(repo)
            clearfilecache(repo.unfiltered(), 'manifest')
        repocleartagscache()

    def t():
        return len(repo.tags())

    timer(t, setup=s)
    fm.end()
896 896
897 897
@command(b'perf::ancestors|perfancestors', formatteropts)
def perfancestors(ui, repo, **opts):
    # benchmark iterating over all ancestors of the current heads
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    heads = repo.changelog.headrevs()

    def d():
        for a in repo.changelog.ancestors(heads):
            pass

    timer(d)
    fm.end()
910 910
911 911
@command(b'perf::ancestorset|perfancestorset', formatteropts)
def perfancestorset(ui, repo, revset, **opts):
    # benchmark membership tests of REVSET's revisions against the lazy
    # ancestor set of the current heads
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revs = repo.revs(revset)
    heads = repo.changelog.headrevs()

    def d():
        s = repo.changelog.ancestors(heads)
        for rev in revs:
            rev in s

    timer(d)
    fm.end()
926 926
927 927
@command(
    b'perf::delta-find',
    revlogopts + formatteropts,
    b'-c|-m|FILE REV',
)
def perf_delta_find(ui, repo, arg_1, arg_2=None, **opts):
    """benchmark the process of finding a valid delta for a revlog revision

    When a revlog receives a new revision (e.g. from a commit, or from an
    incoming bundle), it searches for a suitable delta-base to produce a delta.
    This perf command measures how much time we spend in this process. It
    operates on an already stored revision.

    See `hg help debug-delta-find` for another related command.
    """
    from mercurial import revlogutils
    import mercurial.revlogutils.deltas as deltautil

    opts = _byteskwargs(opts)
    # one positional arg means "REV of the changelog/manifest selected by
    # -c/-m"; two mean "FILE REV"
    if arg_2 is None:
        file_ = None
        rev = arg_1
    else:
        file_ = arg_1
        rev = arg_2

    repo = repo.unfiltered()

    timer, fm = gettimer(ui, opts)

    rev = int(rev)

    revlog = cmdutil.openrevlog(repo, b'perf::delta-find', file_, opts)

    deltacomputer = deltautil.deltacomputer(revlog)

    # rebuild the revisioninfo that would have been passed in when the
    # revision was originally stored
    node = revlog.node(rev)
    p1r, p2r = revlog.parentrevs(rev)
    p1 = revlog.node(p1r)
    p2 = revlog.node(p2r)
    full_text = revlog.revision(rev)
    textlen = len(full_text)
    cachedelta = None
    flags = revlog.flags(rev)

    revinfo = revlogutils.revisioninfo(
        node,
        p1,
        p2,
        [full_text],  # btext
        textlen,
        cachedelta,
        flags,
    )

    # Note: we should probably purge the potential caches (like the full
    # manifest cache) between runs.
    def find_one():
        with revlog._datafp() as fh:
            deltacomputer.finddeltainfo(revinfo, fh, target_rev=rev)

    timer(find_one)
    fm.end()
991 991
992 992
@command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
def perfdiscovery(ui, repo, path, **opts):
    """benchmark discovery between local repo and the peer at given path"""
    # NOTE(review): unlike the other perf commands, opts is not run through
    # _byteskwargs here before being handed to gettimer/hg.peer — confirm
    # whether that is intentional.
    repos = [repo, None]
    timer, fm = gettimer(ui, opts)

    # resolve PATH with whichever urlutil API this hg version provides
    try:
        from mercurial.utils.urlutil import get_unique_pull_path_obj

        path = get_unique_pull_path_obj(b'perfdiscovery', ui, path)
    except ImportError:
        try:
            from mercurial.utils.urlutil import get_unique_pull_path

            path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
        except ImportError:
            path = ui.expandpath(path)

    def s():
        # reconnect for every run so connection setup isn't cached away
        repos[1] = hg.peer(ui, opts, path)

    def d():
        setdiscovery.findcommonheads(ui, *repos)

    timer(d, setup=s)
    fm.end()
1019 1019
1020 1020
@command(
    b'perf::bookmarks|perfbookmarks',
    formatteropts
    + [
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ],
)
def perfbookmarks(ui, repo, **opts):
    """benchmark parsing bookmarks from disk to memory"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    clearrevlogs = opts[b'clear_revlogs']

    def s():
        if clearrevlogs:
            clearchangelog(repo)
        # drop the cached bookmark store so each run re-parses from disk
        clearfilecache(repo, b'_bookmarks')

    def d():
        # attribute access alone triggers the (re-)parse being measured
        repo._bookmarks

    timer(d, setup=s)
    fm.end()
1045 1045
1046 1046
@command(
    b'perf::bundle',
    [
        (
            b'r',
            b'rev',
            [],
            b'changesets to bundle',
            b'REV',
        ),
        (
            b't',
            b'type',
            b'none',
            b'bundlespec to use (see `hg help bundlespec`)',
            b'TYPE',
        ),
    ]
    + formatteropts,
    b'REVS',
)
def perfbundle(ui, repo, *revs, **opts):
    """benchmark the creation of a bundle from a repository

    For now, this only supports "none" compression.
    """
    # compatibility: parsebundlespec moved from exchange to bundlecaches
    try:
        from mercurial import bundlecaches

        parsebundlespec = bundlecaches.parsebundlespec
    except ImportError:
        from mercurial import exchange

        parsebundlespec = exchange.parsebundlespec

    from mercurial import discovery
    from mercurial import bundle2

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    cl = repo.changelog
    revs = list(revs)
    revs.extend(opts.get(b'rev', ()))
    revs = scmutil.revrange(repo, revs)
    if not revs:
        raise error.Abort(b"no revision specified")
    # make it a consistent set (ie: without topological gaps)
    old_len = len(revs)
    revs = list(repo.revs(b"%ld::%ld", revs, revs))
    if old_len != len(revs):
        new_count = len(revs) - old_len
        msg = b"add %d new revisions to make it a consistent set\n"
        ui.write_err(msg % new_count)

    targets = [cl.node(r) for r in repo.revs(b"heads(::%ld)", revs)]
    bases = [cl.node(r) for r in repo.revs(b"heads(::%ld - %ld)", revs, revs)]
    outgoing = discovery.outgoing(repo, bases, targets)

    bundle_spec = opts.get(b'type')

    bundle_spec = parsebundlespec(repo, bundle_spec, strict=False)

    # derive the changegroup version from the bundlespec when not explicit
    cgversion = bundle_spec.params.get(b"cg.version")
    if cgversion is None:
        if bundle_spec.version == b'v1':
            cgversion = b'01'
        if bundle_spec.version == b'v2':
            cgversion = b'02'
    if cgversion not in changegroup.supportedoutgoingversions(repo):
        err = b"repository does not support bundle version %s"
        raise error.Abort(err % cgversion)

    if cgversion == b'01':  # bundle1
        bversion = b'HG10' + bundle_spec.wirecompression
        bcompression = None
    elif cgversion in (b'02', b'03'):
        bversion = b'HG20'
        bcompression = bundle_spec.wirecompression
    else:
        err = b'perf::bundle: unexpected changegroup version %s'
        raise error.ProgrammingError(err % cgversion)

    if bcompression is None:
        bcompression = b'UN'

    # only uncompressed bundles are supported for now (see docstring)
    if bcompression != b'UN':
        err = b'perf::bundle: compression currently unsupported: %s'
        raise error.ProgrammingError(err % bcompression)

    def do_bundle():
        # write to os.devnull: only bundle generation is being measured
        bundle2.writenewbundle(
            ui,
            repo,
            b'perf::bundle',
            os.devnull,
            bversion,
            outgoing,
            bundle_spec.params,
        )

    timer(do_bundle)
    fm.end()
1150 1150
1151 1151
@command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
def perfbundleread(ui, repo, bundlepath, **opts):
    """Benchmark reading of bundle files.

    This command is meant to isolate the I/O part of bundle reading as
    much as possible.
    """
    from mercurial import (
        bundle2,
        exchange,
        streamclone,
    )

    opts = _byteskwargs(opts)

    # benchmark factory: reopen the bundle each run and apply fn to it
    def makebench(fn):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                fn(bundle)

        return run

    # read the unbundler stream in fixed-size chunks
    def makereadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                while bundle.read(size):
                    pass

        return run

    # baseline: raw file reads with no bundle layer at all
    def makestdioread(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                while fh.read(size):
                    pass

        return run

    # bundle1

    def deltaiter(bundle):
        for delta in bundle.deltaiter():
            pass

    def iterchunks(bundle):
        for chunk in bundle.getchunks():
            pass

    # bundle2

    def forwardchunks(bundle):
        for chunk in bundle._forwardchunks():
            pass

    def iterparts(bundle):
        for part in bundle.iterparts():
            pass

    def iterpartsseekable(bundle):
        for part in bundle.iterparts(seekable=True):
            pass

    def seek(bundle):
        for part in bundle.iterparts(seekable=True):
            part.seek(0, os.SEEK_END)

    def makepartreadnbytes(size):
        def run():
            with open(bundlepath, b'rb') as fh:
                bundle = exchange.readbundle(ui, fh, bundlepath)
                for part in bundle.iterparts():
                    while part.read(size):
                        pass

        return run

    benches = [
        (makestdioread(8192), b'read(8k)'),
        (makestdioread(16384), b'read(16k)'),
        (makestdioread(32768), b'read(32k)'),
        (makestdioread(131072), b'read(128k)'),
    ]

    # peek at the bundle type once to decide which benchmarks apply
    with open(bundlepath, b'rb') as fh:
        bundle = exchange.readbundle(ui, fh, bundlepath)

        if isinstance(bundle, changegroup.cg1unpacker):
            benches.extend(
                [
                    (makebench(deltaiter), b'cg1 deltaiter()'),
                    (makebench(iterchunks), b'cg1 getchunks()'),
                    (makereadnbytes(8192), b'cg1 read(8k)'),
                    (makereadnbytes(16384), b'cg1 read(16k)'),
                    (makereadnbytes(32768), b'cg1 read(32k)'),
                    (makereadnbytes(131072), b'cg1 read(128k)'),
                ]
            )
        elif isinstance(bundle, bundle2.unbundle20):
            benches.extend(
                [
                    (makebench(forwardchunks), b'bundle2 forwardchunks()'),
                    (makebench(iterparts), b'bundle2 iterparts()'),
                    (
                        makebench(iterpartsseekable),
                        b'bundle2 iterparts() seekable',
                    ),
                    (makebench(seek), b'bundle2 part seek()'),
                    (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
                    (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
                    (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
                    (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
                ]
            )
        elif isinstance(bundle, streamclone.streamcloneapplier):
            raise error.Abort(b'stream clone bundles not supported')
        else:
            raise error.Abort(b'unhandled bundle type: %s' % type(bundle))

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
1276 1276
1277 1277
@command(
    b'perf::changegroupchangelog|perfchangegroupchangelog',
    formatteropts
    + [
        (b'', b'cgversion', b'02', b'changegroup version'),
        (b'r', b'rev', b'', b'revisions to add to changegroup'),
    ],
)
def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
    """Benchmark producing a changelog group for a changegroup.

    This measures the time spent processing the changelog during a
    bundle operation. This occurs during `hg bundle` and on a server
    processing a `getbundle` wire protocol request (handles clones
    and pull requests).

    By default, all revisions are added to the changegroup.
    """
    opts = _byteskwargs(opts)
    cl = repo.changelog
    nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
    bundler = changegroup.getbundler(cgversion, repo)

    def d():
        # consume the chunk generator fully; generation is what we time
        state, chunks = bundler._generatechangelog(cl, nodes)
        for chunk in chunks:
            pass

    timer, fm = gettimer(ui, opts)

    # Terminal printing can interfere with timing. So disable it.
    with ui.configoverride({(b'progress', b'disable'): True}):
        timer(d)

    fm.end()
1313 1313
1314 1314
@command(b'perf::dirs|perfdirs', formatteropts)
def perfdirs(ui, repo, **opts):
    """benchmark a 'dirstate.hasdir' call from a cold `_dirs` cache

    The directory-structure cache is dropped after every call so each run
    rebuilds it.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # force the dirstate to be loaded before timing starts
    b'a' in dirstate

    def d():
        dirstate.hasdir(b'a')
        try:
            # drop the cache so the next run starts cold; older dirstate
            # maps may not have a `_dirs` attribute
            del dirstate._map._dirs
        except AttributeError:
            pass

    timer(d)
    fm.end()
1331 1331
1332 1332
@command(
    b'perf::dirstate|perfdirstate',
    [
        (
            b'',
            b'iteration',
            None,
            b'benchmark a full iteration for the dirstate',
        ),
        (
            b'',
            b'contains',
            None,
            b'benchmark a large amount of `nf in dirstate` calls',
        ),
    ]
    + formatteropts,
)
def perfdirstate(ui, repo, **opts):
    """benchmark the time of various dirstate operations

    By default benchmark the time necessary to load a dirstate from scratch.
    The dirstate is loaded to the point where a "contains" request can be
    answered.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # prime the dirstate so setup cost is excluded from the first run
    b"a" in repo.dirstate

    if opts[b'iteration'] and opts[b'contains']:
        msg = b'only specify one of --iteration or --contains'
        raise error.Abort(msg)

    if opts[b'iteration']:
        setup = None
        dirstate = repo.dirstate

        def d():
            for f in dirstate:
                pass

    elif opts[b'contains']:
        setup = None
        dirstate = repo.dirstate
        allfiles = list(dirstate)
        # also add file path that will be "missing" from the dirstate
        allfiles.extend([f[::-1] for f in allfiles])

        def d():
            for f in allfiles:
                f in dirstate

    else:
        # default: measure a cold load, so invalidate between runs

        def setup():
            repo.dirstate.invalidate()

        def d():
            b"a" in repo.dirstate

    timer(d, setup=setup)
    fm.end()
1395 1395
1396 1396
@command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
def perfdirstatedirs(ui, repo, **opts):
    """benchmark a 'dirstate.hasdir' call from an empty `dirs` cache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # prime the dirstate itself; only the dirs cache is dropped per run
    repo.dirstate.hasdir(b"a")

    def setup():
        try:
            # older dirstate map implementations may lack `_dirs`
            del repo.dirstate._map._dirs
        except AttributeError:
            pass

    def d():
        repo.dirstate.hasdir(b"a")

    timer(d, setup=setup)
    fm.end()
1415 1415
1416 1416
@command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
def perfdirstatefoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.filefoldmap.get()` request

    The dirstate filefoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate before timing
    dirstate._map.filefoldmap.get(b'a')

    def setup():
        # drop the cached map so each run rebuilds it
        del dirstate._map.filefoldmap

    def d():
        dirstate._map.filefoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()
1436 1436
1437 1437
@command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
def perfdirfoldmap(ui, repo, **opts):
    """benchmark a `dirstate._map.dirfoldmap.get()` request

    The dirstate dirfoldmap cache is dropped between every request.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate
    # prime the dirstate before timing
    dirstate._map.dirfoldmap.get(b'a')

    def setup():
        # drop both the fold map and the underlying dirs cache so each
        # run rebuilds from scratch; `_dirs` may not exist on older maps
        del dirstate._map.dirfoldmap
        try:
            del dirstate._map._dirs
        except AttributeError:
            pass

    def d():
        dirstate._map.dirfoldmap.get(b'a')

    timer(d, setup=setup)
    fm.end()
1461 1461
1462 1462
@command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
def perfdirstatewrite(ui, repo, **opts):
    """benchmark the time it takes to write a dirstate on disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    ds = repo.dirstate
    # prime the dirstate before timing
    b"a" in ds

    def setup():
        # mark the dirstate dirty so each run actually performs a write
        ds._dirty = True

    def d():
        ds.write(repo.currenttransaction())

    # the dirstate must not be written without holding the wlock
    with repo.wlock():
        timer(d, setup=setup)
    fm.end()
1479 1480
1480 1481
def _getmergerevs(repo, opts):
    """parse command arguments to return the revisions involved in a merge

    input: options dictionary with `rev`, `from` and `base`
    output: (localctx, otherctx, basectx)
    """
    if opts[b'from']:
        fromrev = scmutil.revsingle(repo, opts[b'from'])
        wctx = repo[fromrev]
    else:
        wctx = repo[None]
        # we don't want working dir files to be stat'd in the benchmark, so
        # prime that cache
        wctx.dirty()
    rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
    if opts[b'base']:
        fromrev = scmutil.revsingle(repo, opts[b'base'])
        ancestor = repo[fromrev]
    else:
        # no explicit base: use the common ancestor of the two sides
        ancestor = wctx.ancestor(rctx)
    return (wctx, rctx, ancestor)
1502 1503
1503 1504
@command(
    b'perf::mergecalculate|perfmergecalculate',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecalculate(ui, repo, **opts):
    """benchmark `merge.calculateupdates` between two revisions"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        merge.calculateupdates(
            repo,
            wctx,
            rctx,
            [ancestor],
            branchmerge=False,
            force=False,
            acceptremote=True,
            followcopies=True,
        )

    timer(d)
    fm.end()
1535 1536
1536 1537
@command(
    b'perf::mergecopies|perfmergecopies',
    [
        (b'r', b'rev', b'.', b'rev to merge against'),
        (b'', b'from', b'', b'rev to merge from'),
        (b'', b'base', b'', b'the revision to use as base'),
    ]
    + formatteropts,
)
def perfmergecopies(ui, repo, **opts):
    """measure runtime of `copies.mergecopies`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    wctx, rctx, ancestor = _getmergerevs(repo, opts)

    def d():
        # acceptremote is True because we don't want prompts in the middle of
        # our benchmark
        copies.mergecopies(repo, wctx, rctx, ancestor)

    timer(d)
    fm.end()
1559 1560
1560 1561
@command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
def perfpathcopies(ui, repo, rev1, rev2, **opts):
    """benchmark the copy tracing logic"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # resolve both revisions once, outside the timed closure
    ctx1 = scmutil.revsingle(repo, rev1, rev1)
    ctx2 = scmutil.revsingle(repo, rev2, rev2)

    def d():
        copies.pathcopies(ctx1, ctx2)

    timer(d)
    fm.end()
1574 1575
1575 1576
@command(
    b'perf::phases|perfphases',
    [
        (b'', b'full', False, b'include file reading time too'),
    ],
    b"",
)
def perfphases(ui, repo, **opts):
    """benchmark phasesets computation"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    _phases = repo._phasecache
    full = opts.get(b'full')

    def d():
        phases = _phases
        if full:
            # with --full, also drop the filecache so the phaseroots file
            # is re-read from disk on every run
            clearfilecache(repo, b'_phasecache')
            phases = repo._phasecache
        phases.invalidate()
        phases.loadphaserevs(repo)

    timer(d)
    fm.end()
1600 1601
1601 1602
@command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
def perfphasesremote(ui, repo, dest=None, **opts):
    """benchmark time needed to analyse phases of the remote server"""
    from mercurial.node import bin
    from mercurial import (
        exchange,
        hg,
        phases,
    )

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    path = ui.getpath(dest, default=(b'default-push', b'default'))
    if not path:
        raise error.Abort(
            b'default repository not configured!',
            hint=b"see 'hg help config.paths'",
        )
    # newer Mercurial path objects expose `main_path`; fall back otherwise
    if util.safehasattr(path, 'main_path'):
        path = path.get_push_variant()
        dest = path.loc
    else:
        dest = path.pushloc or path.loc
    ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
    other = hg.peer(repo, opts, dest)

    # easier to perform discovery through the operation
    op = exchange.pushoperation(repo, other)
    exchange._pushdiscoverychangeset(op)

    remotesubset = op.fallbackheads

    # fetch the remote phase data once, before timing
    with other.commandexecutor() as e:
        remotephases = e.callcommand(
            b'listkeys', {b'namespace': b'phases'}
        ).result()
    del other
    publishing = remotephases.get(b'publishing', False)
    if publishing:
        ui.statusnoi18n(b'publishing: yes\n')
    else:
        ui.statusnoi18n(b'publishing: no\n')

    # compatibility: prefer index.has_node, fall back to the old nodemap
    has_node = getattr(repo.changelog.index, 'has_node', None)
    if has_node is None:
        has_node = repo.changelog.nodemap.__contains__
    nonpublishroots = 0
    for nhex, phase in remotephases.iteritems():
        if nhex == b'publishing':  # ignore data related to publish option
            continue
        node = bin(nhex)
        if has_node(node) and int(phase):
            nonpublishroots += 1
    ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
    ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)

    def d():
        phases.remotephasessummary(repo, remotesubset, remotephases)

    timer(d)
    fm.end()
1664 1665
1665 1666
@command(
    b'perf::manifest|perfmanifest',
    [
        (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
        (b'', b'clear-disk', False, b'clear on-disk caches too'),
    ]
    + formatteropts,
    b'REV|NODE',
)
def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
    """benchmark the time to read a manifest from disk and return a usable
    dict-like object

    Manifest caches are cleared before retrieval."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    if not manifest_rev:
        # REV is a changeset revision: use its manifest node
        ctx = scmutil.revsingle(repo, rev, rev)
        t = ctx.manifestnode()
    else:
        from mercurial.node import bin

        if len(rev) == 40:
            # full hex node of a manifest revision
            t = bin(rev)
        else:
            try:
                rev = int(rev)

                # compatibility across manifestlog API versions
                if util.safehasattr(repo.manifestlog, b'getstorage'):
                    t = repo.manifestlog.getstorage(b'').node(rev)
                else:
                    t = repo.manifestlog._revlog.lookup(rev)
            except ValueError:
                raise error.Abort(
                    b'manifest revision must be integer or full node'
                )

    def d():
        # drop the caches so each run actually reads the manifest
        repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
        repo.manifestlog[t].read()

    timer(d)
    fm.end()
1709 1710
1710 1711
@command(b'perf::changeset|perfchangeset', formatteropts)
def perfchangeset(ui, repo, rev, **opts):
    """benchmark reading a single changelog entry"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    n = scmutil.revsingle(repo, rev).node()

    def d():
        repo.changelog.read(n)
        # repo.changelog._cache = None

    timer(d)
    fm.end()
1723 1724
1724 1725
@command(b'perf::ignore|perfignore', formatteropts)
def perfignore(ui, repo, **opts):
    """benchmark operation related to computing ignore"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    dirstate = repo.dirstate

    def setupone():
        # drop the cached ignore matcher so each run rebuilds it
        dirstate.invalidate()
        clearfilecache(dirstate, b'_ignore')

    def runone():
        # accessing the property triggers the ignore computation
        dirstate._ignore

    timer(runone, setup=setupone, title=b"load")
    fm.end()
1741 1742
1742 1743
@command(
    b'perf::index|perfindex',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'no-lookup', None, b'do not revision lookup post creation'),
    ]
    + formatteropts,
)
def perfindex(ui, repo, **opts):
    """benchmark index creation time followed by a lookup

    The default is to look `tip` up. Depending on the index implementation,
    the revision looked up can matters. For example, an implementation
    scanning the index will have a faster lookup time for `--rev tip` than for
    `--rev 0`. The number of looked up revisions and their order can also
    matters.

    Example of useful set to test:

    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    It is not currently possible to check for lookup of a missing node. For
    deeper lookup benchmarking, checkout the `perfnodemap` command."""
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    if opts[b'no_lookup']:
        if opts['rev']:
            raise error.Abort('--no-lookup and --rev are mutually exclusive')
        nodes = []
    elif not opts[b'rev']:
        nodes = [repo[b"tip"].node()]
    else:
        revs = scmutil.revrange(repo, opts[b'rev'])
        cl = repo.changelog
        nodes = [cl.node(r) for r in revs]

    unfi = repo.unfiltered()
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func

    def setup():
        # probably not necessary, but for good measure
        clearchangelog(unfi)

    def d():
        # recreate the changelog (index creation) then look up each node
        cl = makecl(unfi)
        for n in nodes:
            cl.rev(n)

    timer(d, setup=setup)
    fm.end()
1805 1806
1806 1807
@command(
    b'perf::nodemap|perfnodemap',
    [
        (b'', b'rev', [], b'revision to be looked up (default tip)'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ]
    + formatteropts,
)
def perfnodemap(ui, repo, **opts):
    """benchmark the time necessary to look up revision from a cold nodemap

    Depending on the implementation, the amount and order of revision we look
    up can varies. Example of useful set to test:
    * tip
    * 0
    * -10:
    * :10
    * -10: + :10
    * :10: + -10:
    * -10000:
    * -10000: + 0

    The command currently focus on valid binary lookup. Benchmarking for
    hexlookup, prefix lookup and missing lookup would also be valuable.
    """
    import mercurial.revlog

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg

    unfi = repo.unfiltered()
    clearcaches = opts[b'clear_caches']
    # find the filecache func directly
    # This avoid polluting the benchmark with the filecache logic
    makecl = unfi.__class__.changelog.func
    if not opts[b'rev']:
        raise error.Abort(b'use --rev to specify revisions to look up')
    revs = scmutil.revrange(repo, opts[b'rev'])
    cl = repo.changelog
    nodes = [cl.node(r) for r in revs]

    # use a list to pass reference to a nodemap from one closure to the next
    nodeget = [None]

    def setnodeget():
        # probably not necessary, but for good measure
        clearchangelog(unfi)
        cl = makecl(unfi)
        # compatibility: newer indexes expose get_rev, older ones nodemap.get
        if util.safehasattr(cl.index, 'get_rev'):
            nodeget[0] = cl.index.get_rev
        else:
            nodeget[0] = cl.nodemap.get

    def d():
        get = nodeget[0]
        for n in nodes:
            get(n)

    setup = None
    if clearcaches:

        def setup():
            setnodeget()

    else:
        setnodeget()
        d()  # prewarm the data structure
    timer(d, setup=setup)
    fm.end()
1877 1878
1878 1879
@command(b'perf::startup|perfstartup', formatteropts)
def perfstartup(ui, repo, **opts):
    """benchmark spawning `hg version -q` with an empty HGRCPATH"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def d():
        if os.name != 'nt':
            os.system(
                b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
            )
        else:
            # Windows: no inline env assignment in the command line
            os.environ['HGRCPATH'] = r' '
            os.system("%s version -q > NUL" % sys.argv[0])

    timer(d)
    fm.end()
1895 1896
1896 1897
@command(b'perf::parents|perfparents', formatteropts)
def perfparents(ui, repo, **opts):
    """benchmark the time necessary to fetch one changeset's parents.

    The fetch is done using the `node identifier`, traversing all object layers
    from the repository object. The first N revisions will be used for this
    benchmark. N is controlled by the ``perf.parentscount`` config option
    (default: 1000).
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    # control the number of commits perfparents iterates over
    # experimental config: perf.parentscount
    count = getint(ui, b"perf", b"parentscount", 1000)
    if len(repo.changelog) < count:
        raise error.Abort(b"repo needs %d commits for this test" % count)
    repo = repo.unfiltered()
    # resolve the node ids up front, outside the timed closure
    nl = [repo.changelog.node(i) for i in _xrange(count)]

    def d():
        for n in nl:
            repo.changelog.parents(n)

    timer(d)
    fm.end()
1922 1923
1923 1924
@command(b'perf::ctxfiles|perfctxfiles', formatteropts)
def perfctxfiles(ui, repo, x, **opts):
    """benchmark computing a changeset's file list via the context API"""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)

    def d():
        len(repo[x].files())

    timer(d)
    fm.end()
1935 1936
1936 1937
@command(b'perf::rawfiles|perfrawfiles', formatteropts)
def perfrawfiles(ui, repo, x, **opts):
    """benchmark reading a changeset's file list straight from the changelog"""
    opts = _byteskwargs(opts)
    x = int(x)
    timer, fm = gettimer(ui, opts)
    cl = repo.changelog

    def d():
        # field 3 of the parsed changelog entry is the files list
        len(cl.read(x)[3])

    timer(d)
    fm.end()
1949 1950
1950 1951
@command(b'perf::lookup|perflookup', formatteropts)
def perflookup(ui, repo, rev, **opts):
    """benchmark resolving a revision specifier with `repo.lookup`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    timer(lambda: len(repo.lookup(rev)))
    fm.end()
1957 1958
1958 1959
@command(
    b'perf::linelogedits|perflinelogedits',
    [
        (b'n', b'edits', 10000, b'number of edits'),
        (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
    ],
    norepo=True,
)
def perflinelogedits(ui, **opts):
    """benchmark applying a sequence of random edits to a linelog"""
    from mercurial import linelog

    opts = _byteskwargs(opts)

    edits = opts[b'edits']
    maxhunklines = opts[b'max_hunk_lines']

    maxb1 = 100000
    # fixed seed: the same edit sequence is generated on every invocation
    random.seed(0)
    randint = random.randint
    currentlines = 0
    arglist = []
    for rev in _xrange(edits):
        # pick a hunk [a1, a2) in the current content, replaced by [b1, b2)
        a1 = randint(0, currentlines)
        a2 = randint(a1, min(currentlines, a1 + maxhunklines))
        b1 = randint(0, maxb1)
        b2 = randint(b1, b1 + maxhunklines)
        currentlines += (b2 - b1) - (a2 - a1)
        arglist.append((rev, a1, a2, b1, b2))

    def d():
        ll = linelog.linelog()
        for args in arglist:
            ll.replacelines(*args)

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
1996 1997
1997 1998
@command(b'perf::revrange|perfrevrange', formatteropts)
def perfrevrange(ui, repo, *specs, **opts):
    """benchmark resolving revset specifications with `scmutil.revrange`"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    revrange = scmutil.revrange
    timer(lambda: len(revrange(repo, specs)))
    fm.end()
2005 2006
2006 2007
@command(b'perf::nodelookup|perfnodelookup', formatteropts)
def perfnodelookup(ui, repo, rev, **opts):
    """benchmark looking up a node in a freshly-opened changelog revlog"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    import mercurial.revlog

    mercurial.revlog._prereadsize = 2 ** 24  # disable lazy parser in old hg
    n = scmutil.revsingle(repo, rev).node()

    # compatibility: the revlog constructor signature changed over time
    try:
        cl = revlog(getsvfs(repo), radix=b"00changelog")
    except TypeError:
        cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")

    def d():
        cl.rev(n)
        # clear caches so the next run performs a cold lookup again
        clearcaches(cl)

    timer(d)
    fm.end()
2027 2028
2028 2029
@command(
    b'perf::log|perflog',
    [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
)
def perflog(ui, repo, rev=None, **opts):
    """benchmark running `hg log`, optionally following renames"""
    opts = _byteskwargs(opts)
    if rev is None:
        rev = []
    timer, fm = gettimer(ui, opts)
    # buffer the output so terminal printing does not pollute the timing
    ui.pushbuffer()
    timer(
        lambda: commands.log(
            ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
        )
    )
    ui.popbuffer()
    fm.end()
2046 2047
2047 2048
@command(b'perf::moonwalk|perfmoonwalk', formatteropts)
def perfmoonwalk(ui, repo, **opts):
    """benchmark walking the changelog backwards

    This also loads the changelog data for each revision in the changelog.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def moonwalk():
        for rev in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
            # reading the branch forces the changelog entry to be loaded,
            # not just the index row
            repo[rev].branch()

    timer(moonwalk)
    fm.end()
2064 2065
2065 2066
@command(
    b'perf::templating|perftemplating',
    [
        (b'r', b'rev', [], b'revisions to run the template on'),
    ]
    + formatteropts,
)
def perftemplating(ui, repo, testedtemplate=None, **opts):
    """test the rendering time of a given template"""
    if makelogtemplater is None:
        raise error.Abort(
            b"perftemplating not available with this Mercurial",
            hint=b"use 4.3 or later",
        )

    opts = _byteskwargs(opts)

    # render into the void so terminal speed does not pollute the timing
    quietui = ui.copy()
    quietui.fout = open(os.devnull, 'wb')
    quietui.disablepager()
    revs = opts.get(b'rev')
    if not revs:
        revs = [b'all()']
    revs = list(scmutil.revrange(repo, revs))

    defaulttemplate = (
        b'{date|shortdate} [{rev}:{node|short}]'
        b' {author|person}: {desc|firstline}\n'
    )
    if testedtemplate is None:
        testedtemplate = defaulttemplate
    displayer = makelogtemplater(quietui, repo, testedtemplate)

    def render():
        for r in revs:
            ctx = repo[r]
            displayer.show(ctx)
            displayer.flush(ctx)

    timer, fm = gettimer(ui, opts)
    timer(render)
    fm.end()
2108 2109
2109 2110
2110 2111 def _displaystats(ui, opts, entries, data):
2111 2112 # use a second formatter because the data are quite different, not sure
2112 2113 # how it flies with the templater.
2113 2114 fm = ui.formatter(b'perf-stats', opts)
2114 2115 for key, title in entries:
2115 2116 values = data[key]
2116 2117 nbvalues = len(data)
2117 2118 values.sort()
2118 2119 stats = {
2119 2120 'key': key,
2120 2121 'title': title,
2121 2122 'nbitems': len(values),
2122 2123 'min': values[0][0],
2123 2124 '10%': values[(nbvalues * 10) // 100][0],
2124 2125 '25%': values[(nbvalues * 25) // 100][0],
2125 2126 '50%': values[(nbvalues * 50) // 100][0],
2126 2127 '75%': values[(nbvalues * 75) // 100][0],
2127 2128 '80%': values[(nbvalues * 80) // 100][0],
2128 2129 '85%': values[(nbvalues * 85) // 100][0],
2129 2130 '90%': values[(nbvalues * 90) // 100][0],
2130 2131 '95%': values[(nbvalues * 95) // 100][0],
2131 2132 '99%': values[(nbvalues * 99) // 100][0],
2132 2133 'max': values[-1][0],
2133 2134 }
2134 2135 fm.startitem()
2135 2136 fm.data(**stats)
2136 2137 # make node pretty for the human output
2137 2138 fm.plain('### %s (%d items)\n' % (title, len(values)))
2138 2139 lines = [
2139 2140 'min',
2140 2141 '10%',
2141 2142 '25%',
2142 2143 '50%',
2143 2144 '75%',
2144 2145 '80%',
2145 2146 '85%',
2146 2147 '90%',
2147 2148 '95%',
2148 2149 '99%',
2149 2150 'max',
2150 2151 ]
2151 2152 for l in lines:
2152 2153 fm.plain('%s: %s\n' % (l, stats[l]))
2153 2154 fm.end()
2154 2155
2155 2156
@command(
    b'perf::helper-mergecopies|perfhelper-mergecopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelpermergecopies(ui, repo, revs=[], **opts):
    """find statistics about potential parameters for `perfmergecopies`

    This command find (base, p1, p2) triplet relevant for copytracing
    benchmarking in the context of a merge. It reports values for some of the
    parameters that impact merge copy tracing time during merge.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details come at the cost of slower command
    execution.

    Since rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision triplets are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # (column title, %-format placeholder) pairs driving both the header and
    # the per-row output line
    output_template = [
        ("base", "%(base)12s"),
        ("p1", "%(p1.node)12s"),
        ("p2", "%(p2.node)12s"),
        ("p1.nb-revs", "%(p1.nbrevs)12d"),
        ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
        ("p1.renames", "%(p1.renamedfiles)12d"),
        ("p1.time", "%(p1.time)12.3f"),
        ("p2.nb-revs", "%(p2.nbrevs)12d"),
        ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
        ("p2.renames", "%(p2.renamedfiles)12d"),
        ("p2.time", "%(p2.time)12.3f"),
        ("renames", "%(nbrenamedfiles)12d"),
        ("total.time", "%(time)12.3f"),
    ]
    if not dotiming:
        # drop the columns that only exist when rename detection is run
        output_template = [
            i
            for i in output_template
            if not ('time' in i[0] or 'renames' in i[0])
        ]
    header_names = [h for (h, v) in output_template]
    output = ' '.join([v for (h, v) in output_template]) + '\n'
    header = ' '.join(['%12s'] * len(header_names)) + '\n'
    fm.plain(header % tuple(header_names))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        # measurement accumulators consumed by _displaystats at the end
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['parentnbrenames'] = []
            alldata['totalnbrenames'] = []
            alldata['parenttime'] = []
            alldata['totaltime'] = []

    # only merge commits are relevant for (base, p1, p2) triplets
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1()
        p2 = ctx.p2()
        bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
        for b in bases:
            b = repo[b]
            p1missing = copies._computeforwardmissing(b, p1)
            p2missing = copies._computeforwardmissing(b, p2)
            # NOTE(review): the literal uses bytes keys (b'p1.nbrevs') while
            # the lookups below use str keys ('p1.nbrevs') — looks py3-suspect
            # on the --stats path; confirm against upstream perf.py.
            data = {
                b'base': b.hex(),
                b'p1.node': p1.hex(),
                b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
                b'p1.nbmissingfiles': len(p1missing),
                b'p2.node': p2.hex(),
                b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
                b'p2.nbmissingfiles': len(p2missing),
            }
            if dostats:
                if p1missing:
                    alldata['nbrevs'].append(
                        (data['p1.nbrevs'], b.hex(), p1.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p1.nbmissingfiles'], b.hex(), p1.hex())
                    )
                if p2missing:
                    alldata['nbrevs'].append(
                        (data['p2.nbrevs'], b.hex(), p2.hex())
                    )
                    alldata['nbmissingfiles'].append(
                        (data['p2.nbmissingfiles'], b.hex(), p2.hex())
                    )
            if dotiming:
                begin = util.timer()
                mergedata = copies.mergecopies(repo, p1, p2, b)
                end = util.timer()
                # not very stable timing since we did only one run
                data['time'] = end - begin
                # mergedata contains five dicts: "copy", "movewithdir",
                # "diverge", "renamedelete" and "dirmove".
                # The first 4 are about renamed file so lets count that.
                renames = len(mergedata[0])
                renames += len(mergedata[1])
                renames += len(mergedata[2])
                renames += len(mergedata[3])
                data['nbrenamedfiles'] = renames
                begin = util.timer()
                p1renames = copies.pathcopies(b, p1)
                end = util.timer()
                data['p1.time'] = end - begin
                begin = util.timer()
                p2renames = copies.pathcopies(b, p2)
                end = util.timer()
                data['p2.time'] = end - begin
                data['p1.renamedfiles'] = len(p1renames)
                data['p2.renamedfiles'] = len(p2renames)

                if dostats:
                    if p1missing:
                        alldata['parentnbrenames'].append(
                            (data['p1.renamedfiles'], b.hex(), p1.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p1.time'], b.hex(), p1.hex())
                        )
                    if p2missing:
                        alldata['parentnbrenames'].append(
                            (data['p2.renamedfiles'], b.hex(), p2.hex())
                        )
                        alldata['parenttime'].append(
                            (data['p2.time'], b.hex(), p2.hex())
                        )
                    if p1missing or p2missing:
                        alldata['totalnbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                b.hex(),
                                p1.hex(),
                                p2.hex(),
                            )
                        )
                        alldata['totaltime'].append(
                            (data['time'], b.hex(), p1.hex(), p2.hex())
                        )
            fm.startitem()
            fm.data(**data)
            # make node pretty for the human output
            out = data.copy()
            out['base'] = fm.hexfunc(b.node())
            out['p1.node'] = fm.hexfunc(p1.node())
            out['p2.node'] = fm.hexfunc(p2.node())
            fm.plain(output % out)

    fm.end()
    if dostats:
        # use a second formatter because the data are quite different, not sure
        # how it flies with the templater.
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(
                ('parentnbrenames', 'rename from one parent to base')
            )
            entries.append(('totalnbrenames', 'total number of renames'))
            entries.append(('parenttime', 'time for one parent'))
            entries.append(('totaltime', 'time for both parents'))
        _displaystats(ui, opts, entries, alldata)
2337 2338
2338 2339
@command(
    b'perf::helper-pathcopies|perfhelper-pathcopies',
    formatteropts
    + [
        (b'r', b'revs', [], b'restrict search to these revisions'),
        (b'', b'timing', False, b'provides extra data (costly)'),
        (b'', b'stats', False, b'provides statistic about the measured data'),
    ],
)
def perfhelperpathcopies(ui, repo, revs=[], **opts):
    """find statistic about potential parameters for the `perftracecopies`

    This command find source-destination pair relevant for copytracing testing.
    It report value for some of the parameters that impact copy tracing time.

    If `--timing` is set, rename detection is run and the associated timing
    will be reported. The extra details comes at the cost of a slower command
    execution.

    Since the rename detection is only run once, other factors might easily
    affect the precision of the timing. However it should give a good
    approximation of which revision pairs are very costly.
    """
    opts = _byteskwargs(opts)
    fm = ui.formatter(b'perf', opts)
    dotiming = opts[b'timing']
    dostats = opts[b'stats']

    # timing adds two extra columns (renames + elapsed time) to the table
    if dotiming:
        header = '%12s %12s %12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d "
            "%(nbrenamedfiles)12d %(time)18.5f\n"
        )
        header_names = (
            "source",
            "destination",
            "nb-revs",
            "nb-files",
            "nb-renames",
            "time",
        )
        fm.plain(header % header_names)
    else:
        header = '%12s %12s %12s %12s\n'
        output = (
            "%(source)12s %(destination)12s "
            "%(nbrevs)12d %(nbmissingfiles)12d\n"
        )
        fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))

    if not revs:
        revs = ['all()']
    revs = scmutil.revrange(repo, revs)

    if dostats:
        # measurement accumulators consumed by _displaystats at the end
        alldata = {
            'nbrevs': [],
            'nbmissingfiles': [],
        }
        if dotiming:
            alldata['nbrenames'] = []
            alldata['time'] = []

    # only merge commits yield interesting (base, parent) pairs
    roi = repo.revs('merge() and %ld', revs)
    for r in roi:
        ctx = repo[r]
        p1 = ctx.p1().rev()
        p2 = ctx.p2().rev()
        bases = repo.changelog._commonancestorsheads(p1, p2)
        for p in (p1, p2):
            for b in bases:
                base = repo[b]
                parent = repo[p]
                missing = copies._computeforwardmissing(base, parent)
                if not missing:
                    # no file to trace copies for: skip the pair entirely
                    continue
                # NOTE(review): bytes keys here (b'nbrevs') vs str lookups
                # below ('nbrevs') — looks py3-suspect on the --stats path;
                # confirm against upstream perf.py.
                data = {
                    b'source': base.hex(),
                    b'destination': parent.hex(),
                    b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
                    b'nbmissingfiles': len(missing),
                }
                if dostats:
                    alldata['nbrevs'].append(
                        (
                            data['nbrevs'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                    alldata['nbmissingfiles'].append(
                        (
                            data['nbmissingfiles'],
                            base.hex(),
                            parent.hex(),
                        )
                    )
                if dotiming:
                    begin = util.timer()
                    renames = copies.pathcopies(base, parent)
                    end = util.timer()
                    # not very stable timing since we did only one run
                    data['time'] = end - begin
                    data['nbrenamedfiles'] = len(renames)
                    if dostats:
                        alldata['time'].append(
                            (
                                data['time'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                        alldata['nbrenames'].append(
                            (
                                data['nbrenamedfiles'],
                                base.hex(),
                                parent.hex(),
                            )
                        )
                fm.startitem()
                fm.data(**data)
                # make nodes pretty for the human output
                out = data.copy()
                out['source'] = fm.hexfunc(base.node())
                out['destination'] = fm.hexfunc(parent.node())
                fm.plain(output % out)

    fm.end()
    if dostats:
        entries = [
            ('nbrevs', 'number of revision covered'),
            ('nbmissingfiles', 'number of missing files at head'),
        ]
        if dotiming:
            entries.append(('nbrenames', 'renamed files'))
            entries.append(('time', 'time'))
        _displaystats(ui, opts, entries, alldata)
2477 2478
2478 2479
@command(b'perf::cca|perfcca', formatteropts)
def perfcca(ui, repo, **opts):
    """benchmark constructing a case-collision auditor over the dirstate"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def build():
        scmutil.casecollisionauditor(ui, False, repo.dirstate)

    timer(build)
    fm.end()
2485 2486
2486 2487
@command(b'perf::fncacheload|perffncacheload', formatteropts)
def perffncacheload(ui, repo, **opts):
    """benchmark loading the fncache from disk"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    timer(lambda: store.fncache._load())
    fm.end()
2498 2499
2499 2500
@command(b'perf::fncachewrite|perffncachewrite', formatteropts)
def perffncachewrite(ui, repo, **opts):
    """benchmark writing the fncache to disk

    Runs inside a transaction (with the original file backed up) so the
    repository is left untouched afterwards.
    """
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    s = repo.store
    lock = repo.lock()
    # FIX: previously an exception during the timed runs leaked the
    # repository lock (and left the transaction open); release it in all
    # cases via try/finally.
    try:
        s.fncache._load()
        tr = repo.transaction(b'perffncachewrite')
        tr.addbackup(b'fncache')

        def d():
            # mark dirty so write() actually hits the disk on every run
            s.fncache._dirty = True
            s.fncache.write(tr)

        timer(d)
        tr.close()
    finally:
        lock.release()
    fm.end()
2518 2519
2519 2520
@command(b'perf::fncacheencode|perffncacheencode', formatteropts)
def perffncacheencode(ui, repo, **opts):
    """benchmark encoding every path currently stored in the fncache"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    store = repo.store
    # load once up front; only the encoding pass is timed
    store.fncache._load()

    def encodeall():
        for path in store.fncache.entries:
            store.encode(path)

    timer(encodeall)
    fm.end()
2533 2534
2534 2535
def _bdiffworker(q, blocks, xdiff, ready, done):
    # Worker loop for the threaded variant of `perfbdiff`: pull (text, text)
    # pairs from `q` and diff them until a None sentinel arrives, then park
    # on the `ready` condition until the main thread wakes every worker for
    # the next timed run — or for shutdown once `done` has been set.
    while not done.is_set():
        pair = q.get()
        while pair is not None:
            if xdiff:
                mdiff.bdiff.xdiffblocks(*pair)
            elif blocks:
                mdiff.bdiff.blocks(*pair)
            else:
                mdiff.textdiff(*pair)
            q.task_done()
            pair = q.get()
        q.task_done()  # for the None one
        with ready:
            ready.wait()
2550 2551
2551 2552
def _manifestrevision(repo, mnode):
    """return the raw manifest text for node ``mnode``

    Works across Mercurial versions: modern ones expose ``getstorage`` on
    the manifest log, older ones only the private ``_revlog`` attribute.
    """
    ml = repo.manifestlog
    if util.safehasattr(ml, b'getstorage'):
        return ml.getstorage(b'').revision(mnode)
    return ml._revlog.revision(mnode)
2561 2562
2562 2563
@command(
    b'perf::bdiff|perfbdiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
        (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
        (b'', b'blocks', False, b'test computing diffs into blocks'),
        (b'', b'xdiff', False, b'use xdiff algorithm'),
    ],
    b'-c|-m|FILE REV',
)
def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
    """benchmark a bdiff between revisions

    By default, benchmark a bdiff between its delta parent and itself.

    With ``--count``, benchmark bdiffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure bdiffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)

    if opts[b'xdiff'] and not opts[b'blocks']:
        raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')

    if opts[b'alldata']:
        opts[b'changelog'] = True

    # with --changelog/--manifest the positional arg is the revision,
    # not a file
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfbdiff', b'invalid arguments')

    blocks = opts[b'blocks']
    xdiff = opts[b'xdiff']
    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)

    # gather all (old, new) text pairs up front so only diffing is timed
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    withthreads = threads > 0
    if not withthreads:

        def d():
            for pair in textpairs:
                if xdiff:
                    mdiff.bdiff.xdiffblocks(*pair)
                elif blocks:
                    mdiff.bdiff.blocks(*pair)
                else:
                    mdiff.textdiff(*pair)

    else:
        # spin up the worker pool; the initial None sentinels and q.join()
        # make sure every worker is parked on `ready` before timing starts
        q = queue()
        for i in _xrange(threads):
            q.put(None)
        ready = threading.Condition()
        done = threading.Event()
        for i in _xrange(threads):
            threading.Thread(
                target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
            ).start()
        q.join()

        def d():
            # feed all pairs, signal end-of-run with one None per worker,
            # wake the workers, then wait for the queue to drain
            for pair in textpairs:
                q.put(pair)
            for i in _xrange(threads):
                q.put(None)
            with ready:
                ready.notify_all()
            q.join()

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()

    if withthreads:
        # shut the worker pool down: set the done flag, unblock the q.get()
        # calls with sentinels, then wake everyone parked on `ready`
        done.set()
        for i in _xrange(threads):
            q.put(None)
        with ready:
            ready.notify_all()
2677 2678
2678 2679
@command(
    b'perf::unbundle',
    formatteropts,
    b'BUNDLE_FILE',
)
def perf_unbundle(ui, repo, fname, **opts):
    """benchmark application of a bundle in a repository.

    This does not include the final transaction processing"""

    from mercurial import exchange
    from mercurial import bundle2
    from mercurial import transaction

    opts = _byteskwargs(opts)

    ### some compatibility hotfix
    #
    # the data attribute is dropped in 63edc384d3b7 a changeset introducing a
    # critical regression that break transaction rollback for files that are
    # de-inlined.
    method = transaction.transaction._addentry
    pre_63edc384d3b7 = "data" in getargspec(method).args
    # the `detailed_exit_code` attribute is introduced in 33c0c25d0b0f
    # a changeset that is a close descendant of 18415fc918a1, the changeset
    # that conclude the fix run for the bug introduced in 63edc384d3b7.
    args = getargspec(error.Abort.__init__).args
    post_18415fc918a1 = "detailed_exit_code" in args

    old_max_inline = None
    try:
        if not (pre_63edc384d3b7 or post_18415fc918a1):
            # disable inlining
            old_max_inline = mercurial.revlog._maxinline
            # large enough to never happen
            mercurial.revlog._maxinline = 2 ** 50

        with repo.lock():
            # bundle holds [bundle-generator, open-transaction] across
            # setup/apply so each run starts from a clean state
            bundle = [None, None]
            orig_quiet = repo.ui.quiet
            try:
                repo.ui.quiet = True
                with open(fname, mode="rb") as f:

                    def noop_report(*args, **kwargs):
                        pass

                    def setup():
                        # abort any leftover transaction from the previous
                        # run, then re-read the bundle from the start
                        gen, tr = bundle
                        if tr is not None:
                            tr.abort()
                        bundle[:] = [None, None]
                        f.seek(0)
                        bundle[0] = exchange.readbundle(ui, f, fname)
                        bundle[1] = repo.transaction(b'perf::unbundle')
                        # silence the transaction
                        bundle[1]._report = noop_report

                    def apply():
                        gen, tr = bundle
                        bundle2.applybundle(
                            repo,
                            gen,
                            tr,
                            source=b'perf::unbundle',
                            url=fname,
                        )

                    timer, fm = gettimer(ui, opts)
                    timer(apply, setup=setup)
                    fm.end()
            finally:
                # BUG FIX: this used to read `repo.ui.quiet == orig_quiet`,
                # a no-op comparison — the verbosity level was never
                # actually restored.
                repo.ui.quiet = orig_quiet
                gen, tr = bundle
                if tr is not None:
                    tr.abort()
    finally:
        if old_max_inline is not None:
            mercurial.revlog._maxinline = old_max_inline
2758 2759
2759 2760
@command(
    b'perf::unidiff|perfunidiff',
    revlogopts
    + formatteropts
    + [
        (
            b'',
            b'count',
            1,
            b'number of revisions to test (when using --startrev)',
        ),
        (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
    ],
    b'-c|-m|FILE REV',
)
def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
    """benchmark a unified diff between revisions

    This doesn't include any copy tracing - it's just a unified diff
    of the texts.

    By default, benchmark a diff between its delta parent and itself.

    With ``--count``, benchmark diffs between delta parents and self for N
    revisions starting at the specified revision.

    With ``--alldata``, assume the requested revision is a changeset and
    measure diffs for all changes related to that changeset (manifest
    and filelogs).
    """
    opts = _byteskwargs(opts)
    if opts[b'alldata']:
        opts[b'changelog'] = True

    # with --changelog/--manifest the positional arg is the revision,
    # not a file
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfunidiff', b'invalid arguments')

    textpairs = []

    r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)

    # gather all (old, new) text pairs up front so only diffing is timed
    startrev = r.rev(r.lookup(rev))
    for rev in range(startrev, min(startrev + count, len(r) - 1)):
        if opts[b'alldata']:
            # Load revisions associated with changeset.
            ctx = repo[rev]
            mtext = _manifestrevision(repo, ctx.manifestnode())
            for pctx in ctx.parents():
                pman = _manifestrevision(repo, pctx.manifestnode())
                textpairs.append((pman, mtext))

            # Load filelog revisions by iterating manifest delta.
            man = ctx.manifest()
            pman = ctx.p1().manifest()
            for filename, change in pman.diff(man).items():
                fctx = repo.file(filename)
                f1 = fctx.revision(change[0][0] or -1)
                f2 = fctx.revision(change[1][0] or -1)
                textpairs.append((f1, f2))
        else:
            dp = r.deltaparent(rev)
            textpairs.append((r.revision(dp), r.revision(rev)))

    def d():
        for left, right in textpairs:
            # The date strings don't matter, so we pass empty strings.
            headerlines, hunks = mdiff.unidiff(
                left, b'', right, b'', b'left', b'right', binary=False
            )
            # consume iterators in roughly the way patch.py does
            b'\n'.join(headerlines)
            b''.join(sum((list(hlines) for hrange, hlines in hunks), []))

    timer, fm = gettimer(ui, opts)
    timer(d)
    fm.end()
2838 2839
2839 2840
@command(b'perf::diffwd|perfdiffwd', formatteropts)
def perfdiffwd(ui, repo, **opts):
    """Profile diff of working directory changes"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    options = {
        'w': 'ignore_all_space',
        'b': 'ignore_space_change',
        'B': 'ignore_blank_lines',
    }

    # benchmark the plain diff plus each whitespace-related option combo
    for diffopt in ('', 'w', 'b', 'B', 'wB'):
        diffkwargs = {options[flag]: b'1' for flag in diffopt}

        def run(kwargs=diffkwargs):  # bind per-iteration via default arg
            ui.pushbuffer()
            commands.diff(ui, repo, **kwargs)
            ui.popbuffer()

        label = diffopt.encode('ascii')
        title = b'diffopts: %s' % (label and (b'-' + label) or b'none')
        timer(run, title=title)
    fm.end()
2863 2864
2864 2865
@command(
    b'perf::revlogindex|perfrevlogindex',
    revlogopts + formatteropts,
    b'-c|-m|FILE',
)
def perfrevlogindex(ui, repo, file_=None, **opts):
    """Benchmark operations against a revlog index.

    This tests constructing a revlog instance, reading index data,
    parsing index data, and performing various operations related to
    index data.
    """

    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)

    opener = getattr(rl, 'opener')  # trick linter
    # compat with hg <= 5.8
    radix = getattr(rl, 'radix', None)
    indexfile = getattr(rl, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(rl, 'indexfile')
    data = opener.read(indexfile)

    # the low 16 bits of the header word are the revlog version, the
    # next bit flags inline data (v1 layout)
    header = struct.unpack(b'>I', data[0:4])[0]
    version = header & 0xFFFF
    if version == 1:
        inline = header & (1 << 16)
    else:
        raise error.Abort(b'unsupported revlog version: %d' % version)

    parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
    if parse_index_v1 is None:
        # hg version predating the module-level parse function
        parse_index_v1 = mercurial.revlog.revlogio().parseindex

    rllen = len(rl)

    # sample nodes at fixed positions across the revlog for lookup benches
    node0 = rl.node(0)
    node25 = rl.node(rllen // 4)
    node50 = rl.node(rllen // 2)
    node75 = rl.node(rllen // 4 * 3)
    node100 = rl.node(rllen - 1)

    allrevs = range(rllen)
    allrevsrev = list(reversed(allrevs))
    allnodes = [rl.node(rev) for rev in range(rllen)]
    allnodesrev = list(reversed(allnodes))

    def constructor():
        if radix is not None:
            revlog(opener, radix=radix)
        else:
            # hg <= 5.8
            revlog(opener, indexfile=indexfile)

    def read():
        with opener(indexfile) as fh:
            fh.read()

    def parseindex():
        parse_index_v1(data, inline)

    def getentry(revornode):
        index = parse_index_v1(data, inline)[0]
        index[revornode]

    def getentries(revs, count=1):
        index = parse_index_v1(data, inline)[0]

        for i in range(count):
            for rev in revs:
                index[rev]

    def resolvenode(node):
        index = parse_index_v1(data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        try:
            rev(node)
        except error.RevlogError:
            pass

    def resolvenodes(nodes, count=1):
        index = parse_index_v1(data, inline)[0]
        rev = getattr(index, 'rev', None)
        if rev is None:
            nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
            # This only works for the C code.
            if nodemap is None:
                return
            rev = nodemap.__getitem__

        for i in range(count):
            for node in nodes:
                try:
                    rev(node)
                except error.RevlogError:
                    pass

    benches = [
        (constructor, b'revlog constructor'),
        (read, b'read'),
        (parseindex, b'create index object'),
        (lambda: getentry(0), b'retrieve index entry for rev 0'),
        (lambda: resolvenode(b'a' * 20), b'look up missing node'),
        (lambda: resolvenode(node0), b'look up node at rev 0'),
        (lambda: resolvenode(node25), b'look up node at 1/4 len'),
        (lambda: resolvenode(node50), b'look up node at 1/2 len'),
        (lambda: resolvenode(node75), b'look up node at 3/4 len'),
        (lambda: resolvenode(node100), b'look up node at tip'),
        # 2x variation is to measure caching impact.
        (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
        (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
        (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
        (
            lambda: resolvenodes(allnodesrev, 2),
            b'look up all nodes 2x (reverse)',
        ),
        (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
        (
            lambda: getentries(allrevs, 2),
            b'retrieve all index entries 2x (forward)',
        ),
        (
            lambda: getentries(allrevsrev),
            b'retrieve all index entries (reverse)',
        ),
        (
            lambda: getentries(allrevsrev, 2),
            b'retrieve all index entries 2x (reverse)',
        ),
    ]

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3010 3011
3011 3012
@command(
    b'perf::revlogrevisions|perfrevlogrevisions',
    revlogopts
    + formatteropts
    + [
        (b'd', b'dist', 100, b'distance between the revisions'),
        (b's', b'startrev', 0, b'revision to start reading at'),
        (b'', b'reverse', False, b'read in reverse'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogrevisions(
    ui, repo, file_=None, startrev=0, reverse=False, **opts
):
    """Benchmark reading a series of revisions from a revlog.

    By default, we read every ``-d/--dist`` revision from 0 to tip of
    the specified revlog.

    The start revision can be defined via ``-s/--startrev``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
    rllen = getlen(ui)(rl)

    # a negative start revision counts from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev

    def bench():
        rl.clearcaches()

        step = opts[b'dist']
        if reverse:
            first, last = rllen - 1, startrev - 1
            step = -step
        else:
            first, last = startrev, rllen

        for cur in _xrange(first, last, step):
            # Old revisions don't support passing int.
            rl.revision(rl.node(cur))

    timer, fm = gettimer(ui, opts)
    timer(bench)
    fm.end()
3060 3061
3061 3062
@command(
    b'perf::revlogwrite|perfrevlogwrite',
    revlogopts
    + formatteropts
    + [
        (b's', b'startrev', 1000, b'revision to start writing at'),
        (b'', b'stoprev', -1, b'last revision to write'),
        (b'', b'count', 3, b'number of passes to perform'),
        (b'', b'details', False, b'print timing for every revisions tested'),
        (b'', b'source', b'full', b'the kind of data feed in the revlog'),
        (b'', b'lazydeltabase', True, b'try the provided delta first'),
        (b'', b'clear-caches', True, b'clear revlog cache between calls'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
    """Benchmark writing a series of revisions to a revlog.

    Possible source values are:
    * `full`: add from a full text (default).
    * `parent-1`: add from a delta to the first parent
    * `parent-2`: add from a delta to the second parent if it exists
                  (use a delta from the first parent otherwise)
    * `parent-smallest`: add from the smallest delta (either p1 or p2)
    * `storage`: add from the existing precomputed deltas

    Note: This performance command measures performance in a custom way. As a
    result some of the global configuration of the 'perf' command does not
    apply to it:

    * ``pre-run``: disabled

    * ``profile-benchmark``: disabled

    * ``run-limits``: disabled use --count instead
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
    rllen = getlen(ui)(rl)
    # negative boundaries count from the end of the revlog
    if startrev < 0:
        startrev = rllen + startrev
    if stoprev < 0:
        stoprev = rllen + stoprev

    lazydeltabase = opts['lazydeltabase']
    source = opts['source']
    clearcaches = opts['clear_caches']
    validsource = (
        b'full',
        b'parent-1',
        b'parent-2',
        b'parent-smallest',
        b'storage',
    )
    if source not in validsource:
        raise error.Abort('invalid source type: %s' % source)

    ### actually gather results
    count = opts['count']
    if count <= 0:
        raise error.Abort('invalid run count: %d' % count)
    allresults = []
    for c in range(count):
        timing = _timeonewrite(
            ui,
            rl,
            source,
            startrev,
            stoprev,
            c + 1,
            lazydeltabase=lazydeltabase,
            clearcaches=clearcaches,
        )
        allresults.append(timing)

    ### consolidate the results in a single list
    # allresults is a list of per-pass [(rev, timing), ...] lists; merge
    # them into [(rev, [timing-pass-1, timing-pass-2, ...]), ...]
    results = []
    for idx, (rev, t) in enumerate(allresults[0]):
        ts = [t]
        for other in allresults[1:]:
            orev, ot = other[idx]
            assert orev == rev
            ts.append(ot)
        results.append((rev, ts))
    resultcount = len(results)

    ### Compute and display relevant statistics

    # get a formatter
    fm = ui.formatter(b'perf', opts)
    displayall = ui.configbool(b"perf", b"all-timing", False)

    # print individual details if requested
    if opts['details']:
        for idx, item in enumerate(results, 1):
            rev, data = item
            title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
            formatone(fm, data, title=title, displayall=displayall)

    # sorts results by median time
    results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
    # list of (name, index) to display)
    relevants = [
        ("min", 0),
        ("10%", resultcount * 10 // 100),
        ("25%", resultcount * 25 // 100),
        # fixed: the "50%" row previously used `resultcount * 70 // 100`,
        # i.e. it reported the 70th percentile under the "50%" label
        ("50%", resultcount * 50 // 100),
        ("75%", resultcount * 75 // 100),
        ("90%", resultcount * 90 // 100),
        ("95%", resultcount * 95 // 100),
        ("99%", resultcount * 99 // 100),
        ("99.9%", resultcount * 999 // 1000),
        ("99.99%", resultcount * 9999 // 10000),
        ("99.999%", resultcount * 99999 // 100000),
        ("max", -1),
    ]
    if not ui.quiet:
        for name, idx in relevants:
            data = results[idx]
            title = '%s of %d, rev %d' % (name, resultcount, data[0])
            formatone(fm, data[1], title=title, displayall=displayall)

    # XXX summing that many float will not be very precise, we ignore this fact
    # for now
    totaltime = []
    for item in allresults:
        totaltime.append(
            (
                sum(x[1][0] for x in item),
                sum(x[1][1] for x in item),
                sum(x[1][2] for x in item),
            )
        )
    formatone(
        fm,
        totaltime,
        title="total time (%d revs)" % resultcount,
        displayall=displayall,
    )
    fm.end()
3203 3204
3204 3205
class _faketr:
    """Transaction stand-in whose journal hook is a no-op.

    ``_timeonewrite`` passes an instance of this to ``addrawrevision`` so
    that benchmarked writes do not record any journal entry.
    """

    def add(s, x, y, z=None):
        """Accept a journal entry and silently drop it."""
3208 3209
3209 3210
def _timeonewrite(
    ui,
    orig,
    source,
    startrev,
    stoprev,
    runidx=None,
    lazydeltabase=True,
    clearcaches=True,
):
    """Replay revisions ``startrev``..``stoprev`` of ``orig`` into a
    temporary revlog copy, timing each ``addrawrevision`` call.

    ``source`` selects how each revision is fed in (see
    ``_getrevisionseed``).  ``runidx``, when given, is only used to label
    the progress topic.  Returns a list of ``(rev, timing)`` pairs, where
    ``timing`` is the 3-element tuple produced by ``timeone``.
    """
    timings = []
    # fake transaction: nothing is journaled while benchmarking
    tr = _faketr()
    with _temprevlog(ui, orig, startrev) as dest:
        dest._lazydeltabase = lazydeltabase
        revs = list(orig.revs(startrev, stoprev))
        total = len(revs)
        topic = 'adding'
        if runidx is not None:
            topic += ' (run #%d)' % runidx
        # Support both old and new progress API
        if util.safehasattr(ui, 'makeprogress'):
            progress = ui.makeprogress(topic, unit='revs', total=total)

            def updateprogress(pos):
                progress.update(pos)

            def completeprogress():
                progress.complete()

        else:

            def updateprogress(pos):
                ui.progress(topic, pos, unit='revs', total=total)

            def completeprogress():
                ui.progress(topic, None, unit='revs', total=total)

        for idx, rev in enumerate(revs):
            updateprogress(idx)
            addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
            if clearcaches:
                # drop both the index-level and revlog-level caches so every
                # revision pays the full cost of its write
                dest.index.clearcaches()
                dest.clearcaches()
            # only the addrawrevision call itself is timed; seed preparation
            # above is deliberately excluded
            with timeone() as r:
                dest.addrawrevision(*addargs, **addkwargs)
            timings.append((rev, r[0]))
        updateprogress(total)
        completeprogress()
    return timings
3259 3260
3260 3261
def _getrevisionseed(orig, rev, tr, source):
    """Build the ``(args, kwargs)`` pair for replaying ``rev`` of ``orig``
    through ``addrawrevision``.

    ``source`` selects how the revision content is provided: as a full
    text (``full``) or as a cached delta against one of several possible
    bases (``parent-1``, ``parent-2``, ``parent-smallest``, ``storage``).
    """
    from mercurial.node import nullid

    node = orig.node(rev)
    parent1, parent2 = orig.parents(node)
    fulltext = None
    cachedelta = None

    if source == b'full':
        fulltext = orig.revision(rev)
    elif source == b'parent-1':
        cachedelta = (orig.rev(parent1), orig.revdiff(parent1, rev))
    elif source == b'parent-2':
        # fall back to the first parent when there is no second one
        base = parent1 if parent2 == nullid else parent2
        cachedelta = (orig.rev(base), orig.revdiff(base, rev))
    elif source == b'parent-smallest':
        # pick whichever parent yields the shorter delta
        base, diff = parent1, orig.revdiff(parent1, rev)
        if parent2 != nullid:
            otherdiff = orig.revdiff(parent2, rev)
            if len(diff) > len(otherdiff):
                base, diff = parent2, otherdiff
        cachedelta = (orig.rev(base), diff)
    elif source == b'storage':
        # reuse the delta exactly as stored in the source revlog
        storedbase = orig.deltaparent(rev)
        cachedelta = (storedbase, orig.revdiff(orig.node(storedbase), rev))

    return (
        (fulltext, tr, orig.linkrev(rev), parent1, parent2),
        {'node': node, 'flags': orig.flags(rev), 'cachedelta': cachedelta},
    )
3301 3302
3302 3303
@contextlib.contextmanager
def _temprevlog(ui, orig, truncaterev):
    """Yield a writable scratch copy of ``orig`` truncated at ``truncaterev``.

    The index and data files are copied into a temporary directory,
    truncated so that revisions >= ``truncaterev`` are absent, and opened
    as a fresh revlog ready to receive those revisions again.  The
    directory is removed on exit.  Inline revlogs are rejected.
    """
    from mercurial import vfs as vfsmod

    if orig._inline:
        raise error.Abort('not supporting inline revlog (yet)')
    revlogkwargs = {}
    k = 'upperboundcomp'
    if util.safehasattr(orig, k):
        revlogkwargs[k] = getattr(orig, k)

    indexfile = getattr(orig, '_indexfile', None)
    if indexfile is None:
        # compatibility with <= hg-5.8
        indexfile = getattr(orig, 'indexfile')
    origindexpath = orig.opener.join(indexfile)

    # modern name first, pre-5.9 attribute name as fallback
    datafile = getattr(orig, '_datafile', getattr(orig, 'datafile'))
    origdatapath = orig.opener.join(datafile)
    radix = b'revlog'
    indexname = b'revlog.i'
    dataname = b'revlog.d'

    tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
    try:
        # copy the data file in a temporary directory
        ui.debug('copying data in %s\n' % tmpdir)
        destindexpath = os.path.join(tmpdir, 'revlog.i')
        destdatapath = os.path.join(tmpdir, 'revlog.d')
        shutil.copyfile(origindexpath, destindexpath)
        shutil.copyfile(origdatapath, destdatapath)

        # remove the data we want to add again
        ui.debug('truncating data to be rewritten\n')
        with open(destindexpath, 'ab') as index:
            index.seek(0)
            # each index entry has a fixed size, so the cut point is a
            # simple multiplication
            index.truncate(truncaterev * orig._io.size)
        with open(destdatapath, 'ab') as data:
            data.seek(0)
            data.truncate(orig.start(truncaterev))

        # instantiate a new revlog from the temporary copy
        ui.debug('truncating adding to be rewritten\n')
        vfs = vfsmod.vfs(tmpdir)
        vfs.options = getattr(orig.opener, 'options', None)

        try:
            dest = revlog(vfs, radix=radix, **revlogkwargs)
        except TypeError:
            # hg <= 5.8 constructor signature
            dest = revlog(
                vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
            )
        if dest._inline:
            raise error.Abort('not supporting inline revlog (yet)')
        # make sure internals are initialized
        dest.revision(len(dest) - 1)
        yield dest
        del dest, vfs
    finally:
        shutil.rmtree(tmpdir, True)
3363 3364
3364 3365
@command(
    b'perf::revlogchunks|perfrevlogchunks',
    revlogopts
    + formatteropts
    + [
        (b'e', b'engines', b'', b'compression engines to use'),
        (b's', b'startrev', 0, b'revision to start at'),
    ],
    b'-c|-m|FILE',
)
def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
    """Benchmark operations on revlog chunks.

    Logically, each revlog is a collection of fulltext revisions. However,
    stored within each revlog are "chunks" of possibly compressed data. This
    data needs to be read and decompressed or compressed and written.

    This command measures the time it takes to read+decompress and recompress
    chunks in a revlog. It effectively isolates I/O and compression performance.
    For measurements of higher-level operations like resolving revisions,
    see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
    """
    opts = _byteskwargs(opts)

    rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = rl._getsegmentforrevs
    except AttributeError:
        segmentforrevs = rl._chunkraw

    # Verify engines argument.
    if engines:
        engines = {e.strip() for e in engines.split(b',')}
        for engine in engines:
            try:
                util.compressionengines[engine]
            except KeyError:
                raise error.Abort(b'unknown compression engine: %s' % engine)
    else:
        # autodetect every engine whose compressor actually works
        engines = []
        for e in util.compengines:
            engine = util.compengines[e]
            try:
                if engine.available():
                    engine.revlogcompressor().compress(b'dummy')
                    engines.append(e)
            except NotImplementedError:
                pass

    revs = list(rl.revs(startrev, len(rl) - 1))

    def rlfh(rl):
        """Open a raw file handle on the revlog's storage file."""
        if rl._inline:
            indexfile = getattr(rl, '_indexfile', None)
            if indexfile is None:
                # compatibility with <= hg-5.8
                indexfile = getattr(rl, 'indexfile')
            return getsvfs(repo)(indexfile)
        else:
            # fixed: the first lookup previously used 'datafile' as well,
            # making the modern '_datafile' attribute unreachable; mirror
            # the `_temprevlog` pattern (modern name, then pre-5.9 fallback)
            datafile = getattr(rl, '_datafile', getattr(rl, 'datafile'))
            return getsvfs(repo)(datafile)

    def doread():
        rl.clearcaches()
        for rev in revs:
            segmentforrevs(rev, rev)

    def doreadcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            segmentforrevs(rev, rev, df=fh)

    def doreadbatch():
        rl.clearcaches()
        segmentforrevs(revs[0], revs[-1])

    def doreadbatchcachedfh():
        rl.clearcaches()
        fh = rlfh(rl)
        segmentforrevs(revs[0], revs[-1], df=fh)

    def dochunk():
        rl.clearcaches()
        fh = rlfh(rl)
        for rev in revs:
            rl._chunk(rev, df=fh)

    chunks = [None]

    def dochunkbatch():
        rl.clearcaches()
        fh = rlfh(rl)
        # Save chunks as a side-effect.
        chunks[0] = rl._chunks(revs, df=fh)

    def docompress(compressor):
        rl.clearcaches()

        # read the current engine before entering the try block so the
        # finally clause can never hit an unbound name
        oldcompressor = rl._compressor
        try:
            # Swap in the requested compression engine.
            rl._compressor = compressor
            for chunk in chunks[0]:
                rl.compress(chunk)
        finally:
            rl._compressor = oldcompressor

    benches = [
        (lambda: doread(), b'read'),
        (lambda: doreadcachedfh(), b'read w/ reused fd'),
        (lambda: doreadbatch(), b'read batch'),
        (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
        (lambda: dochunk(), b'chunk'),
        (lambda: dochunkbatch(), b'chunk batch'),
    ]

    for engine in sorted(engines):
        compressor = util.compengines[engine].revlogcompressor()
        benches.append(
            (
                functools.partial(docompress, compressor),
                b'compress w/ %s' % engine,
            )
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
3497 3498
3498 3499
@command(
    b'perf::revlogrevision|perfrevlogrevision',
    revlogopts
    + formatteropts
    + [(b'', b'cache', False, b'use caches instead of clearing')],
    b'-c|-m|FILE REV',
)
def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
    """Benchmark obtaining a revlog revision.

    Obtaining a revlog revision consists of roughly the following steps:

    1. Compute the delta chain
    2. Slice the delta chain if applicable
    3. Obtain the raw chunks for that delta chain
    4. Decompress each raw chunk
    5. Apply binary patches to obtain fulltext
    6. Verify hash of fulltext

    This command measures the time spent in each of these phases.
    """
    opts = _byteskwargs(opts)

    # with -c/-m the positional FILE argument is actually the revision
    if opts.get(b'changelog') or opts.get(b'manifest'):
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')

    r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)

    # _chunkraw was renamed to _getsegmentforrevs.
    try:
        segmentforrevs = r._getsegmentforrevs
    except AttributeError:
        segmentforrevs = r._chunkraw

    node = r.lookup(rev)
    rev = r.rev(node)

    def getrawchunks(data, chain):
        # cut each segment in ``data`` back into per-revision compressed
        # chunks, using the offsets recorded in the index
        start = r.start
        length = r.length
        inline = r._inline
        try:
            iosize = r.index.entry_size
        except AttributeError:
            # hg <= 5.8 index layout
            iosize = r._io.size
        buffer = util.buffer

        chunks = []
        ladd = chunks.append
        for idx, item in enumerate(chain):
            offset = start(item[0])
            bits = data[idx]
            for rev in item:
                chunkstart = start(rev)
                if inline:
                    # inline revlogs interleave index entries with data
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                ladd(buffer(bits, chunkstart - offset, chunklength))

        return chunks

    def dodeltachain(rev):
        if not cache:
            r.clearcaches()
        r._deltachain(rev)

    def doread(chain):
        if not cache:
            r.clearcaches()
        for item in slicedchain:
            segmentforrevs(item[0], item[-1])

    def doslice(r, chain, size):
        for s in slicechunk(r, chain, targetsize=size):
            pass

    def dorawchunks(data, chain):
        if not cache:
            r.clearcaches()
        getrawchunks(data, chain)

    def dodecompress(chunks):
        decomp = r.decompress
        for chunk in chunks:
            decomp(chunk)

    def dopatch(text, bins):
        if not cache:
            r.clearcaches()
        mdiff.patches(text, bins)

    def dohash(text):
        if not cache:
            r.clearcaches()
        r.checkhash(text, node, rev=rev)

    def dorevision():
        if not cache:
            r.clearcaches()
        r.revision(node)

    try:
        from mercurial.revlogutils.deltas import slicechunk
    except ImportError:
        # older versions kept the helper on the revlog module
        slicechunk = getattr(revlog, '_slicechunk', None)

    # precompute every intermediate product once, so each bench phase can
    # be timed in isolation with realistic inputs
    size = r.length(rev)
    chain = r._deltachain(rev)[0]
    if not getattr(r, '_withsparseread', False):
        slicedchain = (chain,)
    else:
        slicedchain = tuple(slicechunk(r, chain, targetsize=size))
    data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
    rawchunks = getrawchunks(data, slicedchain)
    bins = r._chunks(chain)
    text = bytes(bins[0])
    bins = bins[1:]
    text = mdiff.patches(text, bins)

    benches = [
        (lambda: dorevision(), b'full'),
        (lambda: dodeltachain(rev), b'deltachain'),
        (lambda: doread(chain), b'read'),
    ]

    if getattr(r, '_withsparseread', False):
        slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
        benches.append(slicing)

    benches.extend(
        [
            (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
            (lambda: dodecompress(rawchunks), b'decompress'),
            (lambda: dopatch(text, bins), b'patch'),
            (lambda: dohash(text), b'hash'),
        ]
    )

    timer, fm = gettimer(ui, opts)
    for fn, title in benches:
        timer(fn, title=title)
    fm.end()
3643 3644
3644 3645
@command(
    b'perf::revset|perfrevset',
    [
        (b'C', b'clear', False, b'clear volatile cache between each call.'),
        (b'', b'contexts', False, b'obtain changectx for each revision'),
    ]
    + formatteropts,
    b"REVSET",
)
def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
    """benchmark the execution time of a revset

    Use the --clear option if need to evaluate the impact of build volatile
    revisions set cache on the revset execution. Volatile cache hold filtered
    and obsolete related cache."""
    opts = _byteskwargs(opts)

    timer, fm = gettimer(ui, opts)

    def runrevset():
        # optionally drop the volatile (filtered/obsolete) sets so each run
        # pays the cost of rebuilding them
        if clear:
            repo.invalidatevolatilesets()
        # simply exhaust the iterator; with --contexts this also builds a
        # changectx object per revision
        if contexts:
            for _ctx in repo.set(expr):
                pass
        else:
            for _rev in repo.revs(expr):
                pass

    timer(runrevset)
    fm.end()
3676 3677
3677 3678
@command(
    b'perf::volatilesets|perfvolatilesets',
    [
        (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
    ]
    + formatteropts,
)
def perfvolatilesets(ui, repo, *names, **opts):
    """benchmark the computation of various volatile set

    Volatile set computes element related to filtering and obsolescence."""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    repo = repo.unfiltered()

    def makeobsbench(name):
        """build a benchmark computing the obsolescence set ``name``"""

        def bench():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            obsolete.getrevs(repo, name)

        return bench

    # positional arguments, when given, restrict which sets are measured
    for name in sorted(obsolete.cachefuncs):
        if names and name not in names:
            continue
        timer(makeobsbench(name), title=name)

    def makefilterbench(name):
        """build a benchmark computing the filtered-revision set ``name``"""

        def bench():
            repo.invalidatevolatilesets()
            if opts[b'clear_obsstore']:
                clearfilecache(repo, b'obsstore')
            repoview.filterrevs(repo, name)

        return bench

    for name in sorted(repoview.filtertable):
        if names and name not in names:
            continue
        timer(makefilterbench(name), title=name)
    fm.end()
3725 3726
3726 3727
@command(
    b'perf::branchmap|perfbranchmap',
    [
        (b'f', b'full', False, b'Includes build time of subset'),
        (
            b'',
            b'clear-revbranch',
            False,
            b'purge the revbranch cache between computation',
        ),
    ]
    + formatteropts,
)
def perfbranchmap(ui, repo, *filternames, **opts):
    """benchmark the update of a branchmap

    This benchmarks the full repo.branchmap() call with read and write disabled
    """
    opts = _byteskwargs(opts)
    full = opts.get(b"full", False)
    clear_revbranch = opts.get(b"clear_revbranch", False)
    timer, fm = gettimer(ui, opts)

    def getbranchmap(filtername):
        """generate a benchmark function for the filtername"""
        if filtername is None:
            view = repo
        else:
            view = repo.filtered(filtername)
        if util.safehasattr(view._branchcaches, '_per_filter'):
            filtered = view._branchcaches._per_filter
        else:
            # older versions
            filtered = view._branchcaches

        def d():
            if clear_revbranch:
                repo.revbranchcache()._clear()
            if full:
                # drop every cached branchmap so the full build is measured
                view._branchcaches.clear()
            else:
                # only drop this filter's entry; subsets stay warm
                filtered.pop(filtername, None)
            view.branchmap()

        return d

    # add filter in smaller subset to bigger subset
    possiblefilters = set(repoview.filtertable)
    if filternames:
        possiblefilters &= set(filternames)
    subsettable = getbranchmapsubsettable()
    allfilters = []
    # topologically order the filters so each one's subset is benchmarked
    # (and therefore warmed) before it
    while possiblefilters:
        for name in possiblefilters:
            subset = subsettable.get(name)
            if subset not in possiblefilters:
                break
        else:
            assert False, b'subset cycle %s!' % possiblefilters
        allfilters.append(name)
        possiblefilters.remove(name)

    # warm the cache
    if not full:
        for name in allfilters:
            repo.filtered(name).branchmap()
    if not filternames or b'unfiltered' in filternames:
        # add unfiltered
        allfilters.append(None)

    # disable on-disk branchmap reads/writes so only the in-memory update
    # is measured; restored in the finally block below
    if util.safehasattr(branchmap.branchcache, 'fromfile'):
        branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
        branchcacheread.set(classmethod(lambda *args: None))
    else:
        # older versions
        branchcacheread = safeattrsetter(branchmap, b'read')
        branchcacheread.set(lambda *args: None)
    branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
    branchcachewrite.set(lambda *args: None)
    try:
        for name in allfilters:
            printname = name
            if name is None:
                printname = b'unfiltered'
            timer(getbranchmap(name), title=printname)
    finally:
        branchcacheread.restore()
        branchcachewrite.restore()
    fm.end()
3816 3817
3817 3818
@command(
    b'perf::branchmapupdate|perfbranchmapupdate',
    [
        (b'', b'base', [], b'subset of revision to start from'),
        (b'', b'target', [], b'subset of revision to end with'),
        (b'', b'clear-caches', False, b'clear cache between each runs'),
    ]
    + formatteropts,
)
def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
    """benchmark branchmap update from for <base> revs to <target> revs

    If `--clear-caches` is passed, the following items will be reset before
    each update:
    * the changelog instance and associated indexes
    * the rev-branch-cache instance

    Examples:

    # update for the one last revision
    $ hg perfbranchmapupdate --base 'not tip' --target 'tip'

    # update for change coming with a new branch
    $ hg perfbranchmapupdate --base 'stable' --target 'default'
    """
    from mercurial import branchmap
    from mercurial import repoview

    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)
    clearcaches = opts[b'clear_caches']
    unfi = repo.unfiltered()
    x = [None]  # used to pass data between closures

    # we use a `list` here to avoid possible side effect from smartset
    baserevs = list(scmutil.revrange(repo, base))
    targetrevs = list(scmutil.revrange(repo, target))
    if not baserevs:
        raise error.Abort(b'no revisions selected for --base')
    if not targetrevs:
        raise error.Abort(b'no revisions selected for --target')

    # make sure the target branchmap also contains the one in the base
    targetrevs = list(set(baserevs) | set(targetrevs))
    targetrevs.sort()

    cl = repo.changelog
    allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
    allbaserevs.sort()
    alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))

    # the revisions the benchmarked update will have to process
    newrevs = list(alltargetrevs.difference(allbaserevs))
    newrevs.sort()

    allrevs = frozenset(unfi.changelog.revs())
    basefilterrevs = frozenset(allrevs.difference(allbaserevs))
    targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))

    # repoview filter functions exposing exactly the base/target subsets
    def basefilter(repo, visibilityexceptions=None):
        return basefilterrevs

    def targetfilter(repo, visibilityexceptions=None):
        return targetfilterrevs

    msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
    ui.status(msg % (len(allbaserevs), len(newrevs)))
    if targetfilterrevs:
        msg = b'(%d revisions still filtered)\n'
        ui.status(msg % len(targetfilterrevs))

    try:
        # temporarily register the two synthetic filters; removed in the
        # finally clause below
        repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
        repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter

        baserepo = repo.filtered(b'__perf_branchmap_update_base')
        targetrepo = repo.filtered(b'__perf_branchmap_update_target')

        # try to find an existing branchmap to reuse
        subsettable = getbranchmapsubsettable()
        candidatefilter = subsettable.get(None)
        while candidatefilter is not None:
            candidatebm = repo.filtered(candidatefilter).branchmap()
            if candidatebm.validfor(baserepo):
                filtered = repoview.filterrevs(repo, candidatefilter)
                missing = [r for r in allbaserevs if r in filtered]
                base = candidatebm.copy()
                base.update(baserepo, missing)
                break
            candidatefilter = subsettable.get(candidatefilter)
        else:
            # no suitable subset where found
            base = branchmap.branchcache()
            base.update(baserepo, allbaserevs)

        def setup():
            # start every run from a fresh copy of the base branchmap
            x[0] = base.copy()
            if clearcaches:
                unfi._revbranchcache = None
                clearchangelog(repo)

        def bench():
            x[0].update(targetrepo, newrevs)

        timer(bench, setup=setup)
        fm.end()
    finally:
        repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
        repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
3926 3927
3927 3928
@command(
    b'perf::branchmapload|perfbranchmapload',
    [
        (b'f', b'filter', b'', b'Specify repoview filter'),
        # fixed typo: "brachmap" -> "branchmap" in the option help text
        (b'', b'list', False, b'List branchmap filter caches'),
        (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
    ]
    + formatteropts,
)
def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
    """benchmark reading the branchmap

    With ``--list``, instead of benchmarking, print the on-disk branchmap
    cache files (``branch2*``) with their sizes and return.

    ``--filter`` selects which repoview filter's branchmap to read; with no
    filter the unfiltered repo is used.  If the requested filter has no
    cached branchmap, we walk up the filter subset chain looking for one,
    aborting when none is found.
    """
    opts = _byteskwargs(opts)
    clearrevlogs = opts[b'clear_revlogs']

    if list:
        # just enumerate the cache files; no timing is performed
        for name, kind, st in repo.cachevfs.readdir(stat=True):
            if name.startswith(b'branch2'):
                filtername = name.partition(b'-')[2] or b'unfiltered'
                ui.status(
                    b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
                )
        return
    if not filter:
        filter = None
    subsettable = getbranchmapsubsettable()
    if filter is None:
        repo = repo.unfiltered()
    else:
        repo = repoview.repoview(repo, filter)

    repo.branchmap()  # make sure we have a relevant, up to date branchmap

    try:
        fromfile = branchmap.branchcache.fromfile
    except AttributeError:
        # older versions expose the reader as branchmap.read
        fromfile = branchmap.read

    currentfilter = filter
    # try once without timer, the filter may not be cached; fall back along
    # the subset chain until a cached branchmap is found
    while fromfile(repo) is None:
        currentfilter = subsettable.get(currentfilter)
        if currentfilter is None:
            raise error.Abort(
                b'No branchmap cached for %s repo' % (filter or b'unfiltered')
            )
        repo = repo.filtered(currentfilter)
    timer, fm = gettimer(ui, opts)

    def setup():
        if clearrevlogs:
            clearchangelog(repo)

    def bench():
        fromfile(repo)

    timer(bench, setup=setup)
    fm.end()
3986 3987
3987 3988
@command(b'perf::loadmarkers|perfloadmarkers')
def perfloadmarkers(ui, repo):
    """benchmark the time to parse the on-disk markers for a repo

    Result is the number of markers in the repo."""
    timer, fm = gettimer(ui)
    svfs = getsvfs(repo)

    def loadmarkers():
        # parsing of the on-disk markers happens in the constructor;
        # the marker count is what the benchmark reports
        return len(obsolete.obsstore(repo, svfs))

    timer(loadmarkers)
    fm.end()
3997 3998
3998 3999
@command(
    b'perf::lrucachedict|perflrucachedict',
    formatteropts
    + [
        (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
        (b'', b'mincost', 0, b'smallest cost of items in cache'),
        (b'', b'maxcost', 100, b'maximum cost of items in cache'),
        (b'', b'size', 4, b'size of cache'),
        (b'', b'gets', 10000, b'number of key lookups'),
        (b'', b'sets', 10000, b'number of key sets'),
        (b'', b'mixed', 10000, b'number of mixed mode operations'),
        (
            b'',
            b'mixedgetfreq',
            50,
            b'frequency of get vs set ops in mixed mode',
        ),
    ],
    norepo=True,
)
def perflrucache(
    ui,
    mincost=0,
    maxcost=100,
    costlimit=0,
    size=4,
    gets=10000,
    sets=10000,
    mixed=10000,
    mixedgetfreq=50,
    **opts
):
    """benchmark ``util.lrucachedict`` operations

    Runs several micro-benchmarks against the LRU cache: construction,
    pure lookups, pure insertions, and a randomized mix of both.  When
    ``--costlimit`` is non-zero, the cost-aware insert/eviction code path
    is benchmarked instead of the plain one.
    """
    opts = _byteskwargs(opts)

    def doinit():
        # raw construction cost, measured over many instantiations
        for i in _xrange(10000):
            util.lrucachedict(size)

    costrange = list(range(mincost, maxcost + 1))

    # pre-generate the key population outside the timed sections
    values = []
    for i in _xrange(size):
        values.append(random.randint(0, _maxint))

    # Get mode fills the cache and tests raw lookup performance with no
    # eviction.
    getseq = []
    for i in _xrange(gets):
        getseq.append(random.choice(values))

    def dogets():
        d = util.lrucachedict(size)
        for v in values:
            d[v] = v
        for key in getseq:
            value = d[key]
            value  # silence pyflakes warning

    def dogetscost():
        # NOTE: ``costs`` is captured by closure and only assigned further
        # down; this is safe because the function runs after the assignment
        # (it is only ever called when ``costlimit`` is set).
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(values):
            d.insert(v, v, cost=costs[i])
        for key in getseq:
            try:
                value = d[key]
                value  # silence pyflakes warning
            except KeyError:
                pass

    # Set mode tests insertion speed with cache eviction.
    setseq = []
    costs = []
    for i in _xrange(sets):
        setseq.append(random.randint(0, _maxint))
        costs.append(random.choice(costrange))

    def doinserts():
        d = util.lrucachedict(size)
        for v in setseq:
            d.insert(v, v)

    def doinsertscost():
        d = util.lrucachedict(size, maxcost=costlimit)
        for i, v in enumerate(setseq):
            d.insert(v, v, cost=costs[i])

    def dosets():
        # like doinserts() but via __setitem__ instead of insert()
        d = util.lrucachedict(size)
        for v in setseq:
            d[v] = v

    # Mixed mode randomly performs gets and sets with eviction.
    mixedops = []
    for i in _xrange(mixed):
        r = random.randint(0, 100)
        if r < mixedgetfreq:
            op = 0  # 0 = get
        else:
            op = 1  # 1 = set

        # keys range over twice the cache size so both hits and misses occur
        mixedops.append(
            (op, random.randint(0, size * 2), random.choice(costrange))
        )

    def domixed():
        d = util.lrucachedict(size)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d[v] = v

    def domixedcost():
        d = util.lrucachedict(size, maxcost=costlimit)

        for op, v, cost in mixedops:
            if op == 0:
                try:
                    d[v]
                except KeyError:
                    pass
            else:
                d.insert(v, v, cost=cost)

    benches = [
        (doinit, b'init'),
    ]

    # cost-aware and plain variants are mutually exclusive benchmark sets
    if costlimit:
        benches.extend(
            [
                (dogetscost, b'gets w/ cost limit'),
                (doinsertscost, b'inserts w/ cost limit'),
                (domixedcost, b'mixed w/ cost limit'),
            ]
        )
    else:
        benches.extend(
            [
                (dogets, b'gets'),
                (doinserts, b'inserts'),
                (dosets, b'sets'),
                (domixed, b'mixed'),
            ]
        )

    for fn, title in benches:
        timer, fm = gettimer(ui, opts)
        timer(fn, title=title)
        fm.end()
4153 4154
4154 4155
@command(
    b'perf::write|perfwrite',
    formatteropts
    + [
        (b'', b'write-method', b'write', b'ui write method'),
        (b'', b'nlines', 100, b'number of lines'),
        (b'', b'nitems', 100, b'number of items (per line)'),
        (b'', b'item', b'x', b'item that is written'),
        (b'', b'batch-line', None, b'pass whole line to write method at once'),
        (b'', b'flush-line', None, b'flush after each line'),
    ],
)
def perfwrite(ui, repo, **opts):
    """microbenchmark ui.write (and others)

    Writes ``nlines`` lines of ``nitems`` copies of ``item`` through the
    selected ui write method, either one item at a time or (with
    ``--batch-line``) one full line per call, optionally flushing after
    each line.
    """
    opts = _byteskwargs(opts)

    write = getattr(ui, _sysstr(opts[b'write_method']))
    nlines = int(opts[b'nlines'])
    nitems = int(opts[b'nitems'])
    item = opts[b'item']
    batch_line = opts.get(b'batch_line')
    flush_line = opts.get(b'flush_line')

    if batch_line:
        # build the full line once, outside the timed section; `line` is
        # only referenced when batch_line is set
        line = item * nitems + b'\n'

    def benchmark():
        # bugfix: the original reused `i` for both the line and the item
        # loop, shadowing the outer index; use distinct throwaway names
        for _line in pycompat.xrange(nlines):
            if batch_line:
                write(line)
            else:
                for _item in pycompat.xrange(nitems):
                    write(item)
                write(b'\n')
            if flush_line:
                ui.flush()
        ui.flush()

    timer, fm = gettimer(ui, opts)
    timer(benchmark)
    fm.end()
4196 4197
4197 4198
def uisetup(ui):
    """extension setup hook: patch old Mercurials for portability

    On Mercurial versions that have ``cmdutil.openrevlog`` but not
    ``commands.debugrevlogopts``, wrap ``openrevlog`` so that the
    unsupported ``--dir`` option fails with a clear message instead of
    misbehaving.
    """
    if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
        commands, b'debugrevlogopts'
    ):
        # for "historical portability":
        # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
        # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
        # openrevlog() should cause failure, because it has been
        # available since 3.5 (or 49c583ca48c4).
        def openrevlog(orig, repo, cmd, file_, opts):
            # repos with tree manifests expose `dirlog`; without it,
            # --dir cannot work, so abort with a version hint
            if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
                raise error.Abort(
                    b"This version doesn't support --dir option",
                    hint=b"use 3.5 or later",
                )
            return orig(repo, cmd, file_, opts)

        extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
4216 4217
4217 4218
@command(
    b'perf::progress|perfprogress',
    formatteropts
    + [
        (b'', b'topic', b'topic', b'topic for progress messages'),
        (b'c', b'total', 1000000, b'total value we are progressing to'),
    ],
    norepo=True,
)
def perfprogress(ui, topic=None, total=None, **opts):
    """printing of progress bars"""
    opts = _byteskwargs(opts)
    timer, fm = gettimer(ui, opts)

    def doprogress():
        # drive one full progress bar from 0 to `total`, one step at a time
        with ui.makeprogress(topic, total=total) as progress:
            step = progress.increment
            for _ in _xrange(total):
                step()

    timer(doprogress)
    fm.end()
General Comments 0
You need to be logged in to leave comments. Login now