perf: introduce more cache invalidation option in perf::tags...
marmoute
r51831:f02b62b7 stable
@@ -1,4448 +1,4497 b''
1 1 # perf.py - performance test routines
2 2 '''helper extension to measure performance
3 3
4 4 Configurations
5 5 ==============
6 6
7 7 ``perf``
8 8 --------
9 9
10 10 ``all-timing``
11 11 When set, additional statistics will be reported for each benchmark: best,
12 12 worst, median, and average. If not set, only the best timing is reported
13 13 (default: off).
14 14
15 15 ``presleep``
16 16 number of seconds to wait before any group of runs (default: 1)
17 17
18 18 ``pre-run``
19 19 number of runs to perform before starting measurement.
20 20
21 21 ``profile-benchmark``
22 22 Enable profiling for the benchmarked section.
23 23 (Only the first iteration is profiled.)
24 24
25 25 ``run-limits``
26 26 Control the number of runs each benchmark will perform. The option value
27 27 should be a list of `<time>-<numberofrun>` pairs. After each run the
28 28 conditions are considered in order with the following logic:
29 29
30 30 If the benchmark has been running for at least <time> seconds and we have
31 31 performed at least <numberofrun> iterations, stop the benchmark.
32 32
33 33 The default value is: `3.0-100, 10.0-3`
34 34
35 35 ``stub``
36 36 When set, benchmarks will only be run once; useful for testing
37 37 (default: off)
38 38 '''
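As an illustration of the `run-limits` format described above, here is a
minimal sketch (not part of perf.py; the real parsing in gettimer() below
adds per-entry error reporting) of how `3.0-100, 10.0-3` decomposes into
(seconds, minimum-runs) stop conditions:

    def parse_run_limits(spec):
        # b'3.0-100, 10.0-3' -> [(3.0, 100), (10.0, 3)]
        limits = []
        for item in spec.split(b','):
            time_part, run_part = item.strip().split(b'-', 1)
            limits.append((float(time_part.decode()), int(run_part.decode())))
        return limits

    assert parse_run_limits(b'3.0-100, 10.0-3') == [(3.0, 100), (10.0, 3)]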
39 39
40 40 # "historical portability" policy of perf.py:
41 41 #
42 42 # We have to do:
43 43 # - make perf.py "loadable" with as wide Mercurial version as possible
44 44 # This doesn't mean that perf commands work correctly with that Mercurial.
45 45 # BTW, perf.py itself has been available since 1.1 (or eb240755386d).
46 46 # - make historical perf command work correctly with as wide Mercurial
47 47 # version as possible
48 48 #
49 49 # We have to do, if possible with reasonable cost:
50 50 # - make recent perf command for historical feature work correctly
51 51 # with early Mercurial
52 52 #
53 53 # We don't have to do:
54 54 # - make perf command for recent feature work correctly with early
55 55 # Mercurial
56 56
57 57 import contextlib
58 58 import functools
59 59 import gc
60 60 import os
61 61 import random
62 62 import shutil
63 63 import struct
64 64 import sys
65 65 import tempfile
66 66 import threading
67 67 import time
68 68
69 69 import mercurial.revlog
70 70 from mercurial import (
71 71 changegroup,
72 72 cmdutil,
73 73 commands,
74 74 copies,
75 75 error,
76 76 extensions,
77 77 hg,
78 78 mdiff,
79 79 merge,
80 80 util,
81 81 )
82 82
83 83 # for "historical portability":
84 84 # try to import modules separately (in dict order), and ignore
85 85 # failure, because these aren't available with early Mercurial
86 86 try:
87 87 from mercurial import branchmap # since 2.5 (or bcee63733aad)
88 88 except ImportError:
89 89 pass
90 90 try:
91 91 from mercurial import obsolete # since 2.3 (or ad0d6c2b3279)
92 92 except ImportError:
93 93 pass
94 94 try:
95 95 from mercurial import registrar # since 3.7 (or 37d50250b696)
96 96
97 97 dir(registrar) # forcibly load it
98 98 except ImportError:
99 99 registrar = None
100 100 try:
101 101 from mercurial import repoview # since 2.5 (or 3a6ddacb7198)
102 102 except ImportError:
103 103 pass
104 104 try:
105 105 from mercurial.utils import repoviewutil # since 5.0
106 106 except ImportError:
107 107 repoviewutil = None
108 108 try:
109 109 from mercurial import scmutil # since 1.9 (or 8b252e826c68)
110 110 except ImportError:
111 111 pass
112 112 try:
113 113 from mercurial import setdiscovery # since 1.9 (or cb98fed52495)
114 114 except ImportError:
115 115 pass
116 116
117 117 try:
118 118 from mercurial import profiling
119 119 except ImportError:
120 120 profiling = None
121 121
122 122 try:
123 123 from mercurial.revlogutils import constants as revlog_constants
124 124
125 125 perf_rl_kind = (revlog_constants.KIND_OTHER, b'created-by-perf')
126 126
127 127 def revlog(opener, *args, **kwargs):
128 128 return mercurial.revlog.revlog(opener, perf_rl_kind, *args, **kwargs)
129 129
130 130
131 131 except (ImportError, AttributeError):
132 132 perf_rl_kind = None
133 133
134 134 def revlog(opener, *args, **kwargs):
135 135 return mercurial.revlog.revlog(opener, *args, **kwargs)
136 136
137 137
138 138 def identity(a):
139 139 return a
140 140
141 141
142 142 try:
143 143 from mercurial import pycompat
144 144
145 145 getargspec = pycompat.getargspec # added to module after 4.5
146 146 _byteskwargs = pycompat.byteskwargs # since 4.1 (or fbc3f73dc802)
147 147 _sysstr = pycompat.sysstr # since 4.0 (or 2219f4f82ede)
148 148 _bytestr = pycompat.bytestr # since 4.2 (or b70407bd84d5)
149 149 _xrange = pycompat.xrange # since 4.8 (or 7eba8f83129b)
150 150 fsencode = pycompat.fsencode # since 3.9 (or f4a5e0e86a7e)
151 151 if pycompat.ispy3:
152 152 _maxint = sys.maxsize # per py3 docs for replacing maxint
153 153 else:
154 154 _maxint = sys.maxint
155 155 except (NameError, ImportError, AttributeError):
156 156 import inspect
157 157
158 158 getargspec = inspect.getargspec
159 159 _byteskwargs = identity
160 160 _bytestr = str
161 161 fsencode = identity # no py3 support
162 162 _maxint = sys.maxint # no py3 support
163 163 _sysstr = lambda x: x # no py3 support
164 164 _xrange = xrange
165 165
166 166 try:
167 167 # 4.7+
168 168 queue = pycompat.queue.Queue
169 169 except (NameError, AttributeError, ImportError):
170 170 # <4.7.
171 171 try:
172 172 queue = pycompat.queue
173 173 except (NameError, AttributeError, ImportError):
174 174 import Queue as queue
175 175
176 176 try:
177 177 from mercurial import logcmdutil
178 178
179 179 makelogtemplater = logcmdutil.maketemplater
180 180 except (AttributeError, ImportError):
181 181 try:
182 182 makelogtemplater = cmdutil.makelogtemplater
183 183 except (AttributeError, ImportError):
184 184 makelogtemplater = None
185 185
186 186 # for "historical portability":
187 187 # define util.safehasattr forcibly, because util.safehasattr has been
188 188 # available since 1.9.3 (or 94b200a11cf7)
189 189 _undefined = object()
190 190
191 191
192 192 def safehasattr(thing, attr):
193 193 return getattr(thing, _sysstr(attr), _undefined) is not _undefined
194 194
195 195
196 196 setattr(util, 'safehasattr', safehasattr)
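The attribute name is passed as bytes and converted with _sysstr(), so
callers can use b'...' literals on both Python 2 and 3; only a truly missing
attribute compares equal to the _undefined sentinel. A quick sketch using
the module-level `time` import from above:

    assert safehasattr(time, b'time')            # attribute present
    assert not safehasattr(time, b'no_such_fn')  # real miss hits the sentinel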
197 197
198 198 # for "historical portability":
199 199 # define util.timer forcibly, because util.timer has been available
200 200 # since ae5d60bb70c9
201 201 if safehasattr(time, 'perf_counter'):
202 202 util.timer = time.perf_counter
203 203 elif os.name == b'nt':
204 204 util.timer = time.clock
205 205 else:
206 206 util.timer = time.time
207 207
208 208 # for "historical portability":
209 209 # use locally defined empty option list, if formatteropts isn't
210 210 # available, because commands.formatteropts has been available since
211 211 # 3.2 (or 7a7eed5176a4), even though formatting itself has been
212 212 # available since 2.2 (or ae5f92e154d3)
213 213 formatteropts = getattr(
214 214 cmdutil, "formatteropts", getattr(commands, "formatteropts", [])
215 215 )
216 216
217 217 # for "historical portability":
218 218 # use locally defined option list, if debugrevlogopts isn't available,
219 219 # because commands.debugrevlogopts has been available since 3.7 (or
220 220 # 5606f7d0d063), even though cmdutil.openrevlog() has been available
221 221 # since 1.9 (or a79fea6b3e77).
222 222 revlogopts = getattr(
223 223 cmdutil,
224 224 "debugrevlogopts",
225 225 getattr(
226 226 commands,
227 227 "debugrevlogopts",
228 228 [
229 229 (b'c', b'changelog', False, b'open changelog'),
230 230 (b'm', b'manifest', False, b'open manifest'),
231 231 (b'', b'dir', False, b'open directory manifest'),
232 232 ],
233 233 ),
234 234 )
235 235
236 236 cmdtable = {}
237 237
238 238
239 239 # for "historical portability":
240 240 # define parsealiases locally, because cmdutil.parsealiases has been
241 241 # available since 1.5 (or 6252852b4332)
242 242 def parsealiases(cmd):
243 243 return cmd.split(b"|")
244 244
245 245
246 246 if safehasattr(registrar, 'command'):
247 247 command = registrar.command(cmdtable)
248 248 elif safehasattr(cmdutil, 'command'):
249 249 command = cmdutil.command(cmdtable)
250 250 if 'norepo' not in getargspec(command).args:
251 251 # for "historical portability":
252 252 # wrap original cmdutil.command, because "norepo" option has
253 253 # been available since 3.1 (or 75a96326cecb)
254 254 _command = command
255 255
256 256 def command(name, options=(), synopsis=None, norepo=False):
257 257 if norepo:
258 258 commands.norepo += b' %s' % b' '.join(parsealiases(name))
259 259 return _command(name, list(options), synopsis)
260 260
261 261
262 262 else:
263 263 # for "historical portability":
264 264 # define "@command" annotation locally, because cmdutil.command
265 265 # has been available since 1.9 (or 2daa5179e73f)
266 266 def command(name, options=(), synopsis=None, norepo=False):
267 267 def decorator(func):
268 268 if synopsis:
269 269 cmdtable[name] = func, list(options), synopsis
270 270 else:
271 271 cmdtable[name] = func, list(options)
272 272 if norepo:
273 273 commands.norepo += b' %s' % b' '.join(parsealiases(name))
274 274 return func
275 275
276 276 return decorator
277 277
278 278
279 279 try:
280 280 import mercurial.registrar
281 281 import mercurial.configitems
282 282
283 283 configtable = {}
284 284 configitem = mercurial.registrar.configitem(configtable)
285 285 configitem(
286 286 b'perf',
287 287 b'presleep',
288 288 default=mercurial.configitems.dynamicdefault,
289 289 experimental=True,
290 290 )
291 291 configitem(
292 292 b'perf',
293 293 b'stub',
294 294 default=mercurial.configitems.dynamicdefault,
295 295 experimental=True,
296 296 )
297 297 configitem(
298 298 b'perf',
299 299 b'parentscount',
300 300 default=mercurial.configitems.dynamicdefault,
301 301 experimental=True,
302 302 )
303 303 configitem(
304 304 b'perf',
305 305 b'all-timing',
306 306 default=mercurial.configitems.dynamicdefault,
307 307 experimental=True,
308 308 )
309 309 configitem(
310 310 b'perf',
311 311 b'pre-run',
312 312 default=mercurial.configitems.dynamicdefault,
313 313 )
314 314 configitem(
315 315 b'perf',
316 316 b'profile-benchmark',
317 317 default=mercurial.configitems.dynamicdefault,
318 318 )
319 319 configitem(
320 320 b'perf',
321 321 b'run-limits',
322 322 default=mercurial.configitems.dynamicdefault,
323 323 experimental=True,
324 324 )
325 325 except (ImportError, AttributeError):
326 326 pass
327 327 except TypeError:
328 328 # compatibility fix for a11fd395e83f
329 329 # hg version: 5.2
330 330 configitem(
331 331 b'perf',
332 332 b'presleep',
333 333 default=mercurial.configitems.dynamicdefault,
334 334 )
335 335 configitem(
336 336 b'perf',
337 337 b'stub',
338 338 default=mercurial.configitems.dynamicdefault,
339 339 )
340 340 configitem(
341 341 b'perf',
342 342 b'parentscount',
343 343 default=mercurial.configitems.dynamicdefault,
344 344 )
345 345 configitem(
346 346 b'perf',
347 347 b'all-timing',
348 348 default=mercurial.configitems.dynamicdefault,
349 349 )
350 350 configitem(
351 351 b'perf',
352 352 b'pre-run',
353 353 default=mercurial.configitems.dynamicdefault,
354 354 )
355 355 configitem(
356 356 b'perf',
357 357 b'profile-benchmark',
358 358 default=mercurial.configitems.dynamicdefault,
359 359 )
360 360 configitem(
361 361 b'perf',
362 362 b'run-limits',
363 363 default=mercurial.configitems.dynamicdefault,
364 364 )
365 365
366 366
367 367 def getlen(ui):
368 368 if ui.configbool(b"perf", b"stub", False):
369 369 return lambda x: 1
370 370 return len
371 371
372 372
373 373 class noop:
374 374 """dummy context manager"""
375 375
376 376 def __enter__(self):
377 377 pass
378 378
379 379 def __exit__(self, *args):
380 380 pass
381 381
382 382
383 383 NOOPCTX = noop()
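NOOPCTX lets call sites keep an unconditional `with` block whether or not a
real profiler is configured; _timer() below also swaps it in after the first
iteration, so only that iteration is profiled. A small sketch:

    profiler = NOOPCTX  # stands in when perf.profile-benchmark is off
    with profiler:
        pass  # the benchmarked call would run here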
384 384
385 385
386 386 def gettimer(ui, opts=None):
387 387 """return a timer function and formatter: (timer, formatter)
388 388
389 389 This function exists to gather the creation of the formatter in a single
390 390 place instead of duplicating it in all performance commands."""
391 391
392 392 # enforce an idle period before execution to counteract power management
393 393 # experimental config: perf.presleep
394 394 time.sleep(getint(ui, b"perf", b"presleep", 1))
395 395
396 396 if opts is None:
397 397 opts = {}
398 398 # redirect all to stderr unless buffer api is in use
399 399 if not ui._buffers:
400 400 ui = ui.copy()
401 401 uifout = safeattrsetter(ui, b'fout', ignoremissing=True)
402 402 if uifout:
403 403 # for "historical portability":
404 404 # ui.fout/ferr have been available since 1.9 (or 4e1ccd4c2b6d)
405 405 uifout.set(ui.ferr)
406 406
407 407 # get a formatter
408 408 uiformatter = getattr(ui, 'formatter', None)
409 409 if uiformatter:
410 410 fm = uiformatter(b'perf', opts)
411 411 else:
412 412 # for "historical portability":
413 413 # define formatter locally, because ui.formatter has been
414 414 # available since 2.2 (or ae5f92e154d3)
415 415 from mercurial import node
416 416
417 417 class defaultformatter:
418 418 """Minimized composition of baseformatter and plainformatter"""
419 419
420 420 def __init__(self, ui, topic, opts):
421 421 self._ui = ui
422 422 if ui.debugflag:
423 423 self.hexfunc = node.hex
424 424 else:
425 425 self.hexfunc = node.short
426 426
427 427 def __nonzero__(self):
428 428 return False
429 429
430 430 __bool__ = __nonzero__
431 431
432 432 def startitem(self):
433 433 pass
434 434
435 435 def data(self, **data):
436 436 pass
437 437
438 438 def write(self, fields, deftext, *fielddata, **opts):
439 439 self._ui.write(deftext % fielddata, **opts)
440 440
441 441 def condwrite(self, cond, fields, deftext, *fielddata, **opts):
442 442 if cond:
443 443 self._ui.write(deftext % fielddata, **opts)
444 444
445 445 def plain(self, text, **opts):
446 446 self._ui.write(text, **opts)
447 447
448 448 def end(self):
449 449 pass
450 450
451 451 fm = defaultformatter(ui, b'perf', opts)
452 452
453 453 # stub function, runs code only once instead of in a loop
454 454 # experimental config: perf.stub
455 455 if ui.configbool(b"perf", b"stub", False):
456 456 return functools.partial(stub_timer, fm), fm
457 457
458 458 # experimental config: perf.all-timing
459 459 displayall = ui.configbool(b"perf", b"all-timing", False)
460 460
461 461 # experimental config: perf.run-limits
462 462 limitspec = ui.configlist(b"perf", b"run-limits", [])
463 463 limits = []
464 464 for item in limitspec:
465 465 parts = item.split(b'-', 1)
466 466 if len(parts) < 2:
467 467 ui.warn((b'malformatted run limit entry, missing "-": %s\n' % item))
468 468 continue
469 469 try:
470 470 time_limit = float(_sysstr(parts[0]))
471 471 except ValueError as e:
472 472 ui.warn(
473 473 (
474 474 b'malformatted run limit entry, %s: %s\n'
475 475 % (_bytestr(e), item)
476 476 )
477 477 )
478 478 continue
479 479 try:
480 480 run_limit = int(_sysstr(parts[1]))
481 481 except ValueError as e:
482 482 ui.warn(
483 483 (
484 484 b'malformatted run limit entry, %s: %s\n'
485 485 % (_bytestr(e), item)
486 486 )
487 487 )
488 488 continue
489 489 limits.append((time_limit, run_limit))
490 490 if not limits:
491 491 limits = DEFAULTLIMITS
492 492
493 493 profiler = None
494 494 if profiling is not None:
495 495 if ui.configbool(b"perf", b"profile-benchmark", False):
496 496 profiler = profiling.profile(ui)
497 497
498 498 prerun = getint(ui, b"perf", b"pre-run", 0)
499 499 t = functools.partial(
500 500 _timer,
501 501 fm,
502 502 displayall=displayall,
503 503 limits=limits,
504 504 prerun=prerun,
505 505 profiler=profiler,
506 506 )
507 507 return t, fm
508 508
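Nearly every command below follows the same shape around this
(timer, formatter) pair; a minimal sketch, with an illustrative command name
and body (modeled on perfheads further down):

    @command(b'perf::example|perfexample', formatteropts)
    def perfexample(ui, repo, **opts):
        opts = _byteskwargs(opts)
        timer, fm = gettimer(ui, opts)

        def setup():
            pass  # invalidate caches here; re-run before every timed call

        def d():
            len(repo.changelog.headrevs())  # the operation being measured

        timer(d, setup=setup)
        fm.end()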
509 509
510 510 def stub_timer(fm, func, setup=None, title=None):
511 511 if setup is not None:
512 512 setup()
513 513 func()
514 514
515 515
516 516 @contextlib.contextmanager
517 517 def timeone():
518 518 r = []
519 519 ostart = os.times()
520 520 cstart = util.timer()
521 521 yield r
522 522 cstop = util.timer()
523 523 ostop = os.times()
524 524 a, b = ostart, ostop
525 525 r.append((cstop - cstart, b[0] - a[0], b[1] - a[1]))
526 526
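timeone() reports a single (wall, user, sys) triple through the yielded
list, pairing util.timer for wall time with os.times() deltas for CPU time.
Usage sketch:

    with timeone() as item:
        sum(range(100000))        # the work under measurement
    wall, user, system = item[0]  # seconds, as floats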
527 527
528 528 # list of stop conditions (elapsed time, minimal run count)
529 529 DEFAULTLIMITS = (
530 530 (3.0, 100),
531 531 (10.0, 3),
532 532 )
533 533
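_timer() treats each pair as "enough elapsed time AND enough runs": with the
defaults above, a fast benchmark keeps iterating until 3 seconds and 100 runs
have both accumulated, while a slow one may stop after 10 seconds and only
3 runs. An equivalent predicate, for illustration:

    def should_stop(elapsed, count, limits=DEFAULTLIMITS):
        return any(elapsed >= t and count >= c for t, c in limits)

    assert not should_stop(5.0, 50)  # past 3s but under 100 runs: keep going
    assert should_stop(10.5, 3)      # 10s elapsed and 3 runs done: stop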
534 534
535 535 @contextlib.contextmanager
536 536 def noop_context():
537 537 yield
538 538
539 539
540 540 def _timer(
541 541 fm,
542 542 func,
543 543 setup=None,
544 544 context=noop_context,
545 545 title=None,
546 546 displayall=False,
547 547 limits=DEFAULTLIMITS,
548 548 prerun=0,
549 549 profiler=None,
550 550 ):
551 551 gc.collect()
552 552 results = []
553 553 begin = util.timer()
554 554 count = 0
555 555 if profiler is None:
556 556 profiler = NOOPCTX
557 557 for i in range(prerun):
558 558 if setup is not None:
559 559 setup()
560 560 with context():
561 561 func()
562 562 keepgoing = True
563 563 while keepgoing:
564 564 if setup is not None:
565 565 setup()
566 566 with context():
567 567 with profiler:
568 568 with timeone() as item:
569 569 r = func()
570 570 profiler = NOOPCTX
571 571 count += 1
572 572 results.append(item[0])
573 573 cstop = util.timer()
574 574 # Look for a stop condition.
575 575 elapsed = cstop - begin
576 576 for t, mincount in limits:
577 577 if elapsed >= t and count >= mincount:
578 578 keepgoing = False
579 579 break
580 580
581 581 formatone(fm, results, title=title, result=r, displayall=displayall)
582 582
583 583
584 584 def formatone(fm, timings, title=None, result=None, displayall=False):
585 585 count = len(timings)
586 586
587 587 fm.startitem()
588 588
589 589 if title:
590 590 fm.write(b'title', b'! %s\n', title)
591 591 if result:
592 592 fm.write(b'result', b'! result: %s\n', result)
593 593
594 594 def display(role, entry):
595 595 prefix = b''
596 596 if role != b'best':
597 597 prefix = b'%s.' % role
598 598 fm.plain(b'!')
599 599 fm.write(prefix + b'wall', b' wall %f', entry[0])
600 600 fm.write(prefix + b'comb', b' comb %f', entry[1] + entry[2])
601 601 fm.write(prefix + b'user', b' user %f', entry[1])
602 602 fm.write(prefix + b'sys', b' sys %f', entry[2])
603 603 fm.write(prefix + b'count', b' (%s of %%d)' % role, count)
604 604 fm.plain(b'\n')
605 605
606 606 timings.sort()
607 607 min_val = timings[0]
608 608 display(b'best', min_val)
609 609 if displayall:
610 610 max_val = timings[-1]
611 611 display(b'max', max_val)
612 612 avg = tuple([sum(x) / count for x in zip(*timings)])
613 613 display(b'avg', avg)
614 614 median = timings[len(timings) // 2]
615 615 display(b'median', median)
616 616
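For reference, the default text report produced here looks roughly like the
following when all-timing is enabled (numbers invented for illustration;
comb is user + sys):

    ! wall 0.001000 comb 0.002000 user 0.001500 sys 0.000500 (best of 100)
    ! wall 0.003000 comb 0.004000 user 0.003000 sys 0.001000 (max of 100)
    ! wall 0.001500 comb 0.002500 user 0.002000 sys 0.000500 (avg of 100)
    ! wall 0.001200 comb 0.002200 user 0.001800 sys 0.000400 (median of 100)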
617 617
618 618 # utilities for historical portability
619 619
620 620
621 621 def getint(ui, section, name, default):
622 622 # for "historical portability":
623 623 # ui.configint has been available since 1.9 (or fa2b596db182)
624 624 v = ui.config(section, name, None)
625 625 if v is None:
626 626 return default
627 627 try:
628 628 return int(v)
629 629 except ValueError:
630 630 raise error.ConfigError(
631 631 b"%s.%s is not an integer ('%s')" % (section, name, v)
632 632 )
633 633
634 634
635 635 def safeattrsetter(obj, name, ignoremissing=False):
636 636 """Ensure that 'obj' has 'name' attribute before subsequent setattr
637 637
638 638 This function aborts if 'obj' doesn't have the 'name' attribute at
639 639 runtime. This avoids silently overlooking the future removal of an
640 640 attribute that the performance measurement relies on.
641 641
642 642 This function returns the object to (1) assign a new value, and
643 643 (2) restore an original value to the attribute.
644 644
645 645 If 'ignoremissing' is true, a missing 'name' attribute doesn't cause
646 646 an abort, and this function returns None. This is useful for
647 647 examining an attribute that isn't guaranteed to exist in all
648 648 Mercurial versions.
649 649 """
650 650 if not util.safehasattr(obj, name):
651 651 if ignoremissing:
652 652 return None
653 653 raise error.Abort(
654 654 (
655 655 b"missing attribute %s of %s might break assumption"
656 656 b" of performance measurement"
657 657 )
658 658 % (name, obj)
659 659 )
660 660
661 661 origvalue = getattr(obj, _sysstr(name))
662 662
663 663 class attrutil:
664 664 def set(self, newvalue):
665 665 setattr(obj, _sysstr(name), newvalue)
666 666
667 667 def restore(self):
668 668 setattr(obj, _sysstr(name), origvalue)
669 669
670 670 return attrutil()
671 671
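Usage sketch of the returned object's set/restore round-trip (this is how
gettimer() above temporarily redirects ui.fout); the demo class is
illustrative only:

    class _Demo:
        attr = 1

    setter = safeattrsetter(_Demo, b'attr')
    setter.set(2)
    assert _Demo.attr == 2
    setter.restore()
    assert _Demo.attr == 1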
672 672
673 673 # utilities to examine each internal API changes
674 674
675 675
676 676 def getbranchmapsubsettable():
677 677 # for "historical portability":
678 678 # subsettable is defined in:
679 679 # - branchmap since 2.9 (or 175c6fd8cacc)
680 680 # - repoview since 2.5 (or 59a9f18d4587)
681 681 # - repoviewutil since 5.0
682 682 for mod in (branchmap, repoview, repoviewutil):
683 683 subsettable = getattr(mod, 'subsettable', None)
684 684 if subsettable:
685 685 return subsettable
686 686
687 687 # bisecting in bcee63733aad::59a9f18d4587 can reach here (both
688 688 # branchmap and repoview modules exist, but subsettable attribute
689 689 # doesn't)
690 690 raise error.Abort(
691 691 b"perfbranchmap not available with this Mercurial",
692 692 hint=b"use 2.5 or later",
693 693 )
694 694
695 695
696 696 def getsvfs(repo):
697 697 """Return appropriate object to access files under .hg/store"""
698 698 # for "historical portability":
699 699 # repo.svfs has been available since 2.3 (or 7034365089bf)
700 700 svfs = getattr(repo, 'svfs', None)
701 701 if svfs:
702 702 return svfs
703 703 else:
704 704 return getattr(repo, 'sopener')
705 705
706 706
707 707 def getvfs(repo):
708 708 """Return appropriate object to access files under .hg"""
709 709 # for "historical portability":
710 710 # repo.vfs has been available since 2.3 (or 7034365089bf)
711 711 vfs = getattr(repo, 'vfs', None)
712 712 if vfs:
713 713 return vfs
714 714 else:
715 715 return getattr(repo, 'opener')
716 716
717 717
718 718 def repocleartagscachefunc(repo):
719 719 """Return the function to clear tags cache according to repo internal API"""
720 720 if util.safehasattr(repo, b'_tagscache'): # since 2.0 (or 9dca7653b525)
721 721 # in this case, setattr(repo, '_tagscache', None) or so isn't
722 722 # correct way to clear tags cache, because existing code paths
723 723 # expect _tagscache to be a structured object.
724 724 def clearcache():
725 725 # _tagscache has been filteredpropertycache since 2.5 (or
726 726 # 98c867ac1330), and delattr() can't work in such case
727 727 if '_tagscache' in vars(repo):
728 728 del repo.__dict__['_tagscache']
729 729
730 730 return clearcache
731 731
732 732 repotags = safeattrsetter(repo, b'_tags', ignoremissing=True)
733 733 if repotags: # since 1.4 (or 5614a628d173)
734 734 return lambda: repotags.set(None)
735 735
736 736 repotagscache = safeattrsetter(repo, b'tagscache', ignoremissing=True)
737 737 if repotagscache: # since 0.6 (or d7df759d0e97)
738 738 return lambda: repotagscache.set(None)
739 739
740 740 # Mercurial earlier than 0.6 (or d7df759d0e97) logically reaches
741 741 # this point, but it isn't so problematic, because:
742 742 # - repo.tags of such Mercurial isn't "callable", and repo.tags()
743 743 # in perftags() causes failure soon
744 744 # - perf.py itself has been available since 1.1 (or eb240755386d)
745 745 raise error.Abort(b"tags API of this hg command is unknown")
746 746
747 747
748 748 # utilities to clear cache
749 749
750 750
751 751 def clearfilecache(obj, attrname):
752 752 unfiltered = getattr(obj, 'unfiltered', None)
753 753 if unfiltered is not None:
754 754 obj = obj.unfiltered()
755 755 if attrname in vars(obj):
756 756 delattr(obj, attrname)
757 757 obj._filecache.pop(attrname, None)
758 758
759 759
760 760 def clearchangelog(repo):
761 761 if repo is not repo.unfiltered():
762 762 object.__setattr__(repo, '_clcachekey', None)
763 763 object.__setattr__(repo, '_clcache', None)
764 764 clearfilecache(repo.unfiltered(), 'changelog')
765 765
766 766
767 767 # perf commands
768 768
769 769
770 770 @command(b'perf::walk|perfwalk', formatteropts)
771 771 def perfwalk(ui, repo, *pats, **opts):
772 772 opts = _byteskwargs(opts)
773 773 timer, fm = gettimer(ui, opts)
774 774 m = scmutil.match(repo[None], pats, {})
775 775 timer(
776 776 lambda: len(
777 777 list(
778 778 repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
779 779 )
780 780 )
781 781 )
782 782 fm.end()
783 783
784 784
785 785 @command(b'perf::annotate|perfannotate', formatteropts)
786 786 def perfannotate(ui, repo, f, **opts):
787 787 opts = _byteskwargs(opts)
788 788 timer, fm = gettimer(ui, opts)
789 789 fc = repo[b'.'][f]
790 790 timer(lambda: len(fc.annotate(True)))
791 791 fm.end()
792 792
793 793
794 794 @command(
795 795 b'perf::status|perfstatus',
796 796 [
797 797 (b'u', b'unknown', False, b'ask status to look for unknown files'),
798 798 (b'', b'dirstate', False, b'benchmark the internal dirstate call'),
799 799 ]
800 800 + formatteropts,
801 801 )
802 802 def perfstatus(ui, repo, **opts):
803 803 """benchmark the performance of a single status call
804 804
805 805 The repository data are preserved between calls.
806 806
807 807 By default, only the status of tracked files is requested. If
808 808 `--unknown` is passed, "unknown" files are also requested.
809 809 """
810 810 opts = _byteskwargs(opts)
811 811 # m = match.always(repo.root, repo.getcwd())
812 812 # timer(lambda: sum(map(len, repo.dirstate.status(m, [], False, False,
813 813 # False))))
814 814 timer, fm = gettimer(ui, opts)
815 815 if opts[b'dirstate']:
816 816 dirstate = repo.dirstate
817 817 m = scmutil.matchall(repo)
818 818 unknown = opts[b'unknown']
819 819
820 820 def status_dirstate():
821 821 s = dirstate.status(
822 822 m, subrepos=[], ignored=False, clean=False, unknown=unknown
823 823 )
824 824 sum(map(bool, s))
825 825
826 826 if util.safehasattr(dirstate, 'running_status'):
827 827 with dirstate.running_status(repo):
828 828 timer(status_dirstate)
829 829 dirstate.invalidate()
830 830 else:
831 831 timer(status_dirstate)
832 832 else:
833 833 timer(lambda: sum(map(len, repo.status(unknown=opts[b'unknown']))))
834 834 fm.end()
835 835
836 836
837 837 @command(b'perf::addremove|perfaddremove', formatteropts)
838 838 def perfaddremove(ui, repo, **opts):
839 839 opts = _byteskwargs(opts)
840 840 timer, fm = gettimer(ui, opts)
841 841 try:
842 842 oldquiet = repo.ui.quiet
843 843 repo.ui.quiet = True
844 844 matcher = scmutil.match(repo[None])
845 845 opts[b'dry_run'] = True
846 846 if 'uipathfn' in getargspec(scmutil.addremove).args:
847 847 uipathfn = scmutil.getuipathfn(repo)
848 848 timer(lambda: scmutil.addremove(repo, matcher, b"", uipathfn, opts))
849 849 else:
850 850 timer(lambda: scmutil.addremove(repo, matcher, b"", opts))
851 851 finally:
852 852 repo.ui.quiet = oldquiet
853 853 fm.end()
854 854
855 855
856 856 def clearcaches(cl):
857 857 # behave somewhat consistently across internal API changes
858 858 if util.safehasattr(cl, b'clearcaches'):
859 859 cl.clearcaches()
860 860 elif util.safehasattr(cl, b'_nodecache'):
861 861 # <= hg-5.2
862 862 from mercurial.node import nullid, nullrev
863 863
864 864 cl._nodecache = {nullid: nullrev}
865 865 cl._nodepos = None
866 866
867 867
868 868 @command(b'perf::heads|perfheads', formatteropts)
869 869 def perfheads(ui, repo, **opts):
870 870 """benchmark the computation of a changelog heads"""
871 871 opts = _byteskwargs(opts)
872 872 timer, fm = gettimer(ui, opts)
873 873 cl = repo.changelog
874 874
875 875 def s():
876 876 clearcaches(cl)
877 877
878 878 def d():
879 879 len(cl.headrevs())
880 880
881 881 timer(d, setup=s)
882 882 fm.end()
883 883
884 884
885 def _default_clear_on_disk_tags_cache(repo):
886 from mercurial import tags
887
888 repo.cachevfs.tryunlink(tags._filename(repo))
889
890
891 def _default_clear_on_disk_tags_fnodes_cache(repo):
892 from mercurial import tags
893
894 repo.cachevfs.tryunlink(tags._fnodescachefile)
895
896
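These two fallbacks poke the cache files directly for Mercurial versions
whose tags module does not yet expose clear_cache_on_disk/clear_cache_fnodes
(resolved via getattr in perftags below). Assuming the current cache layout,
tags._filename(repo) names the repo's cache/tags2* file and
tags._fnodescachefile the hgtagsfnodes1 cache; vfs.tryunlink removes a file
without raising if it is already absent.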
885 897 @command(
886 898 b'perf::tags|perftags',
887 899 formatteropts
888 900 + [
889 901 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
902 (
903 b'',
904 b'clear-on-disk-cache',
905 False,
906 b'clear on disk tags cache (DESTRUCTIVE)',
907 ),
908 (
909 b'',
910 b'clear-fnode-cache',
911 False,
912 b'clear on disk file node cache (DESTRUCTIVE)',
913 ),
890 914 ],
891 915 )
892 916 def perftags(ui, repo, **opts):
917 """Benchmark tags retrieval in various situation
918
919 The option marked as (DESTRUCTIVE) will alter the on-disk cache, possibly
920 altering performance after the command was run. However, it does not
921 destroy any stored data.
922 """
923 from mercurial import tags
924
893 925 opts = _byteskwargs(opts)
894 926 timer, fm = gettimer(ui, opts)
895 927 repocleartagscache = repocleartagscachefunc(repo)
896 928 clearrevlogs = opts[b'clear_revlogs']
929 clear_disk = opts[b'clear_on_disk_cache']
930 clear_fnode = opts[b'clear_fnode_cache']
931
932 clear_disk_fn = getattr(
933 tags,
934 "clear_cache_on_disk",
935 _default_clear_on_disk_tags_cache,
936 )
937 clear_fnodes_fn = getattr(
938 tags,
939 "clear_cache_fnodes",
940 _default_clear_on_disk_tags_fnodes_cache,
941 )
897 942
898 943 def s():
899 944 if clearrevlogs:
900 945 clearchangelog(repo)
901 946 clearfilecache(repo.unfiltered(), 'manifest')
947 if clear_disk:
948 clear_disk_fn(repo)
949 if clear_fnode:
950 clear_fnodes_fn(repo)
902 951 repocleartagscache()
903 952
904 953 def t():
905 954 len(repo.tags())
906 955
907 956 timer(t, setup=s)
908 957 fm.end()
909 958
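With the flags added in this changeset, a fully cold-cache run can be
requested with, for example, `hg perf::tags --clear-on-disk-cache
--clear-fnode-cache` (optionally combined with --clear-revlogs). Both new
flags are marked DESTRUCTIVE because the next tags computation will have to
rebuild the removed on-disk caches.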
910 959
911 960 @command(b'perf::ancestors|perfancestors', formatteropts)
912 961 def perfancestors(ui, repo, **opts):
913 962 opts = _byteskwargs(opts)
914 963 timer, fm = gettimer(ui, opts)
915 964 heads = repo.changelog.headrevs()
916 965
917 966 def d():
918 967 for a in repo.changelog.ancestors(heads):
919 968 pass
920 969
921 970 timer(d)
922 971 fm.end()
923 972
924 973
925 974 @command(b'perf::ancestorset|perfancestorset', formatteropts)
926 975 def perfancestorset(ui, repo, revset, **opts):
927 976 opts = _byteskwargs(opts)
928 977 timer, fm = gettimer(ui, opts)
929 978 revs = repo.revs(revset)
930 979 heads = repo.changelog.headrevs()
931 980
932 981 def d():
933 982 s = repo.changelog.ancestors(heads)
934 983 for rev in revs:
935 984 rev in s
936 985
937 986 timer(d)
938 987 fm.end()
939 988
940 989
941 990 @command(
942 991 b'perf::delta-find',
943 992 revlogopts + formatteropts,
944 993 b'-c|-m|FILE REV',
945 994 )
946 995 def perf_delta_find(ui, repo, arg_1, arg_2=None, **opts):
947 996 """benchmark the process of finding a valid delta for a revlog revision
948 997
949 998 When a revlog receives a new revision (e.g. from a commit, or from an
950 999 incoming bundle), it searches for a suitable delta-base to produce a delta.
951 1000 This perf command measures how much time we spend in this process. It
952 1001 operates on an already stored revision.
953 1002
954 1003 See `hg help debug-delta-find` for another related command.
955 1004 """
956 1005 from mercurial import revlogutils
957 1006 import mercurial.revlogutils.deltas as deltautil
958 1007
959 1008 opts = _byteskwargs(opts)
960 1009 if arg_2 is None:
961 1010 file_ = None
962 1011 rev = arg_1
963 1012 else:
964 1013 file_ = arg_1
965 1014 rev = arg_2
966 1015
967 1016 repo = repo.unfiltered()
968 1017
969 1018 timer, fm = gettimer(ui, opts)
970 1019
971 1020 rev = int(rev)
972 1021
973 1022 revlog = cmdutil.openrevlog(repo, b'perf::delta-find', file_, opts)
974 1023
975 1024 deltacomputer = deltautil.deltacomputer(revlog)
976 1025
977 1026 node = revlog.node(rev)
978 1027 p1r, p2r = revlog.parentrevs(rev)
979 1028 p1 = revlog.node(p1r)
980 1029 p2 = revlog.node(p2r)
981 1030 full_text = revlog.revision(rev)
982 1031 textlen = len(full_text)
983 1032 cachedelta = None
984 1033 flags = revlog.flags(rev)
985 1034
986 1035 revinfo = revlogutils.revisioninfo(
987 1036 node,
988 1037 p1,
989 1038 p2,
990 1039 [full_text], # btext
991 1040 textlen,
992 1041 cachedelta,
993 1042 flags,
994 1043 )
995 1044
996 1045 # Note: we should probably purge the potential caches (like the full
997 1046 # manifest cache) between runs.
998 1047 def find_one():
999 1048 with revlog._datafp() as fh:
1000 1049 deltacomputer.finddeltainfo(revinfo, fh, target_rev=rev)
1001 1050
1002 1051 timer(find_one)
1003 1052 fm.end()
1004 1053
1005 1054
1006 1055 @command(b'perf::discovery|perfdiscovery', formatteropts, b'PATH')
1007 1056 def perfdiscovery(ui, repo, path, **opts):
1008 1057 """benchmark discovery between local repo and the peer at given path"""
1009 1058 repos = [repo, None]
1010 1059 timer, fm = gettimer(ui, opts)
1011 1060
1012 1061 try:
1013 1062 from mercurial.utils.urlutil import get_unique_pull_path_obj
1014 1063
1015 1064 path = get_unique_pull_path_obj(b'perfdiscovery', ui, path)
1016 1065 except ImportError:
1017 1066 try:
1018 1067 from mercurial.utils.urlutil import get_unique_pull_path
1019 1068
1020 1069 path = get_unique_pull_path(b'perfdiscovery', repo, ui, path)[0]
1021 1070 except ImportError:
1022 1071 path = ui.expandpath(path)
1023 1072
1024 1073 def s():
1025 1074 repos[1] = hg.peer(ui, opts, path)
1026 1075
1027 1076 def d():
1028 1077 setdiscovery.findcommonheads(ui, *repos)
1029 1078
1030 1079 timer(d, setup=s)
1031 1080 fm.end()
1032 1081
1033 1082
1034 1083 @command(
1035 1084 b'perf::bookmarks|perfbookmarks',
1036 1085 formatteropts
1037 1086 + [
1038 1087 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
1039 1088 ],
1040 1089 )
1041 1090 def perfbookmarks(ui, repo, **opts):
1042 1091 """benchmark parsing bookmarks from disk to memory"""
1043 1092 opts = _byteskwargs(opts)
1044 1093 timer, fm = gettimer(ui, opts)
1045 1094
1046 1095 clearrevlogs = opts[b'clear_revlogs']
1047 1096
1048 1097 def s():
1049 1098 if clearrevlogs:
1050 1099 clearchangelog(repo)
1051 1100 clearfilecache(repo, b'_bookmarks')
1052 1101
1053 1102 def d():
1054 1103 repo._bookmarks
1055 1104
1056 1105 timer(d, setup=s)
1057 1106 fm.end()
1058 1107
1059 1108
1060 1109 @command(
1061 1110 b'perf::bundle',
1062 1111 [
1063 1112 (
1064 1113 b'r',
1065 1114 b'rev',
1066 1115 [],
1067 1116 b'changesets to bundle',
1068 1117 b'REV',
1069 1118 ),
1070 1119 (
1071 1120 b't',
1072 1121 b'type',
1073 1122 b'none',
1074 1123 b'bundlespec to use (see `hg help bundlespec`)',
1075 1124 b'TYPE',
1076 1125 ),
1077 1126 ]
1078 1127 + formatteropts,
1079 1128 b'REVS',
1080 1129 )
1081 1130 def perfbundle(ui, repo, *revs, **opts):
1082 1131 """benchmark the creation of a bundle from a repository
1083 1132
1084 1133 For now, this only supports "none" compression.
1085 1134 """
1086 1135 try:
1087 1136 from mercurial import bundlecaches
1088 1137
1089 1138 parsebundlespec = bundlecaches.parsebundlespec
1090 1139 except ImportError:
1091 1140 from mercurial import exchange
1092 1141
1093 1142 parsebundlespec = exchange.parsebundlespec
1094 1143
1095 1144 from mercurial import discovery
1096 1145 from mercurial import bundle2
1097 1146
1098 1147 opts = _byteskwargs(opts)
1099 1148 timer, fm = gettimer(ui, opts)
1100 1149
1101 1150 cl = repo.changelog
1102 1151 revs = list(revs)
1103 1152 revs.extend(opts.get(b'rev', ()))
1104 1153 revs = scmutil.revrange(repo, revs)
1105 1154 if not revs:
1106 1155 raise error.Abort(b"not revision specified")
1107 1156 # make it a consistent set (ie: without topological gaps)
1108 1157 old_len = len(revs)
1109 1158 revs = list(repo.revs(b"%ld::%ld", revs, revs))
1110 1159 if old_len != len(revs):
1111 1160 new_count = len(revs) - old_len
1112 1161 msg = b"add %d new revisions to make it a consistent set\n"
1113 1162 ui.write_err(msg % new_count)
1114 1163
1115 1164 targets = [cl.node(r) for r in repo.revs(b"heads(::%ld)", revs)]
1116 1165 bases = [cl.node(r) for r in repo.revs(b"heads(::%ld - %ld)", revs, revs)]
1117 1166 outgoing = discovery.outgoing(repo, bases, targets)
1118 1167
1119 1168 bundle_spec = opts.get(b'type')
1120 1169
1121 1170 bundle_spec = parsebundlespec(repo, bundle_spec, strict=False)
1122 1171
1123 1172 cgversion = bundle_spec.params.get(b"cg.version")
1124 1173 if cgversion is None:
1125 1174 if bundle_spec.version == b'v1':
1126 1175 cgversion = b'01'
1127 1176 if bundle_spec.version == b'v2':
1128 1177 cgversion = b'02'
1129 1178 if cgversion not in changegroup.supportedoutgoingversions(repo):
1130 1179 err = b"repository does not support bundle version %s"
1131 1180 raise error.Abort(err % cgversion)
1132 1181
1133 1182 if cgversion == b'01': # bundle1
1134 1183 bversion = b'HG10' + bundle_spec.wirecompression
1135 1184 bcompression = None
1136 1185 elif cgversion in (b'02', b'03'):
1137 1186 bversion = b'HG20'
1138 1187 bcompression = bundle_spec.wirecompression
1139 1188 else:
1140 1189 err = b'perf::bundle: unexpected changegroup version %s'
1141 1190 raise error.ProgrammingError(err % cgversion)
1142 1191
1143 1192 if bcompression is None:
1144 1193 bcompression = b'UN'
1145 1194
1146 1195 if bcompression != b'UN':
1147 1196 err = b'perf::bundle: compression currently unsupported: %s'
1148 1197 raise error.ProgrammingError(err % bcompression)
1149 1198
1150 1199 def do_bundle():
1151 1200 bundle2.writenewbundle(
1152 1201 ui,
1153 1202 repo,
1154 1203 b'perf::bundle',
1155 1204 os.devnull,
1156 1205 bversion,
1157 1206 outgoing,
1158 1207 bundle_spec.params,
1159 1208 )
1160 1209
1161 1210 timer(do_bundle)
1162 1211 fm.end()
1163 1212
1164 1213
1165 1214 @command(b'perf::bundleread|perfbundleread', formatteropts, b'BUNDLE')
1166 1215 def perfbundleread(ui, repo, bundlepath, **opts):
1167 1216 """Benchmark reading of bundle files.
1168 1217
1169 1218 This command is meant to isolate the I/O part of bundle reading as
1170 1219 much as possible.
1171 1220 """
1172 1221 from mercurial import (
1173 1222 bundle2,
1174 1223 exchange,
1175 1224 streamclone,
1176 1225 )
1177 1226
1178 1227 opts = _byteskwargs(opts)
1179 1228
1180 1229 def makebench(fn):
1181 1230 def run():
1182 1231 with open(bundlepath, b'rb') as fh:
1183 1232 bundle = exchange.readbundle(ui, fh, bundlepath)
1184 1233 fn(bundle)
1185 1234
1186 1235 return run
1187 1236
1188 1237 def makereadnbytes(size):
1189 1238 def run():
1190 1239 with open(bundlepath, b'rb') as fh:
1191 1240 bundle = exchange.readbundle(ui, fh, bundlepath)
1192 1241 while bundle.read(size):
1193 1242 pass
1194 1243
1195 1244 return run
1196 1245
1197 1246 def makestdioread(size):
1198 1247 def run():
1199 1248 with open(bundlepath, b'rb') as fh:
1200 1249 while fh.read(size):
1201 1250 pass
1202 1251
1203 1252 return run
1204 1253
1205 1254 # bundle1
1206 1255
1207 1256 def deltaiter(bundle):
1208 1257 for delta in bundle.deltaiter():
1209 1258 pass
1210 1259
1211 1260 def iterchunks(bundle):
1212 1261 for chunk in bundle.getchunks():
1213 1262 pass
1214 1263
1215 1264 # bundle2
1216 1265
1217 1266 def forwardchunks(bundle):
1218 1267 for chunk in bundle._forwardchunks():
1219 1268 pass
1220 1269
1221 1270 def iterparts(bundle):
1222 1271 for part in bundle.iterparts():
1223 1272 pass
1224 1273
1225 1274 def iterpartsseekable(bundle):
1226 1275 for part in bundle.iterparts(seekable=True):
1227 1276 pass
1228 1277
1229 1278 def seek(bundle):
1230 1279 for part in bundle.iterparts(seekable=True):
1231 1280 part.seek(0, os.SEEK_END)
1232 1281
1233 1282 def makepartreadnbytes(size):
1234 1283 def run():
1235 1284 with open(bundlepath, b'rb') as fh:
1236 1285 bundle = exchange.readbundle(ui, fh, bundlepath)
1237 1286 for part in bundle.iterparts():
1238 1287 while part.read(size):
1239 1288 pass
1240 1289
1241 1290 return run
1242 1291
1243 1292 benches = [
1244 1293 (makestdioread(8192), b'read(8k)'),
1245 1294 (makestdioread(16384), b'read(16k)'),
1246 1295 (makestdioread(32768), b'read(32k)'),
1247 1296 (makestdioread(131072), b'read(128k)'),
1248 1297 ]
1249 1298
1250 1299 with open(bundlepath, b'rb') as fh:
1251 1300 bundle = exchange.readbundle(ui, fh, bundlepath)
1252 1301
1253 1302 if isinstance(bundle, changegroup.cg1unpacker):
1254 1303 benches.extend(
1255 1304 [
1256 1305 (makebench(deltaiter), b'cg1 deltaiter()'),
1257 1306 (makebench(iterchunks), b'cg1 getchunks()'),
1258 1307 (makereadnbytes(8192), b'cg1 read(8k)'),
1259 1308 (makereadnbytes(16384), b'cg1 read(16k)'),
1260 1309 (makereadnbytes(32768), b'cg1 read(32k)'),
1261 1310 (makereadnbytes(131072), b'cg1 read(128k)'),
1262 1311 ]
1263 1312 )
1264 1313 elif isinstance(bundle, bundle2.unbundle20):
1265 1314 benches.extend(
1266 1315 [
1267 1316 (makebench(forwardchunks), b'bundle2 forwardchunks()'),
1268 1317 (makebench(iterparts), b'bundle2 iterparts()'),
1269 1318 (
1270 1319 makebench(iterpartsseekable),
1271 1320 b'bundle2 iterparts() seekable',
1272 1321 ),
1273 1322 (makebench(seek), b'bundle2 part seek()'),
1274 1323 (makepartreadnbytes(8192), b'bundle2 part read(8k)'),
1275 1324 (makepartreadnbytes(16384), b'bundle2 part read(16k)'),
1276 1325 (makepartreadnbytes(32768), b'bundle2 part read(32k)'),
1277 1326 (makepartreadnbytes(131072), b'bundle2 part read(128k)'),
1278 1327 ]
1279 1328 )
1280 1329 elif isinstance(bundle, streamclone.streamcloneapplier):
1281 1330 raise error.Abort(b'stream clone bundles not supported')
1282 1331 else:
1283 1332 raise error.Abort(b'unhandled bundle type: %s' % type(bundle))
1284 1333
1285 1334 for fn, title in benches:
1286 1335 timer, fm = gettimer(ui, opts)
1287 1336 timer(fn, title=title)
1288 1337 fm.end()
1289 1338
1290 1339
1291 1340 @command(
1292 1341 b'perf::changegroupchangelog|perfchangegroupchangelog',
1293 1342 formatteropts
1294 1343 + [
1295 1344 (b'', b'cgversion', b'02', b'changegroup version'),
1296 1345 (b'r', b'rev', b'', b'revisions to add to changegroup'),
1297 1346 ],
1298 1347 )
1299 1348 def perfchangegroupchangelog(ui, repo, cgversion=b'02', rev=None, **opts):
1300 1349 """Benchmark producing a changelog group for a changegroup.
1301 1350
1302 1351 This measures the time spent processing the changelog during a
1303 1352 bundle operation. This occurs during `hg bundle` and on a server
1304 1353 processing a `getbundle` wire protocol request (handles clones
1305 1354 and pull requests).
1306 1355
1307 1356 By default, all revisions are added to the changegroup.
1308 1357 """
1309 1358 opts = _byteskwargs(opts)
1310 1359 cl = repo.changelog
1311 1360 nodes = [cl.lookup(r) for r in repo.revs(rev or b'all()')]
1312 1361 bundler = changegroup.getbundler(cgversion, repo)
1313 1362
1314 1363 def d():
1315 1364 state, chunks = bundler._generatechangelog(cl, nodes)
1316 1365 for chunk in chunks:
1317 1366 pass
1318 1367
1319 1368 timer, fm = gettimer(ui, opts)
1320 1369
1321 1370 # Terminal printing can interfere with timing. So disable it.
1322 1371 with ui.configoverride({(b'progress', b'disable'): True}):
1323 1372 timer(d)
1324 1373
1325 1374 fm.end()
1326 1375
1327 1376
1328 1377 @command(b'perf::dirs|perfdirs', formatteropts)
1329 1378 def perfdirs(ui, repo, **opts):
1330 1379 opts = _byteskwargs(opts)
1331 1380 timer, fm = gettimer(ui, opts)
1332 1381 dirstate = repo.dirstate
1333 1382 b'a' in dirstate
1334 1383
1335 1384 def d():
1336 1385 dirstate.hasdir(b'a')
1337 1386 try:
1338 1387 del dirstate._map._dirs
1339 1388 except AttributeError:
1340 1389 pass
1341 1390
1342 1391 timer(d)
1343 1392 fm.end()
1344 1393
1345 1394
1346 1395 @command(
1347 1396 b'perf::dirstate|perfdirstate',
1348 1397 [
1349 1398 (
1350 1399 b'',
1351 1400 b'iteration',
1352 1401 None,
1353 1402 b'benchmark a full iteration for the dirstate',
1354 1403 ),
1355 1404 (
1356 1405 b'',
1357 1406 b'contains',
1358 1407 None,
1359 1408 b'benchmark a large amount of `nf in dirstate` calls',
1360 1409 ),
1361 1410 ]
1362 1411 + formatteropts,
1363 1412 )
1364 1413 def perfdirstate(ui, repo, **opts):
1365 1414 """benchmap the time of various distate operations
1366 1415
1367 1416 By default benchmark the time necessary to load a dirstate from scratch.
1368 1417 The dirstate is loaded to the point were a "contains" request can be
1369 1418 answered.
1370 1419 """
1371 1420 opts = _byteskwargs(opts)
1372 1421 timer, fm = gettimer(ui, opts)
1373 1422 b"a" in repo.dirstate
1374 1423
1375 1424 if opts[b'iteration'] and opts[b'contains']:
1376 1425 msg = b'only specify one of --iteration or --contains'
1377 1426 raise error.Abort(msg)
1378 1427
1379 1428 if opts[b'iteration']:
1380 1429 setup = None
1381 1430 dirstate = repo.dirstate
1382 1431
1383 1432 def d():
1384 1433 for f in dirstate:
1385 1434 pass
1386 1435
1387 1436 elif opts[b'contains']:
1388 1437 setup = None
1389 1438 dirstate = repo.dirstate
1390 1439 allfiles = list(dirstate)
1391 1440 # also add file path that will be "missing" from the dirstate
1392 1441 allfiles.extend([f[::-1] for f in allfiles])
1393 1442
1394 1443 def d():
1395 1444 for f in allfiles:
1396 1445 f in dirstate
1397 1446
1398 1447 else:
1399 1448
1400 1449 def setup():
1401 1450 repo.dirstate.invalidate()
1402 1451
1403 1452 def d():
1404 1453 b"a" in repo.dirstate
1405 1454
1406 1455 timer(d, setup=setup)
1407 1456 fm.end()
1408 1457
1409 1458
1410 1459 @command(b'perf::dirstatedirs|perfdirstatedirs', formatteropts)
1411 1460 def perfdirstatedirs(ui, repo, **opts):
1412 1461 """benchmap a 'dirstate.hasdir' call from an empty `dirs` cache"""
1413 1462 opts = _byteskwargs(opts)
1414 1463 timer, fm = gettimer(ui, opts)
1415 1464 repo.dirstate.hasdir(b"a")
1416 1465
1417 1466 def setup():
1418 1467 try:
1419 1468 del repo.dirstate._map._dirs
1420 1469 except AttributeError:
1421 1470 pass
1422 1471
1423 1472 def d():
1424 1473 repo.dirstate.hasdir(b"a")
1425 1474
1426 1475 timer(d, setup=setup)
1427 1476 fm.end()
1428 1477
1429 1478
1430 1479 @command(b'perf::dirstatefoldmap|perfdirstatefoldmap', formatteropts)
1431 1480 def perfdirstatefoldmap(ui, repo, **opts):
1432 1481 """benchmap a `dirstate._map.filefoldmap.get()` request
1433 1482
1434 1483 The dirstate filefoldmap cache is dropped between every request.
1435 1484 """
1436 1485 opts = _byteskwargs(opts)
1437 1486 timer, fm = gettimer(ui, opts)
1438 1487 dirstate = repo.dirstate
1439 1488 dirstate._map.filefoldmap.get(b'a')
1440 1489
1441 1490 def setup():
1442 1491 del dirstate._map.filefoldmap
1443 1492
1444 1493 def d():
1445 1494 dirstate._map.filefoldmap.get(b'a')
1446 1495
1447 1496 timer(d, setup=setup)
1448 1497 fm.end()
1449 1498
1450 1499
1451 1500 @command(b'perf::dirfoldmap|perfdirfoldmap', formatteropts)
1452 1501 def perfdirfoldmap(ui, repo, **opts):
1453 1502 """benchmap a `dirstate._map.dirfoldmap.get()` request
1454 1503
1455 1504 The dirstate dirfoldmap cache is dropped between every request.
1456 1505 """
1457 1506 opts = _byteskwargs(opts)
1458 1507 timer, fm = gettimer(ui, opts)
1459 1508 dirstate = repo.dirstate
1460 1509 dirstate._map.dirfoldmap.get(b'a')
1461 1510
1462 1511 def setup():
1463 1512 del dirstate._map.dirfoldmap
1464 1513 try:
1465 1514 del dirstate._map._dirs
1466 1515 except AttributeError:
1467 1516 pass
1468 1517
1469 1518 def d():
1470 1519 dirstate._map.dirfoldmap.get(b'a')
1471 1520
1472 1521 timer(d, setup=setup)
1473 1522 fm.end()
1474 1523
1475 1524
1476 1525 @command(b'perf::dirstatewrite|perfdirstatewrite', formatteropts)
1477 1526 def perfdirstatewrite(ui, repo, **opts):
1478 1527 """benchmap the time it take to write a dirstate on disk"""
1479 1528 opts = _byteskwargs(opts)
1480 1529 timer, fm = gettimer(ui, opts)
1481 1530 ds = repo.dirstate
1482 1531 b"a" in ds
1483 1532
1484 1533 def setup():
1485 1534 ds._dirty = True
1486 1535
1487 1536 def d():
1488 1537 ds.write(repo.currenttransaction())
1489 1538
1490 1539 with repo.wlock():
1491 1540 timer(d, setup=setup)
1492 1541 fm.end()
1493 1542
1494 1543
1495 1544 def _getmergerevs(repo, opts):
1496 1545 """parse command argument to return rev involved in merge
1497 1546
1498 1547 input: options dictionnary with `rev`, `from` and `bse`
1499 1548 output: (localctx, otherctx, basectx)
1500 1549 """
1501 1550 if opts[b'from']:
1502 1551 fromrev = scmutil.revsingle(repo, opts[b'from'])
1503 1552 wctx = repo[fromrev]
1504 1553 else:
1505 1554 wctx = repo[None]
1506 1555 # we don't want working dir files to be stat'd in the benchmark, so
1507 1556 # prime that cache
1508 1557 wctx.dirty()
1509 1558 rctx = scmutil.revsingle(repo, opts[b'rev'], opts[b'rev'])
1510 1559 if opts[b'base']:
1511 1560 fromrev = scmutil.revsingle(repo, opts[b'base'])
1512 1561 ancestor = repo[fromrev]
1513 1562 else:
1514 1563 ancestor = wctx.ancestor(rctx)
1515 1564 return (wctx, rctx, ancestor)
1516 1565
1517 1566
1518 1567 @command(
1519 1568 b'perf::mergecalculate|perfmergecalculate',
1520 1569 [
1521 1570 (b'r', b'rev', b'.', b'rev to merge against'),
1522 1571 (b'', b'from', b'', b'rev to merge from'),
1523 1572 (b'', b'base', b'', b'the revision to use as base'),
1524 1573 ]
1525 1574 + formatteropts,
1526 1575 )
1527 1576 def perfmergecalculate(ui, repo, **opts):
1528 1577 opts = _byteskwargs(opts)
1529 1578 timer, fm = gettimer(ui, opts)
1530 1579
1531 1580 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1532 1581
1533 1582 def d():
1534 1583 # acceptremote is True because we don't want prompts in the middle of
1535 1584 # our benchmark
1536 1585 merge.calculateupdates(
1537 1586 repo,
1538 1587 wctx,
1539 1588 rctx,
1540 1589 [ancestor],
1541 1590 branchmerge=False,
1542 1591 force=False,
1543 1592 acceptremote=True,
1544 1593 followcopies=True,
1545 1594 )
1546 1595
1547 1596 timer(d)
1548 1597 fm.end()
1549 1598
1550 1599
1551 1600 @command(
1552 1601 b'perf::mergecopies|perfmergecopies',
1553 1602 [
1554 1603 (b'r', b'rev', b'.', b'rev to merge against'),
1555 1604 (b'', b'from', b'', b'rev to merge from'),
1556 1605 (b'', b'base', b'', b'the revision to use as base'),
1557 1606 ]
1558 1607 + formatteropts,
1559 1608 )
1560 1609 def perfmergecopies(ui, repo, **opts):
1561 1610 """measure runtime of `copies.mergecopies`"""
1562 1611 opts = _byteskwargs(opts)
1563 1612 timer, fm = gettimer(ui, opts)
1564 1613 wctx, rctx, ancestor = _getmergerevs(repo, opts)
1565 1614
1566 1615 def d():
1567 1616 # acceptremote is True because we don't want prompts in the middle of
1568 1617 # our benchmark
1569 1618 copies.mergecopies(repo, wctx, rctx, ancestor)
1570 1619
1571 1620 timer(d)
1572 1621 fm.end()
1573 1622
1574 1623
1575 1624 @command(b'perf::pathcopies|perfpathcopies', [], b"REV REV")
1576 1625 def perfpathcopies(ui, repo, rev1, rev2, **opts):
1577 1626 """benchmark the copy tracing logic"""
1578 1627 opts = _byteskwargs(opts)
1579 1628 timer, fm = gettimer(ui, opts)
1580 1629 ctx1 = scmutil.revsingle(repo, rev1, rev1)
1581 1630 ctx2 = scmutil.revsingle(repo, rev2, rev2)
1582 1631
1583 1632 def d():
1584 1633 copies.pathcopies(ctx1, ctx2)
1585 1634
1586 1635 timer(d)
1587 1636 fm.end()
1588 1637
1589 1638
1590 1639 @command(
1591 1640 b'perf::phases|perfphases',
1592 1641 [
1593 1642 (b'', b'full', False, b'include file reading time too'),
1594 1643 ],
1595 1644 b"",
1596 1645 )
1597 1646 def perfphases(ui, repo, **opts):
1598 1647 """benchmark phasesets computation"""
1599 1648 opts = _byteskwargs(opts)
1600 1649 timer, fm = gettimer(ui, opts)
1601 1650 _phases = repo._phasecache
1602 1651 full = opts.get(b'full')
1603 1652
1604 1653 def d():
1605 1654 phases = _phases
1606 1655 if full:
1607 1656 clearfilecache(repo, b'_phasecache')
1608 1657 phases = repo._phasecache
1609 1658 phases.invalidate()
1610 1659 phases.loadphaserevs(repo)
1611 1660
1612 1661 timer(d)
1613 1662 fm.end()
1614 1663
1615 1664
1616 1665 @command(b'perf::phasesremote|perfphasesremote', [], b"[DEST]")
1617 1666 def perfphasesremote(ui, repo, dest=None, **opts):
1618 1667 """benchmark time needed to analyse phases of the remote server"""
1619 1668 from mercurial.node import bin
1620 1669 from mercurial import (
1621 1670 exchange,
1622 1671 hg,
1623 1672 phases,
1624 1673 )
1625 1674
1626 1675 opts = _byteskwargs(opts)
1627 1676 timer, fm = gettimer(ui, opts)
1628 1677
1629 1678 path = ui.getpath(dest, default=(b'default-push', b'default'))
1630 1679 if not path:
1631 1680 raise error.Abort(
1632 1681 b'default repository not configured!',
1633 1682 hint=b"see 'hg help config.paths'",
1634 1683 )
1635 1684 if util.safehasattr(path, 'main_path'):
1636 1685 path = path.get_push_variant()
1637 1686 dest = path.loc
1638 1687 else:
1639 1688 dest = path.pushloc or path.loc
1640 1689 ui.statusnoi18n(b'analysing phase of %s\n' % util.hidepassword(dest))
1641 1690 other = hg.peer(repo, opts, dest)
1642 1691
1643 1692 # easier to perform discovery through the operation
1644 1693 op = exchange.pushoperation(repo, other)
1645 1694 exchange._pushdiscoverychangeset(op)
1646 1695
1647 1696 remotesubset = op.fallbackheads
1648 1697
1649 1698 with other.commandexecutor() as e:
1650 1699 remotephases = e.callcommand(
1651 1700 b'listkeys', {b'namespace': b'phases'}
1652 1701 ).result()
1653 1702 del other
1654 1703 publishing = remotephases.get(b'publishing', False)
1655 1704 if publishing:
1656 1705 ui.statusnoi18n(b'publishing: yes\n')
1657 1706 else:
1658 1707 ui.statusnoi18n(b'publishing: no\n')
1659 1708
1660 1709 has_node = getattr(repo.changelog.index, 'has_node', None)
1661 1710 if has_node is None:
1662 1711 has_node = repo.changelog.nodemap.__contains__
1663 1712 nonpublishroots = 0
1664 1713 for nhex, phase in remotephases.iteritems():
1665 1714 if nhex == b'publishing': # ignore data related to publish option
1666 1715 continue
1667 1716 node = bin(nhex)
1668 1717 if has_node(node) and int(phase):
1669 1718 nonpublishroots += 1
1670 1719 ui.statusnoi18n(b'number of roots: %d\n' % len(remotephases))
1671 1720 ui.statusnoi18n(b'number of known non public roots: %d\n' % nonpublishroots)
1672 1721
1673 1722 def d():
1674 1723 phases.remotephasessummary(repo, remotesubset, remotephases)
1675 1724
1676 1725 timer(d)
1677 1726 fm.end()
1678 1727
1679 1728
1680 1729 @command(
1681 1730 b'perf::manifest|perfmanifest',
1682 1731 [
1683 1732 (b'm', b'manifest-rev', False, b'Look up a manifest node revision'),
1684 1733 (b'', b'clear-disk', False, b'clear on-disk caches too'),
1685 1734 ]
1686 1735 + formatteropts,
1687 1736 b'REV|NODE',
1688 1737 )
1689 1738 def perfmanifest(ui, repo, rev, manifest_rev=False, clear_disk=False, **opts):
1690 1739 """benchmark the time to read a manifest from disk and return a usable
1691 1740 dict-like object
1692 1741
1693 1742 Manifest caches are cleared before retrieval."""
1694 1743 opts = _byteskwargs(opts)
1695 1744 timer, fm = gettimer(ui, opts)
1696 1745 if not manifest_rev:
1697 1746 ctx = scmutil.revsingle(repo, rev, rev)
1698 1747 t = ctx.manifestnode()
1699 1748 else:
1700 1749 from mercurial.node import bin
1701 1750
1702 1751 if len(rev) == 40:
1703 1752 t = bin(rev)
1704 1753 else:
1705 1754 try:
1706 1755 rev = int(rev)
1707 1756
1708 1757 if util.safehasattr(repo.manifestlog, b'getstorage'):
1709 1758 t = repo.manifestlog.getstorage(b'').node(rev)
1710 1759 else:
1711 1760 t = repo.manifestlog._revlog.lookup(rev)
1712 1761 except ValueError:
1713 1762 raise error.Abort(
1714 1763 b'manifest revision must be integer or full node'
1715 1764 )
1716 1765
1717 1766 def d():
1718 1767 repo.manifestlog.clearcaches(clear_persisted_data=clear_disk)
1719 1768 repo.manifestlog[t].read()
1720 1769
1721 1770 timer(d)
1722 1771 fm.end()
1723 1772
1724 1773
1725 1774 @command(b'perf::changeset|perfchangeset', formatteropts)
1726 1775 def perfchangeset(ui, repo, rev, **opts):
1727 1776 opts = _byteskwargs(opts)
1728 1777 timer, fm = gettimer(ui, opts)
1729 1778 n = scmutil.revsingle(repo, rev).node()
1730 1779
1731 1780 def d():
1732 1781 repo.changelog.read(n)
1733 1782 # repo.changelog._cache = None
1734 1783
1735 1784 timer(d)
1736 1785 fm.end()
1737 1786
1738 1787
1739 1788 @command(b'perf::ignore|perfignore', formatteropts)
1740 1789 def perfignore(ui, repo, **opts):
1741 1790 """benchmark operation related to computing ignore"""
1742 1791 opts = _byteskwargs(opts)
1743 1792 timer, fm = gettimer(ui, opts)
1744 1793 dirstate = repo.dirstate
1745 1794
1746 1795 def setupone():
1747 1796 dirstate.invalidate()
1748 1797 clearfilecache(dirstate, b'_ignore')
1749 1798
1750 1799 def runone():
1751 1800 dirstate._ignore
1752 1801
1753 1802 timer(runone, setup=setupone, title=b"load")
1754 1803 fm.end()
1755 1804
1756 1805
1757 1806 @command(
1758 1807 b'perf::index|perfindex',
1759 1808 [
1760 1809 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1761 1810 (b'', b'no-lookup', None, b'do not revision lookup post creation'),
1762 1811 ]
1763 1812 + formatteropts,
1764 1813 )
1765 1814 def perfindex(ui, repo, **opts):
1766 1815 """benchmark index creation time followed by a lookup
1767 1816
1768 1817 The default is to look `tip` up. Depending on the index implementation,
1769 1818 the revision looked up can matters. For example, an implementation
1770 1819 scanning the index will have a faster lookup time for `--rev tip` than for
1771 1820 `--rev 0`. The number of looked up revisions and their order can also
1772 1821     matter.
1773 1822
1774 1823     Examples of useful sets to test:
1775 1824
1776 1825 * tip
1777 1826 * 0
1778 1827 * -10:
1779 1828 * :10
1780 1829 * -10: + :10
1781 1830 * :10: + -10:
1782 1831 * -10000:
1783 1832 * -10000: + 0
1784 1833
1785 1834 It is not currently possible to check for lookup of a missing node. For
1786 1835     deeper lookup benchmarking, check out the `perfnodemap` command."""
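    # example invocations (illustrative), combining the revsets suggested
    # above; every --rev value is resolved once, then looked up in each run:
    #
    #   $ hg perf::index --rev tip
    #   $ hg perf::index --rev '-10:' --rev ':10'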
1787 1836 import mercurial.revlog
1788 1837
1789 1838 opts = _byteskwargs(opts)
1790 1839 timer, fm = gettimer(ui, opts)
1791 1840 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1792 1841 if opts[b'no_lookup']:
1793 1842         if opts[b'rev']:
1794 1843 raise error.Abort('--no-lookup and --rev are mutually exclusive')
1795 1844 nodes = []
1796 1845 elif not opts[b'rev']:
1797 1846 nodes = [repo[b"tip"].node()]
1798 1847 else:
1799 1848 revs = scmutil.revrange(repo, opts[b'rev'])
1800 1849 cl = repo.changelog
1801 1850 nodes = [cl.node(r) for r in revs]
1802 1851
1803 1852 unfi = repo.unfiltered()
1804 1853 # find the filecache func directly
1805 1854     # This avoids polluting the benchmark with the filecache logic
1806 1855 makecl = unfi.__class__.changelog.func
1807 1856
1808 1857 def setup():
1809 1858 # probably not necessary, but for good measure
1810 1859 clearchangelog(unfi)
1811 1860
1812 1861 def d():
1813 1862 cl = makecl(unfi)
1814 1863 for n in nodes:
1815 1864 cl.rev(n)
1816 1865
1817 1866 timer(d, setup=setup)
1818 1867 fm.end()
1819 1868
1820 1869
1821 1870 @command(
1822 1871 b'perf::nodemap|perfnodemap',
1823 1872 [
1824 1873 (b'', b'rev', [], b'revision to be looked up (default tip)'),
1825 1874 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
1826 1875 ]
1827 1876 + formatteropts,
1828 1877 )
1829 1878 def perfnodemap(ui, repo, **opts):
1830 1879 """benchmark the time necessary to look up revision from a cold nodemap
1831 1880
1832 1881     Depending on the implementation, the amount and order of revisions we look
1833 1882     up can vary. Examples of useful sets to test:
1834 1883 * tip
1835 1884 * 0
1836 1885 * -10:
1837 1886 * :10
1838 1887 * -10: + :10
1839 1888 * :10: + -10:
1840 1889 * -10000:
1841 1890 * -10000: + 0
1842 1891
1843 1892     The command currently focuses on valid binary lookup. Benchmarking for
1844 1893 hexlookup, prefix lookup and missing lookup would also be valuable.
1845 1894 """
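    # example invocations (illustrative); --rev is mandatory here, and recent
    # Mercurial accepts the --no- prefix to flip the clear-caches default:
    #
    #   $ hg perf::nodemap --rev tip --rev 0
    #   $ hg perf::nodemap --no-clear-caches --rev tip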
1846 1895 import mercurial.revlog
1847 1896
1848 1897 opts = _byteskwargs(opts)
1849 1898 timer, fm = gettimer(ui, opts)
1850 1899 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
1851 1900
1852 1901 unfi = repo.unfiltered()
1853 1902 clearcaches = opts[b'clear_caches']
1854 1903 # find the filecache func directly
1855 1904     # This avoids polluting the benchmark with the filecache logic
1856 1905 makecl = unfi.__class__.changelog.func
1857 1906 if not opts[b'rev']:
1858 1907 raise error.Abort(b'use --rev to specify revisions to look up')
1859 1908 revs = scmutil.revrange(repo, opts[b'rev'])
1860 1909 cl = repo.changelog
1861 1910 nodes = [cl.node(r) for r in revs]
1862 1911
1863 1912 # use a list to pass reference to a nodemap from one closure to the next
1864 1913 nodeget = [None]
1865 1914
1866 1915 def setnodeget():
1867 1916 # probably not necessary, but for good measure
1868 1917 clearchangelog(unfi)
1869 1918 cl = makecl(unfi)
1870 1919 if util.safehasattr(cl.index, 'get_rev'):
1871 1920 nodeget[0] = cl.index.get_rev
1872 1921 else:
1873 1922 nodeget[0] = cl.nodemap.get
1874 1923
1875 1924 def d():
1876 1925 get = nodeget[0]
1877 1926 for n in nodes:
1878 1927 get(n)
1879 1928
1880 1929 setup = None
1881 1930 if clearcaches:
1882 1931
1883 1932 def setup():
1884 1933 setnodeget()
1885 1934
1886 1935 else:
1887 1936 setnodeget()
1888 1937 d() # prewarm the data structure
1889 1938 timer(d, setup=setup)
1890 1939 fm.end()
1891 1940
1892 1941
1893 1942 @command(b'perf::startup|perfstartup', formatteropts)
1894 1943 def perfstartup(ui, repo, **opts):
1895 1944 opts = _byteskwargs(opts)
1896 1945 timer, fm = gettimer(ui, opts)
1897 1946
1898 1947 def d():
1899 1948 if os.name != 'nt':
1900 1949 os.system(
1901 1950 b"HGRCPATH= %s version -q > /dev/null" % fsencode(sys.argv[0])
1902 1951 )
1903 1952 else:
1904 1953 os.environ['HGRCPATH'] = r' '
1905 1954 os.system("%s version -q > NUL" % sys.argv[0])
1906 1955
1907 1956 timer(d)
1908 1957 fm.end()
1909 1958
1910 1959
1911 1960 def _find_stream_generator(version):
1912 1961 """find the proper generator function for this stream version"""
1913 1962 import mercurial.streamclone
1914 1963
1915 1964 available = {}
1916 1965
1917 1966 # try to fetch a v1 generator
1918 1967 generatev1 = getattr(mercurial.streamclone, "generatev1", None)
1919 1968 if generatev1 is not None:
1920 1969
1921 1970 def generate(repo):
1922 1971             entries, bytes, data = generatev1(repo)
1923 1972 return data
1924 1973
1925 1974         available[b'v1'] = generate
1926 1975 # try to fetch a v2 generator
1927 1976 generatev2 = getattr(mercurial.streamclone, "generatev2", None)
1928 1977 if generatev2 is not None:
1929 1978
1930 1979 def generate(repo):
1931 1980 entries, bytes, data = generatev2(repo, None, None, True)
1932 1981 return data
1933 1982
1934 1983 available[b'v2'] = generate
1935 1984 # try to fetch a v3 generator
1936 1985 generatev3 = getattr(mercurial.streamclone, "generatev3", None)
1937 1986 if generatev3 is not None:
1938 1987
1939 1988 def generate(repo):
1940 1989 entries, bytes, data = generatev3(repo, None, None, True)
1941 1990 return data
1942 1991
1943 1992 available[b'v3-exp'] = generate
1944 1993
1945 1994 # resolve the request
1946 1995 if version == b"latest":
1947 1996     # latest is the highest non-experimental version
1948 1997 latest_key = max(v for v in available if b'-exp' not in v)
1949 1998 return available[latest_key]
1950 1999 elif version in available:
1951 2000 return available[version]
1952 2001 else:
1953 2002         msg = b"unknown or unavailable version: %s"
1954 2003 msg %= version
1955 2004 hint = b"available versions: %s"
1956 2005 hint %= b', '.join(sorted(available))
1957 2006 raise error.Abort(msg, hint=hint)
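# For example, on a Mercurial providing generatev1, generatev2 and generatev3,
# `available` ends up with the keys b'v1', b'v2' and b'v3-exp', and b"latest"
# resolves to b'v2' since experimental (`-exp`) versions are skipped.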
1958 2007
1959 2008
1960 2009 @command(
1961 2010 b'perf::stream-locked-section',
1962 2011 [
1963 2012 (
1964 2013 b'',
1965 2014 b'stream-version',
1966 2015 b'latest',
1967 2016             b'stream version to use ("v1", "v2", "v3-exp" or "latest" (the default))',
1968 2017 ),
1969 2018 ]
1970 2019 + formatteropts,
1971 2020 )
1972 2021 def perf_stream_clone_scan(ui, repo, stream_version, **opts):
1973 2022 """benchmark the initial, repo-locked, section of a stream-clone"""
1974 2023
1975 2024 opts = _byteskwargs(opts)
1976 2025 timer, fm = gettimer(ui, opts)
1977 2026
1978 2027 # deletion of the generator may trigger some cleanup that we do not want to
1979 2028 # measure
1980 2029 result_holder = [None]
1981 2030
1982 2031 def setupone():
1983 2032 result_holder[0] = None
1984 2033
1985 2034 generate = _find_stream_generator(stream_version)
1986 2035
1987 2036 def runone():
1988 2037         # the lock is held for the duration of the initialisation
1989 2038 result_holder[0] = generate(repo)
1990 2039
1991 2040 timer(runone, setup=setupone, title=b"load")
1992 2041 fm.end()
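# example invocation (illustrative): time only the repo-locked part of a v2
# stream clone:
#
#   $ hg perf::stream-locked-section --stream-version v2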
1993 2042
1994 2043
1995 2044 @command(
1996 2045 b'perf::stream-generate',
1997 2046 [
1998 2047 (
1999 2048 b'',
2000 2049 b'stream-version',
2001 2050 b'latest',
2002 2051             b'stream version to use ("v1", "v2", "v3-exp" or "latest" (the default))',
2003 2052 ),
2004 2053 ]
2005 2054 + formatteropts,
2006 2055 )
2007 2056 def perf_stream_clone_generate(ui, repo, stream_version, **opts):
2008 2057 """benchmark the full generation of a stream clone"""
2009 2058
2010 2059 opts = _byteskwargs(opts)
2011 2060 timer, fm = gettimer(ui, opts)
2012 2061
2013 2062 # deletion of the generator may trigger some cleanup that we do not want to
2014 2063 # measure
2015 2064
2016 2065 generate = _find_stream_generator(stream_version)
2017 2066
2018 2067 def runone():
2019 2068         # fully consume the stream so the whole generation is timed
2020 2069 for chunk in generate(repo):
2021 2070 pass
2022 2071
2023 2072 timer(runone, title=b"generate")
2024 2073 fm.end()
2025 2074
2026 2075
2027 2076 @command(
2028 2077 b'perf::stream-consume',
2029 2078 formatteropts,
2030 2079 )
2031 2080 def perf_stream_clone_consume(ui, repo, filename, **opts):
2032 2081 """benchmark the full application of a stream clone
2033 2082
2034 2083     This includes the creation of the repository.
2035 2084 """
2036 2085 # try except to appease check code
2037 2086 msg = b"mercurial too old, missing necessary module: %s"
2038 2087 try:
2039 2088 from mercurial import bundle2
2040 2089 except ImportError as exc:
2041 2090 msg %= _bytestr(exc)
2042 2091 raise error.Abort(msg)
2043 2092 try:
2044 2093 from mercurial import exchange
2045 2094 except ImportError as exc:
2046 2095 msg %= _bytestr(exc)
2047 2096 raise error.Abort(msg)
2048 2097 try:
2049 2098 from mercurial import hg
2050 2099 except ImportError as exc:
2051 2100 msg %= _bytestr(exc)
2052 2101 raise error.Abort(msg)
2053 2102 try:
2054 2103 from mercurial import localrepo
2055 2104 except ImportError as exc:
2056 2105 msg %= _bytestr(exc)
2057 2106 raise error.Abort(msg)
2058 2107
2059 2108 opts = _byteskwargs(opts)
2060 2109 timer, fm = gettimer(ui, opts)
2061 2110
2062 2111 # deletion of the generator may trigger some cleanup that we do not want to
2063 2112 # measure
2064 2113 if not (os.path.isfile(filename) and os.access(filename, os.R_OK)):
2065 2114 raise error.Abort("not a readable file: %s" % filename)
2066 2115
2067 2116 run_variables = [None, None]
2068 2117
2069 2118 @contextlib.contextmanager
2070 2119 def context():
2071 2120 with open(filename, mode='rb') as bundle:
2072 2121 with tempfile.TemporaryDirectory() as tmp_dir:
2073 2122 tmp_dir = fsencode(tmp_dir)
2074 2123 run_variables[0] = bundle
2075 2124 run_variables[1] = tmp_dir
2076 2125 yield
2077 2126 run_variables[0] = None
2078 2127 run_variables[1] = None
2079 2128
2080 2129 def runone():
2081 2130 bundle = run_variables[0]
2082 2131 tmp_dir = run_variables[1]
2083 2132 # only pass ui when no srcrepo
2084 2133 localrepo.createrepository(
2085 2134 repo.ui, tmp_dir, requirements=repo.requirements
2086 2135 )
2087 2136 target = hg.repository(repo.ui, tmp_dir)
2088 2137 gen = exchange.readbundle(target.ui, bundle, bundle.name)
2089 2138 # stream v1
2090 2139 if util.safehasattr(gen, 'apply'):
2091 2140 gen.apply(target)
2092 2141 else:
2093 2142 with target.transaction(b"perf::stream-consume") as tr:
2094 2143 bundle2.applybundle(
2095 2144 target,
2096 2145 gen,
2097 2146 tr,
2098 2147 source=b'unbundle',
2099 2148 url=filename,
2100 2149 )
2101 2150
2102 2151 timer(runone, context=context, title=b"consume")
2103 2152 fm.end()
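# example workflow (illustrative): `hg debugcreatestreamclonebundle` is one
# way to produce a stream bundle to feed to this benchmark:
#
#   $ hg debugcreatestreamclonebundle stream.hg
#   $ hg perf::stream-consume stream.hg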
2104 2153
2105 2154
2106 2155 @command(b'perf::parents|perfparents', formatteropts)
2107 2156 def perfparents(ui, repo, **opts):
2108 2157 """benchmark the time necessary to fetch one changeset's parents.
2109 2158
2110 2159 The fetch is done using the `node identifier`, traversing all object layers
2111 2160 from the repository object. The first N revisions will be used for this
2112 2161 benchmark. N is controlled by the ``perf.parentscount`` config option
2113 2162 (default: 1000).
2114 2163 """
2115 2164 opts = _byteskwargs(opts)
2116 2165 timer, fm = gettimer(ui, opts)
2117 2166 # control the number of commits perfparents iterates over
2118 2167 # experimental config: perf.parentscount
2119 2168 count = getint(ui, b"perf", b"parentscount", 1000)
2120 2169 if len(repo.changelog) < count:
2121 2170 raise error.Abort(b"repo needs %d commits for this test" % count)
2122 2171 repo = repo.unfiltered()
2123 2172 nl = [repo.changelog.node(i) for i in _xrange(count)]
2124 2173
2125 2174 def d():
2126 2175 for n in nl:
2127 2176 repo.changelog.parents(n)
2128 2177
2129 2178 timer(d)
2130 2179 fm.end()
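# the revision count used above can be tuned from the configuration of the
# benchmarked repository, for instance:
#
#   [perf]
#   parentscount = 10000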
2131 2180
2132 2181
2133 2182 @command(b'perf::ctxfiles|perfctxfiles', formatteropts)
2134 2183 def perfctxfiles(ui, repo, x, **opts):
2135 2184 opts = _byteskwargs(opts)
2136 2185 x = int(x)
2137 2186 timer, fm = gettimer(ui, opts)
2138 2187
2139 2188 def d():
2140 2189 len(repo[x].files())
2141 2190
2142 2191 timer(d)
2143 2192 fm.end()
2144 2193
2145 2194
2146 2195 @command(b'perf::rawfiles|perfrawfiles', formatteropts)
2147 2196 def perfrawfiles(ui, repo, x, **opts):
2148 2197 opts = _byteskwargs(opts)
2149 2198 x = int(x)
2150 2199 timer, fm = gettimer(ui, opts)
2151 2200 cl = repo.changelog
2152 2201
2153 2202 def d():
2154 2203 len(cl.read(x)[3])
2155 2204
2156 2205 timer(d)
2157 2206 fm.end()
2158 2207
2159 2208
2160 2209 @command(b'perf::lookup|perflookup', formatteropts)
2161 2210 def perflookup(ui, repo, rev, **opts):
2162 2211 opts = _byteskwargs(opts)
2163 2212 timer, fm = gettimer(ui, opts)
2164 2213 timer(lambda: len(repo.lookup(rev)))
2165 2214 fm.end()
2166 2215
2167 2216
2168 2217 @command(
2169 2218 b'perf::linelogedits|perflinelogedits',
2170 2219 [
2171 2220 (b'n', b'edits', 10000, b'number of edits'),
2172 2221 (b'', b'max-hunk-lines', 10, b'max lines in a hunk'),
2173 2222 ],
2174 2223 norepo=True,
2175 2224 )
2176 2225 def perflinelogedits(ui, **opts):
2177 2226 from mercurial import linelog
2178 2227
2179 2228 opts = _byteskwargs(opts)
2180 2229
2181 2230 edits = opts[b'edits']
2182 2231 maxhunklines = opts[b'max_hunk_lines']
2183 2232
2184 2233 maxb1 = 100000
2185 2234 random.seed(0)
2186 2235 randint = random.randint
2187 2236 currentlines = 0
2188 2237 arglist = []
2189 2238 for rev in _xrange(edits):
2190 2239 a1 = randint(0, currentlines)
2191 2240 a2 = randint(a1, min(currentlines, a1 + maxhunklines))
2192 2241 b1 = randint(0, maxb1)
2193 2242 b2 = randint(b1, b1 + maxhunklines)
2194 2243 currentlines += (b2 - b1) - (a2 - a1)
2195 2244 arglist.append((rev, a1, a2, b1, b2))
2196 2245
2197 2246 def d():
2198 2247 ll = linelog.linelog()
2199 2248 for args in arglist:
2200 2249 ll.replacelines(*args)
2201 2250
2202 2251 timer, fm = gettimer(ui, opts)
2203 2252 timer(d)
2204 2253 fm.end()
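# example invocation (illustrative): the workload is synthetic and seeded, so
# it is repeatable and needs no repository:
#
#   $ hg perf::linelogedits -n 1000 --max-hunk-lines 5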
2205 2254
2206 2255
2207 2256 @command(b'perf::revrange|perfrevrange', formatteropts)
2208 2257 def perfrevrange(ui, repo, *specs, **opts):
2209 2258 opts = _byteskwargs(opts)
2210 2259 timer, fm = gettimer(ui, opts)
2211 2260 revrange = scmutil.revrange
2212 2261 timer(lambda: len(revrange(repo, specs)))
2213 2262 fm.end()
2214 2263
2215 2264
2216 2265 @command(b'perf::nodelookup|perfnodelookup', formatteropts)
2217 2266 def perfnodelookup(ui, repo, rev, **opts):
2218 2267 opts = _byteskwargs(opts)
2219 2268 timer, fm = gettimer(ui, opts)
2220 2269 import mercurial.revlog
2221 2270
2222 2271 mercurial.revlog._prereadsize = 2 ** 24 # disable lazy parser in old hg
2223 2272 n = scmutil.revsingle(repo, rev).node()
2224 2273
2225 2274 try:
2226 2275 cl = revlog(getsvfs(repo), radix=b"00changelog")
2227 2276 except TypeError:
2228 2277 cl = revlog(getsvfs(repo), indexfile=b"00changelog.i")
2229 2278
2230 2279 def d():
2231 2280 cl.rev(n)
2232 2281 clearcaches(cl)
2233 2282
2234 2283 timer(d)
2235 2284 fm.end()
2236 2285
2237 2286
2238 2287 @command(
2239 2288 b'perf::log|perflog',
2240 2289 [(b'', b'rename', False, b'ask log to follow renames')] + formatteropts,
2241 2290 )
2242 2291 def perflog(ui, repo, rev=None, **opts):
2243 2292 opts = _byteskwargs(opts)
2244 2293 if rev is None:
2245 2294 rev = []
2246 2295 timer, fm = gettimer(ui, opts)
2247 2296 ui.pushbuffer()
2248 2297 timer(
2249 2298 lambda: commands.log(
2250 2299 ui, repo, rev=rev, date=b'', user=b'', copies=opts.get(b'rename')
2251 2300 )
2252 2301 )
2253 2302 ui.popbuffer()
2254 2303 fm.end()
2255 2304
2256 2305
2257 2306 @command(b'perf::moonwalk|perfmoonwalk', formatteropts)
2258 2307 def perfmoonwalk(ui, repo, **opts):
2259 2308 """benchmark walking the changelog backwards
2260 2309
2261 2310 This also loads the changelog data for each revision in the changelog.
2262 2311 """
2263 2312 opts = _byteskwargs(opts)
2264 2313 timer, fm = gettimer(ui, opts)
2265 2314
2266 2315 def moonwalk():
2267 2316 for i in repo.changelog.revs(start=(len(repo) - 1), stop=-1):
2268 2317 ctx = repo[i]
2269 2318 ctx.branch() # read changelog data (in addition to the index)
2270 2319
2271 2320 timer(moonwalk)
2272 2321 fm.end()
2273 2322
2274 2323
2275 2324 @command(
2276 2325 b'perf::templating|perftemplating',
2277 2326 [
2278 2327 (b'r', b'rev', [], b'revisions to run the template on'),
2279 2328 ]
2280 2329 + formatteropts,
2281 2330 )
2282 2331 def perftemplating(ui, repo, testedtemplate=None, **opts):
2283 2332 """test the rendering time of a given template"""
2284 2333 if makelogtemplater is None:
2285 2334 raise error.Abort(
2286 2335 b"perftemplating not available with this Mercurial",
2287 2336 hint=b"use 4.3 or later",
2288 2337 )
2289 2338
2290 2339 opts = _byteskwargs(opts)
2291 2340
2292 2341 nullui = ui.copy()
2293 2342 nullui.fout = open(os.devnull, 'wb')
2294 2343 nullui.disablepager()
2295 2344 revs = opts.get(b'rev')
2296 2345 if not revs:
2297 2346 revs = [b'all()']
2298 2347 revs = list(scmutil.revrange(repo, revs))
2299 2348
2300 2349 defaulttemplate = (
2301 2350 b'{date|shortdate} [{rev}:{node|short}]'
2302 2351 b' {author|person}: {desc|firstline}\n'
2303 2352 )
2304 2353 if testedtemplate is None:
2305 2354 testedtemplate = defaulttemplate
2306 2355 displayer = makelogtemplater(nullui, repo, testedtemplate)
2307 2356
2308 2357 def format():
2309 2358 for r in revs:
2310 2359 ctx = repo[r]
2311 2360 displayer.show(ctx)
2312 2361 displayer.flush(ctx)
2313 2362
2314 2363 timer, fm = gettimer(ui, opts)
2315 2364 timer(format)
2316 2365 fm.end()
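# example invocation (illustrative): time a custom template over the last
# 1000 revisions:
#
#   $ hg perf::templating -r '-1000:' '{rev}:{node|short}\n'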
2317 2366
2318 2367
2319 2368 def _displaystats(ui, opts, entries, data):
2320 2369 # use a second formatter because the data are quite different, not sure
2321 2370 # how it flies with the templater.
2322 2371 fm = ui.formatter(b'perf-stats', opts)
2323 2372 for key, title in entries:
2324 2373 values = data[key]
2325 2374         nbvalues = len(values)
2326 2375 values.sort()
2327 2376 stats = {
2328 2377 'key': key,
2329 2378 'title': title,
2330 2379 'nbitems': len(values),
2331 2380 'min': values[0][0],
2332 2381 '10%': values[(nbvalues * 10) // 100][0],
2333 2382 '25%': values[(nbvalues * 25) // 100][0],
2334 2383 '50%': values[(nbvalues * 50) // 100][0],
2335 2384 '75%': values[(nbvalues * 75) // 100][0],
2336 2385 '80%': values[(nbvalues * 80) // 100][0],
2337 2386 '85%': values[(nbvalues * 85) // 100][0],
2338 2387 '90%': values[(nbvalues * 90) // 100][0],
2339 2388 '95%': values[(nbvalues * 95) // 100][0],
2340 2389 '99%': values[(nbvalues * 99) // 100][0],
2341 2390 'max': values[-1][0],
2342 2391 }
2343 2392 fm.startitem()
2344 2393 fm.data(**stats)
2345 2394 # make node pretty for the human output
2346 2395 fm.plain('### %s (%d items)\n' % (title, len(values)))
2347 2396 lines = [
2348 2397 'min',
2349 2398 '10%',
2350 2399 '25%',
2351 2400 '50%',
2352 2401 '75%',
2353 2402 '80%',
2354 2403 '85%',
2355 2404 '90%',
2356 2405 '95%',
2357 2406 '99%',
2358 2407 'max',
2359 2408 ]
2360 2409 for l in lines:
2361 2410 fm.plain('%s: %s\n' % (l, stats[l]))
2362 2411 fm.end()
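# For example, with 200 collected items the '90%' row above prints
# values[(200 * 90) // 100][0], i.e. the first element of the item sitting at
# the 90th percentile rank of the sorted list.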
2363 2412
2364 2413
2365 2414 @command(
2366 2415 b'perf::helper-mergecopies|perfhelper-mergecopies',
2367 2416 formatteropts
2368 2417 + [
2369 2418 (b'r', b'revs', [], b'restrict search to these revisions'),
2370 2419 (b'', b'timing', False, b'provides extra data (costly)'),
2371 2420 (b'', b'stats', False, b'provides statistic about the measured data'),
2372 2421 ],
2373 2422 )
2374 2423 def perfhelpermergecopies(ui, repo, revs=[], **opts):
2375 2424 """find statistics about potential parameters for `perfmergecopies`
2376 2425
2377 2426     This command finds (base, p1, p2) triplets relevant for copytracing
2378 2427 benchmarking in the context of a merge. It reports values for some of the
2379 2428 parameters that impact merge copy tracing time during merge.
2380 2429
2381 2430 If `--timing` is set, rename detection is run and the associated timing
2382 2431 will be reported. The extra details come at the cost of slower command
2383 2432 execution.
2384 2433
2385 2434 Since rename detection is only run once, other factors might easily
2386 2435     affect the precision of the timing. However, it should give a good
2387 2436 approximation of which revision triplets are very costly.
2388 2437 """
2389 2438 opts = _byteskwargs(opts)
2390 2439 fm = ui.formatter(b'perf', opts)
2391 2440 dotiming = opts[b'timing']
2392 2441 dostats = opts[b'stats']
2393 2442
2394 2443 output_template = [
2395 2444 ("base", "%(base)12s"),
2396 2445 ("p1", "%(p1.node)12s"),
2397 2446 ("p2", "%(p2.node)12s"),
2398 2447 ("p1.nb-revs", "%(p1.nbrevs)12d"),
2399 2448 ("p1.nb-files", "%(p1.nbmissingfiles)12d"),
2400 2449 ("p1.renames", "%(p1.renamedfiles)12d"),
2401 2450 ("p1.time", "%(p1.time)12.3f"),
2402 2451 ("p2.nb-revs", "%(p2.nbrevs)12d"),
2403 2452 ("p2.nb-files", "%(p2.nbmissingfiles)12d"),
2404 2453 ("p2.renames", "%(p2.renamedfiles)12d"),
2405 2454 ("p2.time", "%(p2.time)12.3f"),
2406 2455 ("renames", "%(nbrenamedfiles)12d"),
2407 2456 ("total.time", "%(time)12.3f"),
2408 2457 ]
2409 2458 if not dotiming:
2410 2459 output_template = [
2411 2460 i
2412 2461 for i in output_template
2413 2462 if not ('time' in i[0] or 'renames' in i[0])
2414 2463 ]
2415 2464 header_names = [h for (h, v) in output_template]
2416 2465 output = ' '.join([v for (h, v) in output_template]) + '\n'
2417 2466 header = ' '.join(['%12s'] * len(header_names)) + '\n'
2418 2467 fm.plain(header % tuple(header_names))
2419 2468
2420 2469 if not revs:
2421 2470 revs = ['all()']
2422 2471 revs = scmutil.revrange(repo, revs)
2423 2472
2424 2473 if dostats:
2425 2474 alldata = {
2426 2475 'nbrevs': [],
2427 2476 'nbmissingfiles': [],
2428 2477 }
2429 2478 if dotiming:
2430 2479 alldata['parentnbrenames'] = []
2431 2480 alldata['totalnbrenames'] = []
2432 2481 alldata['parenttime'] = []
2433 2482 alldata['totaltime'] = []
2434 2483
2435 2484 roi = repo.revs('merge() and %ld', revs)
2436 2485 for r in roi:
2437 2486 ctx = repo[r]
2438 2487 p1 = ctx.p1()
2439 2488 p2 = ctx.p2()
2440 2489 bases = repo.changelog._commonancestorsheads(p1.rev(), p2.rev())
2441 2490 for b in bases:
2442 2491 b = repo[b]
2443 2492 p1missing = copies._computeforwardmissing(b, p1)
2444 2493 p2missing = copies._computeforwardmissing(b, p2)
2445 2494 data = {
2446 2495 b'base': b.hex(),
2447 2496 b'p1.node': p1.hex(),
2448 2497 b'p1.nbrevs': len(repo.revs('only(%d, %d)', p1.rev(), b.rev())),
2449 2498 b'p1.nbmissingfiles': len(p1missing),
2450 2499 b'p2.node': p2.hex(),
2451 2500 b'p2.nbrevs': len(repo.revs('only(%d, %d)', p2.rev(), b.rev())),
2452 2501 b'p2.nbmissingfiles': len(p2missing),
2453 2502 }
2454 2503 if dostats:
2455 2504 if p1missing:
2456 2505 alldata['nbrevs'].append(
2457 2506 (data['p1.nbrevs'], b.hex(), p1.hex())
2458 2507 )
2459 2508 alldata['nbmissingfiles'].append(
2460 2509 (data['p1.nbmissingfiles'], b.hex(), p1.hex())
2461 2510 )
2462 2511 if p2missing:
2463 2512 alldata['nbrevs'].append(
2464 2513 (data['p2.nbrevs'], b.hex(), p2.hex())
2465 2514 )
2466 2515 alldata['nbmissingfiles'].append(
2467 2516 (data['p2.nbmissingfiles'], b.hex(), p2.hex())
2468 2517 )
2469 2518 if dotiming:
2470 2519 begin = util.timer()
2471 2520 mergedata = copies.mergecopies(repo, p1, p2, b)
2472 2521 end = util.timer()
2473 2522 # not very stable timing since we did only one run
2474 2523 data['time'] = end - begin
2475 2524 # mergedata contains five dicts: "copy", "movewithdir",
2476 2525 # "diverge", "renamedelete" and "dirmove".
2477 2526                 # The first 4 are about renamed files so let's count those.
2478 2527 renames = len(mergedata[0])
2479 2528 renames += len(mergedata[1])
2480 2529 renames += len(mergedata[2])
2481 2530 renames += len(mergedata[3])
2482 2531 data['nbrenamedfiles'] = renames
2483 2532 begin = util.timer()
2484 2533 p1renames = copies.pathcopies(b, p1)
2485 2534 end = util.timer()
2486 2535 data['p1.time'] = end - begin
2487 2536 begin = util.timer()
2488 2537 p2renames = copies.pathcopies(b, p2)
2489 2538 end = util.timer()
2490 2539 data['p2.time'] = end - begin
2491 2540 data['p1.renamedfiles'] = len(p1renames)
2492 2541 data['p2.renamedfiles'] = len(p2renames)
2493 2542
2494 2543 if dostats:
2495 2544 if p1missing:
2496 2545 alldata['parentnbrenames'].append(
2497 2546 (data['p1.renamedfiles'], b.hex(), p1.hex())
2498 2547 )
2499 2548 alldata['parenttime'].append(
2500 2549 (data['p1.time'], b.hex(), p1.hex())
2501 2550 )
2502 2551 if p2missing:
2503 2552 alldata['parentnbrenames'].append(
2504 2553 (data['p2.renamedfiles'], b.hex(), p2.hex())
2505 2554 )
2506 2555 alldata['parenttime'].append(
2507 2556 (data['p2.time'], b.hex(), p2.hex())
2508 2557 )
2509 2558 if p1missing or p2missing:
2510 2559 alldata['totalnbrenames'].append(
2511 2560 (
2512 2561 data['nbrenamedfiles'],
2513 2562 b.hex(),
2514 2563 p1.hex(),
2515 2564 p2.hex(),
2516 2565 )
2517 2566 )
2518 2567 alldata['totaltime'].append(
2519 2568 (data['time'], b.hex(), p1.hex(), p2.hex())
2520 2569 )
2521 2570 fm.startitem()
2522 2571 fm.data(**data)
2523 2572 # make node pretty for the human output
2524 2573 out = data.copy()
2525 2574 out['base'] = fm.hexfunc(b.node())
2526 2575 out['p1.node'] = fm.hexfunc(p1.node())
2527 2576 out['p2.node'] = fm.hexfunc(p2.node())
2528 2577 fm.plain(output % out)
2529 2578
2530 2579 fm.end()
2531 2580 if dostats:
2532 2581 # use a second formatter because the data are quite different, not sure
2533 2582 # how it flies with the templater.
2534 2583 entries = [
2535 2584             ('nbrevs', 'number of revisions covered'),
2536 2585 ('nbmissingfiles', 'number of missing files at head'),
2537 2586 ]
2538 2587 if dotiming:
2539 2588 entries.append(
2540 2589                 ('parentnbrenames', 'renames from one parent to base')
2541 2590 )
2542 2591 entries.append(('totalnbrenames', 'total number of renames'))
2543 2592 entries.append(('parenttime', 'time for one parent'))
2544 2593 entries.append(('totaltime', 'time for both parents'))
2545 2594 _displaystats(ui, opts, entries, alldata)
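# example invocation (illustrative): restrict the search to a recent revset,
# as the default `all()` can be very costly with --timing on large repos:
#
#   $ hg perf::helper-mergecopies --revs '-1000:' --timing --stats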
2546 2595
2547 2596
2548 2597 @command(
2549 2598 b'perf::helper-pathcopies|perfhelper-pathcopies',
2550 2599 formatteropts
2551 2600 + [
2552 2601 (b'r', b'revs', [], b'restrict search to these revisions'),
2553 2602 (b'', b'timing', False, b'provides extra data (costly)'),
2554 2603 (b'', b'stats', False, b'provides statistic about the measured data'),
2555 2604 ],
2556 2605 )
2557 2606 def perfhelperpathcopies(ui, repo, revs=[], **opts):
2558 2607     """find statistics about potential parameters for `perftracecopies`
2559 2608 
2560 2609     This command finds source-destination pairs relevant for copytracing testing.
2561 2610     It reports values for some of the parameters that impact copy tracing time.
2562 2611 
2563 2612     If `--timing` is set, rename detection is run and the associated timing
2564 2613     will be reported. The extra details come at the cost of a slower command
2565 2614 execution.
2566 2615
2567 2616 Since the rename detection is only run once, other factors might easily
2568 2617     affect the precision of the timing. However, it should give a good
2569 2618 approximation of which revision pairs are very costly.
2570 2619 """
2571 2620 opts = _byteskwargs(opts)
2572 2621 fm = ui.formatter(b'perf', opts)
2573 2622 dotiming = opts[b'timing']
2574 2623 dostats = opts[b'stats']
2575 2624
2576 2625 if dotiming:
2577 2626 header = '%12s %12s %12s %12s %12s %12s\n'
2578 2627 output = (
2579 2628 "%(source)12s %(destination)12s "
2580 2629 "%(nbrevs)12d %(nbmissingfiles)12d "
2581 2630 "%(nbrenamedfiles)12d %(time)18.5f\n"
2582 2631 )
2583 2632 header_names = (
2584 2633 "source",
2585 2634 "destination",
2586 2635 "nb-revs",
2587 2636 "nb-files",
2588 2637 "nb-renames",
2589 2638 "time",
2590 2639 )
2591 2640 fm.plain(header % header_names)
2592 2641 else:
2593 2642 header = '%12s %12s %12s %12s\n'
2594 2643 output = (
2595 2644 "%(source)12s %(destination)12s "
2596 2645 "%(nbrevs)12d %(nbmissingfiles)12d\n"
2597 2646 )
2598 2647 fm.plain(header % ("source", "destination", "nb-revs", "nb-files"))
2599 2648
2600 2649 if not revs:
2601 2650 revs = ['all()']
2602 2651 revs = scmutil.revrange(repo, revs)
2603 2652
2604 2653 if dostats:
2605 2654 alldata = {
2606 2655 'nbrevs': [],
2607 2656 'nbmissingfiles': [],
2608 2657 }
2609 2658 if dotiming:
2610 2659 alldata['nbrenames'] = []
2611 2660 alldata['time'] = []
2612 2661
2613 2662 roi = repo.revs('merge() and %ld', revs)
2614 2663 for r in roi:
2615 2664 ctx = repo[r]
2616 2665 p1 = ctx.p1().rev()
2617 2666 p2 = ctx.p2().rev()
2618 2667 bases = repo.changelog._commonancestorsheads(p1, p2)
2619 2668 for p in (p1, p2):
2620 2669 for b in bases:
2621 2670 base = repo[b]
2622 2671 parent = repo[p]
2623 2672 missing = copies._computeforwardmissing(base, parent)
2624 2673 if not missing:
2625 2674 continue
2626 2675 data = {
2627 2676 b'source': base.hex(),
2628 2677 b'destination': parent.hex(),
2629 2678 b'nbrevs': len(repo.revs('only(%d, %d)', p, b)),
2630 2679 b'nbmissingfiles': len(missing),
2631 2680 }
2632 2681 if dostats:
2633 2682 alldata['nbrevs'].append(
2634 2683 (
2635 2684 data['nbrevs'],
2636 2685 base.hex(),
2637 2686 parent.hex(),
2638 2687 )
2639 2688 )
2640 2689 alldata['nbmissingfiles'].append(
2641 2690 (
2642 2691 data['nbmissingfiles'],
2643 2692 base.hex(),
2644 2693 parent.hex(),
2645 2694 )
2646 2695 )
2647 2696 if dotiming:
2648 2697 begin = util.timer()
2649 2698 renames = copies.pathcopies(base, parent)
2650 2699 end = util.timer()
2651 2700 # not very stable timing since we did only one run
2652 2701 data['time'] = end - begin
2653 2702 data['nbrenamedfiles'] = len(renames)
2654 2703 if dostats:
2655 2704 alldata['time'].append(
2656 2705 (
2657 2706 data['time'],
2658 2707 base.hex(),
2659 2708 parent.hex(),
2660 2709 )
2661 2710 )
2662 2711 alldata['nbrenames'].append(
2663 2712 (
2664 2713 data['nbrenamedfiles'],
2665 2714 base.hex(),
2666 2715 parent.hex(),
2667 2716 )
2668 2717 )
2669 2718 fm.startitem()
2670 2719 fm.data(**data)
2671 2720 out = data.copy()
2672 2721 out['source'] = fm.hexfunc(base.node())
2673 2722 out['destination'] = fm.hexfunc(parent.node())
2674 2723 fm.plain(output % out)
2675 2724
2676 2725 fm.end()
2677 2726 if dostats:
2678 2727 entries = [
2679 2728             ('nbrevs', 'number of revisions covered'),
2680 2729 ('nbmissingfiles', 'number of missing files at head'),
2681 2730 ]
2682 2731 if dotiming:
2683 2732 entries.append(('nbrenames', 'renamed files'))
2684 2733 entries.append(('time', 'time'))
2685 2734 _displaystats(ui, opts, entries, alldata)
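# example invocation (illustrative):
#
#   $ hg perf::helper-pathcopies --revs '-1000:' --timing --stats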
2686 2735
2687 2736
2688 2737 @command(b'perf::cca|perfcca', formatteropts)
2689 2738 def perfcca(ui, repo, **opts):
2690 2739 opts = _byteskwargs(opts)
2691 2740 timer, fm = gettimer(ui, opts)
2692 2741 timer(lambda: scmutil.casecollisionauditor(ui, False, repo.dirstate))
2693 2742 fm.end()
2694 2743
2695 2744
2696 2745 @command(b'perf::fncacheload|perffncacheload', formatteropts)
2697 2746 def perffncacheload(ui, repo, **opts):
2698 2747 opts = _byteskwargs(opts)
2699 2748 timer, fm = gettimer(ui, opts)
2700 2749 s = repo.store
2701 2750
2702 2751 def d():
2703 2752 s.fncache._load()
2704 2753
2705 2754 timer(d)
2706 2755 fm.end()
2707 2756
2708 2757
2709 2758 @command(b'perf::fncachewrite|perffncachewrite', formatteropts)
2710 2759 def perffncachewrite(ui, repo, **opts):
2711 2760 opts = _byteskwargs(opts)
2712 2761 timer, fm = gettimer(ui, opts)
2713 2762 s = repo.store
2714 2763 lock = repo.lock()
2715 2764 s.fncache._load()
2716 2765 tr = repo.transaction(b'perffncachewrite')
2717 2766 tr.addbackup(b'fncache')
2718 2767
2719 2768 def d():
2720 2769 s.fncache._dirty = True
2721 2770 s.fncache.write(tr)
2722 2771
2723 2772 timer(d)
2724 2773 tr.close()
2725 2774 lock.release()
2726 2775 fm.end()
2727 2776
2728 2777
2729 2778 @command(b'perf::fncacheencode|perffncacheencode', formatteropts)
2730 2779 def perffncacheencode(ui, repo, **opts):
2731 2780 opts = _byteskwargs(opts)
2732 2781 timer, fm = gettimer(ui, opts)
2733 2782 s = repo.store
2734 2783 s.fncache._load()
2735 2784
2736 2785 def d():
2737 2786 for p in s.fncache.entries:
2738 2787 s.encode(p)
2739 2788
2740 2789 timer(d)
2741 2790 fm.end()
2742 2791
2743 2792
2744 2793 def _bdiffworker(q, blocks, xdiff, ready, done):
2745 2794 while not done.is_set():
2746 2795 pair = q.get()
2747 2796 while pair is not None:
2748 2797 if xdiff:
2749 2798 mdiff.bdiff.xdiffblocks(*pair)
2750 2799 elif blocks:
2751 2800 mdiff.bdiff.blocks(*pair)
2752 2801 else:
2753 2802 mdiff.textdiff(*pair)
2754 2803 q.task_done()
2755 2804 pair = q.get()
2756 2805 q.task_done() # for the None one
2757 2806 with ready:
2758 2807 ready.wait()
2759 2808
2760 2809
2761 2810 def _manifestrevision(repo, mnode):
2762 2811 ml = repo.manifestlog
2763 2812
2764 2813 if util.safehasattr(ml, b'getstorage'):
2765 2814 store = ml.getstorage(b'')
2766 2815 else:
2767 2816 store = ml._revlog
2768 2817
2769 2818 return store.revision(mnode)
2770 2819
2771 2820
2772 2821 @command(
2773 2822 b'perf::bdiff|perfbdiff',
2774 2823 revlogopts
2775 2824 + formatteropts
2776 2825 + [
2777 2826 (
2778 2827 b'',
2779 2828 b'count',
2780 2829 1,
2781 2830 b'number of revisions to test (when using --startrev)',
2782 2831 ),
2783 2832 (b'', b'alldata', False, b'test bdiffs for all associated revisions'),
2784 2833 (b'', b'threads', 0, b'number of thread to use (disable with 0)'),
2785 2834 (b'', b'blocks', False, b'test computing diffs into blocks'),
2786 2835 (b'', b'xdiff', False, b'use xdiff algorithm'),
2787 2836 ],
2788 2837 b'-c|-m|FILE REV',
2789 2838 )
2790 2839 def perfbdiff(ui, repo, file_, rev=None, count=None, threads=0, **opts):
2791 2840 """benchmark a bdiff between revisions
2792 2841
2793 2842 By default, benchmark a bdiff between its delta parent and itself.
2794 2843
2795 2844 With ``--count``, benchmark bdiffs between delta parents and self for N
2796 2845 revisions starting at the specified revision.
2797 2846
2798 2847 With ``--alldata``, assume the requested revision is a changeset and
2799 2848 measure bdiffs for all changes related to that changeset (manifest
2800 2849 and filelogs).
2801 2850 """
2802 2851 opts = _byteskwargs(opts)
2803 2852
2804 2853 if opts[b'xdiff'] and not opts[b'blocks']:
2805 2854 raise error.CommandError(b'perfbdiff', b'--xdiff requires --blocks')
2806 2855
2807 2856 if opts[b'alldata']:
2808 2857 opts[b'changelog'] = True
2809 2858
2810 2859 if opts.get(b'changelog') or opts.get(b'manifest'):
2811 2860 file_, rev = None, file_
2812 2861 elif rev is None:
2813 2862 raise error.CommandError(b'perfbdiff', b'invalid arguments')
2814 2863
2815 2864 blocks = opts[b'blocks']
2816 2865 xdiff = opts[b'xdiff']
2817 2866 textpairs = []
2818 2867
2819 2868 r = cmdutil.openrevlog(repo, b'perfbdiff', file_, opts)
2820 2869
2821 2870 startrev = r.rev(r.lookup(rev))
2822 2871 for rev in range(startrev, min(startrev + count, len(r) - 1)):
2823 2872 if opts[b'alldata']:
2824 2873 # Load revisions associated with changeset.
2825 2874 ctx = repo[rev]
2826 2875 mtext = _manifestrevision(repo, ctx.manifestnode())
2827 2876 for pctx in ctx.parents():
2828 2877 pman = _manifestrevision(repo, pctx.manifestnode())
2829 2878 textpairs.append((pman, mtext))
2830 2879
2831 2880 # Load filelog revisions by iterating manifest delta.
2832 2881 man = ctx.manifest()
2833 2882 pman = ctx.p1().manifest()
2834 2883 for filename, change in pman.diff(man).items():
2835 2884 fctx = repo.file(filename)
2836 2885 f1 = fctx.revision(change[0][0] or -1)
2837 2886 f2 = fctx.revision(change[1][0] or -1)
2838 2887 textpairs.append((f1, f2))
2839 2888 else:
2840 2889 dp = r.deltaparent(rev)
2841 2890 textpairs.append((r.revision(dp), r.revision(rev)))
2842 2891
2843 2892 withthreads = threads > 0
2844 2893 if not withthreads:
2845 2894
2846 2895 def d():
2847 2896 for pair in textpairs:
2848 2897 if xdiff:
2849 2898 mdiff.bdiff.xdiffblocks(*pair)
2850 2899 elif blocks:
2851 2900 mdiff.bdiff.blocks(*pair)
2852 2901 else:
2853 2902 mdiff.textdiff(*pair)
2854 2903
2855 2904 else:
2856 2905 q = queue()
2857 2906 for i in _xrange(threads):
2858 2907 q.put(None)
2859 2908 ready = threading.Condition()
2860 2909 done = threading.Event()
2861 2910 for i in _xrange(threads):
2862 2911 threading.Thread(
2863 2912 target=_bdiffworker, args=(q, blocks, xdiff, ready, done)
2864 2913 ).start()
2865 2914 q.join()
2866 2915
2867 2916 def d():
2868 2917 for pair in textpairs:
2869 2918 q.put(pair)
2870 2919 for i in _xrange(threads):
2871 2920 q.put(None)
2872 2921 with ready:
2873 2922 ready.notify_all()
2874 2923 q.join()
2875 2924
2876 2925 timer, fm = gettimer(ui, opts)
2877 2926 timer(d)
2878 2927 fm.end()
2879 2928
2880 2929 if withthreads:
2881 2930 done.set()
2882 2931 for i in _xrange(threads):
2883 2932 q.put(None)
2884 2933 with ready:
2885 2934 ready.notify_all()
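# example invocation (illustrative): diff 50 manifest revisions with the
# xdiff algorithm, spreading the work over four threads:
#
#   $ hg perf::bdiff -m 0 --count 50 --blocks --xdiff --threads 4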
2886 2935
2887 2936
2888 2937 @command(
2889 2938 b'perf::unbundle',
2890 2939 formatteropts,
2891 2940 b'BUNDLE_FILE',
2892 2941 )
2893 2942 def perf_unbundle(ui, repo, fname, **opts):
2894 2943 """benchmark application of a bundle in a repository.
2895 2944
2896 2945 This does not include the final transaction processing"""
2897 2946
2898 2947 from mercurial import exchange
2899 2948 from mercurial import bundle2
2900 2949 from mercurial import transaction
2901 2950
2902 2951 opts = _byteskwargs(opts)
2903 2952
2904 2953 ### some compatibility hotfix
2905 2954 #
2906 2955     # the data attribute is dropped in 63edc384d3b7, a changeset introducing a
2907 2956     # critical regression that breaks transaction rollback for files that are
2908 2957 # de-inlined.
2909 2958 method = transaction.transaction._addentry
2910 2959 pre_63edc384d3b7 = "data" in getargspec(method).args
2911 2960 # the `detailed_exit_code` attribute is introduced in 33c0c25d0b0f
2912 2961     # the `detailed_exit_code` attribute is introduced in 33c0c25d0b0f,
2913 2962     # a changeset that is a close descendant of 18415fc918a1, the changeset
2914 2963     # that concludes the fix run for the bug introduced in 63edc384d3b7.
2915 2964 post_18415fc918a1 = "detailed_exit_code" in args
2916 2965
2917 2966 old_max_inline = None
2918 2967 try:
2919 2968 if not (pre_63edc384d3b7 or post_18415fc918a1):
2920 2969 # disable inlining
2921 2970 old_max_inline = mercurial.revlog._maxinline
2922 2971 # large enough to never happen
2923 2972 mercurial.revlog._maxinline = 2 ** 50
2924 2973
2925 2974 with repo.lock():
2926 2975 bundle = [None, None]
2927 2976 orig_quiet = repo.ui.quiet
2928 2977 try:
2929 2978 repo.ui.quiet = True
2930 2979 with open(fname, mode="rb") as f:
2931 2980
2932 2981 def noop_report(*args, **kwargs):
2933 2982 pass
2934 2983
2935 2984 def setup():
2936 2985 gen, tr = bundle
2937 2986 if tr is not None:
2938 2987 tr.abort()
2939 2988 bundle[:] = [None, None]
2940 2989 f.seek(0)
2941 2990 bundle[0] = exchange.readbundle(ui, f, fname)
2942 2991 bundle[1] = repo.transaction(b'perf::unbundle')
2943 2992 # silence the transaction
2944 2993 bundle[1]._report = noop_report
2945 2994
2946 2995 def apply():
2947 2996 gen, tr = bundle
2948 2997 bundle2.applybundle(
2949 2998 repo,
2950 2999 gen,
2951 3000 tr,
2952 3001 source=b'perf::unbundle',
2953 3002 url=fname,
2954 3003 )
2955 3004
2956 3005 timer, fm = gettimer(ui, opts)
2957 3006 timer(apply, setup=setup)
2958 3007 fm.end()
2959 3008 finally:
2960 3009             repo.ui.quiet = orig_quiet
2961 3010 gen, tr = bundle
2962 3011 if tr is not None:
2963 3012 tr.abort()
2964 3013 finally:
2965 3014 if old_max_inline is not None:
2966 3015 mercurial.revlog._maxinline = old_max_inline
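# example workflow (illustrative): create a bundle of the whole repository,
# then benchmark its application (the transaction is aborted between runs):
#
#   $ hg bundle --all all.hg
#   $ hg perf::unbundle all.hg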
2967 3016
2968 3017
2969 3018 @command(
2970 3019 b'perf::unidiff|perfunidiff',
2971 3020 revlogopts
2972 3021 + formatteropts
2973 3022 + [
2974 3023 (
2975 3024 b'',
2976 3025 b'count',
2977 3026 1,
2978 3027 b'number of revisions to test (when using --startrev)',
2979 3028 ),
2980 3029 (b'', b'alldata', False, b'test unidiffs for all associated revisions'),
2981 3030 ],
2982 3031 b'-c|-m|FILE REV',
2983 3032 )
2984 3033 def perfunidiff(ui, repo, file_, rev=None, count=None, **opts):
2985 3034 """benchmark a unified diff between revisions
2986 3035
2987 3036 This doesn't include any copy tracing - it's just a unified diff
2988 3037 of the texts.
2989 3038
2990 3039 By default, benchmark a diff between its delta parent and itself.
2991 3040
2992 3041 With ``--count``, benchmark diffs between delta parents and self for N
2993 3042 revisions starting at the specified revision.
2994 3043
2995 3044 With ``--alldata``, assume the requested revision is a changeset and
2996 3045 measure diffs for all changes related to that changeset (manifest
2997 3046 and filelogs).
2998 3047 """
2999 3048 opts = _byteskwargs(opts)
3000 3049 if opts[b'alldata']:
3001 3050 opts[b'changelog'] = True
3002 3051
3003 3052 if opts.get(b'changelog') or opts.get(b'manifest'):
3004 3053 file_, rev = None, file_
3005 3054 elif rev is None:
3006 3055 raise error.CommandError(b'perfunidiff', b'invalid arguments')
3007 3056
3008 3057 textpairs = []
3009 3058
3010 3059 r = cmdutil.openrevlog(repo, b'perfunidiff', file_, opts)
3011 3060
3012 3061 startrev = r.rev(r.lookup(rev))
3013 3062 for rev in range(startrev, min(startrev + count, len(r) - 1)):
3014 3063 if opts[b'alldata']:
3015 3064 # Load revisions associated with changeset.
3016 3065 ctx = repo[rev]
3017 3066 mtext = _manifestrevision(repo, ctx.manifestnode())
3018 3067 for pctx in ctx.parents():
3019 3068 pman = _manifestrevision(repo, pctx.manifestnode())
3020 3069 textpairs.append((pman, mtext))
3021 3070
3022 3071 # Load filelog revisions by iterating manifest delta.
3023 3072 man = ctx.manifest()
3024 3073 pman = ctx.p1().manifest()
3025 3074 for filename, change in pman.diff(man).items():
3026 3075 fctx = repo.file(filename)
3027 3076 f1 = fctx.revision(change[0][0] or -1)
3028 3077 f2 = fctx.revision(change[1][0] or -1)
3029 3078 textpairs.append((f1, f2))
3030 3079 else:
3031 3080 dp = r.deltaparent(rev)
3032 3081 textpairs.append((r.revision(dp), r.revision(rev)))
3033 3082
3034 3083 def d():
3035 3084 for left, right in textpairs:
3036 3085 # The date strings don't matter, so we pass empty strings.
3037 3086 headerlines, hunks = mdiff.unidiff(
3038 3087 left, b'', right, b'', b'left', b'right', binary=False
3039 3088 )
3040 3089 # consume iterators in roughly the way patch.py does
3041 3090 b'\n'.join(headerlines)
3042 3091 b''.join(sum((list(hlines) for hrange, hlines in hunks), []))
3043 3092
3044 3093 timer, fm = gettimer(ui, opts)
3045 3094 timer(d)
3046 3095 fm.end()
3047 3096
3048 3097
3049 3098 @command(b'perf::diffwd|perfdiffwd', formatteropts)
3050 3099 def perfdiffwd(ui, repo, **opts):
3051 3100 """Profile diff of working directory changes"""
3052 3101 opts = _byteskwargs(opts)
3053 3102 timer, fm = gettimer(ui, opts)
3054 3103 options = {
3055 3104 'w': 'ignore_all_space',
3056 3105 'b': 'ignore_space_change',
3057 3106 'B': 'ignore_blank_lines',
3058 3107 }
3059 3108
3060 3109 for diffopt in ('', 'w', 'b', 'B', 'wB'):
3061 3110 opts = {options[c]: b'1' for c in diffopt}
3062 3111
3063 3112 def d():
3064 3113 ui.pushbuffer()
3065 3114 commands.diff(ui, repo, **opts)
3066 3115 ui.popbuffer()
3067 3116
3068 3117 diffopt = diffopt.encode('ascii')
3069 3118 title = b'diffopts: %s' % (diffopt and (b'-' + diffopt) or b'none')
3070 3119 timer(d, title=title)
3071 3120 fm.end()
3072 3121
3073 3122
3074 3123 @command(
3075 3124 b'perf::revlogindex|perfrevlogindex',
3076 3125 revlogopts + formatteropts,
3077 3126 b'-c|-m|FILE',
3078 3127 )
3079 3128 def perfrevlogindex(ui, repo, file_=None, **opts):
3080 3129 """Benchmark operations against a revlog index.
3081 3130
3082 3131 This tests constructing a revlog instance, reading index data,
3083 3132 parsing index data, and performing various operations related to
3084 3133 index data.
3085 3134 """
3086 3135
3087 3136 opts = _byteskwargs(opts)
3088 3137
3089 3138 rl = cmdutil.openrevlog(repo, b'perfrevlogindex', file_, opts)
3090 3139
3091 3140 opener = getattr(rl, 'opener') # trick linter
3092 3141 # compat with hg <= 5.8
3093 3142 radix = getattr(rl, 'radix', None)
3094 3143 indexfile = getattr(rl, '_indexfile', None)
3095 3144 if indexfile is None:
3096 3145 # compatibility with <= hg-5.8
3097 3146 indexfile = getattr(rl, 'indexfile')
3098 3147 data = opener.read(indexfile)
3099 3148
3100 3149 header = struct.unpack(b'>I', data[0:4])[0]
3101 3150 version = header & 0xFFFF
3102 3151 if version == 1:
3103 3152 inline = header & (1 << 16)
3104 3153 else:
3105 3154 raise error.Abort(b'unsupported revlog version: %d' % version)
3106 3155
3107 3156 parse_index_v1 = getattr(mercurial.revlog, 'parse_index_v1', None)
3108 3157 if parse_index_v1 is None:
3109 3158 parse_index_v1 = mercurial.revlog.revlogio().parseindex
3110 3159
3111 3160 rllen = len(rl)
3112 3161
3113 3162 node0 = rl.node(0)
3114 3163 node25 = rl.node(rllen // 4)
3115 3164 node50 = rl.node(rllen // 2)
3116 3165 node75 = rl.node(rllen // 4 * 3)
3117 3166 node100 = rl.node(rllen - 1)
3118 3167
3119 3168 allrevs = range(rllen)
3120 3169 allrevsrev = list(reversed(allrevs))
3121 3170 allnodes = [rl.node(rev) for rev in range(rllen)]
3122 3171 allnodesrev = list(reversed(allnodes))
3123 3172
3124 3173 def constructor():
3125 3174 if radix is not None:
3126 3175 revlog(opener, radix=radix)
3127 3176 else:
3128 3177 # hg <= 5.8
3129 3178 revlog(opener, indexfile=indexfile)
3130 3179
3131 3180 def read():
3132 3181 with opener(indexfile) as fh:
3133 3182 fh.read()
3134 3183
3135 3184 def parseindex():
3136 3185 parse_index_v1(data, inline)
3137 3186
3138 3187 def getentry(revornode):
3139 3188 index = parse_index_v1(data, inline)[0]
3140 3189 index[revornode]
3141 3190
3142 3191 def getentries(revs, count=1):
3143 3192 index = parse_index_v1(data, inline)[0]
3144 3193
3145 3194 for i in range(count):
3146 3195 for rev in revs:
3147 3196 index[rev]
3148 3197
3149 3198 def resolvenode(node):
3150 3199 index = parse_index_v1(data, inline)[0]
3151 3200 rev = getattr(index, 'rev', None)
3152 3201 if rev is None:
3153 3202 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
3154 3203 # This only works for the C code.
3155 3204 if nodemap is None:
3156 3205 return
3157 3206 rev = nodemap.__getitem__
3158 3207
3159 3208 try:
3160 3209 rev(node)
3161 3210 except error.RevlogError:
3162 3211 pass
3163 3212
3164 3213 def resolvenodes(nodes, count=1):
3165 3214 index = parse_index_v1(data, inline)[0]
3166 3215 rev = getattr(index, 'rev', None)
3167 3216 if rev is None:
3168 3217 nodemap = getattr(parse_index_v1(data, inline)[0], 'nodemap', None)
3169 3218 # This only works for the C code.
3170 3219 if nodemap is None:
3171 3220 return
3172 3221 rev = nodemap.__getitem__
3173 3222
3174 3223 for i in range(count):
3175 3224 for node in nodes:
3176 3225 try:
3177 3226 rev(node)
3178 3227 except error.RevlogError:
3179 3228 pass
3180 3229
3181 3230 benches = [
3182 3231 (constructor, b'revlog constructor'),
3183 3232 (read, b'read'),
3184 3233 (parseindex, b'create index object'),
3185 3234 (lambda: getentry(0), b'retrieve index entry for rev 0'),
3186 3235 (lambda: resolvenode(b'a' * 20), b'look up missing node'),
3187 3236 (lambda: resolvenode(node0), b'look up node at rev 0'),
3188 3237 (lambda: resolvenode(node25), b'look up node at 1/4 len'),
3189 3238 (lambda: resolvenode(node50), b'look up node at 1/2 len'),
3190 3239 (lambda: resolvenode(node75), b'look up node at 3/4 len'),
3191 3240 (lambda: resolvenode(node100), b'look up node at tip'),
3192 3241 # 2x variation is to measure caching impact.
3193 3242 (lambda: resolvenodes(allnodes), b'look up all nodes (forward)'),
3194 3243 (lambda: resolvenodes(allnodes, 2), b'look up all nodes 2x (forward)'),
3195 3244 (lambda: resolvenodes(allnodesrev), b'look up all nodes (reverse)'),
3196 3245 (
3197 3246 lambda: resolvenodes(allnodesrev, 2),
3198 3247 b'look up all nodes 2x (reverse)',
3199 3248 ),
3200 3249 (lambda: getentries(allrevs), b'retrieve all index entries (forward)'),
3201 3250 (
3202 3251 lambda: getentries(allrevs, 2),
3203 3252 b'retrieve all index entries 2x (forward)',
3204 3253 ),
3205 3254 (
3206 3255 lambda: getentries(allrevsrev),
3207 3256 b'retrieve all index entries (reverse)',
3208 3257 ),
3209 3258 (
3210 3259 lambda: getentries(allrevsrev, 2),
3211 3260 b'retrieve all index entries 2x (reverse)',
3212 3261 ),
3213 3262 ]
3214 3263
3215 3264 for fn, title in benches:
3216 3265 timer, fm = gettimer(ui, opts)
3217 3266 timer(fn, title=title)
3218 3267 fm.end()
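# example invocations (illustrative): run the index micro-benchmarks against
# the changelog or against a single filelog:
#
#   $ hg perf::revlogindex -c
#   $ hg perf::revlogindex path/to/file.txt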
3219 3268
3220 3269
3221 3270 @command(
3222 3271 b'perf::revlogrevisions|perfrevlogrevisions',
3223 3272 revlogopts
3224 3273 + formatteropts
3225 3274 + [
3226 3275 (b'd', b'dist', 100, b'distance between the revisions'),
3227 3276 (b's', b'startrev', 0, b'revision to start reading at'),
3228 3277 (b'', b'reverse', False, b'read in reverse'),
3229 3278 ],
3230 3279 b'-c|-m|FILE',
3231 3280 )
3232 3281 def perfrevlogrevisions(
3233 3282 ui, repo, file_=None, startrev=0, reverse=False, **opts
3234 3283 ):
3235 3284 """Benchmark reading a series of revisions from a revlog.
3236 3285
3237 3286 By default, we read every ``-d/--dist`` revision from 0 to tip of
3238 3287 the specified revlog.
3239 3288
3240 3289 The start revision can be defined via ``-s/--startrev``.
3241 3290 """
3242 3291 opts = _byteskwargs(opts)
3243 3292
3244 3293 rl = cmdutil.openrevlog(repo, b'perfrevlogrevisions', file_, opts)
3245 3294 rllen = getlen(ui)(rl)
3246 3295
3247 3296 if startrev < 0:
3248 3297 startrev = rllen + startrev
3249 3298
3250 3299 def d():
3251 3300 rl.clearcaches()
3252 3301
3253 3302 beginrev = startrev
3254 3303 endrev = rllen
3255 3304 dist = opts[b'dist']
3256 3305
3257 3306 if reverse:
3258 3307 beginrev, endrev = endrev - 1, beginrev - 1
3259 3308 dist = -1 * dist
3260 3309
3261 3310 for x in _xrange(beginrev, endrev, dist):
3262 3311 # Old revisions don't support passing int.
3263 3312 n = rl.node(x)
3264 3313 rl.revision(n)
3265 3314
3266 3315 timer, fm = gettimer(ui, opts)
3267 3316 timer(d)
3268 3317 fm.end()
3269 3318
3270 3319
3271 3320 @command(
3272 3321 b'perf::revlogwrite|perfrevlogwrite',
3273 3322 revlogopts
3274 3323 + formatteropts
3275 3324 + [
3276 3325 (b's', b'startrev', 1000, b'revision to start writing at'),
3277 3326 (b'', b'stoprev', -1, b'last revision to write'),
3278 3327 (b'', b'count', 3, b'number of passes to perform'),
3279 3328 (b'', b'details', False, b'print timing for every revisions tested'),
3280 3329 (b'', b'source', b'full', b'the kind of data feed in the revlog'),
3281 3330 (b'', b'lazydeltabase', True, b'try the provided delta first'),
3282 3331 (b'', b'clear-caches', True, b'clear revlog cache between calls'),
3283 3332 ],
3284 3333 b'-c|-m|FILE',
3285 3334 )
3286 3335 def perfrevlogwrite(ui, repo, file_=None, startrev=1000, stoprev=-1, **opts):
3287 3336 """Benchmark writing a series of revisions to a revlog.
3288 3337
3289 3338 Possible source values are:
3290 3339 * `full`: add from a full text (default).
3291 3340 * `parent-1`: add from a delta to the first parent
3292 3341 * `parent-2`: add from a delta to the second parent if it exists
3293 3342 (use a delta from the first parent otherwise)
3294 3343 * `parent-smallest`: add from the smallest delta (either p1 or p2)
3295 3344 * `storage`: add from the existing precomputed deltas
3296 3345
3297 3346 Note: This performance command measures performance in a custom way. As a
3298 3347 result some of the global configuration of the 'perf' command does not
3299 3348 apply to it:
3300 3349
3301 3350 * ``pre-run``: disabled
3302 3351
3303 3352 * ``profile-benchmark``: disabled
3304 3353
3305 3354     * ``run-limits``: disabled, use --count instead
3306 3355 """
3307 3356 opts = _byteskwargs(opts)
3308 3357
3309 3358 rl = cmdutil.openrevlog(repo, b'perfrevlogwrite', file_, opts)
3310 3359 rllen = getlen(ui)(rl)
3311 3360 if startrev < 0:
3312 3361 startrev = rllen + startrev
3313 3362 if stoprev < 0:
3314 3363 stoprev = rllen + stoprev
3315 3364
3316 3365 lazydeltabase = opts['lazydeltabase']
3317 3366 source = opts['source']
3318 3367 clearcaches = opts['clear_caches']
3319 3368 validsource = (
3320 3369 b'full',
3321 3370 b'parent-1',
3322 3371 b'parent-2',
3323 3372 b'parent-smallest',
3324 3373 b'storage',
3325 3374 )
3326 3375 if source not in validsource:
3327 3376 raise error.Abort('invalid source type: %s' % source)
3328 3377
3329 3378 ### actually gather results
3330 3379 count = opts['count']
3331 3380 if count <= 0:
3332 3381         raise error.Abort('invalid run count: %d' % count)
3333 3382 allresults = []
3334 3383 for c in range(count):
3335 3384 timing = _timeonewrite(
3336 3385 ui,
3337 3386 rl,
3338 3387 source,
3339 3388 startrev,
3340 3389 stoprev,
3341 3390 c + 1,
3342 3391 lazydeltabase=lazydeltabase,
3343 3392 clearcaches=clearcaches,
3344 3393 )
3345 3394 allresults.append(timing)
3346 3395
3347 3396 ### consolidate the results in a single list
3348 3397 results = []
3349 3398 for idx, (rev, t) in enumerate(allresults[0]):
3350 3399 ts = [t]
3351 3400 for other in allresults[1:]:
3352 3401 orev, ot = other[idx]
3353 3402 assert orev == rev
3354 3403 ts.append(ot)
3355 3404 results.append((rev, ts))
3356 3405 resultcount = len(results)
3357 3406
3358 3407 ### Compute and display relevant statistics
3359 3408
3360 3409 # get a formatter
3361 3410 fm = ui.formatter(b'perf', opts)
3362 3411 displayall = ui.configbool(b"perf", b"all-timing", False)
3363 3412
3364 3413 # print individual details if requested
3365 3414 if opts['details']:
3366 3415 for idx, item in enumerate(results, 1):
3367 3416 rev, data = item
3368 3417 title = 'revisions #%d of %d, rev %d' % (idx, resultcount, rev)
3369 3418 formatone(fm, data, title=title, displayall=displayall)
3370 3419
3371 3420 # sorts results by median time
3372 3421 results.sort(key=lambda x: sorted(x[1])[len(x[1]) // 2])
3373 3422     # list of (name, index) to display
3374 3423 relevants = [
3375 3424 ("min", 0),
3376 3425 ("10%", resultcount * 10 // 100),
3377 3426 ("25%", resultcount * 25 // 100),
3378 3427         ("50%", resultcount * 50 // 100),
3379 3428 ("75%", resultcount * 75 // 100),
3380 3429 ("90%", resultcount * 90 // 100),
3381 3430 ("95%", resultcount * 95 // 100),
3382 3431 ("99%", resultcount * 99 // 100),
3383 3432 ("99.9%", resultcount * 999 // 1000),
3384 3433 ("99.99%", resultcount * 9999 // 10000),
3385 3434 ("99.999%", resultcount * 99999 // 100000),
3386 3435 ("max", -1),
3387 3436 ]
3388 3437 if not ui.quiet:
3389 3438 for name, idx in relevants:
3390 3439 data = results[idx]
3391 3440 title = '%s of %d, rev %d' % (name, resultcount, data[0])
3392 3441 formatone(fm, data[1], title=title, displayall=displayall)
3393 3442
3394 3443     # XXX summing that many floats will not be very precise, we ignore this fact
3395 3444 # for now
3396 3445 totaltime = []
3397 3446 for item in allresults:
3398 3447 totaltime.append(
3399 3448 (
3400 3449 sum(x[1][0] for x in item),
3401 3450 sum(x[1][1] for x in item),
3402 3451 sum(x[1][2] for x in item),
3403 3452 )
3404 3453 )
3405 3454 formatone(
3406 3455 fm,
3407 3456 totaltime,
3408 3457 title="total time (%d revs)" % resultcount,
3409 3458 displayall=displayall,
3410 3459 )
3411 3460 fm.end()
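# example invocation (illustrative): re-add manifest revisions from rev 1000
# onward, re-using the deltas already stored, over three passes:
#
#   $ hg perf::revlogwrite -m --startrev 1000 --source storage --count 3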
3412 3461
3413 3462
3414 3463 class _faketr:
3415 3464 def add(s, x, y, z=None):
3416 3465 return None
3417 3466
3418 3467
3419 3468 def _timeonewrite(
3420 3469 ui,
3421 3470 orig,
3422 3471 source,
3423 3472 startrev,
3424 3473 stoprev,
3425 3474 runidx=None,
3426 3475 lazydeltabase=True,
3427 3476 clearcaches=True,
3428 3477 ):
3429 3478 timings = []
3430 3479 tr = _faketr()
3431 3480 with _temprevlog(ui, orig, startrev) as dest:
3432 3481 dest._lazydeltabase = lazydeltabase
3433 3482 revs = list(orig.revs(startrev, stoprev))
3434 3483 total = len(revs)
3435 3484 topic = 'adding'
3436 3485 if runidx is not None:
3437 3486 topic += ' (run #%d)' % runidx
3438 3487 # Support both old and new progress API
3439 3488 if util.safehasattr(ui, 'makeprogress'):
3440 3489 progress = ui.makeprogress(topic, unit='revs', total=total)
3441 3490
3442 3491 def updateprogress(pos):
3443 3492 progress.update(pos)
3444 3493
3445 3494 def completeprogress():
3446 3495 progress.complete()
3447 3496
3448 3497 else:
3449 3498
3450 3499 def updateprogress(pos):
3451 3500 ui.progress(topic, pos, unit='revs', total=total)
3452 3501
3453 3502 def completeprogress():
3454 3503 ui.progress(topic, None, unit='revs', total=total)
3455 3504
3456 3505 for idx, rev in enumerate(revs):
3457 3506 updateprogress(idx)
3458 3507 addargs, addkwargs = _getrevisionseed(orig, rev, tr, source)
3459 3508 if clearcaches:
3460 3509 dest.index.clearcaches()
3461 3510 dest.clearcaches()
3462 3511 with timeone() as r:
3463 3512 dest.addrawrevision(*addargs, **addkwargs)
3464 3513 timings.append((rev, r[0]))
3465 3514 updateprogress(total)
3466 3515 completeprogress()
3467 3516 return timings
3468 3517
3469 3518
3470 3519 def _getrevisionseed(orig, rev, tr, source):
3471 3520 from mercurial.node import nullid
3472 3521
3473 3522 linkrev = orig.linkrev(rev)
3474 3523 node = orig.node(rev)
3475 3524 p1, p2 = orig.parents(node)
3476 3525 flags = orig.flags(rev)
3477 3526 cachedelta = None
3478 3527 text = None
3479 3528
3480 3529 if source == b'full':
3481 3530 text = orig.revision(rev)
3482 3531 elif source == b'parent-1':
3483 3532 baserev = orig.rev(p1)
3484 3533 cachedelta = (baserev, orig.revdiff(p1, rev))
3485 3534 elif source == b'parent-2':
3486 3535 parent = p2
3487 3536 if p2 == nullid:
3488 3537 parent = p1
3489 3538 baserev = orig.rev(parent)
3490 3539 cachedelta = (baserev, orig.revdiff(parent, rev))
3491 3540 elif source == b'parent-smallest':
3492 3541 p1diff = orig.revdiff(p1, rev)
3493 3542 parent = p1
3494 3543 diff = p1diff
3495 3544 if p2 != nullid:
3496 3545 p2diff = orig.revdiff(p2, rev)
3497 3546 if len(p1diff) > len(p2diff):
3498 3547 parent = p2
3499 3548 diff = p2diff
3500 3549 baserev = orig.rev(parent)
3501 3550 cachedelta = (baserev, diff)
3502 3551 elif source == b'storage':
3503 3552 baserev = orig.deltaparent(rev)
3504 3553 cachedelta = (baserev, orig.revdiff(orig.node(baserev), rev))
3505 3554
3506 3555 return (
3507 3556 (text, tr, linkrev, p1, p2),
3508 3557 {'node': node, 'flags': flags, 'cachedelta': cachedelta},
3509 3558 )
3510 3559
3511 3560
3512 3561 @contextlib.contextmanager
3513 3562 def _temprevlog(ui, orig, truncaterev):
3514 3563 from mercurial import vfs as vfsmod
3515 3564
3516 3565 if orig._inline:
3517 3566 raise error.Abort('not supporting inline revlog (yet)')
3518 3567 revlogkwargs = {}
3519 3568 k = 'upperboundcomp'
3520 3569 if util.safehasattr(orig, k):
3521 3570 revlogkwargs[k] = getattr(orig, k)
3522 3571
3523 3572 indexfile = getattr(orig, '_indexfile', None)
3524 3573 if indexfile is None:
3525 3574 # compatibility with <= hg-5.8
3526 3575 indexfile = getattr(orig, 'indexfile')
3527 3576 origindexpath = orig.opener.join(indexfile)
3528 3577
3529 3578 datafile = getattr(orig, '_datafile', getattr(orig, 'datafile'))
3530 3579 origdatapath = orig.opener.join(datafile)
3531 3580 radix = b'revlog'
3532 3581 indexname = b'revlog.i'
3533 3582 dataname = b'revlog.d'
3534 3583
3535 3584 tmpdir = tempfile.mkdtemp(prefix='tmp-hgperf-')
3536 3585 try:
3537 3586 # copy the data file in a temporary directory
3538 3587 ui.debug('copying data in %s\n' % tmpdir)
3539 3588 destindexpath = os.path.join(tmpdir, 'revlog.i')
3540 3589 destdatapath = os.path.join(tmpdir, 'revlog.d')
3541 3590 shutil.copyfile(origindexpath, destindexpath)
3542 3591 shutil.copyfile(origdatapath, destdatapath)
3543 3592
3544 3593 # remove the data we want to add again
3545 3594 ui.debug('truncating data to be rewritten\n')
3546 3595 with open(destindexpath, 'ab') as index:
3547 3596 index.seek(0)
3548 3597 index.truncate(truncaterev * orig._io.size)
3549 3598 with open(destdatapath, 'ab') as data:
3550 3599 data.seek(0)
3551 3600 data.truncate(orig.start(truncaterev))
3552 3601
3553 3602 # instantiate a new revlog from the temporary copy
3554 3603 ui.debug('instantiating a new revlog from the temporary copy\n')
3555 3604 vfs = vfsmod.vfs(tmpdir)
3556 3605 vfs.options = getattr(orig.opener, 'options', None)
3557 3606
3558 3607 try:
3559 3608 dest = revlog(vfs, radix=radix, **revlogkwargs)
3560 3609 except TypeError:
3561 3610 dest = revlog(
3562 3611 vfs, indexfile=indexname, datafile=dataname, **revlogkwargs
3563 3612 )
3564 3613 if dest._inline:
3565 3614 raise error.Abort('not supporting inline revlog (yet)')
3566 3615 # make sure internals are initialized
3567 3616 dest.revision(len(dest) - 1)
3568 3617 yield dest
3569 3618 del dest, vfs
3570 3619 finally:
3571 3620 shutil.rmtree(tmpdir, True)
3572 3621
3573 3622
3574 3623 @command(
3575 3624 b'perf::revlogchunks|perfrevlogchunks',
3576 3625 revlogopts
3577 3626 + formatteropts
3578 3627 + [
3579 3628 (b'e', b'engines', b'', b'compression engines to use'),
3580 3629 (b's', b'startrev', 0, b'revision to start at'),
3581 3630 ],
3582 3631 b'-c|-m|FILE',
3583 3632 )
3584 3633 def perfrevlogchunks(ui, repo, file_=None, engines=None, startrev=0, **opts):
3585 3634 """Benchmark operations on revlog chunks.
3586 3635
3587 3636 Logically, each revlog is a collection of fulltext revisions. However,
3588 3637 stored within each revlog are "chunks" of possibly compressed data. This
3589 3638 data needs to be read and decompressed or compressed and written.
3590 3639
3591 3640 This command measures the time it takes to read+decompress and recompress
3592 3641 chunks in a revlog. It effectively isolates I/O and compression performance.
3593 3642 For measurements of higher-level operations like resolving revisions,
3594 3643 see ``perfrevlogrevisions`` and ``perfrevlogrevision``.
3595 3644 """
3596 3645 opts = _byteskwargs(opts)
3597 3646
3598 3647 rl = cmdutil.openrevlog(repo, b'perfrevlogchunks', file_, opts)
3599 3648
3600 3649 # _chunkraw was renamed to _getsegmentforrevs.
3601 3650 try:
3602 3651 segmentforrevs = rl._getsegmentforrevs
3603 3652 except AttributeError:
3604 3653 segmentforrevs = rl._chunkraw
3605 3654
3606 3655 # Verify engines argument.
3607 3656 if engines:
3608 3657 engines = {e.strip() for e in engines.split(b',')}
3609 3658 for engine in engines:
3610 3659 try:
3611 3660 util.compressionengines[engine]
3612 3661 except KeyError:
3613 3662 raise error.Abort(b'unknown compression engine: %s' % engine)
3614 3663 else:
3615 3664 engines = []
3616 3665 for e in util.compengines:
3617 3666 engine = util.compengines[e]
3618 3667 try:
3619 3668 if engine.available():
3620 3669 engine.revlogcompressor().compress(b'dummy')
3621 3670 engines.append(e)
3622 3671 except NotImplementedError:
3623 3672 pass
3624 3673
3625 3674 revs = list(rl.revs(startrev, len(rl) - 1))
3626 3675
3627 3676 def rlfh(rl):
3628 3677 if rl._inline:
3629 3678 indexfile = getattr(rl, '_indexfile', None)
3630 3679 if indexfile is None:
3631 3680 # compatibility with <= hg-5.8
3632 3681 indexfile = getattr(rl, 'indexfile')
3633 3682 return getsvfs(repo)(indexfile)
3634 3683 else:
3635 3684 datafile = getattr(rl, '_datafile', getattr(rl, 'datafile'))
3636 3685 return getsvfs(repo)(datafile)
3637 3686
3638 3687 def doread():
3639 3688 rl.clearcaches()
3640 3689 for rev in revs:
3641 3690 segmentforrevs(rev, rev)
3642 3691
3643 3692 def doreadcachedfh():
3644 3693 rl.clearcaches()
3645 3694 fh = rlfh(rl)
3646 3695 for rev in revs:
3647 3696 segmentforrevs(rev, rev, df=fh)
3648 3697
3649 3698 def doreadbatch():
3650 3699 rl.clearcaches()
3651 3700 segmentforrevs(revs[0], revs[-1])
3652 3701
3653 3702 def doreadbatchcachedfh():
3654 3703 rl.clearcaches()
3655 3704 fh = rlfh(rl)
3656 3705 segmentforrevs(revs[0], revs[-1], df=fh)
3657 3706
3658 3707 def dochunk():
3659 3708 rl.clearcaches()
3660 3709 fh = rlfh(rl)
3661 3710 for rev in revs:
3662 3711 rl._chunk(rev, df=fh)
3663 3712
3664 3713 chunks = [None]
3665 3714
3666 3715 def dochunkbatch():
3667 3716 rl.clearcaches()
3668 3717 fh = rlfh(rl)
3669 3718 # Save chunks as a side-effect.
3670 3719 chunks[0] = rl._chunks(revs, df=fh)
3671 3720
3672 3721 def docompress(compressor):
3673 3722 rl.clearcaches()
3674 3723
3675 3724 try:
3676 3725 # Swap in the requested compression engine.
3677 3726 oldcompressor = rl._compressor
3678 3727 rl._compressor = compressor
3679 3728 for chunk in chunks[0]:
3680 3729 rl.compress(chunk)
3681 3730 finally:
3682 3731 rl._compressor = oldcompressor
3683 3732
3684 3733 benches = [
3685 3734 (lambda: doread(), b'read'),
3686 3735 (lambda: doreadcachedfh(), b'read w/ reused fd'),
3687 3736 (lambda: doreadbatch(), b'read batch'),
3688 3737 (lambda: doreadbatchcachedfh(), b'read batch w/ reused fd'),
3689 3738 (lambda: dochunk(), b'chunk'),
3690 3739 (lambda: dochunkbatch(), b'chunk batch'),
3691 3740 ]
3692 3741
3693 3742 for engine in sorted(engines):
3694 3743 compressor = util.compengines[engine].revlogcompressor()
3695 3744 benches.append(
3696 3745 (
3697 3746 functools.partial(docompress, compressor),
3698 3747 b'compress w/ %s' % engine,
3699 3748 )
3700 3749 )
3701 3750
3702 3751 for fn, title in benches:
3703 3752 timer, fm = gettimer(ui, opts)
3704 3753 timer(fn, title=title)
3705 3754 fm.end()
3706 3755
3707 3756
3708 3757 @command(
3709 3758 b'perf::revlogrevision|perfrevlogrevision',
3710 3759 revlogopts
3711 3760 + formatteropts
3712 3761 + [(b'', b'cache', False, b'use caches instead of clearing')],
3713 3762 b'-c|-m|FILE REV',
3714 3763 )
3715 3764 def perfrevlogrevision(ui, repo, file_, rev=None, cache=None, **opts):
3716 3765 """Benchmark obtaining a revlog revision.
3717 3766
3718 3767 Obtaining a revlog revision consists of roughly the following steps:
3719 3768
3720 3769 1. Compute the delta chain
3721 3770 2. Slice the delta chain if applicable
3722 3771 3. Obtain the raw chunks for that delta chain
3723 3772 4. Decompress each raw chunk
3724 3773 5. Apply binary patches to obtain fulltext
3725 3774 6. Verify hash of fulltext
3726 3775
3727 3776 This command measures the time spent in each of these phases.
3728 3777 """
3729 3778 opts = _byteskwargs(opts)
3730 3779
3731 3780 if opts.get(b'changelog') or opts.get(b'manifest'):
3732 3781 file_, rev = None, file_
3733 3782 elif rev is None:
3734 3783 raise error.CommandError(b'perfrevlogrevision', b'invalid arguments')
3735 3784
3736 3785 r = cmdutil.openrevlog(repo, b'perfrevlogrevision', file_, opts)
3737 3786
3738 3787 # _chunkraw was renamed to _getsegmentforrevs.
3739 3788 try:
3740 3789 segmentforrevs = r._getsegmentforrevs
3741 3790 except AttributeError:
3742 3791 segmentforrevs = r._chunkraw
3743 3792
3744 3793 node = r.lookup(rev)
3745 3794 rev = r.rev(node)
3746 3795
3747 3796 def getrawchunks(data, chain):
3748 3797 start = r.start
3749 3798 length = r.length
3750 3799 inline = r._inline
3751 3800 try:
3752 3801 iosize = r.index.entry_size
3753 3802 except AttributeError:
3754 3803 iosize = r._io.size
3755 3804 buffer = util.buffer
3756 3805
3757 3806 chunks = []
3758 3807 ladd = chunks.append
3759 3808 for idx, item in enumerate(chain):
3760 3809 offset = start(item[0])
3761 3810 bits = data[idx]
3762 3811 for rev in item:
3763 3812 chunkstart = start(rev)
3764 3813 if inline:
3765 3814 chunkstart += (rev + 1) * iosize
3766 3815 chunklength = length(rev)
3767 3816 ladd(buffer(bits, chunkstart - offset, chunklength))
3768 3817
3769 3818 return chunks
3770 3819
3771 3820 def dodeltachain(rev):
3772 3821 if not cache:
3773 3822 r.clearcaches()
3774 3823 r._deltachain(rev)
3775 3824
3776 3825 def doread(chain):
3777 3826 if not cache:
3778 3827 r.clearcaches()
3779 3828 for item in slicedchain:
3780 3829 segmentforrevs(item[0], item[-1])
3781 3830
3782 3831 def doslice(r, chain, size):
3783 3832 for s in slicechunk(r, chain, targetsize=size):
3784 3833 pass
3785 3834
3786 3835 def dorawchunks(data, chain):
3787 3836 if not cache:
3788 3837 r.clearcaches()
3789 3838 getrawchunks(data, chain)
3790 3839
3791 3840 def dodecompress(chunks):
3792 3841 decomp = r.decompress
3793 3842 for chunk in chunks:
3794 3843 decomp(chunk)
3795 3844
3796 3845 def dopatch(text, bins):
3797 3846 if not cache:
3798 3847 r.clearcaches()
3799 3848 mdiff.patches(text, bins)
3800 3849
3801 3850 def dohash(text):
3802 3851 if not cache:
3803 3852 r.clearcaches()
3804 3853 r.checkhash(text, node, rev=rev)
3805 3854
3806 3855 def dorevision():
3807 3856 if not cache:
3808 3857 r.clearcaches()
3809 3858 r.revision(node)
3810 3859
3811 3860 try:
3812 3861 from mercurial.revlogutils.deltas import slicechunk
3813 3862 except ImportError:
3814 3863 slicechunk = getattr(revlog, '_slicechunk', None)
3815 3864
3816 3865 size = r.length(rev)
3817 3866 chain = r._deltachain(rev)[0]
3818 3867 if not getattr(r, '_withsparseread', False):
3819 3868 slicedchain = (chain,)
3820 3869 else:
3821 3870 slicedchain = tuple(slicechunk(r, chain, targetsize=size))
3822 3871 data = [segmentforrevs(seg[0], seg[-1])[1] for seg in slicedchain]
3823 3872 rawchunks = getrawchunks(data, slicedchain)
3824 3873 bins = r._chunks(chain)
3825 3874 text = bytes(bins[0])
3826 3875 bins = bins[1:]
3827 3876 text = mdiff.patches(text, bins)
3828 3877
3829 3878 benches = [
3830 3879 (lambda: dorevision(), b'full'),
3831 3880 (lambda: dodeltachain(rev), b'deltachain'),
3832 3881 (lambda: doread(chain), b'read'),
3833 3882 ]
3834 3883
3835 3884 if getattr(r, '_withsparseread', False):
3836 3885 slicing = (lambda: doslice(r, chain, size), b'slice-sparse-chain')
3837 3886 benches.append(slicing)
3838 3887
3839 3888 benches.extend(
3840 3889 [
3841 3890 (lambda: dorawchunks(data, slicedchain), b'rawchunks'),
3842 3891 (lambda: dodecompress(rawchunks), b'decompress'),
3843 3892 (lambda: dopatch(text, bins), b'patch'),
3844 3893 (lambda: dohash(text), b'hash'),
3845 3894 ]
3846 3895 )
3847 3896
3848 3897 timer, fm = gettimer(ui, opts)
3849 3898 for fn, title in benches:
3850 3899 timer(fn, title=title)
3851 3900 fm.end()
3852 3901
3853 3902
3854 3903 @command(
3855 3904 b'perf::revset|perfrevset',
3856 3905 [
3857 3906 (b'C', b'clear', False, b'clear volatile cache between each call.'),
3858 3907 (b'', b'contexts', False, b'obtain changectx for each revision'),
3859 3908 ]
3860 3909 + formatteropts,
3861 3910 b"REVSET",
3862 3911 )
3863 3912 def perfrevset(ui, repo, expr, clear=False, contexts=False, **opts):
3864 3913 """benchmark the execution time of a revset
3865 3914
3866 3915 Use the --clear option if you need to evaluate the impact of building the
3867 3916 volatile revision set caches on revset execution. The volatile caches hold
3868 3917 filtered and obsolescence-related data."""
3869 3918 opts = _byteskwargs(opts)
3870 3919
3871 3920 timer, fm = gettimer(ui, opts)
3872 3921
3873 3922 def d():
3874 3923 if clear:
3875 3924 repo.invalidatevolatilesets()
3876 3925 if contexts:
3877 3926 for ctx in repo.set(expr):
3878 3927 pass
3879 3928 else:
3880 3929 for r in repo.revs(expr):
3881 3930 pass
3882 3931
3883 3932 timer(d)
3884 3933 fm.end()
3885 3934
3886 3935
3887 3936 @command(
3888 3937 b'perf::volatilesets|perfvolatilesets',
3889 3938 [
3890 3939 (b'', b'clear-obsstore', False, b'drop obsstore between each call.'),
3891 3940 ]
3892 3941 + formatteropts,
3893 3942 )
3894 3943 def perfvolatilesets(ui, repo, *names, **opts):
3895 3944 """benchmark the computation of various volatile set
3896 3945
3897 3946 Volatile set computes element related to filtering and obsolescence."""
3898 3947 opts = _byteskwargs(opts)
3899 3948 timer, fm = gettimer(ui, opts)
3900 3949 repo = repo.unfiltered()
3901 3950
3902 3951 def getobs(name):
3903 3952 def d():
3904 3953 repo.invalidatevolatilesets()
3905 3954 if opts[b'clear_obsstore']:
3906 3955 clearfilecache(repo, b'obsstore')
3907 3956 obsolete.getrevs(repo, name)
3908 3957
3909 3958 return d
3910 3959
3911 3960 allobs = sorted(obsolete.cachefuncs)
3912 3961 if names:
3913 3962 allobs = [n for n in allobs if n in names]
3914 3963
3915 3964 for name in allobs:
3916 3965 timer(getobs(name), title=name)
3917 3966
3918 3967 def getfiltered(name):
3919 3968 def d():
3920 3969 repo.invalidatevolatilesets()
3921 3970 if opts[b'clear_obsstore']:
3922 3971 clearfilecache(repo, b'obsstore')
3923 3972 repoview.filterrevs(repo, name)
3924 3973
3925 3974 return d
3926 3975
3927 3976 allfilter = sorted(repoview.filtertable)
3928 3977 if names:
3929 3978 allfilter = [n for n in allfilter if n in names]
3930 3979
3931 3980 for name in allfilter:
3932 3981 timer(getfiltered(name), title=name)
3933 3982 fm.end()
3934 3983
3935 3984
3936 3985 @command(
3937 3986 b'perf::branchmap|perfbranchmap',
3938 3987 [
3939 3988 (b'f', b'full', False, b'Includes build time of subset'),
3940 3989 (
3941 3990 b'',
3942 3991 b'clear-revbranch',
3943 3992 False,
3944 3993 b'purge the revbranch cache between computation',
3945 3994 ),
3946 3995 ]
3947 3996 + formatteropts,
3948 3997 )
3949 3998 def perfbranchmap(ui, repo, *filternames, **opts):
3950 3999 """benchmark the update of a branchmap
3951 4000
3952 4001 This benchmarks the full repo.branchmap() call with read and write disabled
3953 4002 """
3954 4003 opts = _byteskwargs(opts)
3955 4004 full = opts.get(b"full", False)
3956 4005 clear_revbranch = opts.get(b"clear_revbranch", False)
3957 4006 timer, fm = gettimer(ui, opts)
3958 4007
3959 4008 def getbranchmap(filtername):
3960 4009 """generate a benchmark function for the filtername"""
3961 4010 if filtername is None:
3962 4011 view = repo
3963 4012 else:
3964 4013 view = repo.filtered(filtername)
3965 4014 if util.safehasattr(view._branchcaches, '_per_filter'):
3966 4015 filtered = view._branchcaches._per_filter
3967 4016 else:
3968 4017 # older versions
3969 4018 filtered = view._branchcaches
3970 4019
3971 4020 def d():
3972 4021 if clear_revbranch:
3973 4022 repo.revbranchcache()._clear()
3974 4023 if full:
3975 4024 view._branchcaches.clear()
3976 4025 else:
3977 4026 filtered.pop(filtername, None)
3978 4027 view.branchmap()
3979 4028
3980 4029 return d
3981 4030
3982 4031 # add filters from smaller subset to bigger subset
3983 4032 possiblefilters = set(repoview.filtertable)
3984 4033 if filternames:
3985 4034 possiblefilters &= set(filternames)
3986 4035 subsettable = getbranchmapsubsettable()
3987 4036 allfilters = []
3988 4037 while possiblefilters:
3989 4038 for name in possiblefilters:
3990 4039 subset = subsettable.get(name)
3991 4040 if subset not in possiblefilters:
3992 4041 break
3993 4042 else:
3994 4043 assert False, b'subset cycle %s!' % possiblefilters
3995 4044 allfilters.append(name)
3996 4045 possiblefilters.remove(name)
3997 4046
3998 4047 # warm the cache
3999 4048 if not full:
4000 4049 for name in allfilters:
4001 4050 repo.filtered(name).branchmap()
4002 4051 if not filternames or b'unfiltered' in filternames:
4003 4052 # add unfiltered
4004 4053 allfilters.append(None)
4005 4054
4006 4055 if util.safehasattr(branchmap.branchcache, 'fromfile'):
4007 4056 branchcacheread = safeattrsetter(branchmap.branchcache, b'fromfile')
4008 4057 branchcacheread.set(classmethod(lambda *args: None))
4009 4058 else:
4010 4059 # older versions
4011 4060 branchcacheread = safeattrsetter(branchmap, b'read')
4012 4061 branchcacheread.set(lambda *args: None)
4013 4062 branchcachewrite = safeattrsetter(branchmap.branchcache, b'write')
4014 4063 branchcachewrite.set(lambda *args: None)
4015 4064 try:
4016 4065 for name in allfilters:
4017 4066 printname = name
4018 4067 if name is None:
4019 4068 printname = b'unfiltered'
4020 4069 timer(getbranchmap(name), title=printname)
4021 4070 finally:
4022 4071 branchcacheread.restore()
4023 4072 branchcachewrite.restore()
4024 4073 fm.end()
4025 4074
4026 4075
4027 4076 @command(
4028 4077 b'perf::branchmapupdate|perfbranchmapupdate',
4029 4078 [
4030 4079 (b'', b'base', [], b'subset of revisions to start from'),
4031 4080 (b'', b'target', [], b'subset of revisions to end with'),
4032 4081 (b'', b'clear-caches', False, b'clear caches between each run'),
4033 4082 ]
4034 4083 + formatteropts,
4035 4084 )
4036 4085 def perfbranchmapupdate(ui, repo, base=(), target=(), **opts):
4037 4086 """benchmark branchmap update from for <base> revs to <target> revs
4038 4087
4039 4088 If `--clear-caches` is passed, the following items will be reset before
4040 4089 each update:
4041 4090 * the changelog instance and associated indexes
4042 4091 * the rev-branch-cache instance
4043 4092
4044 4093 Examples:
4045 4094
4046 4095 # update for the one last revision
4047 4096 $ hg perfbranchmapupdate --base 'not tip' --target 'tip'
4048 4097
4049 4098 # update for changes coming with a new branch
4050 4099 $ hg perfbranchmapupdate --base 'stable' --target 'default'
4051 4100 """
4052 4101 from mercurial import branchmap
4053 4102 from mercurial import repoview
4054 4103
4055 4104 opts = _byteskwargs(opts)
4056 4105 timer, fm = gettimer(ui, opts)
4057 4106 clearcaches = opts[b'clear_caches']
4058 4107 unfi = repo.unfiltered()
4059 4108 x = [None] # used to pass data between closure
4060 4109
4061 4110 # we use a `list` here to avoid possible side effect from smartset
4062 4111 baserevs = list(scmutil.revrange(repo, base))
4063 4112 targetrevs = list(scmutil.revrange(repo, target))
4064 4113 if not baserevs:
4065 4114 raise error.Abort(b'no revisions selected for --base')
4066 4115 if not targetrevs:
4067 4116 raise error.Abort(b'no revisions selected for --target')
4068 4117
4069 4118 # make sure the target branchmap also contains the one in the base
4070 4119 targetrevs = list(set(baserevs) | set(targetrevs))
4071 4120 targetrevs.sort()
4072 4121
4073 4122 cl = repo.changelog
4074 4123 allbaserevs = list(cl.ancestors(baserevs, inclusive=True))
4075 4124 allbaserevs.sort()
4076 4125 alltargetrevs = frozenset(cl.ancestors(targetrevs, inclusive=True))
4077 4126
4078 4127 newrevs = list(alltargetrevs.difference(allbaserevs))
4079 4128 newrevs.sort()
4080 4129
4081 4130 allrevs = frozenset(unfi.changelog.revs())
4082 4131 basefilterrevs = frozenset(allrevs.difference(allbaserevs))
4083 4132 targetfilterrevs = frozenset(allrevs.difference(alltargetrevs))
4084 4133
4085 4134 def basefilter(repo, visibilityexceptions=None):
4086 4135 return basefilterrevs
4087 4136
4088 4137 def targetfilter(repo, visibilityexceptions=None):
4089 4138 return targetfilterrevs
4090 4139
4091 4140 msg = b'benchmark of branchmap with %d revisions with %d new ones\n'
4092 4141 ui.status(msg % (len(allbaserevs), len(newrevs)))
4093 4142 if targetfilterrevs:
4094 4143 msg = b'(%d revisions still filtered)\n'
4095 4144 ui.status(msg % len(targetfilterrevs))
4096 4145
4097 4146 try:
4098 4147 repoview.filtertable[b'__perf_branchmap_update_base'] = basefilter
4099 4148 repoview.filtertable[b'__perf_branchmap_update_target'] = targetfilter
4100 4149
4101 4150 baserepo = repo.filtered(b'__perf_branchmap_update_base')
4102 4151 targetrepo = repo.filtered(b'__perf_branchmap_update_target')
4103 4152
4104 4153 # try to find an existing branchmap to reuse
4105 4154 subsettable = getbranchmapsubsettable()
4106 4155 candidatefilter = subsettable.get(None)
4107 4156 while candidatefilter is not None:
4108 4157 candidatebm = repo.filtered(candidatefilter).branchmap()
4109 4158 if candidatebm.validfor(baserepo):
4110 4159 filtered = repoview.filterrevs(repo, candidatefilter)
4111 4160 missing = [r for r in allbaserevs if r in filtered]
4112 4161 base = candidatebm.copy()
4113 4162 base.update(baserepo, missing)
4114 4163 break
4115 4164 candidatefilter = subsettable.get(candidatefilter)
4116 4165 else:
4117 4166 # no suitable subset was found
4118 4167 base = branchmap.branchcache()
4119 4168 base.update(baserepo, allbaserevs)
4120 4169
4121 4170 def setup():
4122 4171 x[0] = base.copy()
4123 4172 if clearcaches:
4124 4173 unfi._revbranchcache = None
4125 4174 clearchangelog(repo)
4126 4175
4127 4176 def bench():
4128 4177 x[0].update(targetrepo, newrevs)
4129 4178
4130 4179 timer(bench, setup=setup)
4131 4180 fm.end()
4132 4181 finally:
4133 4182 repoview.filtertable.pop(b'__perf_branchmap_update_base', None)
4134 4183 repoview.filtertable.pop(b'__perf_branchmap_update_target', None)
4135 4184
4136 4185
4137 4186 @command(
4138 4187 b'perf::branchmapload|perfbranchmapload',
4139 4188 [
4140 4189 (b'f', b'filter', b'', b'Specify repoview filter'),
4141 4190 (b'', b'list', False, b'List branchmap filter caches'),
4142 4191 (b'', b'clear-revlogs', False, b'refresh changelog and manifest'),
4143 4192 ]
4144 4193 + formatteropts,
4145 4194 )
4146 4195 def perfbranchmapload(ui, repo, filter=b'', list=False, **opts):
4147 4196 """benchmark reading the branchmap"""
4148 4197 opts = _byteskwargs(opts)
4149 4198 clearrevlogs = opts[b'clear_revlogs']
4150 4199
4151 4200 if list:
4152 4201 for name, kind, st in repo.cachevfs.readdir(stat=True):
4153 4202 if name.startswith(b'branch2'):
4154 4203 filtername = name.partition(b'-')[2] or b'unfiltered'
4155 4204 ui.status(
4156 4205 b'%s - %s\n' % (filtername, util.bytecount(st.st_size))
4157 4206 )
4158 4207 return
4159 4208 if not filter:
4160 4209 filter = None
4161 4210 subsettable = getbranchmapsubsettable()
4162 4211 if filter is None:
4163 4212 repo = repo.unfiltered()
4164 4213 else:
4165 4214 repo = repoview.repoview(repo, filter)
4166 4215
4167 4216 repo.branchmap() # make sure we have a relevant, up to date branchmap
4168 4217
4169 4218 try:
4170 4219 fromfile = branchmap.branchcache.fromfile
4171 4220 except AttributeError:
4172 4221 # older versions
4173 4222 fromfile = branchmap.read
4174 4223
4175 4224 currentfilter = filter
4176 4225 # try once without timer, the filter may not be cached
4177 4226 while fromfile(repo) is None:
4178 4227 currentfilter = subsettable.get(currentfilter)
4179 4228 if currentfilter is None:
4180 4229 raise error.Abort(
4181 4230 b'No branchmap cached for %s repo' % (filter or b'unfiltered')
4182 4231 )
4183 4232 repo = repo.filtered(currentfilter)
4184 4233 timer, fm = gettimer(ui, opts)
4185 4234
4186 4235 def setup():
4187 4236 if clearrevlogs:
4188 4237 clearchangelog(repo)
4189 4238
4190 4239 def bench():
4191 4240 fromfile(repo)
4192 4241
4193 4242 timer(bench, setup=setup)
4194 4243 fm.end()
4195 4244
4196 4245
4197 4246 @command(b'perf::loadmarkers|perfloadmarkers')
4198 4247 def perfloadmarkers(ui, repo):
4199 4248 """benchmark the time to parse the on-disk markers for a repo
4200 4249
4201 4250 Result is the number of markers in the repo."""
4202 4251 timer, fm = gettimer(ui)
4203 4252 svfs = getsvfs(repo)
4204 4253 timer(lambda: len(obsolete.obsstore(repo, svfs)))
4205 4254 fm.end()
4206 4255
4207 4256
4208 4257 @command(
4209 4258 b'perf::lrucachedict|perflrucachedict',
4210 4259 formatteropts
4211 4260 + [
4212 4261 (b'', b'costlimit', 0, b'maximum total cost of items in cache'),
4213 4262 (b'', b'mincost', 0, b'smallest cost of items in cache'),
4214 4263 (b'', b'maxcost', 100, b'maximum cost of items in cache'),
4215 4264 (b'', b'size', 4, b'size of cache'),
4216 4265 (b'', b'gets', 10000, b'number of key lookups'),
4217 4266 (b'', b'sets', 10000, b'number of key sets'),
4218 4267 (b'', b'mixed', 10000, b'number of mixed mode operations'),
4219 4268 (
4220 4269 b'',
4221 4270 b'mixedgetfreq',
4222 4271 50,
4223 4272 b'frequency of get vs set ops in mixed mode',
4224 4273 ),
4225 4274 ],
4226 4275 norepo=True,
4227 4276 )
4228 4277 def perflrucache(
4229 4278 ui,
4230 4279 mincost=0,
4231 4280 maxcost=100,
4232 4281 costlimit=0,
4233 4282 size=4,
4234 4283 gets=10000,
4235 4284 sets=10000,
4236 4285 mixed=10000,
4237 4286 mixedgetfreq=50,
4238 4287 **opts
4239 4288 ):
4240 4289 opts = _byteskwargs(opts)
4241 4290
4242 4291 def doinit():
4243 4292 for i in _xrange(10000):
4244 4293 util.lrucachedict(size)
4245 4294
4246 4295 costrange = list(range(mincost, maxcost + 1))
4247 4296
4248 4297 values = []
4249 4298 for i in _xrange(size):
4250 4299 values.append(random.randint(0, _maxint))
4251 4300
4252 4301 # Get mode fills the cache and tests raw lookup performance with no
4253 4302 # eviction.
4254 4303 getseq = []
4255 4304 for i in _xrange(gets):
4256 4305 getseq.append(random.choice(values))
4257 4306
4258 4307 def dogets():
4259 4308 d = util.lrucachedict(size)
4260 4309 for v in values:
4261 4310 d[v] = v
4262 4311 for key in getseq:
4263 4312 value = d[key]
4264 4313 value # silence pyflakes warning
4265 4314
4266 4315 def dogetscost():
4267 4316 d = util.lrucachedict(size, maxcost=costlimit)
4268 4317 for i, v in enumerate(values):
4269 4318 d.insert(v, v, cost=costs[i])
4270 4319 for key in getseq:
4271 4320 try:
4272 4321 value = d[key]
4273 4322 value # silence pyflakes warning
4274 4323 except KeyError:
4275 4324 pass
4276 4325
4277 4326 # Set mode tests insertion speed with cache eviction.
4278 4327 setseq = []
4279 4328 costs = []
4280 4329 for i in _xrange(sets):
4281 4330 setseq.append(random.randint(0, _maxint))
4282 4331 costs.append(random.choice(costrange))
4283 4332
4284 4333 def doinserts():
4285 4334 d = util.lrucachedict(size)
4286 4335 for v in setseq:
4287 4336 d.insert(v, v)
4288 4337
4289 4338 def doinsertscost():
4290 4339 d = util.lrucachedict(size, maxcost=costlimit)
4291 4340 for i, v in enumerate(setseq):
4292 4341 d.insert(v, v, cost=costs[i])
4293 4342
4294 4343 def dosets():
4295 4344 d = util.lrucachedict(size)
4296 4345 for v in setseq:
4297 4346 d[v] = v
4298 4347
4299 4348 # Mixed mode randomly performs gets and sets with eviction.
4300 4349 mixedops = []
4301 4350 for i in _xrange(mixed):
4302 4351 r = random.randint(0, 100)
4303 4352 if r < mixedgetfreq:
4304 4353 op = 0
4305 4354 else:
4306 4355 op = 1
4307 4356
4308 4357 mixedops.append(
4309 4358 (op, random.randint(0, size * 2), random.choice(costrange))
4310 4359 )
4311 4360
4312 4361 def domixed():
4313 4362 d = util.lrucachedict(size)
4314 4363
4315 4364 for op, v, cost in mixedops:
4316 4365 if op == 0:
4317 4366 try:
4318 4367 d[v]
4319 4368 except KeyError:
4320 4369 pass
4321 4370 else:
4322 4371 d[v] = v
4323 4372
4324 4373 def domixedcost():
4325 4374 d = util.lrucachedict(size, maxcost=costlimit)
4326 4375
4327 4376 for op, v, cost in mixedops:
4328 4377 if op == 0:
4329 4378 try:
4330 4379 d[v]
4331 4380 except KeyError:
4332 4381 pass
4333 4382 else:
4334 4383 d.insert(v, v, cost=cost)
4335 4384
4336 4385 benches = [
4337 4386 (doinit, b'init'),
4338 4387 ]
4339 4388
4340 4389 if costlimit:
4341 4390 benches.extend(
4342 4391 [
4343 4392 (dogetscost, b'gets w/ cost limit'),
4344 4393 (doinsertscost, b'inserts w/ cost limit'),
4345 4394 (domixedcost, b'mixed w/ cost limit'),
4346 4395 ]
4347 4396 )
4348 4397 else:
4349 4398 benches.extend(
4350 4399 [
4351 4400 (dogets, b'gets'),
4352 4401 (doinserts, b'inserts'),
4353 4402 (dosets, b'sets'),
4354 4403 (domixed, b'mixed'),
4355 4404 ]
4356 4405 )
4357 4406
4358 4407 for fn, title in benches:
4359 4408 timer, fm = gettimer(ui, opts)
4360 4409 timer(fn, title=title)
4361 4410 fm.end()
4362 4411
4363 4412
4364 4413 @command(
4365 4414 b'perf::write|perfwrite',
4366 4415 formatteropts
4367 4416 + [
4368 4417 (b'', b'write-method', b'write', b'ui write method'),
4369 4418 (b'', b'nlines', 100, b'number of lines'),
4370 4419 (b'', b'nitems', 100, b'number of items (per line)'),
4371 4420 (b'', b'item', b'x', b'item that is written'),
4372 4421 (b'', b'batch-line', None, b'pass whole line to write method at once'),
4373 4422 (b'', b'flush-line', None, b'flush after each line'),
4374 4423 ],
4375 4424 )
4376 4425 def perfwrite(ui, repo, **opts):
4377 4426 """microbenchmark ui.write (and others)"""
4378 4427 opts = _byteskwargs(opts)
4379 4428
4380 4429 write = getattr(ui, _sysstr(opts[b'write_method']))
4381 4430 nlines = int(opts[b'nlines'])
4382 4431 nitems = int(opts[b'nitems'])
4383 4432 item = opts[b'item']
4384 4433 batch_line = opts.get(b'batch_line')
4385 4434 flush_line = opts.get(b'flush_line')
4386 4435
4387 4436 if batch_line:
4388 4437 line = item * nitems + b'\n'
4389 4438
4390 4439 def benchmark():
4391 4440 for i in pycompat.xrange(nlines):
4392 4441 if batch_line:
4393 4442 write(line)
4394 4443 else:
4395 4444 for i in pycompat.xrange(nitems):
4396 4445 write(item)
4397 4446 write(b'\n')
4398 4447 if flush_line:
4399 4448 ui.flush()
4400 4449 ui.flush()
4401 4450
4402 4451 timer, fm = gettimer(ui, opts)
4403 4452 timer(benchmark)
4404 4453 fm.end()
4405 4454
4406 4455
4407 4456 def uisetup(ui):
4408 4457 if util.safehasattr(cmdutil, b'openrevlog') and not util.safehasattr(
4409 4458 commands, b'debugrevlogopts'
4410 4459 ):
4411 4460 # for "historical portability":
4412 4461 # In this case, Mercurial should be 1.9 (or a79fea6b3e77) -
4413 4462 # 3.7 (or 5606f7d0d063). Therefore, '--dir' option for
4414 4463 # openrevlog() should cause failure, because it has been
4415 4464 # available since 3.5 (or 49c583ca48c4).
4416 4465 def openrevlog(orig, repo, cmd, file_, opts):
4417 4466 if opts.get(b'dir') and not util.safehasattr(repo, b'dirlog'):
4418 4467 raise error.Abort(
4419 4468 b"This version doesn't support --dir option",
4420 4469 hint=b"use 3.5 or later",
4421 4470 )
4422 4471 return orig(repo, cmd, file_, opts)
4423 4472
4424 4473 extensions.wrapfunction(cmdutil, b'openrevlog', openrevlog)
4425 4474
4426 4475
4427 4476 @command(
4428 4477 b'perf::progress|perfprogress',
4429 4478 formatteropts
4430 4479 + [
4431 4480 (b'', b'topic', b'topic', b'topic for progress messages'),
4432 4481 (b'c', b'total', 1000000, b'total value we are progressing to'),
4433 4482 ],
4434 4483 norepo=True,
4435 4484 )
4436 4485 def perfprogress(ui, topic=None, total=None, **opts):
4437 4486 """printing of progress bars"""
4438 4487 opts = _byteskwargs(opts)
4439 4488
4440 4489 timer, fm = gettimer(ui, opts)
4441 4490
4442 4491 def doprogress():
4443 4492 with ui.makeprogress(topic, total=total) as progress:
4444 4493 for i in _xrange(total):
4445 4494 progress.increment()
4446 4495
4447 4496 timer(doprogress)
4448 4497 fm.end()
@@ -1,912 +1,922 b''
1 1 # tags.py - read tag info from local repository
2 2 #
3 3 # Copyright 2009 Olivia Mackall <olivia@selenic.com>
4 4 # Copyright 2009 Greg Ward <greg@gerg.ca>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 # Currently this module only deals with reading and caching tags.
10 10 # Eventually, it could take care of updating (adding/removing/moving)
11 11 # tags too.
12 12
13 13
14 14 import binascii
15 15 import io
16 16
17 17 from .node import (
18 18 bin,
19 19 hex,
20 20 nullrev,
21 21 short,
22 22 )
23 23 from .i18n import _
24 24 from . import (
25 25 encoding,
26 26 error,
27 27 match as matchmod,
28 28 scmutil,
29 29 util,
30 30 )
31 31 from .utils import stringutil
32 32
33 33 # Tags computation can be expensive and caches exist to make it fast in
34 34 # the common case.
35 35 #
36 36 # The "hgtagsfnodes1" cache file caches the .hgtags filenode values for
37 37 # each revision in the repository. The file is effectively an array of
38 38 # fixed length records. Read the docs for "hgtagsfnodescache" for technical
39 39 # details.
40 40 #
41 41 # The .hgtags filenode cache grows in proportion to the length of the
42 42 # changelog. The file is truncated when the changelog is stripped.
43 43 #
44 44 # The purpose of the filenode cache is to avoid the most expensive part
45 45 # of finding global tags, which is looking up the .hgtags filenode in the
46 46 # manifest for each head. This can take dozens or over 100ms for
47 47 # repositories with very large manifests. Multiplied by dozens or even
48 48 # hundreds of heads and there is a significant performance concern.
49 49 #
50 50 # There also exists a separate cache file for each repository filter.
51 51 # These "tags-*" files store information about the history of tags.
52 52 #
53 53 # The tags cache files consist of a cache validation line followed by
54 54 # a history of tags.
55 55 #
56 56 # The cache validation line has the format:
57 57 #
58 58 # <tiprev> <tipnode> [<filteredhash>]
59 59 #
60 60 # <tiprev> is an integer revision and <tipnode> is a 40 character hex
61 61 # node for that changeset. These redundantly identify the repository
62 62 # tip from the time the cache was written. In addition, <filteredhash>,
63 63 # if present, is a 40 character hex hash of the contents of the filtered
64 64 # revisions for this filter. If the set of filtered revs changes, the
65 65 # hash will change and invalidate the cache.
66 66 #
67 67 # The history part of the tags cache consists of lines of the form:
68 68 #
69 69 # <node> <tag>
70 70 #
71 71 # (This format is identical to that of .hgtags files.)
72 72 #
73 73 # <tag> is the tag name and <node> is the 40 character hex changeset
74 74 # the tag is associated with.
75 75 #
76 76 # Tags are written sorted by tag name.
77 77 #
78 78 # Tags associated with multiple changesets have an entry for each changeset.
79 79 # The most recent changeset (in terms of revlog ordering for the head
80 80 # setting it) for each tag is last.
81 81
82 82
83 83 def fnoderevs(ui, repo, revs):
84 84 """return the list of '.hgtags' fnodes used in a set revisions
85 85
86 86 This is returned as list of unique fnodes. We use a list instead of a set
87 87 because order matters when it comes to tags."""
88 88 unfi = repo.unfiltered()
89 89 tonode = unfi.changelog.node
90 90 nodes = [tonode(r) for r in revs]
91 91 fnodes = _getfnodes(ui, repo, nodes)
92 92 fnodes = _filterfnodes(fnodes, nodes)
93 93 return fnodes
94 94
95 95
96 96 def _nulltonone(repo, value):
97 97 """convert nullid to None
98 98
99 99 For tag value, nullid means "deleted". This small utility function helps
100 100 translating that to None."""
101 101 if value == repo.nullid:
102 102 return None
103 103 return value
104 104
105 105
106 106 def difftags(ui, repo, oldfnodes, newfnodes):
107 107 """list differences between tags expressed in two set of file-nodes
108 108
109 109 The list contains entries in the form: (tagname, oldvalue, newvalue).
110 110 None is used to express a missing value:
111 111 ('foo', None, 'abcd') is a new tag,
112 112 ('bar', 'ef01', None) is a deletion,
113 113 ('baz', 'abcd', 'ef01') is a tag movement.
114 114 """
115 115 if oldfnodes == newfnodes:
116 116 return []
117 117 oldtags = _tagsfromfnodes(ui, repo, oldfnodes)
118 118 newtags = _tagsfromfnodes(ui, repo, newfnodes)
119 119
120 120 # list of (tag, old, new): None means missing
121 121 entries = []
122 122 for tag, (new, __) in newtags.items():
123 123 new = _nulltonone(repo, new)
124 124 old, __ = oldtags.pop(tag, (None, None))
125 125 old = _nulltonone(repo, old)
126 126 if old != new:
127 127 entries.append((tag, old, new))
128 128 # handle deleted tags
129 129 for tag, (old, __) in oldtags.items():
130 130 old = _nulltonone(repo, old)
131 131 if old is not None:
132 132 entries.append((tag, old, None))
133 133 entries.sort()
134 134 return entries
135 135
136 136
137 137 def writediff(fp, difflist):
138 138 """write tags diff information to a file.
139 139
140 140 Data are stored with a line based format:
141 141
142 142 <action> <hex-node> <tag-name>\n
143 143
144 144 Actions are defined as follows:
145 145 -R tag is removed,
146 146 +A tag is added,
147 147 -M tag is moved (old value),
148 148 +M tag is moved (new value),
149 149
150 150 Example:
151 151
152 152 +A 875517b4806a848f942811a315a5bce30804ae85 t5
153 153
154 154 See documentation of difftags output for details about the input.
155 155 """
156 156 add = b'+A %s %s\n'
157 157 remove = b'-R %s %s\n'
158 158 updateold = b'-M %s %s\n'
159 159 updatenew = b'+M %s %s\n'
160 160 for tag, old, new in difflist:
161 161 # translate to hex
162 162 if old is not None:
163 163 old = hex(old)
164 164 if new is not None:
165 165 new = hex(new)
166 166 # write to file
167 167 if old is None:
168 168 fp.write(add % (new, tag))
169 169 elif new is None:
170 170 fp.write(remove % (old, tag))
171 171 else:
172 172 fp.write(updateold % (old, tag))
173 173 fp.write(updatenew % (new, tag))
174 174
175 175
176 176 def findglobaltags(ui, repo):
177 177 """Find global tags in a repo: return a tagsmap
178 178
179 179 tagsmap: tag name to (node, hist) 2-tuples.
180 180
181 181 The tags cache is read and updated as a side-effect of calling.
182 182 """
183 183 (heads, tagfnode, valid, cachetags, shouldwrite) = _readtagcache(ui, repo)
184 184 if cachetags is not None:
185 185 assert not shouldwrite
186 186 # XXX is this really 100% correct? are there oddball special
187 187 # cases where a global tag should outrank a local tag but won't,
188 188 # because cachetags does not contain rank info?
189 189 alltags = {}
190 190 _updatetags(cachetags, alltags)
191 191 return alltags
192 192
193 193 for head in reversed(heads): # oldest to newest
194 194 assert repo.changelog.index.has_node(
195 195 head
196 196 ), b"tag cache returned bogus head %s" % short(head)
197 197 fnodes = _filterfnodes(tagfnode, reversed(heads))
198 198 alltags = _tagsfromfnodes(ui, repo, fnodes)
199 199
200 200 # and update the cache (if necessary)
201 201 if shouldwrite:
202 202 _writetagcache(ui, repo, valid, alltags)
203 203 return alltags
204 204
205 205
206 206 def _filterfnodes(tagfnode, nodes):
207 207 """return a list of unique fnodes
208 208
209 209 The order of this list matches the order of "nodes". Preserving this order
210 210 is important as reading tags in a different order provides different
211 211 results."""
212 212 seen = set() # set of fnode
213 213 fnodes = []
214 214 for no in nodes: # oldest to newest
215 215 fnode = tagfnode.get(no)
216 216 if fnode and fnode not in seen:
217 217 seen.add(fnode)
218 218 fnodes.append(fnode)
219 219 return fnodes
220 220
221 221
222 222 def _tagsfromfnodes(ui, repo, fnodes):
223 223 """return a tagsmap from a list of file-node
224 224
225 225 tagsmap: tag name to (node, hist) 2-tuples.
226 226
227 227 The order of the list matters."""
228 228 alltags = {}
229 229 fctx = None
230 230 for fnode in fnodes:
231 231 if fctx is None:
232 232 fctx = repo.filectx(b'.hgtags', fileid=fnode)
233 233 else:
234 234 fctx = fctx.filectx(fnode)
235 235 filetags = _readtags(ui, repo, fctx.data().splitlines(), fctx)
236 236 _updatetags(filetags, alltags)
237 237 return alltags
238 238
239 239
240 240 def readlocaltags(ui, repo, alltags, tagtypes):
241 241 '''Read local tags in repo. Update alltags and tagtypes.'''
242 242 try:
243 243 data = repo.vfs.read(b"localtags")
244 244 except FileNotFoundError:
245 245 return
246 246
247 247 # localtags is in the local encoding; re-encode to UTF-8 on
248 248 # input for consistency with the rest of this module.
249 249 filetags = _readtags(
250 250 ui, repo, data.splitlines(), b"localtags", recode=encoding.fromlocal
251 251 )
252 252
253 253 # remove tags pointing to invalid nodes
254 254 cl = repo.changelog
255 255 for t in list(filetags):
256 256 try:
257 257 cl.rev(filetags[t][0])
258 258 except (LookupError, ValueError):
259 259 del filetags[t]
260 260
261 261 _updatetags(filetags, alltags, b'local', tagtypes)
262 262
263 263
264 264 def _readtaghist(ui, repo, lines, fn, recode=None, calcnodelines=False):
265 265 """Read tag definitions from a file (or any source of lines).
266 266
267 267 This function returns two sortdicts with similar information:
268 268
269 269 - the first dict, bintaghist, contains the tag information as expected by
270 270 the _readtags function, i.e. a mapping from tag name to (node, hist):
271 271 - node is the node id from the last line read for that name,
272 272 - hist is the list of node ids previously associated with it (in file
273 273 order). All node ids are binary, not hex.
274 274
275 275 - the second dict, hextaglines, is a mapping from tag name to a list of
276 276 [hexnode, line number] pairs, ordered from the oldest to the newest node.
277 277
278 278 When calcnodelines is False the hextaglines dict is not calculated (an
279 279 empty dict is returned). This is done to improve this function's
280 280 performance in cases where the line numbers are not needed.
281 281 """
282 282
283 283 bintaghist = util.sortdict()
284 284 hextaglines = util.sortdict()
285 285 count = 0
286 286
287 287 def dbg(msg):
288 288 ui.debug(b"%s, line %d: %s\n" % (fn, count, msg))
289 289
290 290 for nline, line in enumerate(lines):
291 291 count += 1
292 292 if not line:
293 293 continue
294 294 try:
295 295 (nodehex, name) = line.split(b" ", 1)
296 296 except ValueError:
297 297 dbg(b"cannot parse entry")
298 298 continue
299 299 name = name.strip()
300 300 if recode:
301 301 name = recode(name)
302 302 try:
303 303 nodebin = bin(nodehex)
304 304 except binascii.Error:
305 305 dbg(b"node '%s' is not well formed" % nodehex)
306 306 continue
307 307
308 308 # update filetags
309 309 if calcnodelines:
310 310 # map tag name to a list of line numbers
311 311 if name not in hextaglines:
312 312 hextaglines[name] = []
313 313 hextaglines[name].append([nodehex, nline])
314 314 continue
315 315 # map tag name to (node, hist)
316 316 if name not in bintaghist:
317 317 bintaghist[name] = []
318 318 bintaghist[name].append(nodebin)
319 319 return bintaghist, hextaglines
320 320
321 321
322 322 def _readtags(ui, repo, lines, fn, recode=None, calcnodelines=False):
323 323 """Read tag definitions from a file (or any source of lines).
324 324
325 325 Returns a mapping from tag name to (node, hist).
326 326
327 327 "node" is the node id from the last line read for that name. "hist"
328 328 is the list of node ids previously associated with it (in file order).
329 329 All node ids are binary, not hex.
330 330 """
331 331 filetags, nodelines = _readtaghist(
332 332 ui, repo, lines, fn, recode=recode, calcnodelines=calcnodelines
333 333 )
334 334 # util.sortdict().__setitem__ is much slower at replacing than inserting
335 335 # new entries. The difference can matter if there are thousands of tags.
336 336 # Create a new sortdict to avoid the performance penalty.
337 337 newtags = util.sortdict()
338 338 for tag, taghist in filetags.items():
339 339 newtags[tag] = (taghist[-1], taghist[:-1])
340 340 return newtags
341 341
342 342
343 343 def _updatetags(filetags, alltags, tagtype=None, tagtypes=None):
344 344 """Incorporate the tag info read from one file into dictionnaries
345 345
346 346 The first one, 'alltags', is a "tagsmap" (see 'findglobaltags' for details).
347 347
348 348 The second one, 'tagtypes', is optional and will be updated to track the
349 349 "tagtype" of entries in the tagmaps. When set, the 'tagtype' argument also
350 350 needs to be set."""
351 351 if tagtype is None:
352 352 assert tagtypes is None
353 353
354 354 for name, nodehist in filetags.items():
355 355 if name not in alltags:
356 356 alltags[name] = nodehist
357 357 if tagtype is not None:
358 358 tagtypes[name] = tagtype
359 359 continue
360 360
361 361 # we prefer alltags[name] if:
362 362 # it supersedes us OR
363 363 # mutual supersedes and it has a higher rank
364 364 # otherwise we win because we're tip-most
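        # e.g. (hypothetical nodes): incoming entry anode=a, ahist=[b] vs.
        # existing bnode=b, bhist=[a, c]: each node is in the other's
        # history (mutual supersedes), so rank decides; bhist is the longer
        # history, hence the existing bnode wins and anode becomes b.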
365 365 anode, ahist = nodehist
366 366 bnode, bhist = alltags[name]
367 367 if (
368 368 bnode != anode
369 369 and anode in bhist
370 370 and (bnode not in ahist or len(bhist) > len(ahist))
371 371 ):
372 372 anode = bnode
373 373 elif tagtype is not None:
374 374 tagtypes[name] = tagtype
375 375 ahist.extend([n for n in bhist if n not in ahist])
376 376 alltags[name] = anode, ahist
377 377
378 378
379 379 def _filename(repo):
380 380 """name of a tagcache file for a given repo or repoview"""
381 381 filename = b'tags2'
382 382 if repo.filtername:
383 383 filename = b'%s-%s' % (filename, repo.filtername)
384 384 return filename
385 385
386 386
387 387 def _readtagcache(ui, repo):
388 388 """Read the tag cache.
389 389
390 390 Returns a tuple (heads, fnodes, validinfo, cachetags, shouldwrite).
391 391
392 392 If the cache is completely up-to-date, "cachetags" is a dict of the
393 393 form returned by _readtags() and "heads", "fnodes", and "validinfo" are
394 394 None and "shouldwrite" is False.
395 395
396 396 If the cache is not up to date, "cachetags" is None. "heads" is a list
397 397 of all heads currently in the repository, ordered from tip to oldest.
398 398 "validinfo" is a tuple describing cache validation info. This is used
399 399 when writing the tags cache. "fnodes" is a mapping from head to .hgtags
400 400 filenode. "shouldwrite" is True.
401 401
402 402 If the cache is not up to date, the caller is responsible for reading tag
403 403 info from each returned head. (See findglobaltags().)
404 404 """
405 405 try:
406 406 cachefile = repo.cachevfs(_filename(repo), b'r')
407 407 # force reading the file for static-http
408 408 cachelines = iter(cachefile)
409 409 except IOError:
410 410 cachefile = None
411 411
412 412 cacherev = None
413 413 cachenode = None
414 414 cachehash = None
415 415 if cachefile:
416 416 try:
417 417 validline = next(cachelines)
418 418 validline = validline.split()
419 419 cacherev = int(validline[0])
420 420 cachenode = bin(validline[1])
421 421 if len(validline) > 2:
422 422 cachehash = bin(validline[2])
423 423 except Exception:
424 424 # corruption of the cache, just recompute it.
425 425 pass
426 426
427 427 tipnode = repo.changelog.tip()
428 428 tiprev = len(repo.changelog) - 1
429 429
430 430 # Case 1 (common): tip is the same, so nothing has changed.
431 431 # (Unchanged tip trivially means no changesets have been added.
432 432 # But, thanks to localrepository.destroyed(), it also means none
433 433 # have been destroyed by strip or rollback.)
434 434 if (
435 435 cacherev == tiprev
436 436 and cachenode == tipnode
437 437 and cachehash == scmutil.filteredhash(repo, tiprev)
438 438 ):
439 439 tags = _readtags(ui, repo, cachelines, cachefile.name)
440 440 cachefile.close()
441 441 return (None, None, None, tags, False)
442 442 if cachefile:
443 443 cachefile.close() # ignore rest of file
444 444
445 445 valid = (tiprev, tipnode, scmutil.filteredhash(repo, tiprev))
446 446
447 447 repoheads = repo.heads()
448 448 # Case 2 (uncommon): empty repo; get out quickly and don't bother
449 449 # writing an empty cache.
450 450 if repoheads == [repo.nullid]:
451 451 return ([], {}, valid, {}, False)
452 452
453 453 # Case 3 (uncommon): cache file missing or empty.
454 454
455 455 # Case 4 (uncommon): tip rev decreased. This should only happen
456 456 # when we're called from localrepository.destroyed(). Refresh the
457 457 # cache so future invocations will not see disappeared heads in the
458 458 # cache.
459 459
460 460 # Case 5 (common): tip has changed, so we've added/replaced heads.
461 461
462 462 # As it happens, the code to handle cases 3, 4, 5 is the same.
463 463
464 464 # N.B. in case 4 (nodes destroyed), "new head" really means "newly
465 465 # exposed".
466 466 if not len(repo.file(b'.hgtags')):
467 467 # No tags have ever been committed, so we can avoid a
468 468 # potentially expensive search.
469 469 return ([], {}, valid, None, True)
470 470
471 471 # Now we have to lookup the .hgtags filenode for every new head.
472 472 # This is the most expensive part of finding tags, so performance
473 473 # depends primarily on the size of newheads. Worst case: no cache
474 474 # file, so newheads == repoheads.
475 475 # Reversed order helps the cache ('repoheads' is in descending order)
476 476 cachefnode = _getfnodes(ui, repo, reversed(repoheads))
477 477
478 478 # Caller has to iterate over all heads, but can use the filenodes in
479 479 # cachefnode to get to each .hgtags revision quickly.
480 480 return (repoheads, cachefnode, valid, None, True)
481 481
482 482
483 483 def _getfnodes(ui, repo, nodes):
484 484 """return .hgtags fnodes for a list of changeset nodes
485 485
486 486 Return value is a {node: fnode} mapping. There will be no entry for nodes
487 487 without a '.hgtags' file.
488 488 """
489 489 starttime = util.timer()
490 490 fnodescache = hgtagsfnodescache(repo.unfiltered())
491 491 cachefnode = {}
492 492 validated_fnodes = set()
493 493 unknown_entries = set()
494 494
495 495 flog = None
496 496 for node in nodes:
497 497 fnode = fnodescache.getfnode(node)
498 498 if fnode != repo.nullid:
499 499 if fnode not in validated_fnodes:
500 500 if flog is None:
501 501 flog = repo.file(b'.hgtags')
502 502 if flog.hasnode(fnode):
503 503 validated_fnodes.add(fnode)
504 504 else:
505 505 unknown_entries.add(node)
506 506 cachefnode[node] = fnode
507 507
508 508 if unknown_entries:
509 509 fixed_nodemap = fnodescache.refresh_invalid_nodes(unknown_entries)
510 510 for node, fnode in fixed_nodemap.items():
511 511 if fnode != repo.nullid:
512 512 cachefnode[node] = fnode
513 513
514 514 fnodescache.write()
515 515
516 516 duration = util.timer() - starttime
517 517 ui.log(
518 518 b'tagscache',
519 519 b'%d/%d cache hits/lookups in %0.4f seconds\n',
520 520 fnodescache.hitcount,
521 521 fnodescache.lookupcount,
522 522 duration,
523 523 )
524 524 return cachefnode
525 525
526 526
527 527 def _writetagcache(ui, repo, valid, cachetags):
528 528 filename = _filename(repo)
529 529 try:
530 530 cachefile = repo.cachevfs(filename, b'w', atomictemp=True)
531 531 except (OSError, IOError):
532 532 return
533 533
534 534 ui.log(
535 535 b'tagscache',
536 536 b'writing .hg/cache/%s with %d tags\n',
537 537 filename,
538 538 len(cachetags),
539 539 )
540 540
541 541 if valid[2]:
542 542 cachefile.write(
543 543 b'%d %s %s\n' % (valid[0], hex(valid[1]), hex(valid[2]))
544 544 )
545 545 else:
546 546 cachefile.write(b'%d %s\n' % (valid[0], hex(valid[1])))
547 547
548 548 # Tag names in the cache are in UTF-8 -- which is the whole reason
549 549 # we keep them in UTF-8 throughout this module. If we converted
550 550 # them to local encoding on input, we would lose info writing them to
551 551 # the cache.
552 552 for (name, (node, hist)) in sorted(cachetags.items()):
553 553 for n in hist:
554 554 cachefile.write(b"%s %s\n" % (hex(n), name))
555 555 cachefile.write(b"%s %s\n" % (hex(node), name))
556 556
557 557 try:
558 558 cachefile.close()
559 559 except (OSError, IOError):
560 560 pass
561 561
562 562
563 563 def tag(repo, names, node, message, local, user, date, editor=False):
564 564 """tag a revision with one or more symbolic names.
565 565
566 566 names is a list of strings or, when adding a single tag, names may be a
567 567 string.
568 568
569 569 if local is True, the tags are stored in a per-repository file.
570 570 otherwise, they are stored in the .hgtags file, and a new
571 571 changeset is committed with the change.
572 572
573 573 keyword arguments:
574 574
575 575 local: whether to store tags in non-version-controlled file
576 576 (default False)
577 577
578 578 message: commit message to use if committing
579 579
580 580 user: name of user to use if committing
581 581
582 582 date: date tuple to use if committing"""
583 583
584 584 if not local:
585 585 m = matchmod.exact([b'.hgtags'])
586 586 st = repo.status(match=m, unknown=True, ignored=True)
587 587 if any(
588 588 (
589 589 st.modified,
590 590 st.added,
591 591 st.removed,
592 592 st.deleted,
593 593 st.unknown,
594 594 st.ignored,
595 595 )
596 596 ):
597 597 raise error.Abort(
598 598 _(b'working copy of .hgtags is changed'),
599 599 hint=_(b'please commit .hgtags manually'),
600 600 )
601 601
602 602 with repo.wlock():
603 603 repo.tags() # instantiate the cache
604 604 _tag(repo, names, node, message, local, user, date, editor=editor)
605 605
606 606
607 607 def _tag(
608 608 repo, names, node, message, local, user, date, extra=None, editor=False
609 609 ):
610 610 if isinstance(names, bytes):
611 611 names = (names,)
612 612
613 613 branches = repo.branchmap()
614 614 for name in names:
615 615 repo.hook(b'pretag', throw=True, node=hex(node), tag=name, local=local)
616 616 if name in branches:
617 617 repo.ui.warn(
618 618 _(b"warning: tag %s conflicts with existing branch name\n")
619 619 % name
620 620 )
621 621
622 622 def writetags(fp, names, munge, prevtags):
623 623 fp.seek(0, io.SEEK_END)
624 624 if prevtags and not prevtags.endswith(b'\n'):
625 625 fp.write(b'\n')
626 626 for name in names:
627 627 if munge:
628 628 m = munge(name)
629 629 else:
630 630 m = name
631 631
632 632 if repo._tagscache.tagtypes and name in repo._tagscache.tagtypes:
633 633 old = repo.tags().get(name, repo.nullid)
634 634 fp.write(b'%s %s\n' % (hex(old), m))
635 635 fp.write(b'%s %s\n' % (hex(node), m))
636 636 fp.close()
637 637
638 638 prevtags = b''
639 639 if local:
640 640 try:
641 641 fp = repo.vfs(b'localtags', b'r+')
642 642 except IOError:
643 643 fp = repo.vfs(b'localtags', b'a')
644 644 else:
645 645 prevtags = fp.read()
646 646
647 647 # local tags are stored in the current charset
648 648 writetags(fp, names, None, prevtags)
649 649 for name in names:
650 650 repo.hook(b'tag', node=hex(node), tag=name, local=local)
651 651 return
652 652
653 653 try:
654 654 fp = repo.wvfs(b'.hgtags', b'rb+')
655 655 except FileNotFoundError:
656 656 fp = repo.wvfs(b'.hgtags', b'ab')
657 657 else:
658 658 prevtags = fp.read()
659 659
660 660 # committed tags are stored in UTF-8
661 661 writetags(fp, names, encoding.fromlocal, prevtags)
662 662
663 663 fp.close()
664 664
665 665 repo.invalidatecaches()
666 666
667 667 with repo.dirstate.changing_files(repo):
668 668 if b'.hgtags' not in repo.dirstate:
669 669 repo[None].add([b'.hgtags'])
670 670
671 671 m = matchmod.exact([b'.hgtags'])
672 672 tagnode = repo.commit(
673 673 message, user, date, extra=extra, match=m, editor=editor
674 674 )
675 675
676 676 for name in names:
677 677 repo.hook(b'tag', node=hex(node), tag=name, local=local)
678 678
679 679 return tagnode
680 680
681 681
682 682 _fnodescachefile = b'hgtagsfnodes1'
683 683 _fnodesrecsize = 4 + 20 # changeset fragment + filenode
684 684 _fnodesmissingrec = b'\xff' * 24
685 685
686 686
687 687 class hgtagsfnodescache:
688 688 """Persistent cache mapping revisions to .hgtags filenodes.
689 689
690 690 The cache is an array of records. Each item in the array corresponds to
691 691 a changelog revision. Values in the array contain the first 4 bytes of
692 692 the node hash and the 20 bytes .hgtags filenode for that revision.
693 693
694 694 The first 4 bytes are present as a form of verification. Repository
695 695 stripping and rewriting may change the node at a numeric revision in the
696 696 changelog. The changeset fragment serves as a verifier to detect
697 697 rewriting. This logic is shared with the rev branch cache (see
698 698 branchmap.py).
699 699
700 700 The instance holds in memory the full cache content but entries are
701 701 only parsed on read.
702 702
703 703 Entries are accessed through ``getfnode()`` and ``setfnode()``. Missing
704 704 entries are computed and written back automatically on access.
705 705 """
706 706
707 707 def __init__(self, repo):
708 708 assert repo.filtername is None
709 709
710 710 self._repo = repo
711 711
712 712 # Only for reporting purposes.
713 713 self.lookupcount = 0
714 714 self.hitcount = 0
715 715
716 716 try:
717 717 data = repo.cachevfs.read(_fnodescachefile)
718 718 except (OSError, IOError):
719 719 data = b""
720 720 self._raw = bytearray(data)
721 721
722 722 # The end state of self._raw is an array that is of the exact length
723 723 # required to hold a record for every revision in the repository.
724 724 # We truncate or extend the array as necessary. self._dirtyoffset is
725 725 # defined to be the start offset at which we need to write the output
726 726 # file. This offset is also adjusted when new entries are calculated
727 727 # for array members.
728 728 cllen = len(repo.changelog)
729 729 wantedlen = cllen * _fnodesrecsize
730 730 rawlen = len(self._raw)
731 731
732 732 self._dirtyoffset = None
733 733
734 734 rawlentokeep = min(
735 735 wantedlen, (rawlen // _fnodesrecsize) * _fnodesrecsize
736 736 )
737 737 if rawlen > rawlentokeep:
738 738 # There's no easy way to truncate array instances. This seems
739 739 # slightly less evil than copying a potentially large array slice.
740 740 for i in range(rawlen - rawlentokeep):
741 741 self._raw.pop()
742 742 rawlen = len(self._raw)
743 743 self._dirtyoffset = rawlen
744 744 if rawlen < wantedlen:
745 745 if self._dirtyoffset is None:
746 746 self._dirtyoffset = rawlen
747 747 # TODO: zero fill entire record, because it's invalid not missing?
748 748 self._raw.extend(b'\xff' * (wantedlen - rawlen))
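# Worked example of the resize logic above (illustration only): with 3
# changelog revisions, wantedlen = 3 * 24 = 72 bytes. Given a 50-byte
# cache file, rawlentokeep = (50 // 24) * 24 = 48, so the 2-byte tail is
# popped off, the array is padded with 24 bytes of \xff up to 72, and
# self._dirtyoffset is left at 48.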
749 749
750 750 def getfnode(self, node, computemissing=True):
751 751 """Obtain the filenode of the .hgtags file at a specified revision.
752 752
753 753 If the value is in the cache, the entry will be validated and returned.
754 754 Otherwise, the filenode will be computed and returned unless
755 755 "computemissing" is False. In that case, None will be returned if
756 756 the entry is missing or False if the entry is invalid without
757 757 any potentially expensive computation being performed.
758 758
759 759 If an .hgtags does not exist at the specified revision, nullid is
760 760 returned.
761 761 """
762 762 if node == self._repo.nullid:
763 763 return node
764 764
765 765 rev = self._repo.changelog.rev(node)
766 766
767 767 self.lookupcount += 1
768 768
769 769 offset = rev * _fnodesrecsize
770 770 record = b'%s' % self._raw[offset : offset + _fnodesrecsize]
771 771 properprefix = node[0:4]
772 772
773 773 # Validate and return existing entry.
774 774 if record != _fnodesmissingrec and len(record) == _fnodesrecsize:
775 775 fileprefix = record[0:4]
776 776
777 777 if fileprefix == properprefix:
778 778 self.hitcount += 1
779 779 return record[4:]
780 780
781 781 # Fall through.
782 782
783 783 # If we get here, the entry is either missing or invalid.
784 784
785 785 if not computemissing:
786 786 if record != _fnodesmissingrec:
787 787 return False
788 788 return None
789 789
790 790 fnode = self._computefnode(node)
791 791 self._writeentry(offset, properprefix, fnode)
792 792 return fnode
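# Possible outcomes, for illustration (``cache`` being an instance of
# this class and ``node`` a changelog node; both names are hypothetical):
#
#   cache.getfnode(node)                        # 20-byte fnode, computed
#                                               # and cached if needed
#   cache.getfnode(node, computemissing=False)  # fnode, None (missing) or
#                                               # False (invalid entry)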
793 793
794 794 def _computefnode(self, node):
795 795 """Finds the tag filenode for a node which is missing or invalid
796 796 in cache"""
797 797 ctx = self._repo[node]
798 798 rev = ctx.rev()
799 799 fnode = None
800 800 cl = self._repo.changelog
801 801 p1rev, p2rev = cl._uncheckedparentrevs(rev)
802 802 p1node = cl.node(p1rev)
803 803 p1fnode = self.getfnode(p1node, computemissing=False)
804 804 if p2rev != nullrev:
805 805 # There are some non-merge changesets where p1 is null and p2 is
806 806 # set. Processing them as merges is just slower, but still gives
807 807 # a correct result.
808 808 p2node = cl.node(p2rev)
809 809 p2fnode = self.getfnode(p2node, computemissing=False)
810 810 if p1fnode != p2fnode:
811 811 # we cannot rely on readfast because we don't know against what
812 812 # parent the readfast delta is computed
813 813 p1fnode = None
814 814 if p1fnode:
815 815 mctx = ctx.manifestctx()
816 816 fnode = mctx.readfast().get(b'.hgtags')
817 817 if fnode is None:
818 818 fnode = p1fnode
819 819 if fnode is None:
820 820 # Populate missing entry.
821 821 try:
822 822 fnode = ctx.filenode(b'.hgtags')
823 823 except error.LookupError:
824 824 # No .hgtags file on this revision.
825 825 fnode = self._repo.nullid
826 826 return fnode
827 827
828 828 def setfnode(self, node, fnode):
829 829 """Set the .hgtags filenode for a given changeset."""
830 830 assert len(fnode) == 20
831 831 ctx = self._repo[node]
832 832
833 833 # Do a lookup first to avoid writing if nothing has changed.
834 834 if self.getfnode(ctx.node(), computemissing=False) == fnode:
835 835 return
836 836
837 837 self._writeentry(ctx.rev() * _fnodesrecsize, node[0:4], fnode)
838 838
839 839 def refresh_invalid_nodes(self, nodes):
840 840 """recomputes file nodes for a given set of nodes which has unknown
841 841 filenodes for them in the cache
842 842 Also updates the in-memory cache with the correct filenode.
843 843 Caller needs to take care about calling `.write()` so that updates are
844 844 persisted.
845 845 Returns a map {node: recomputed fnode}
846 846 """
847 847 fixed_nodemap = {}
848 848 for node in nodes:
849 849 fnode = self._computefnode(node)
850 850 fixed_nodemap[node] = fnode
851 851 self.setfnode(node, fnode)
852 852 return fixed_nodemap
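# Typical call pattern, for illustration (``bad_nodes`` is a hypothetical
# iterable of changelog nodes whose cached filenodes are unknown):
#
#   fixed = cache.refresh_invalid_nodes(bad_nodes)  # {node: fnode}
#   cache.write()  # persist; refresh_invalid_nodes() does not write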
853 853
854 854 def _writeentry(self, offset, prefix, fnode):
855 855 # Slices on array instances only accept other array.
856 856 entry = bytearray(prefix + fnode)
857 857 self._raw[offset : offset + _fnodesrecsize] = entry
858 858 # self._dirtyoffset could be None; "or 0" then marks dirty from offset 0.
859 859 self._dirtyoffset = min(self._dirtyoffset or 0, offset or 0)
860 860
861 861 def write(self):
862 862 """Perform all necessary writes to cache file.
863 863
864 864 This may no-op if no writes are needed or if a write lock could
865 865 not be obtained.
866 866 """
867 867 if self._dirtyoffset is None:
868 868 return
869 869
870 870 data = self._raw[self._dirtyoffset :]
871 871 if not data:
872 872 return
873 873
874 874 repo = self._repo
875 875
876 876 try:
877 877 lock = repo.lock(wait=False)
878 878 except error.LockError:
879 879 repo.ui.log(
880 880 b'tagscache',
881 881 b'not writing .hg/cache/%s because '
882 882 b'lock cannot be acquired\n' % _fnodescachefile,
883 883 )
884 884 return
885 885
886 886 try:
887 887 f = repo.cachevfs.open(_fnodescachefile, b'ab')
888 888 try:
889 889 # if the file has been truncated
890 890 actualoffset = f.tell()
891 891 if actualoffset < self._dirtyoffset:
892 892 self._dirtyoffset = actualoffset
893 893 data = self._raw[self._dirtyoffset :]
894 894 f.seek(self._dirtyoffset)
895 895 f.truncate()
896 896 repo.ui.log(
897 897 b'tagscache',
898 898 b'writing %d bytes to cache/%s\n'
899 899 % (len(data), _fnodescachefile),
900 900 )
901 901 f.write(data)
902 902 self._dirtyoffset = None
903 903 finally:
904 904 f.close()
905 905 except (IOError, OSError) as inst:
906 906 repo.ui.log(
907 907 b'tagscache',
908 908 b"couldn't write cache/%s: %s\n"
909 909 % (_fnodescachefile, stringutil.forcebytestr(inst)),
910 910 )
911 911 finally:
912 912 lock.release()
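# Note on the append-mode handling above (illustration): with b'ab' the
# file position starts at EOF, so f.tell() is the current file size and
# reveals whether another writer truncated the file below
# self._dirtyoffset. truncate() then cuts the file back to the dirty
# offset, and the append write lands exactly there.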
913
914
915 def clear_cache_on_disk(repo):
916 """function used by the perf extension to "tags" cache"""
917 repo.cachevfs.tryunlink(_filename(repo))
918
919
920 def clear_cache_fnodes(repo):
921 """function used by the perf extension to clear "file node cache"""
922 repo.cachevfs.tryunlink(_filename(repo))
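# Hedged sketch of how a benchmark such as perf::tags can combine these
# helpers to time repo.tags() against a cold cache (illustrative only,
# not the actual perf.py code):
#
#   from mercurial import tags
#   tags.clear_cache_on_disk(repo)   # drop the on-disk tags cache
#   tags.clear_cache_fnodes(repo)    # drop .hg/cache/hgtagsfnodes1
#   repo.invalidatecaches()          # drop the in-memory caches
#   repo.tags()                      # the timed call rebuilds everything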
@@ -1,441 +1,441 b''
1 1 #require test-repo
2 2
3 3 Set vars:
4 4
5 5 $ . "$TESTDIR/helpers-testrepo.sh"
6 6 $ CONTRIBDIR="$TESTDIR/../contrib"
7 7
8 8 Prepare repo:
9 9
10 10 $ hg init
11 11
12 12 $ echo this is file a > a
13 13 $ hg add a
14 14 $ hg commit -m first
15 15
16 16 $ echo adding to file a >> a
17 17 $ hg commit -m second
18 18
19 19 $ echo adding more to file a >> a
20 20 $ hg commit -m third
21 21
22 22 $ hg up -r 0
23 23 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
24 24 $ echo merge-this >> a
25 25 $ hg commit -m merge-able
26 26 created new head
27 27
28 28 $ hg up -r 2
29 29 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
30 30
31 31 perfstatus
32 32
33 33 $ cat >> $HGRCPATH << EOF
34 34 > [extensions]
35 35 > perf=$CONTRIBDIR/perf.py
36 36 > [perf]
37 37 > presleep=0
38 38 > stub=on
39 39 > parentscount=1
40 40 > EOF
41 41 $ hg help -e perf
42 42 perf extension - helper extension to measure performance
43 43
44 44 Configurations
45 45 ==============
46 46
47 47 "perf"
48 48 ------
49 49
50 50 "all-timing"
51 51 When set, additional statistics will be reported for each benchmark: best,
52 52 worst, median average. If not set only the best timing is reported
53 53 (default: off).
54 54
55 55 "presleep"
56 56 number of second to wait before any group of runs (default: 1)
57 57
58 58 "pre-run"
59 59 number of run to perform before starting measurement.
60 60
61 61 "profile-benchmark"
62 62 Enable profiling for the benchmarked section. (The first iteration is
63 63 benchmarked)
64 64
65 65 "run-limits"
66 66 Control the number of runs each benchmark will perform. The option value
67 67 should be a list of '<time>-<numberofrun>' pairs. After each run the
68 68 conditions are considered in order with the following logic:
69 69
70 70 If benchmark has been running for <time> seconds, and we have performed
71 71 <numberofrun> iterations, stop the benchmark,
72 72
73 73 The default value is: '3.0-100, 10.0-3'
74 74
75 75 "stub"
76 76 When set, benchmarks will only be run once, useful for testing (default:
77 77 off)
78 78
79 79 list of commands:
80 80
81 81 perf::addremove
82 82 (no help text available)
83 83 perf::ancestors
84 84 (no help text available)
85 85 perf::ancestorset
86 86 (no help text available)
87 87 perf::annotate
88 88 (no help text available)
89 89 perf::bdiff benchmark a bdiff between revisions
90 90 perf::bookmarks
91 91 benchmark parsing bookmarks from disk to memory
92 92 perf::branchmap
93 93 benchmark the update of a branchmap
94 94 perf::branchmapload
95 95 benchmark reading the branchmap
96 96 perf::branchmapupdate
97 97 benchmark branchmap update from <base> revs to <target>
98 98 revs
99 99 perf::bundle benchmark the creation of a bundle from a repository
100 100 perf::bundleread
101 101 Benchmark reading of bundle files.
102 102 perf::cca (no help text available)
103 103 perf::changegroupchangelog
104 104 Benchmark producing a changelog group for a changegroup.
105 105 perf::changeset
106 106 (no help text available)
107 107 perf::ctxfiles
108 108 (no help text available)
109 109 perf::delta-find
110 110 benchmark the process of finding a valid delta for a revlog
111 111 revision
112 112 perf::diffwd Profile diff of working directory changes
113 113 perf::dirfoldmap
114 114 benchmark a 'dirstate._map.dirfoldmap.get()' request
115 115 perf::dirs (no help text available)
116 116 perf::dirstate
117 117 benchmark the time of various dirstate operations
118 118 perf::dirstatedirs
119 119 benchmark a 'dirstate.hasdir' call from an empty 'dirs' cache
120 120 perf::dirstatefoldmap
121 121 benchmark a 'dirstate._map.filefoldmap.get()' request
122 122 perf::dirstatewrite
123 123 benchmark the time it takes to write a dirstate on disk
124 124 perf::discovery
125 125 benchmark discovery between the local repo and the peer at a given
126 126 path
127 127 perf::fncacheencode
128 128 (no help text available)
129 129 perf::fncacheload
130 130 (no help text available)
131 131 perf::fncachewrite
132 132 (no help text available)
133 133 perf::heads benchmark the computation of changelog heads
134 134 perf::helper-mergecopies
135 135 find statistics about potential parameters for
136 136 'perfmergecopies'
137 137 perf::helper-pathcopies
138 138 find statistics about potential parameters for the
139 139 'perftracecopies'
140 140 perf::ignore benchmark operations related to computing ignore
141 141 perf::index benchmark index creation time followed by a lookup
142 142 perf::linelogedits
143 143 (no help text available)
144 144 perf::loadmarkers
145 145 benchmark the time to parse the on-disk markers for a repo
146 146 perf::log (no help text available)
147 147 perf::lookup (no help text available)
148 148 perf::lrucachedict
149 149 (no help text available)
150 150 perf::manifest
151 151 benchmark the time to read a manifest from disk and return a
152 152 usable
153 153 perf::mergecalculate
154 154 (no help text available)
155 155 perf::mergecopies
156 156 measure runtime of 'copies.mergecopies'
157 157 perf::moonwalk
158 158 benchmark walking the changelog backwards
159 159 perf::nodelookup
160 160 (no help text available)
161 161 perf::nodemap
162 162 benchmark the time necessary to look up a revision from a cold
163 163 nodemap
164 164 perf::parents
165 165 benchmark the time necessary to fetch one changeset's parents.
166 166 perf::pathcopies
167 167 benchmark the copy tracing logic
168 168 perf::phases benchmark phasesets computation
169 169 perf::phasesremote
170 170 benchmark the time needed to analyse phases of the remote server
171 171 perf::progress
172 172 printing of progress bars
173 173 perf::rawfiles
174 174 (no help text available)
175 175 perf::revlogchunks
176 176 Benchmark operations on revlog chunks.
177 177 perf::revlogindex
178 178 Benchmark operations against a revlog index.
179 179 perf::revlogrevision
180 180 Benchmark obtaining a revlog revision.
181 181 perf::revlogrevisions
182 182 Benchmark reading a series of revisions from a revlog.
183 183 perf::revlogwrite
184 184 Benchmark writing a series of revisions to a revlog.
185 185 perf::revrange
186 186 (no help text available)
187 187 perf::revset benchmark the execution time of a revset
188 188 perf::startup
189 189 (no help text available)
190 190 perf::status benchmark the performance of a single status call
191 191 perf::stream-consume
192 192 benchmark the full application of a stream clone
193 193 perf::stream-generate
194 194 benchmark the full generation of a stream clone
195 195 perf::stream-locked-section
196 196 benchmark the initial, repo-locked, section of a stream-clone
197 perf::tags (no help text available)
197 perf::tags Benchmark tags retrieval in various situations
198 198 perf::templating
199 199 test the rendering time of a given template
200 200 perf::unbundle
201 201 benchmark application of a bundle in a repository.
202 202 perf::unidiff
203 203 benchmark a unified diff between revisions
204 204 perf::volatilesets
205 205 benchmark the computation of various volatile sets
206 206 perf::walk (no help text available)
207 207 perf::write microbenchmark ui.write (and others)
208 208
209 209 (use 'hg help -v perf' to show built-in aliases and global options)
210 210
211 211 $ hg help perfaddremove
212 212 hg perf::addremove
213 213
214 214 aliases: perfaddremove
215 215
216 216 (no help text available)
217 217
218 218 options:
219 219
220 220 -T --template TEMPLATE display with template
221 221
222 222 (some details hidden, use --verbose to show complete help)
223 223
224 224 $ hg perfaddremove
225 225 $ hg perfancestors
226 226 $ hg perfancestorset 2
227 227 $ hg perfannotate a
228 228 $ hg perfbdiff -c 1
229 229 $ hg perfbdiff --alldata 1
230 230 $ hg perfunidiff -c 1
231 231 $ hg perfunidiff --alldata 1
232 232 $ hg perfbookmarks
233 233 $ hg perfbranchmap
234 234 $ hg perfbranchmapload
235 235 $ hg perfbranchmapupdate --base "not tip" --target "tip"
236 236 benchmark of branchmap with 3 revisions with 1 new ones
237 237 $ hg perfcca
238 238 $ hg perfchangegroupchangelog
239 239 $ hg perfchangegroupchangelog --cgversion 01
240 240 $ hg perfchangeset 2
241 241 $ hg perfctxfiles 2
242 242 $ hg perfdiffwd
243 243 $ hg perfdirfoldmap
244 244 $ hg perfdirs
245 245 $ hg perfdirstate
246 246 $ hg perfdirstate --contains
247 247 $ hg perfdirstate --iteration
248 248 $ hg perfdirstatedirs
249 249 $ hg perfdirstatefoldmap
250 250 $ hg perfdirstatewrite
251 251 #if repofncache
252 252 $ hg perffncacheencode
253 253 $ hg perffncacheload
254 254 $ hg debugrebuildfncache
255 255 fncache already up to date
256 256 $ hg perffncachewrite
257 257 $ hg debugrebuildfncache
258 258 fncache already up to date
259 259 #endif
260 260 $ hg perfheads
261 261 $ hg perfignore
262 262 $ hg perfindex
263 263 $ hg perflinelogedits -n 1
264 264 $ hg perfloadmarkers
265 265 $ hg perflog
266 266 $ hg perflookup 2
267 267 $ hg perflrucache
268 268 $ hg perfmanifest 2
269 269 $ hg perfmanifest -m 44fe2c8352bb3a478ffd7d8350bbc721920134d1
270 270 $ hg perfmanifest -m 44fe2c8352bb
271 271 abort: manifest revision must be integer or full node
272 272 [255]
273 273 $ hg perfmergecalculate -r 3
274 274 $ hg perfmoonwalk
275 275 $ hg perfnodelookup 2
276 276 $ hg perfpathcopies 1 2
277 277 $ hg perfprogress --total 1000
278 278 $ hg perfrawfiles 2
279 279 $ hg perfrevlogindex -c
280 280 #if reporevlogstore
281 281 $ hg perfrevlogrevisions .hg/store/data/a.i
282 282 #endif
283 283 $ hg perfrevlogrevision -m 0
284 284 $ hg perfrevlogchunks -c
285 285 $ hg perfrevrange
286 286 $ hg perfrevset 'all()'
287 287 $ hg perfstartup
288 288 $ hg perfstatus
289 289 $ hg perfstatus --dirstate
290 290 $ hg perftags
291 291 $ hg perftemplating
292 292 $ hg perfvolatilesets
293 293 $ hg perfwalk
294 294 $ hg perfparents
295 295 $ hg perfdiscovery -q .
296 296
297 297 Test run control
298 298 ----------------
299 299
300 300 Simple single entry
301 301
302 302 $ hg perfparents --config perf.stub=no --config perf.run-limits='0.000000001-15'
303 303 ! wall * comb * user * sys * (best of 15) (glob)
304 304
305 305 Multiple entries
306 306
307 307 $ hg perfparents --config perf.stub=no --config perf.run-limits='500000-1, 0.000000001-50'
308 308 ! wall * comb * user * sys * (best of 50) (glob)
309 309
310 310 error cases are ignored
311 311
312 312 $ hg perfparents --config perf.stub=no --config perf.run-limits='500, 0.000000001-50'
313 313 malformatted run limit entry, missing "-": 500
314 314 ! wall * comb * user * sys * (best of 50) (glob)
315 315 $ hg perfparents --config perf.stub=no --config perf.run-limits='aaa-120, 0.000000001-50'
316 316 malformatted run limit entry, could not convert string to float: 'aaa': aaa-120
317 317 ! wall * comb * user * sys * (best of 50) (glob)
318 318 $ hg perfparents --config perf.stub=no --config perf.run-limits='120-aaaaaa, 0.000000001-50'
319 319 malformatted run limit entry, invalid literal for int() with base 10: 'aaaaaa': 120-aaaaaa
320 320 ! wall * comb * user * sys * (best of 50) (glob)
321 321
322 322 test actual output
323 323 ------------------
324 324
325 325 normal output:
326 326
327 327 $ hg perfheads --config perf.stub=no
328 328 ! wall * comb * user * sys * (best of *) (glob)
329 329
330 330 detailed output:
331 331
332 332 $ hg perfheads --config perf.all-timing=yes --config perf.stub=no
333 333 ! wall * comb * user * sys * (best of *) (glob)
334 334 ! wall * comb * user * sys * (max of *) (glob)
335 335 ! wall * comb * user * sys * (avg of *) (glob)
336 336 ! wall * comb * user * sys * (median of *) (glob)
337 337
338 338 test json output
339 339 ----------------
340 340
341 341 normal output:
342 342
343 343 $ hg perfheads --template json --config perf.stub=no
344 344 [
345 345 {
346 346 "comb": *, (glob)
347 347 "count": *, (glob)
348 348 "sys": *, (glob)
349 349 "user": *, (glob)
350 350 "wall": * (glob)
351 351 }
352 352 ]
353 353
354 354 detailed output:
355 355
356 356 $ hg perfheads --template json --config perf.all-timing=yes --config perf.stub=no
357 357 [
358 358 {
359 359 "avg.comb": *, (glob)
360 360 "avg.count": *, (glob)
361 361 "avg.sys": *, (glob)
362 362 "avg.user": *, (glob)
363 363 "avg.wall": *, (glob)
364 364 "comb": *, (glob)
365 365 "count": *, (glob)
366 366 "max.comb": *, (glob)
367 367 "max.count": *, (glob)
368 368 "max.sys": *, (glob)
369 369 "max.user": *, (glob)
370 370 "max.wall": *, (glob)
371 371 "median.comb": *, (glob)
372 372 "median.count": *, (glob)
373 373 "median.sys": *, (glob)
374 374 "median.user": *, (glob)
375 375 "median.wall": *, (glob)
376 376 "sys": *, (glob)
377 377 "user": *, (glob)
378 378 "wall": * (glob)
379 379 }
380 380 ]
381 381
382 382 Test pre-run feature
383 383 --------------------
384 384
385 385 (perf discovery has some spurious output)
386 386
387 387 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=0
388 388 ! wall * comb * user * sys * (best of 1) (glob)
389 389 searching for changes
390 390 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=1
391 391 ! wall * comb * user * sys * (best of 1) (glob)
392 392 searching for changes
393 393 searching for changes
394 394 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.pre-run=3
395 395 ! wall * comb * user * sys * (best of 1) (glob)
396 396 searching for changes
397 397 searching for changes
398 398 searching for changes
399 399 searching for changes
400 400 $ hg perf::bundle 'last(all(), 5)'
401 401 $ hg bundle --exact --rev 'last(all(), 5)' last-5.hg
402 402 4 changesets found
403 403 $ hg perf::unbundle last-5.hg
404 404
405 405
406 406 test profile-benchmark option
407 407 ------------------------------
408 408
409 409 Function to check that statprof ran
410 410 $ statprofran () {
411 411 > grep -E 'Sample count:|No samples recorded' > /dev/null
412 412 > }
413 413 $ hg perfdiscovery . --config perf.stub=no --config perf.run-limits='0.000000001-1' --config perf.profile-benchmark=yes 2>&1 | statprofran
414 414
415 415 Check perf.py for historical portability
416 416 ----------------------------------------
417 417
418 418 $ cd "$TESTDIR/.."
419 419
420 420 $ (testrepohg files -r 1.2 glob:mercurial/*.c glob:mercurial/*.py;
421 421 > testrepohg files -r tip glob:mercurial/*.c glob:mercurial/*.py) |
422 422 > "$TESTDIR"/check-perf-code.py contrib/perf.py
423 423 contrib/perf.py:\d+: (re)
424 424 > from mercurial import (
425 425 import newer module separately in try clause for early Mercurial
426 426 contrib/perf.py:\d+: (re)
427 427 > from mercurial import (
428 428 import newer module separately in try clause for early Mercurial
429 429 contrib/perf.py:\d+: (re)
430 430 > origindexpath = orig.opener.join(indexfile)
431 431 use getvfs()/getsvfs() for early Mercurial
432 432 contrib/perf.py:\d+: (re)
433 433 > origdatapath = orig.opener.join(datafile)
434 434 use getvfs()/getsvfs() for early Mercurial
435 435 contrib/perf.py:\d+: (re)
436 436 > vfs = vfsmod.vfs(tmpdir)
437 437 use getvfs()/getsvfs() for early Mercurial
438 438 contrib/perf.py:\d+: (re)
439 439 > vfs.options = getattr(orig.opener, 'options', None)
440 440 use getvfs()/getsvfs() for early Mercurial
441 441 [1]